networkcmd.py
#!/usr/bin/env python
"""
Copyright 2018 Allan Brand
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = 'Allan Brand'
__copyright__ = 'Copyright 2018'
__credits__ = ['Allan Brand']
__license__ = 'Apache v2.0'
__version__ = '0.9.0'
__maintainer__ = 'Allan Brand'
__email__ = 'allan.brand@gmail.com'
__status__ = 'Development'
import argparse
from getpass import getpass
import time, os, signal
from netmiko import Netmiko
from netmiko import ssh_exception
import threading
from queue import Queue
#
# Initialize some variables
###############################################
dev_list = []
cmd_list = []
nThreads = 8
eQueue = Queue()
tLock = threading.Lock()
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
# PORT and CMD_DELAY are derived from the parsed arguments below.
#
# Configure Command-line Arguments
###############################################
parser = argparse.ArgumentParser()
grpCmd = parser.add_mutually_exclusive_group(required=True)
grpHst = parser.add_mutually_exclusive_group(required=True)
parser.add_argument('-u', '--username',
help='define the username')
parser.add_argument('-P', '--protocol',
required=False,
choices=['ssh', 'telnet'],
default='ssh',
help='define connection protocol')
parser.add_argument('-p', '--port',
required=False,
type=int,
                    choices=range(1, 65536),
help='define the port number to connect to')
parser.add_argument('-w', '--wait',
required=False,
type=int,
help='define delay time for the next prompt')
parser.add_argument('-M', '--multithread',
                    action='store_true',
                    help='process commands on multiple devices simultaneously')
parser.add_argument('-l', '--log',
required=False,
help='define a logfile prefix')
#
# Functionality to be added later
# parser.add_argument('-a', '--append',
# required=False,
# action='store_true',
# help='log will be appended to existing file')
grpCmd.add_argument('-c', '--cmd',
help='define the command to send')
grpCmd.add_argument('-r', '--runfile',
help='define a file with a set of command to send')
grpHst.add_argument('-t', '--target',
help='define the hostname to connect')
grpHst.add_argument('-T', '--targetfile',
help='define a target file (one host per line)')
args = parser.parse_args()
PORT = args.port if args.port else 22
CMD_DELAY = args.wait if args.wait else 1
def single_SSH(ip):
    try:
        conn = Netmiko(host=ip, device_type='autodetect', username=uname, password=pword, auth_timeout=60, session_log=ip + '.log')
        conn.find_prompt()
        for cmd in cmd_list:
            conn.send_command(cmd)
            time.sleep(CMD_DELAY)
        conn.disconnect()
    except ssh_exception.NetmikoTimeoutException:
        return
    except ssh_exception.NetmikoAuthenticationException:
        os.kill(os.getpid(), signal.SIGUSR1)
def threaded_SSH(i, q):
    while True:
        ip = q.get()
        try:
            conn = Netmiko(host=ip, device_type='autodetect', username=uname, password=pword, auth_timeout=60, session_log=ip + '.log')
            conn.find_prompt()
            for cmd in cmd_list:
                conn.send_command(cmd)
                time.sleep(CMD_DELAY)
            conn.disconnect()
        except ssh_exception.NetmikoTimeoutException:
            q.task_done()
            continue
        except ssh_exception.NetmikoAuthenticationException:
            q.task_done()
            os.kill(os.getpid(), signal.SIGUSR1)
        q.task_done()
def single_Telnet(ip):
t = telnetlib.Telnet(ip)
t.read_until(b'Username:')
t.write(uname.encode('ascii') + b'\n')
if pword:
t.read_until(b'Password:')
t.write(pword.encode('ascii') + b'\n')
for cmd in cmd_list:
        t.write(cmd.encode('ascii') + b'\n')
time.sleep(CMD_DELAY)
print(t.read_all().decode('ascii'))
def threaded_Telnet(i, q):
    # Placeholder: multithreaded Telnet support is not implemented yet.
    pass
def Threaded_Operation():
for i in range(nThreads):
thread = threading.Thread(target=threaded_SSH, args=(i, eQueue,))
thread.daemon = True
thread.start()
    for d in dev_list:
eQueue.put(d)
eQueue.join()
if __name__ == "__main__":
#
# Gather login credentials
###############################################
if args.username:
uname = args.username
else:
uname = input('Username: ')
    pword = getpass()
#
# Gather targets to touch
###############################################
if args.target:
dev_list.append(args.target)
else:
with open(args.targetfile) as f:
for line in f:
                dev_list.append(line.strip())
#
# Gather commands to execute
###############################################
if args.cmd:
cmd_list.append(args.cmd)
else:
        with open(args.runfile) as f:
            for line in f:
                cmd_list.append(line.strip())
#
# Process commands
###############################################
if args.protocol == 'telnet':
import telnetlib
        if args.multithread:
            # Multithreaded Telnet is not implemented; process the targets serially instead.
            for d in dev_list:
                single_Telnet(d)
else:
single_Telnet(args.target)
else:
if args.multithread:
Threaded_Operation()
else:
single_SSH(args.target)
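#
# Example invocations (a minimal sketch with hypothetical hosts and files;
# assumes netmiko is installed and the devices are reachable):
#   python networkcmd.py -u admin -t 192.168.1.10 -c 'show version'
#   python networkcmd.py -u admin -T hosts.txt -r commands.txt -M -w 2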
dmm.py
# -*- coding: utf-8 -*-
import re, os
import threading
import time
from jinja2 import PackageLoader,Environment
from bs4 import BeautifulSoup
from queue import Queue
#from lxml import etree
from app.utils.func_requests import get_html_jp
from app.utils import Loadconfig
from selenium import webdriver
from app.utils.func_handler import logger
# Return the page count (and page title)
def findinfo(articleid,mode='uid'):
if mode=='link':
url = articleid
html = get_html_jp(url)
page1 = re.findall(r'/digital/videoa/-/list/=/.*/id=\d+/page=(\d+)/',html)
title = re.findall(r'<title>(.*) - エロ動画・アダルトビデオ - FANZA動画</title>',html)
elif mode=='uid':
url = "https://www.dmm.co.jp/digital/videoa/-/list/=/article=actress/id=%s/" %articleid
html = get_html_jp(url)
page1 = re.findall(r'/digital/videoa/-/list/=/article=actress/id=\d+/page=(\d+)/',html)
title = re.findall(r'<title>(.*) - エロ動画・アダルトビデオ - FANZA動画</title>',html)
if page1 == []:
page1 = 1
else:
page3 = []
for i in page1:
if i not in page3:
page3.append(int(i))
page4 = max(page3)
page1 = page4
return (page1,title[0])
# Generate the listing-page URLs
def producer(in_q,articleid, page,mode='uid'):
if mode=='uid':
url1 = "https://www.dmm.co.jp/digital/videoa/-/list/=/article=actress/id={}/".format(articleid)
in_q.put(url1)
for i in range(2, int(page)+1):
url = "https://www.dmm.co.jp/digital/videoa/-/list/=/article=actress/id={}/page={}/".format(articleid,i)
#print(url)
in_q.put(url)
elif mode=='link':
if articleid[-1]=='/':
url = articleid
else:
url = articleid +'/'
in_q.put(url)
for i in range(2, int(page)+1):
url1 = url + 'page={}/'.format(i)
in_q.put(url1)
# Worker that processes one listing page
def dmmcid(in_q, out_q):
    while not in_q.empty():
url = in_q.get()
#url = 'https://www.dmm.co.jp/digital/videoa/-/list/=/article=actress/id=1060823/'
html = get_html_jp(url)
list = re.findall(r'https://www.dmm.co.jp/digital/videoa/-/detail/=/cid=([_0-9a-z]+)/',html)
#print(url,list)
out_q.append(list)
in_q.task_done()
# Multithreaded cid collection
def dmm_thread(articleid):
start = time.time()
queue = Queue()
result_queue = []
matchline = re.search( r'https|www.dmm.co.jp', articleid, re.M|re.I)
if matchline:
page, title = findinfo(articleid,mode='link')
mode = 'link'
producer_thread = threading.Thread(target=producer, args=(queue, articleid, page,mode))
#producer_thread.daemon = True
producer_thread.start()
else:
page, title = findinfo(articleid)
producer_thread = threading.Thread(target=producer, args=(queue, articleid, page))
#producer_thread.daemon = True
producer_thread.start()
for index in range(int(page)+1):
consumer_thread = threading.Thread(target=dmmcid, args=(queue, result_queue))
consumer_thread.daemon = True
consumer_thread.start()
#print('开启线程数:' + str(threading.active_count()))
queue.join()
#print(page,result_queue)
resetlist = []
for i in result_queue:
try:
for n in i:
if n not in resetlist:
resetlist.append(n)
except TypeError:
if i not in resetlist:
resetlist.append(i)
#print(resetlist)
leng = len(resetlist)
alllist = ','.join(resetlist)
end = time.time()
usetime = str(end - start)
result = '%s - エロ動画・アダルトビデオ - FANZA動画\n(%s page)(%s cid) list =>\n%s' % (title,page,leng,alllist)
return (result,usetime)
# Collect information from a detail page
def ciddata(html):
notitle = 0
soup = BeautifulSoup(html,'lxml')
#print(soup)
try:
ifresult = re.findall(r'(指定されたページが見つかりません)', html)
noresult = '指定されたページが見つかりません'
except:
pass
try:
if noresult in ifresult:
notitle = 1
return (noresult, notitle)
except Exception:
pass
ciddata = {}
perfordata = {}
allper = soup.find(name='span', id='performer').find_all(href=re.compile("actress"))
for i in allper:
id = re.findall(r'/digital/videoa/-/list/=/article=actress/id=(.*)/',i.get('href'))
perfordata[id[0]] = i.string
    if perfordata:
ciddata['performers'] = perfordata
else:
ciddata['performers'] = '---'
body = soup.find('table', attrs = {'class':'mg-b20'})
allkey = body.find_all('a', href = re.compile('article=keyword'))
keyworddata = {}
for i in allkey:
id = re.findall(r'/digital/videoa/-/list/=/article=keyword/id=(.*)/',i.get('href'))
keyworddata[id[0]] = i.string
    if keyworddata:
ciddata['keyword'] = keyworddata
else:
ciddata['keyword'] = '---'
scoregif = body.find_all('img')
try:
score = re.findall(r'https://.*/(\d)_?(\d)?.gif',str(scoregif))[0]
ciddata['score'] = score
except:
ciddata['score'] = 'none'
try:
redkey = re.findall(r'<span class="red">(.*)</span>',html)[0]
titlebig = re.findall(r'<title>(.*)</title>',html)[0]
ciddata['title'] = redkey + ' ' + titlebig
except:
ciddata['title'] = '---'
notitle = 1
try:
ciddata['fanart_img'] = re.findall(r'<a href=\"(.*)\" target=\"_package\" name=\"package-image\"',html)[0]
except:
ciddata['fanart_img'] = '---'
try:
ciddata['distribute'] = re.findall(r'<td align=\"right\" valign=\"top\" class=\"nw\">配信開始日:</td>\n?<td>\n?(.*)</td>',html)[0]
except:
ciddata['distribute'] = '---'
try:
ciddata['release'] = re.findall(r'<td align=\"right\" valign=\"top\" class=\"nw\">商品発売日:</td>\n?<td>\n?(.*)</td>',html)[0]
except:
ciddata['release'] = '---'
try:
ciddata['time'] = re.findall(r'<td align=\"right\" valign=\"top\" class=\"nw\">収録時間:</td>\n?<td>(.*\n?.*)',html)[0].replace('\n','').replace('</td></tr>','')
except:
ciddata['time'] = '---'
try:
director = body.find_all('a', href = re.compile('article=director'))[0]
ciddata['director'] = director.string
id = re.findall(r'/digital/videoa/-/list/=/article=director/id=(.*)/',director.get('href'))[0]
ciddata['directorid'] = id
except:
ciddata['director'] = '---'
ciddata['directorid'] = '---'
try:
series = body.find_all('a', href = re.compile('article=series'))[0]
ciddata['series'] = series.string
id = re.findall(r'/digital/videoa/-/list/=/article=series/id=(.*)/',series.get('href'))[0]
ciddata['seriesid'] = id
except:
ciddata['series'] = '---'
ciddata['seriesid'] = '---'
try:
maker = body.find_all('a', href = re.compile('article=maker'))[0]
ciddata['maker'] = maker.string
id = re.findall(r'/digital/videoa/-/list/=/article=maker/id=(.*)/',maker.get('href'))[0]
ciddata['makerid'] = id
except:
ciddata['maker'] = '---'
ciddata['makerid'] = '---'
try:
label = body.find_all('a', href = re.compile('article=label'))[0]
ciddata['label'] = label.string
id = re.findall(r'/digital/videoa/-/list/=/article=label/id=(.*)/',label.get('href'))[0]
ciddata['labelid'] = id
except:
ciddata['label'] = '---'
ciddata['labelid'] = '---'
try:
ciddata['cid'] = re.findall(r'<td align=\"right\" valign=\"top\" class=\"nw\">品番:</td>[\s\S]*?<td>(.*?)</td>',html)[0]
except:
ciddata['cid'] = '---'
return (ciddata,notitle)
def template_cid(ciddataa):
ciddataa_performers = ciddataa.get('performers')
ciddataa_keyword = ciddataa.get('keyword')
#print(ciddataa_performers)
    env = Environment(loader=PackageLoader(__name__,"templates"))  # create a package loader for the bundled templates
    template = env.get_template('cid.md')  # load the template file
    temp_out = template.render(ciddata = ciddataa, ciddata_performers = ciddataa_performers, ciddata_keyword = ciddataa_keyword)
    #print(temp_out)  # render
return (temp_out)
#print(Substitute)
def dmmonecid(searchcid):
searchcid = searchcid.replace('-','00')
searchurl = 'https://www.dmm.co.jp/digital/videoa/-/detail/=/cid={}/'.format(searchcid)
html = get_html_jp(searchurl)
ciddataa,notitle = ciddata(html)
if ciddataa == '指定されたページが見つかりません':
return ciddataa,notitle
temp_out = template_cid(ciddataa)
return temp_out, notitle
def precid(searchcid):
searchurl = 'https://www.dmm.co.jp/mono/dvd/-/detail/=/cid={}/'.format(searchcid)
html = get_html_jp(searchurl)
soup = BeautifulSoup(html,'lxml')
    title = soup.title.string
    body = soup.find('table', attrs = {'class':'mg-b20'})
    red = soup.find('span',attrs = {'class' : 'red'}).string
    title = red + title
photo = soup.find('div',attrs = {'class' : 'tx10 pd-3 lh4'}).a.get('href')
pushdate = body.find_all('tr')[0].find('td').find_next_sibling().string
time = body.find_all('tr')[1].find('td').find_next_sibling().string
performer = body.find_all('tr')[2].find('td').find_next_sibling().span.a.string
num = body.find_all('tr')[-1].find('td').find_next_sibling().string
text = '''
`{}`
[DVD ]({})
*発売日:*{}
*収録時間:*{}
*出演者:*{}
*品番:* `{}`
[官方信息]({})
'''.format(title,photo,pushdate,time,performer,num,searchurl)
return text
def dmmsearch_data(searchstr):
#url = 'https://www.dmm.co.jp/digital/videoa/-/list/search/=/?searchstr=乙白さやか'
url = 'https://www.dmm.co.jp/digital/videoa/-/list/search/=/?searchstr={}'.format(searchstr)
html = get_html_jp(url)
    # Check whether the search returned any results
try:
result = re.findall(r'(選択した条件で商品は存在しませんでした)',html)
noresult = '選択した条件で商品は存在しませんでした'
except:
pass
try:
if noresult in result:
stitle = 1
return (noresult,stitle)
except Exception:
pass
soup = BeautifulSoup(html,'lxml')
searchbody = soup.find('div',attrs = {'class' : 'd-area'})
try:
stitle = re.findall(r'<title>(.*?)</title>',html)[0]
except Exception:
stitle = '検索結果'
boxall = searchbody.find_all('li',attrs = {'style' : 'width: 130px;'})
onebox = str(boxall).split('</div></li>')
boxlist = []
for box in onebox:
boxdict = {}
notitle = 0
if box:
try:
litetitle = re.findall(r'<span class=\"txt\">(.*?)</span>',box)[0]
#print(litetitle)
if litetitle == None:
notitle = 1
except:
notitle = 1
try:
cid = re.findall(r'https://www\.dmm\.co\.jp/.*?/cid=(\w+)/',box)[0]
boxdict['cid'] = cid
except Exception as e:
boxdict['cid'] = '-'
try:
keywords = re.findall(r'<span class=\"ico-st-\w+\"><span>(.*?)</span></span>',box)
keyword = ','.join(keywords)
boxdict['keyword'] = keyword
except:
boxdict['keyword'] = '-'
try:
links = re.findall(r'(https://www\.dmm\.co\.jp/.*?/cid=\w+/)',box)[0]
boxdict['links'] = links
except:
boxdict['links'] = '-'
try:
img = re.findall(r'<span class=\"img\"><img alt=\".*?\" src=\"(https://pics.dmm.co.jp/digital/video/\w+/\w+.jpg)\"/></span>',box)
boxdict['img'] = img[0]
except:
boxdict['img'] = '-'
try:
title = re.findall(r'<span class=\"img\"><img alt=\"(.*?)\" src=\"https://pics.dmm.co.jp/digital/video/\w+/\w+.jpg\"/></span>',box)
boxdict['title'] = title[0]
except:
boxdict['title'] = '-'
try:
sublinks = re.findall(r'span><a href=\"(.*?)\">.*?</a></span>',box)
sublink = 'https://www.dmm.co.jp' + sublinks[0]
boxdict['sublinks'] = sublink
except:
boxdict['sublinks'] = '-'
try:
subtexts = re.findall(r'<span><a href=\".*?\">(.*?)</a></span>',box)
boxdict['subtexts'] = subtexts[0]
except:
boxdict['subtexts'] = '-'
if notitle == 0:
#print(boxdict)
boxlist.append(boxdict)
return (boxlist,stitle)
def template_search(resultdataa,stitlee):
    env = Environment(loader=PackageLoader(__name__,"templates"))  # create a package loader for the bundled templates
    template = env.get_template('search.md')  # load the template file
    temp_out = template.render(resultdata = resultdataa,stitle = stitlee)
    #print(temp_out)  # render
return (temp_out)
def dmmsearch(searchstr,mode='temp'):
result, stitle = dmmsearch_data(searchstr)
if mode == 'onlysearch':
return result, stitle
noresult = '選択した条件で商品は存在しませんでした'
if result == noresult:
try_all = dmmsearchall(searchstr)
result = 'dmm动画无结果,尝试dmm全站搜索:\n%s'%try_all
return result
temp_out = template_search(result, stitle)
return temp_out
def dmmlinks_data(links):
#url = 'https://www.dmm.co.jp/digital/videoa/-/list/search/=/?searchstr=乙白さやか'
url = links
html = get_html_jp(url)
    # Check whether the search returned any results
soup = BeautifulSoup(html,'lxml')
searchbody = soup.find('div',attrs = {'class' : 'd-area'})
try:
stitle = re.findall(r'<title>(.*?)</title>',html)[0]
#print(stitle)
except Exception:
stitle = '検索結果'
boxall = searchbody.find_all('li',attrs = {'style' : 'width: 130px;'})
onebox = str(boxall).split('</div></li>')
boxlist = []
for box in onebox:
boxdict = {}
notitle = 0
if box:
try:
litetitle = re.findall(r'<span class=\"txt\">(.*?)</span>', box)[0]
# print(litetitle)
if litetitle == None:
notitle = 1
except:
notitle = 1
try:
cid = re.findall(r'https://www\.dmm\.co\.jp/.*?/cid=(\w+)/', box)[0]
boxdict['cid'] = cid
except Exception as e:
boxdict['cid'] = '-'
try:
keywords = re.findall(r'<span class=\"ico-\w+-\w+\"><span>(.*?)</span></span>', box)
keyword = ','.join(keywords)
boxdict['keyword'] = keyword
except:
boxdict['keyword'] = '-'
try:
links = re.findall(r'(https://www\.dmm\.co\.jp/.*?/cid=\w+/)', box)[0]
boxdict['links'] = links
except:
boxdict['links'] = '-'
try:
img = re.findall(r'(pics\.dmm\.co\.jp/.*?/\w+/\w+.jpg)',box)
boxdict['img'] = img[0]
except:
boxdict['img'] = '-'
try:
title = re.findall(
r'alt=\"(.*)\" src',
box)
boxdict['title'] = title[0]
except:
boxdict['title'] = '-'
try:
sublinks = re.findall(r'span><a href=\"(.*?)\">.*?</a></span>', box)
sublink = 'https://www.dmm.co.jp' + sublinks[0]
boxdict['sublinks'] = sublink
except:
boxdict['sublinks'] = '-'
try:
subtexts = re.findall(r'<span><a href=\".*?\">(.*?)</a></span>', box)
boxdict['subtexts'] = subtexts[0]
except:
boxdict['subtexts'] = '-'
if notitle == 0:
#print(boxdict)
boxlist.append(boxdict)
return (boxlist,stitle)
def template_links(resultdataa,stitlee):
    env = Environment(loader=PackageLoader(__name__,"templates"))  # create a package loader for the bundled templates
    template = env.get_template('search.md')  # load the template file
    temp_out = template.render(resultdata = resultdataa,stitle = stitlee)
    #print(temp_out)  # render
return (temp_out)
def dmmlinks(links):
result, stitle = dmmlinks_data(links)
#print(result, stitle)
temp_out = template_links(result, stitle)
return temp_out
# Multimedia collection
def prevideo(searchcid):
video1 = searchcid[0]
video3 = searchcid[0:3]
videobase = 'https://cc3001.dmm.co.jp/litevideo/freepv/{}/{}/{}/{}_dmb_w.mp4'.format(video1,video3,searchcid,searchcid)
return videobase
def prevideolow(searchcid):
video1 = searchcid[0]
video3 = searchcid[0:3]
videobase = 'https://cc3001.dmm.co.jp/litevideo/freepv/{}/{}/{}/{}_sm_w.mp4'.format(video1,video3,searchcid,searchcid)
return videobase
def prephotos(searchurl):
#print(searchurl)
html = get_html_jp(searchurl)
soup = BeautifulSoup(html,'lxml')
photourlss = soup.find_all('img', attrs = {'class':'mg-b6'})
photourls = re.findall(r'(https://pics.dmm.co.jp/digital/video/.*?/.*?.jpg)', str(photourlss))
photolist = list(photourls)
#print(photolist)
jpg = []
for i in photolist:
ii = list(i)
ii.insert(-6,'jp')
iii = ''.join(ii)
iii = iii.replace('-jp','jp-',1)
jpg.append(iii)
return (jpg)
def truevideo(searchcid):
ifproxy = Loadconfig.ifproxy
proxy = Loadconfig.proxy
system = Loadconfig.system
    # Configure the browser
    options = webdriver.ChromeOptions()
    # Chrome headless mode
    options.add_argument('--headless')
    options.add_argument('--disable-gpu')
    options.add_argument('--no-sandbox')
    options.add_argument('--disable-software-rasterizer')
    # Set the language
    options.add_argument('lang=ja_JP.UTF-8')
    # Override the User-Agent header
    options.add_argument('user-agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36"')
    # Configure the proxy
if ifproxy == 'true':
options.add_argument('proxy-server=' + proxy)
if system == 'Linux':
try:
browser = webdriver.Chrome(options=options)
except:
browser = webdriver.Chrome(executable_path=os.path.abspath(os.path.join("app", "bin","chromedriver")),options=options)
elif system == 'Windows':
browser = webdriver.Chrome(executable_path=os.path.abspath(os.path.join("app", "bin","chromedriver.exe")),options=options)
#browser.set_page_load_timeout(5)
    browser.implicitly_wait(5)
    videourl = ''
try:
url = 'https://www.dmm.co.jp/digital/videoa/-/detail/ajax-movie/=/cid={}'.format(searchcid)
browser.get(url)
browser.switch_to.default_content()
browser.switch_to.frame('DMMSample_player_now')
video = browser.find_element_by_xpath("//video[@data-binding='play']")
videourl = browser.execute_script("return arguments[0].currentSrc;",video)
#print(browser.page_source)
except Exception as e:
logger.error('video区获取失败:'+ str(e))
try:
url = 'https://www.dmm.co.jp/mono/dvd/-/detail/ajax-movie/=/cid={}'.format(searchcid)
browser.get(url)
checkbox = browser.find_element_by_css_selector("[class='ageCheck__link ageCheck__link--r18']")
checkbox.click()
browser.switch_to.default_content()
browser.switch_to.frame('DMMSample_player_now')
video = browser.find_element_by_xpath("//video[@data-binding='play']")
videourl = browser.execute_script("return arguments[0].currentSrc;",video)
except Exception as e:
logger.error('dvd区获取失败'+ str(e))
logger.info('selenium:'+ videourl)
browser.quit()
return videourl
def dmmsearchall_data(searchstr):
#url = 'https://www.dmm.co.jp/digital/videoa/-/list/search/=/?searchstr=乙白さやか'
url = 'https://www.dmm.co.jp/search/=/searchstr={}/sort=rankprofile/'.format(searchstr)
html = get_html_jp(url)
    # Check whether the search returned any results
result = re.findall(r'(に一致する商品は見つかりませんでした。)',html)
noresult = 'に一致する商品は見つかりませんでした。'
try:
if noresult in result:
stitle = 1
return (noresult,stitle)
except Exception:
pass
soup = BeautifulSoup(html,'lxml')
searchbody = soup.find('div',attrs = {'class' : 'd-area'})
try:
stitle = re.findall(r'<title>(.*?)</title>',html)[0]
except Exception:
stitle = '検索結果'
boxall = searchbody.find('div',attrs = {'class' : 'd-sect'})
onebox = str(boxall).split('<div>')
boxlist = []
for box in onebox:
boxdict = {}
notitle = 0
if box:
try:
litetitle = re.findall(r'<span class=\"txt\">(.*?)</span>',box)[0]
#print(litetitle)
if litetitle == None:
notitle = 1
except:
notitle = 1
try:
cid = re.findall(r'<a href=\"https://www\.dmm\.co\.jp/.*?/cid=(\w+)/\?.*?\">',box)[0]
boxdict['cid'] = cid
except:
boxdict['cid'] = '-'
try:
keywords = re.findall(r'<span class=\"ico-\w+-\w+\"><span>(.*?)</span></span>',box)
keyword = ','.join(keywords)
boxdict['keyword'] = keyword
except:
boxdict['keyword'] = '-'
try:
links = re.findall(r'<a href=\"(https://www\.dmm\.co\.jp/.*?-/detail/=/cid=\w+/\?.*?)\">',box)[0]
boxdict['links'] = links
except:
boxdict['links'] = '-'
try:
img = re.findall(r'(pics\.dmm\.co\.jp/.*?/\w+/\w+.jpg)',box)[0]
boxdict['img'] = img
except Exception as e:
boxdict['img'] = '-'
try:
title = re.findall(r'alt=\"(.*)\" src',box)[0]
boxdict['title'] = title
except Exception as e:
boxdict['title'] = '-'
try:
sublinks = re.findall(r'<span><a href=\"(.*?)\">.*?</a></span>',box)
boxdict['sublinks'] = sublinks[0]
except Exception as e:
boxdict['sublinks'] = '-'
try:
subtexts = re.findall(r'<span><a href=\".*?\">(.*?)</a></span>',box)[0]
boxdict['subtexts'] = subtexts
except:
boxdict['subtexts'] = '-'
if notitle == 0:
#print(boxdict)
boxlist.append(boxdict)
return (boxlist,stitle)
def template_searchall(resultdataa,stitlee):
    env = Environment(loader=PackageLoader(__name__,"templates"))  # create a package loader for the bundled templates
    template = env.get_template('searchall.md')  # load the template file
    temp_out = template.render(resultdata = resultdataa,stitle = stitlee)
    #print(temp_out)  # render
return (temp_out)
def dmmsearchall(searchstr,mode='temp'):
result, stitle = dmmsearchall_data(searchstr)
if mode == 'onlysearch':
return result, stitle
noresult = 'に一致する商品は見つかりませんでした。'
if result == noresult:
noresult = '全站搜索无结果:%s\n建议关键词之间多加空格,番号字母与数字之间加空格重新搜索'%noresult
return noresult
temp_out = template_searchall(result, stitle)
return temp_out
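#
# Example usage (a minimal sketch; the ids/cids are hypothetical and the helpers
# assume get_html_jp and this package's Jinja2 templates are importable):
# result, usetime = dmm_thread('1060823')   # collect every cid for an actress id
# markdown, notitle = dmmonecid('abc-001')  # render detail info for one cid
# rendered = dmmsearch('keyword')           # keyword search rendered via search.md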
test_tcp.py
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Thomas Jackson <jacksontj.89@gmail.com>`
'''
# Import python libs
from __future__ import absolute_import
import os
import threading
import tornado.gen
import tornado.ioloop
from tornado.testing import AsyncTestCase
import salt.config
import salt.ext.six as six
import salt.utils
import salt.utils.process
import salt.transport.server
import salt.transport.client
import salt.exceptions
# Import Salt Testing libs
from tests.support.unit import TestCase, skipIf
import tests.integration as integration
# Import Salt libs
from tests.unit.transport.test_req import ReqChannelMixin
from tests.unit.transport.test_pub import PubChannelMixin
# TODO: move to a library?
def get_config_file_path(filename):
return os.path.join(integration.TMP, 'config', filename)
class BaseTCPReqCase(TestCase):
'''
Test the req server/client pair
'''
@classmethod
def setUpClass(cls):
if not hasattr(cls, '_handle_payload'):
return
cls.master_opts = salt.config.master_config(get_config_file_path('master'))
cls.master_opts.update({
'transport': 'tcp',
'auto_accept': True,
})
cls.minion_opts = salt.config.minion_config(get_config_file_path('minion'))
cls.minion_opts.update({
'transport': 'tcp',
'master_uri': 'tcp://127.0.0.1:{0}'.format(cls.minion_opts['master_port']),
})
cls.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager')
cls.server_channel = salt.transport.server.ReqServerChannel.factory(cls.master_opts)
cls.server_channel.pre_fork(cls.process_manager)
cls.io_loop = tornado.ioloop.IOLoop()
cls.io_loop.make_current()
cls.server_channel.post_fork(cls._handle_payload, io_loop=cls.io_loop)
cls.server_thread = threading.Thread(target=cls.io_loop.start)
cls.server_thread.daemon = True
cls.server_thread.start()
@classmethod
def tearDownClass(cls):
if not hasattr(cls, '_handle_payload'):
return
cls.io_loop.stop()
cls.server_thread.join()
cls.process_manager.kill_children()
cls.server_channel.close()
del cls.server_channel
@classmethod
@tornado.gen.coroutine
def _handle_payload(cls, payload):
'''
TODO: something besides echo
'''
raise tornado.gen.Return((payload, {'fun': 'send_clear'}))
@skipIf(salt.utils.is_darwin(), 'hanging test suite on MacOS')
class ClearReqTestCases(BaseTCPReqCase, ReqChannelMixin):
'''
Test all of the clear msg stuff
'''
def setUp(self):
self.channel = salt.transport.client.ReqChannel.factory(self.minion_opts, crypt='clear')
@classmethod
@tornado.gen.coroutine
def _handle_payload(cls, payload):
'''
TODO: something besides echo
'''
raise tornado.gen.Return((payload, {'fun': 'send_clear'}))
@skipIf(salt.utils.is_darwin(), 'hanging test suite on MacOS')
class AESReqTestCases(BaseTCPReqCase, ReqChannelMixin):
def setUp(self):
self.channel = salt.transport.client.ReqChannel.factory(self.minion_opts)
@classmethod
@tornado.gen.coroutine
def _handle_payload(cls, payload):
'''
TODO: something besides echo
'''
raise tornado.gen.Return((payload, {'fun': 'send'}))
# TODO: make failed returns have a specific framing so we can raise the same exception
# on encrypted channels
def test_badload(self):
'''
Test a variety of bad requests, make sure that we get some sort of error
'''
msgs = ['', [], tuple()]
for msg in msgs:
with self.assertRaises(salt.exceptions.AuthenticationError):
ret = self.channel.send(msg)
class BaseTCPPubCase(AsyncTestCase):
'''
Test the req server/client pair
'''
@classmethod
def setUpClass(cls):
cls.master_opts = salt.config.master_config(get_config_file_path('master'))
cls.master_opts.update({
'transport': 'tcp',
'auto_accept': True,
})
cls.minion_opts = salt.config.minion_config(get_config_file_path('minion'))
cls.minion_opts.update({
'transport': 'tcp',
'master_ip': '127.0.0.1',
'auth_timeout': 1,
'master_uri': 'tcp://127.0.0.1:{0}'.format(cls.minion_opts['master_port']),
})
cls.process_manager = salt.utils.process.ProcessManager(name='ReqServer_ProcessManager')
cls.server_channel = salt.transport.server.PubServerChannel.factory(cls.master_opts)
cls.server_channel.pre_fork(cls.process_manager)
# we also require req server for auth
cls.req_server_channel = salt.transport.server.ReqServerChannel.factory(cls.master_opts)
cls.req_server_channel.pre_fork(cls.process_manager)
cls._server_io_loop = tornado.ioloop.IOLoop()
cls.req_server_channel.post_fork(cls._handle_payload, io_loop=cls._server_io_loop)
cls.server_thread = threading.Thread(target=cls._server_io_loop.start)
cls.server_thread.start()
@classmethod
def _handle_payload(cls, payload):
'''
TODO: something besides echo
'''
return payload, {'fun': 'send_clear'}
@classmethod
def tearDownClass(cls):
cls._server_io_loop.stop()
cls.server_thread.join()
cls.process_manager.kill_children()
cls.req_server_channel.close()
del cls.req_server_channel
def setUp(self):
super(BaseTCPPubCase, self).setUp()
self._start_handlers = dict(self.io_loop._handlers)
def tearDown(self):
super(BaseTCPPubCase, self).tearDown()
failures = []
for k, v in six.iteritems(self.io_loop._handlers):
if self._start_handlers.get(k) != v:
failures.append((k, v))
if len(failures) > 0:
raise Exception('FDs still attached to the IOLoop: {0}'.format(failures))
@skipIf(True, 'Skip until we can devote time to fix this test')
class AsyncPubChannelTest(BaseTCPPubCase, PubChannelMixin):
'''
Tests around the publish system
'''
worker_command.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Worker command"""
import os
import signal
import sys
from multiprocessing import Process
from typing import Optional
import daemon
from daemon.pidfile import TimeoutPIDLockFile
from airflow import settings
from airflow.configuration import conf
from airflow.utils import cli as cli_utils
from airflow.utils.cli import setup_locations, setup_logging, sigint_handler
from airflow.utils.serve_logs import serve_logs
def _serve_logs(skip_serve_logs: bool = False) -> Optional[Process]:
"""Starts serve_logs sub-process"""
if skip_serve_logs is False:
sub_proc = Process(target=serve_logs)
sub_proc.start()
return sub_proc
return None
@cli_utils.action_logging
def worker(args):
"""Starts Airflow Celery worker"""
env = os.environ.copy()
env['AIRFLOW_HOME'] = settings.AIRFLOW_HOME
if not settings.validate_session():
print("Worker exiting... database connection precheck failed! ")
sys.exit(1)
# Celery worker
from airflow.executors.celery_executor import app as celery_app
from celery.bin import worker # pylint: disable=redefined-outer-name
autoscale = args.autoscale
skip_serve_logs = args.skip_serve_logs
if autoscale is None and conf.has_option("celery", "worker_autoscale"):
autoscale = conf.get("celery", "worker_autoscale")
worker = worker.worker(app=celery_app) # pylint: disable=redefined-outer-name
options = {
'optimization': 'fair',
'O': 'fair',
'queues': args.queues,
'concurrency': args.concurrency,
'autoscale': autoscale,
'hostname': args.celery_hostname,
'loglevel': conf.get('core', 'LOGGING_LEVEL'),
}
if conf.has_option("celery", "pool"):
options["pool"] = conf.get("celery", "pool")
if args.daemon:
pid, stdout, stderr, log_file = setup_locations("worker",
args.pid,
args.stdout,
args.stderr,
args.log_file)
handle = setup_logging(log_file)
stdout = open(stdout, 'w+')
stderr = open(stderr, 'w+')
ctx = daemon.DaemonContext(
pidfile=TimeoutPIDLockFile(pid, -1),
files_preserve=[handle],
stdout=stdout,
stderr=stderr,
)
with ctx:
sub_proc = _serve_logs(skip_serve_logs)
worker.run(**options)
stdout.close()
stderr.close()
else:
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGTERM, sigint_handler)
sub_proc = _serve_logs(skip_serve_logs)
worker.run(**options)
if sub_proc:
sub_proc.terminate()
mainProducer.py
import threading
from socketProducer import SocketProducer
from rabbitPublishFromFile import RabbitPublishFromFile
from socketPublishFromFile import SocketPublishFromFile
from folderListener import Listener
from pika.exchange_type import ExchangeType
from serverSocket import Server
import sys
from rabbitProducer import RabbitProducer
from extractFiles import ExtractFiles
if __name__ == '__main__':
extractor_files = ExtractFiles()
exchange = 'exchange1'
routing_key = ''
exchange_type = ExchangeType.topic
_dir = r"C:\dev\integratedSystem\images"
backup_dir = r"C:\dev\integratedSystem\all_images"
_chunk_size = 20000
work_with = sys.argv[1]
threads = []
producers = []
publishers = []
if work_with == 'rabbit':
for i in range(1, 3):
producer = RabbitProducer(exchange, routing_key, exchange_type)
producers.append(producer)
publishers.append(RabbitPublishFromFile(i, backup_dir, _chunk_size, producer))
elif work_with == 'socket':
server = Server()
threads.append(threading.Thread(target=server.run))
for i in range(1, 3):
producer = SocketProducer(server)
producers.append(producer)
publishers.append(SocketPublishFromFile(i, backup_dir, _chunk_size, producer))
try:
listener = Listener(_dir, backup_dir, publishers, extractor_files.handler_files)
for producer in producers:
threads.append(threading.Thread(target=producer.run))
threads.append(threading.Thread(target=listener.run))
except NameError as e:
print("the parameter in args must be 'rabbit' or 'socket'!")
for thread in threads:
thread.start()
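#
# Example invocation (a minimal sketch; the directories above are hard-coded
# for this setup, and RabbitMQ / the socket server are assumed to be running):
#   python mainProducer.py rabbit
#   python mainProducer.py socket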
job_manager.py
import threading as th
import time, logging, importlib
import inspect
import os
import sqlite3
from datetime import datetime
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] (%(threadName)-10s) %(message)s',)
def mk_thread(function):
def wrapper(*args, **kwargs):
thread = th.Thread(target=function, args=args, kwargs=kwargs)
thread.start()
return thread
return wrapper
class JobManager():
"""For general documentation see presentation on github."""
def __init__(self):
# Number of exp run (so one readout is on experiment).
self.number_of_meas_run = 0
self.station = None
# Array with init/readout elements. These arrays contain a dict with an identifier and the readout/init library
self.init_elements = []
self.readout_elements = []
# Array containing the active sequences that can be played.
self.active_elements = []
# Array containing all the active calibration routines. Same data structure as for init/readout list.
self.calib_elements = []
self.calib_to_run = []
self.queue = []
self.__check_for_calibration_routines()
self.__manage_jobs()
def add_init_element(self, location):
# Adds the location where a class that makes the init element can be found. Path can be absolute/relative
# This can be a file or a directory.
# Location: str
# Location: list with type string
self.init_elements = self.__import_files(location, self.init_elements)
def add_readout_element(self, location):
self.readout_elements = self.__import_files(location, self.readout_elements)
def add_calib_elements(self, location):
self.calib_elements = self.__import_files(location, self.calib_elements)
def add_job(self, job):
# Check job class is correct:
# add to queue
self.queue.append(job)
@mk_thread
def __check_for_calibration_routines(self):
# Function that checks if a calibration needs to be done -- called automatically
self.do_calib = 0
while self.do_calib < 10:
lock = th.Lock()
lock.acquire()
for i in self.init_elements:
if i[2].do_calib():
self.calib_to_run.append(i[2])
for i in self.readout_elements:
if i[2].do_calib():
self.calib_to_run.append(i[2])
for i in self.calib_elements:
if i[2].do_calib():
self.calib_to_run.append(i[2])
lock.release()
time.sleep(0.1)
self.do_calib += 1
def __import_files(self,location, already_imported):
# simple function that import files
# locations list(), type str
# already import list(), type dict
# Convert to wanted data type
if type(location) != list:
location = [location]
files_to_load = []
# Get all python files (if input is a dir).
for i in location:
if os.path.isdir(i) == True:
for file in os.listdir(i):
if file.endswith('.py'):
files_to_load.append(os.path.splitext(file)[0])
elif os.path.isfile(i) == True:
if i.endswith('.py'):
files_to_load.append(os.path.splitext(i)[0])
else:
print('Error: invalid file given.')
# import the libraries.
for file in files_to_load:
try:
my_mod = importlib.import_module(file)
except:
print(file + " could not be imported")
continue
# Check if identifier is there.
try:
mod_id = my_mod.identifier
except:
print("Error loading the " + file + " module, does this module have no identiefier?")
continue
# check if identifier is unique:
unique = True
for i in already_imported:
# print(i[0], mod_id)
if i[0] == mod_id and i[1] == my_mod:
print(file + ".py is already imported.")
unique = False
break
elif i[0] == mod_id:
print("Identifier of the imported module is not unique,\nin " + i[1].__file__+ " the same identifier is used.")
unique = False
break
# Append to array.
if unique == True:
already_imported.append((mod_id,my_mod,my_mod.__dict__[mod_id]()))
print("Imported " + my_mod.__file__)
return already_imported
@mk_thread
def __manage_jobs(self):
# placeholder for class that contains a thread that does an experiment.
self.current_job = None
j = 0
while j < 10:
for i in self.calib_to_run[:]:
# Run only while no meas are running/run while meas in case of a priority_job
if self.current_job == None:
i.calibrate()
self.calib_to_run.remove(i)
elif i.during_exp() == True:
self.current_job.pause()
i.calibrate()
self.calib_to_run.remove(i)
self.current_job.resume()
if self.queue:
self.current_job = self.queue[0]
self.current_job.start()
if self.current_job!=None:
# refreshed after job is done.
self.number_of_meas_run += self.current_job.get_num_jobs_completed()
if self.current_job.finished() == True:
# Stop all running threads
self.current_job = None
self.queue.pop(0)
time.sleep(0.1)
j+= 1
# a = JobManager()
# a.station = 'mystation'
# a.add_calib_elements('readout_Test.py')
# # a.add_readout_element('readout_Test2.py')
# a.add_init_element('init_test.py')
# # a.add_init_element('.')
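# Sketch of the job object __manage_jobs expects (hypothetical class, derived
# from the calls made above, not part of this file):
# class ExampleJob:
#     def start(self): ...
#     def pause(self): ...
#     def resume(self): ...
#     def finished(self): return True
#     def get_num_jobs_completed(self): return 1
# a.add_job(ExampleJob())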
import_thread.py
from collections import defaultdict
import threading
import traceback
import redis
import grpc
import ray
from ray import ray_constants
from ray import cloudpickle as pickle
import ray._private.profiling as profiling
import logging
logger = logging.getLogger(__name__)
class ImportThread:
"""A thread used to import exports from the driver or other workers.
Attributes:
worker: the worker object in this process.
mode: worker mode
redis_client: the redis client used to query exports.
threads_stopped (threading.Event): A threading event used to signal to
the thread that it should exit.
imported_collision_identifiers: This is a dictionary mapping collision
identifiers for the exported remote functions and actor classes to
the number of times that collision identifier has appeared. This is
used to provide good error messages when the same function or class
is exported many times.
"""
def __init__(self, worker, mode, threads_stopped):
self.worker = worker
self.mode = mode
self.redis_client = worker.redis_client
self.gcs_client = worker.gcs_client
self.threads_stopped = threads_stopped
self.imported_collision_identifiers = defaultdict(int)
# Keep track of the number of imports that we've imported.
self.num_imported = 0
def start(self):
"""Start the import thread."""
self.t = threading.Thread(target=self._run, name="ray_import_thread")
# Making the thread a daemon causes it to exit
# when the main thread exits.
self.t.daemon = True
self.t.start()
def join_import_thread(self):
"""Wait for the thread to exit."""
self.t.join()
def _run(self):
import_pubsub_client = self.redis_client.pubsub()
# Exports that are published after the call to
# import_pubsub_client.subscribe and before the call to
# import_pubsub_client.listen will still be processed in the loop.
import_pubsub_client.subscribe("__keyspace@0__:Exports")
try:
self._do_importing()
while True:
# Exit if we received a signal that we should stop.
if self.threads_stopped.is_set():
return
msg = import_pubsub_client.get_message()
if msg is None:
self.threads_stopped.wait(timeout=0.01)
continue
if msg["type"] == "subscribe":
continue
self._do_importing()
except (OSError, redis.exceptions.ConnectionError, grpc.RpcError) as e:
logger.error(f"ImportThread: {e}")
finally:
# Close the pubsub client to avoid leaking file descriptors.
import_pubsub_client.close()
def _do_importing(self):
while True:
export_key = ray._private.function_manager.make_export_key(
self.num_imported + 1)
key = self.gcs_client.internal_kv_get(
export_key, ray_constants.KV_NAMESPACE_FUNCTION_TABLE)
if key is not None:
self._process_key(key)
self.num_imported += 1
else:
break
def _get_import_info_for_collision_detection(self, key):
"""Retrieve the collision identifier, type, and name of the import."""
if key.startswith(b"RemoteFunction"):
collision_identifier, function_name = self._internal_kv_multiget(
key, ["collision_identifier", "function_name"])
return (collision_identifier,
ray._private.utils.decode(function_name.encode()),
"remote function")
elif key.startswith(b"ActorClass"):
collision_identifier, class_name = self._internal_kv_multiget(
key, ["collision_identifier", "class_name"])
return collision_identifier, ray._private.utils.decode(
class_name.encode()), "actor"
def _process_key(self, key):
"""Process the given export key from redis."""
if self.mode != ray.WORKER_MODE:
# If the same remote function or actor definition appears to be
# exported many times, then print a warning. We only issue this
# warning from the driver so that it is only triggered once instead
# of many times. TODO(rkn): We may want to push this to the driver
# through Redis so that it can be displayed in the dashboard more
# easily.
if (key.startswith(b"RemoteFunction")
or key.startswith(b"ActorClass")):
collision_identifier, name, import_type = (
self._get_import_info_for_collision_detection(key))
self.imported_collision_identifiers[collision_identifier] += 1
if (self.imported_collision_identifiers[collision_identifier]
== ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD):
logger.warning(
"The %s '%s' has been exported %s times. It's "
"possible that this warning is accidental, but this "
"may indicate that the same remote function is being "
"defined repeatedly from within many tasks and "
"exported to all of the workers. This can be a "
"performance issue and can be resolved by defining "
"the remote function on the driver instead. See "
"https://github.com/ray-project/ray/issues/6240 for "
"more discussion.", import_type, name,
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD)
if key.startswith(b"RemoteFunction"):
# TODO (Alex): There's a race condition here if the worker is
# shutdown before the function finished registering (because core
# worker's global worker is unset before shutdown and is needed
# for profiling).
# with profiling.profile("register_remote_function"):
(self.worker.function_actor_manager.
fetch_and_register_remote_function(key))
elif key.startswith(b"FunctionsToRun"):
with profiling.profile("fetch_and_run_function"):
self.fetch_and_execute_function_to_run(key)
elif key.startswith(b"ActorClass"):
# Keep track of the fact that this actor class has been
# exported so that we know it is safe to turn this worker
# into an actor of that class.
self.worker.function_actor_manager.imported_actor_classes.add(key)
with self.worker.function_actor_manager.cv:
# Function manager may be waiting on actor class to be
# loaded for deserialization, notify it to wake up and
# check if the actor class it was looking for is loaded
self.worker.function_actor_manager.cv.notify_all()
# TODO(rkn): We may need to bring back the case of
# fetching actor classes here.
else:
assert False, "This code should be unreachable."
def fetch_and_execute_function_to_run(self, key):
"""Run on arbitrary function on the worker."""
(job_id, serialized_function) = self._internal_kv_multiget(
key, ["job_id", "function"])
if self.worker.mode == ray.SCRIPT_MODE:
return
if ray_constants.ISOLATE_EXPORTS and \
job_id != self.worker.current_job_id.binary():
return
try:
# FunctionActorManager may call pickle.loads at the same time.
# Importing the same module in different threads causes deadlock.
with self.worker.function_actor_manager.lock:
# Deserialize the function.
function = pickle.loads(serialized_function)
# Run the function.
function({"worker": self.worker})
except Exception:
# If an exception was thrown when the function was run, we record
# the traceback and notify the scheduler of the failure.
traceback_str = traceback.format_exc()
# Log the error message.
ray._private.utils.push_error_to_driver(
self.worker,
ray_constants.FUNCTION_TO_RUN_PUSH_ERROR,
traceback_str,
job_id=ray.JobID(job_id))
def _internal_kv_multiget(self, key, fields):
vals = self.gcs_client.internal_kv_get(
key, ray_constants.KV_NAMESPACE_FUNCTION_TABLE)
if vals is None:
vals = {}
else:
vals = pickle.loads(vals)
return (vals.get(field) for field in fields)
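#
# Typical lifecycle (a minimal sketch derived from the methods above; `worker`
# is the Ray worker object that owns the redis/gcs clients and the
# function_actor_manager, and `threads_stopped` is a threading.Event):
# t = ImportThread(worker, ray.WORKER_MODE, threads_stopped)
# t.start()
# ...
# threads_stopped.set()
# t.join_import_thread()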
template_03.py
__author__ = "Wenjie Chen"
__email__ = "wc2685@columbia.edu"
import os
import time
from multiprocessing import Process
from phonenumbers.phonenumberutil import region_code_for_country_code
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
from pyspark.streaming import StreamingContext
STORE_DIR = os.path.join(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), "res")
class template_03:
"""
The third template to analyze the country of callednumber
"""
def __init__(self, IP="localhost", interval=10, port=9003):
self.sc = SparkContext.getOrCreate(SparkConf().setMaster("local[2]"))
# create sql context, used for saving rdd
self.sql_context = SparkSession(self.sc)
# create the Streaming Context from the above spark context with batch interval size (seconds)
self.ssc = StreamingContext(self.sc, 1)
self.IP = IP
self.interval = interval
self.port = port
# read data from port
self.lines = self.ssc.socketTextStream(self.IP, self.port)
def __str__(self):
pass
def count_duration(self):
def updateFunc(new_values, last_sum):
return sum(new_values) + (last_sum or 0)
# Drop all invalid data. TODO
callednumber = self.lines.map(lambda x: (x.split("|")[3]))
place = callednumber.map(lambda x: region_code_for_country_code(
int(x.split("-")[0].split("+")[1])))
place_count = place.map(lambda place: (place, 1)).reduceByKey(
lambda x, y: x + y).updateStateByKey(updateFunc)
place_count.pprint()
place_count.foreachRDD(
lambda rdd: rdd.sortBy(lambda x: x[0]).toDF().toPandas().to_json(
os.path.join(STORE_DIR, "tmp3",
"region.json")) if not rdd.isEmpty() else None)
def template_3_main():
test_temp_3 = template_03(IP="localhost", port=9003)
test_temp_3.count_duration()
test_temp_3.ssc.checkpoint(
os.path.join(os.path.dirname(STORE_DIR), "checkpoints-3"))
test_temp_3.ssc.start()
print("Start process 3 for template 3")
time.sleep(60)
# test_temp_0.ssc.stop(stopSparkContext=False, stopGraceFully=True)
test_temp_3.ssc.awaitTermination() # used for real time
if __name__ == '__main__':
p3 = Process(target=template_3_main)
p3.start()
print("Wait for terminated")
p3.join()
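#
# Input format note (hypothetical sample record; only field index 3, the called
# number, is consumed above): for a line like "a|b|c|+81-9012345678|d",
# split("|")[3] yields "+81-9012345678" and region_code_for_country_code(81)
# maps it to "JP".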
game.py
import random as rd
from time import sleep, time
from threading import Thread, Lock
import pygame
from load_image import load_image
from run_with_fps import run_with_fps, ExitLoop
from fatal_exceptions import fatal_exceptions
from enemy import Enemy
from player import Player
from health_osd import HealthOSD
from game_over import GameOver
MAX_ENEMY_HP = 5
MIN_ENEMY_HP = 3
class Game:
def __init__(self, window_title):
# Object initialization
self.lock = Lock()
self.do_quit = False
self.sprites = pygame.sprite.Group()
self.add_queue = []
# Pygame initialization
pygame.init()
self.size = self.width, self.height = 800, 600
self.screen = pygame.display.set_mode(self.size)
pygame.display.set_caption(window_title)
# Sprites initialization
self.player = Player(game=self, gravity=1700, pos=[300, 200])
self.sprites.add(self.player)
self.health_osd = HealthOSD(game=self, pos=[620, 18])
self.sprites.add(self.health_osd)
self.last_mob_spawn = None
def maybe_spawn_mobs(self):
SPAWN_MOB_EACH = 2.0
cur_time = time()
if self.last_mob_spawn is None:
self.last_mob_spawn = cur_time
return False
if cur_time - self.last_mob_spawn < SPAWN_MOB_EACH:
return False
self.last_mob_spawn = cur_time
self.sprites.add(
Enemy(
health=rd.randint(MIN_ENEMY_HP, MAX_ENEMY_HP),
image='enemy.png',
game=self,
gravity=1700,
pos=[rd.randint(0, 800), 0]
)
)
return True
def draw(self):
with self.lock:
if self.do_quit:
raise ExitLoop()
self.screen.fill((0, 0, 0))
self.sprites.draw(self.screen)
pygame.display.flip()
def process(self):
with self.lock:
for event in pygame.event.get():
self.process_event(event)
self.process_keys()
if self.do_quit:
raise ExitLoop()
try:
self.sprites.add(*self.add_queue)
self.add_queue = []
self.remove_dead_sprites()
self.sprites.update()
self.maybe_spawn_mobs()
except GameOver:
self.do_quit = True
self.game_over()
def process_event(self, event):
if event.type == pygame.QUIT:
self.do_quit = True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
self.do_quit = True
def process_keys(self):
keys = pygame.key.get_pressed()
if keys[pygame.K_LEFT]:
self.player.go_left()
if keys[pygame.K_RIGHT]:
self.player.go_right()
if keys[pygame.K_UP]:
self.player.jump()
        if keys[pygame.K_SPACE]:
            self.player.maybe_shoot()
@fatal_exceptions
def draw_loop(self, fps):
run_with_fps(fps, self.draw)
@fatal_exceptions
def process_loop(self, tps):
run_with_fps(tps, self.process)
def loop(self, fps, tps):
draw_thread = Thread(target=self.draw_loop, args=[fps])
process_thread = Thread(target=self.process_loop, args=[tps])
draw_thread.start()
process_thread.start()
draw_thread.join()
process_thread.join()
def add_sprite(self, sprite):
self.add_queue.append(sprite)
def game_over(self):
self.health_osd.update()
self.sprites.draw(self.screen)
image = load_image('game_over.png')
rect = pygame.Rect((0, 0), self.size)
self.screen.blit(image, rect)
pygame.display.flip()
sleep(5)
raise ExitLoop()
def remove_dead_sprites(self):
to_remove = []
for i in self.sprites.sprites():
if i.dead:
to_remove.append(i)
self.sprites.remove(*to_remove)
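#
# Example entry point (a minimal sketch; assumes the image assets referenced
# above, such as enemy.png and game_over.png, are where load_image expects them):
# if __name__ == '__main__':
#     Game('My Game').loop(fps=60, tps=60)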
hub.py
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from os.path import exists, basename
from time import time, sleep
from knack.log import get_logger
from knack.util import CLIError
from enum import Enum, EnumMeta
from azext_iot.constants import (
DEVICE_DEVICESCOPE_PREFIX,
TRACING_PROPERTY,
TRACING_ALLOWED_FOR_LOCATION,
TRACING_ALLOWED_FOR_SKU,
IOTHUB_TRACK_2_SDK_MIN_VERSION,
)
from azext_iot.common.sas_token_auth import SasTokenAuthentication
from azext_iot.common.shared import (
DeviceAuthType,
SdkType,
ProtocolType,
ConfigType,
KeyType,
SettleType,
RenewKeyType,
IoTHubStateType,
DeviceAuthApiType,
ConnectionStringParser,
EntityStatusType
)
from azext_iot.iothub.providers.discovery import IotHubDiscovery
from azext_iot.common.utility import (
read_file_content,
validate_key_value_pairs,
unpack_msrest_error,
init_monitoring,
process_json_arg,
ensure_iothub_sdk_min_version,
generate_key,
)
from azext_iot._factory import SdkResolver, CloudError
from azext_iot.operations.generic import _execute_query, _process_top
import pprint
logger = get_logger(__name__)
printer = pprint.PrettyPrinter(indent=2)
# Query
def iot_query(
cmd,
query_command,
hub_name=None,
top=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
top = _process_top(top)
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
query_args = [query_command]
query_method = service_sdk.query.get_twins
return _execute_query(query_args, query_method, top)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
# Device
def iot_device_show(
cmd,
device_id,
hub_name=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
return _iot_device_show(target, device_id)
def _iot_device_show(target, device_id):
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
device = service_sdk.devices.get_identity(
id=device_id, raw=True
).response.json()
device["hub"] = target.get("entity")
return device
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_device_list(
cmd,
hub_name=None,
top=1000,
edge_enabled=False,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
query = (
"select * from devices where capabilities.iotEdge = true"
if edge_enabled
else "select * from devices"
)
result = iot_query(
cmd=cmd,
query_command=query,
hub_name=hub_name,
top=top,
resource_group_name=resource_group_name,
login=login,
auth_type_dataplane=auth_type_dataplane,
)
if not result:
logger.info('No registered devices found on hub "%s".', hub_name)
return result
def iot_device_create(
cmd,
device_id,
hub_name=None,
edge_enabled=False,
auth_method=DeviceAuthType.shared_private_key.value,
primary_key=None,
secondary_key=None,
primary_thumbprint=None,
secondary_thumbprint=None,
status=EntityStatusType.enabled.value,
status_reason=None,
valid_days=None,
output_dir=None,
device_scope=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
if any([valid_days, output_dir]):
valid_days = 365 if not valid_days else int(valid_days)
if output_dir and not exists(output_dir):
raise CLIError(
"certificate output directory of '{}' does not exist.".format(
output_dir
)
)
cert = _create_self_signed_cert(device_id, valid_days, output_dir)
primary_thumbprint = cert["thumbprint"]
try:
device = _assemble_device(
is_update=False,
device_id=device_id,
auth_method=auth_method,
edge_enabled=edge_enabled,
pk=primary_thumbprint if auth_method == DeviceAuthType.x509_thumbprint.value else primary_key,
sk=secondary_thumbprint if auth_method == DeviceAuthType.x509_thumbprint.value else secondary_key,
status=status,
status_reason=status_reason,
device_scope=device_scope,
)
output = service_sdk.devices.create_or_update_identity(
id=device_id, device=device
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
except ValueError as ve:
raise CLIError(ve)
return output
def _assemble_device(
is_update,
device_id,
auth_method,
edge_enabled,
pk=None,
sk=None,
status=EntityStatusType.enabled.value,
status_reason=None,
device_scope=None,
):
from azext_iot.sdk.iothub.service.models import DeviceCapabilities, Device
auth = _assemble_auth(auth_method, pk, sk)
cap = DeviceCapabilities(iot_edge=edge_enabled)
if is_update:
device = Device(
device_id=device_id,
authentication=auth,
capabilities=cap,
status=status,
status_reason=status_reason,
device_scope=device_scope,
)
return device
if edge_enabled:
parent_scopes = []
if device_scope:
parent_scopes = [device_scope]
device = Device(
device_id=device_id,
authentication=auth,
capabilities=cap,
status=status,
status_reason=status_reason,
parent_scopes=parent_scopes,
)
return device
else:
device = Device(
device_id=device_id,
authentication=auth,
capabilities=cap,
status=status,
status_reason=status_reason,
device_scope=device_scope,
)
return device
def _assemble_auth(auth_method, pk, sk):
from azext_iot.sdk.iothub.service.models import (
AuthenticationMechanism,
SymmetricKey,
X509Thumbprint,
)
auth = None
if auth_method in [
DeviceAuthType.shared_private_key.name,
DeviceAuthApiType.sas.value,
]:
if any([pk, sk]) and not all([pk, sk]):
raise ValueError("When configuring symmetric key auth both primary and secondary keys are required.")
auth = AuthenticationMechanism(
symmetric_key=SymmetricKey(primary_key=pk, secondary_key=sk),
type=DeviceAuthApiType.sas.value,
)
elif auth_method in [
DeviceAuthType.x509_thumbprint.name,
DeviceAuthApiType.selfSigned.value,
]:
if not pk:
raise ValueError("When configuring selfSigned auth the primary thumbprint is required.")
auth = AuthenticationMechanism(
x509_thumbprint=X509Thumbprint(
primary_thumbprint=pk, secondary_thumbprint=sk
),
type=DeviceAuthApiType.selfSigned.value,
)
elif auth_method in [
DeviceAuthType.x509_ca.name,
DeviceAuthApiType.certificateAuthority.value,
]:
auth = AuthenticationMechanism(
type=DeviceAuthApiType.certificateAuthority.value
)
else:
raise ValueError("Authorization method {} invalid.".format(auth_method))
return auth
def _create_self_signed_cert(subject, valid_days, output_path=None):
from azext_iot.common.certops import create_self_signed_certificate
return create_self_signed_certificate(subject, valid_days, output_path)
def update_iot_device_custom(
instance,
edge_enabled=None,
status=None,
status_reason=None,
auth_method=None,
primary_thumbprint=None,
secondary_thumbprint=None,
primary_key=None,
secondary_key=None,
):
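    """Custom updater for generic device update: applies capability, status, and auth changes to the device dict in place."""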
if edge_enabled is not None:
instance["capabilities"]["iotEdge"] = edge_enabled
if status is not None:
instance["status"] = status
if status_reason is not None:
instance["statusReason"] = status_reason
auth_type = instance["authentication"]["type"]
if auth_method is not None:
if auth_method == DeviceAuthType.shared_private_key.name:
auth = DeviceAuthApiType.sas.value
if (primary_key and not secondary_key) or (
not primary_key and secondary_key
):
raise CLIError("primary + secondary Key required with sas auth")
instance["authentication"]["symmetricKey"]["primaryKey"] = primary_key
instance["authentication"]["symmetricKey"]["secondaryKey"] = secondary_key
elif auth_method == DeviceAuthType.x509_thumbprint.name:
auth = DeviceAuthApiType.selfSigned.value
if not any([primary_thumbprint, secondary_thumbprint]):
raise CLIError(
"primary or secondary Thumbprint required with selfSigned auth"
)
if primary_thumbprint:
instance["authentication"]["x509Thumbprint"][
"primaryThumbprint"
] = primary_thumbprint
if secondary_thumbprint:
instance["authentication"]["x509Thumbprint"][
"secondaryThumbprint"
] = secondary_thumbprint
elif auth_method == DeviceAuthType.x509_ca.name:
auth = DeviceAuthApiType.certificateAuthority.value
else:
raise ValueError("Authorization method {} invalid.".format(auth_method))
instance["authentication"]["type"] = auth
# if no new auth_method is provided, validate secondary auth arguments and update accordingly
elif auth_type == DeviceAuthApiType.sas.value:
if any([primary_thumbprint, secondary_thumbprint]):
raise ValueError(
"Device authorization method {} does not support primary or secondary thumbprints.".format(
DeviceAuthType.shared_private_key.name
)
)
if primary_key:
instance["authentication"]["symmetricKey"]["primaryKey"] = primary_key
if secondary_key:
instance["authentication"]["symmetricKey"]["secondaryKey"] = secondary_key
elif auth_type == DeviceAuthApiType.selfSigned.value:
if any([primary_key, secondary_key]):
raise ValueError(
"Device authorization method {} does not support primary or secondary keys.".format(
DeviceAuthType.x509_thumbprint.name
)
)
if primary_thumbprint:
instance["authentication"]["x509Thumbprint"][
"primaryThumbprint"
] = primary_thumbprint
if secondary_thumbprint:
instance["authentication"]["x509Thumbprint"][
"secondaryThumbprint"
] = secondary_thumbprint
return instance
def iot_device_update(
cmd,
device_id,
parameters,
hub_name=None,
resource_group_name=None,
login=None,
etag=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
auth, pk, sk = _parse_auth(parameters)
updated_device = _assemble_device(
True,
parameters["deviceId"],
auth,
parameters["capabilities"]["iotEdge"],
pk,
sk,
parameters["status"].lower(),
parameters.get("statusReason"),
parameters.get("deviceScope"),
)
updated_device.etag = etag if etag else "*"
return _iot_device_update(target, device_id, updated_device)
def _iot_device_update(target, device_id, device):
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
headers = {}
headers["If-Match"] = '"{}"'.format(device.etag)
return service_sdk.devices.create_or_update_identity(
id=device_id, device=device, custom_headers=headers
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_device_delete(
cmd,
device_id,
hub_name=None,
resource_group_name=None,
login=None,
etag=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
headers = {}
headers["If-Match"] = '"{}"'.format(etag if etag else "*")
service_sdk.devices.delete_identity(id=device_id, custom_headers=headers)
return
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def _update_device_key(target, device, auth_method, pk, sk, etag=None):
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
auth = _assemble_auth(auth_method, pk, sk)
device["authentication"] = auth
headers = {}
headers["If-Match"] = '"{}"'.format(etag if etag else "*")
return service_sdk.devices.create_or_update_identity(
id=device["deviceId"],
device=device,
custom_headers=headers,
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_device_key_regenerate(
cmd,
hub_name,
device_id,
renew_key_type,
resource_group_name=None,
login=None,
etag=None,
auth_type_dataplane=None,
):
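    """Regenerate the primary or secondary symmetric key (or swap both) for a SAS-authenticated device."""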
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
device = _iot_device_show(target, device_id)
if device["authentication"]["type"] != DeviceAuthApiType.sas.value:
raise CLIError("Device authentication should be of type sas")
pk = device["authentication"]["symmetricKey"]["primaryKey"]
sk = device["authentication"]["symmetricKey"]["secondaryKey"]
if renew_key_type == RenewKeyType.primary.value:
pk = generate_key()
if renew_key_type == RenewKeyType.secondary.value:
sk = generate_key()
    if renew_key_type == RenewKeyType.swap.value:
        pk, sk = sk, pk
return _update_device_key(
target, device, device["authentication"]["type"], pk, sk, etag
)
def iot_device_get_parent(
cmd,
device_id,
hub_name=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
child_device = _iot_device_show(target, device_id)
_validate_child_device(child_device)
parent_scope = child_device["parentScopes"][0]
parent_device_id = parent_scope[
len(DEVICE_DEVICESCOPE_PREFIX) : parent_scope.rindex("-")
]
return _iot_device_show(target, parent_device_id)
def iot_device_set_parent(
cmd,
device_id,
parent_id,
force=False,
hub_name=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
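    """Set an edge device as the parent of the given device; requires force to overwrite an existing parent."""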
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
parent_device = _iot_device_show(target, parent_id)
_validate_edge_device(parent_device)
child_device = _iot_device_show(target, device_id)
_validate_parent_child_relation(child_device, force)
_update_device_parent(
target,
child_device,
child_device["capabilities"]["iotEdge"],
parent_device["deviceScope"],
)
def iot_device_children_add(
cmd,
device_id,
child_list,
force=False,
hub_name=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
devices = []
edge_device = _iot_device_show(target, device_id)
_validate_edge_device(edge_device)
converted_child_list = child_list
for child_device_id in converted_child_list:
child_device = _iot_device_show(target, child_device_id.strip())
_validate_parent_child_relation(child_device, force)
devices.append(child_device)
for device in devices:
_update_device_parent(
target,
device,
device["capabilities"]["iotEdge"],
edge_device["deviceScope"],
)
def iot_device_children_remove(
cmd,
device_id,
child_list=None,
remove_all=False,
hub_name=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
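    """Remove the listed child devices, or all children when remove_all is set, from the parent edge device."""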
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
devices = []
if remove_all:
result = _iot_device_children_list(
cmd, device_id, hub_name, resource_group_name, login
)
if not result:
raise CLIError(
'No registered child devices found for "{}" edge device.'.format(
device_id
)
)
for child_device_id in [str(x["deviceId"]) for x in result]:
child_device = _iot_device_show(target, child_device_id.strip())
devices.append(child_device)
elif child_list:
edge_device = _iot_device_show(target, device_id)
_validate_edge_device(edge_device)
converted_child_list = child_list
for child_device_id in converted_child_list:
child_device = _iot_device_show(target, child_device_id.strip())
_validate_child_device(child_device)
if child_device["parentScopes"] == [edge_device["deviceScope"]]:
devices.append(child_device)
else:
raise CLIError(
'The entered child device "{}" isn\'t assigned as a child of edge device "{}"'.format(
child_device_id.strip(), device_id
)
)
else:
raise CLIError(
"Please specify child list or use --remove-all to remove all children."
)
for device in devices:
_update_device_parent(target, device, device["capabilities"]["iotEdge"])
def iot_device_children_list(
cmd,
device_id,
hub_name=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
result = _iot_device_children_list(
cmd=cmd,
device_id=device_id,
hub_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type_dataplane=auth_type_dataplane,
)
return [device["deviceId"] for device in result]
def _iot_device_children_list(
cmd,
device_id,
hub_name=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
device = _iot_device_show(target, device_id)
_validate_edge_device(device)
query = (
"select deviceId from devices where array_contains(parentScopes, '{}')".format(
device["deviceScope"]
)
)
# TODO: Inefficient
return iot_query(
cmd=cmd,
query_command=query,
hub_name=hub_name,
top=None,
resource_group_name=resource_group_name,
login=login,
auth_type_dataplane=auth_type_dataplane,
)
def _update_device_parent(target, device, is_edge, device_scope=None):
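    """Persist a parent/scope change via an etag-guarded identity update; edge devices update parentScopes, others deviceScope."""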
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
if is_edge:
parent_scopes = []
if device_scope:
parent_scopes = [device_scope]
device["parentScopes"] = parent_scopes
else:
if not device_scope:
device_scope = ""
device["deviceScope"] = device_scope
etag = device.get("etag", None)
if etag:
headers = {}
headers["If-Match"] = '"{}"'.format(etag)
service_sdk.devices.create_or_update_identity(
id=device["deviceId"],
device=device,
custom_headers=headers,
)
return
raise LookupError("device etag not found.")
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
except LookupError as err:
raise CLIError(err)
def _validate_edge_device(device):
if not device["capabilities"]["iotEdge"]:
raise CLIError(
'The device "{}" should be edge device.'.format(device["deviceId"])
)
def _validate_child_device(device):
if "parentScopes" not in device:
raise CLIError(
'Device "{}" doesn\'t support parent device functionality.'.format(
device["deviceId"]
)
)
if not device["parentScopes"]:
raise CLIError(
'Device "{}" doesn\'t have any parent device.'.format(device["deviceId"])
)
def _validate_parent_child_relation(child_device, force):
if "parentScopes" not in child_device or child_device["parentScopes"] == []:
return
else:
if not force:
raise CLIError(
"The entered device \"{}\" already has a parent device, please use '--force'"
" to overwrite".format(child_device["deviceId"])
)
return
# Module
def iot_device_module_create(
cmd,
device_id,
module_id,
hub_name=None,
auth_method=DeviceAuthType.shared_private_key.value,
primary_key=None,
secondary_key=None,
primary_thumbprint=None,
secondary_thumbprint=None,
valid_days=None,
output_dir=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
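    """Create a module identity on a device, generating a self-signed cert when valid_days or output_dir is provided."""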
if any([valid_days, output_dir]):
valid_days = 365 if not valid_days else int(valid_days)
if output_dir and not exists(output_dir):
raise CLIError(
"certificate output directory of '{}' does not exist.".format(
output_dir
)
)
cert = _create_self_signed_cert(module_id, valid_days, output_dir)
primary_thumbprint = cert["thumbprint"]
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
module = _assemble_module(
device_id=device_id,
module_id=module_id,
auth_method=auth_method,
pk=primary_thumbprint if auth_method == DeviceAuthType.x509_thumbprint.value else primary_key,
sk=secondary_thumbprint if auth_method == DeviceAuthType.x509_thumbprint.value else secondary_key,
)
return service_sdk.modules.create_or_update_identity(
id=device_id, mid=module_id, module=module
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
except ValueError as ve:
raise CLIError(ve)
def _assemble_module(device_id, module_id, auth_method, pk=None, sk=None):
from azext_iot.sdk.iothub.service.models import Module
auth = _assemble_auth(auth_method, pk, sk)
module = Module(module_id=module_id, device_id=device_id, authentication=auth)
return module
def iot_device_module_update(
cmd,
device_id,
module_id,
parameters,
hub_name=None,
resource_group_name=None,
login=None,
etag=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
updated_module = _handle_module_update_params(parameters)
headers = {}
headers["If-Match"] = '"{}"'.format(etag if etag else "*")
return service_sdk.modules.create_or_update_identity(
id=device_id,
mid=module_id,
module=updated_module,
custom_headers=headers,
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def _handle_module_update_params(parameters):
auth, pk, sk = _parse_auth(parameters)
return _assemble_module(
device_id=parameters["deviceId"],
module_id=parameters["moduleId"],
auth_method=auth,
pk=pk,
sk=sk,
)
def _parse_auth(parameters):
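    """Extract the auth type and primary/secondary credentials from a device or module registry payload."""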
valid_auth = [
DeviceAuthApiType.sas.value,
DeviceAuthApiType.selfSigned.value,
DeviceAuthApiType.certificateAuthority.value,
]
auth = parameters["authentication"].get("type")
if auth not in valid_auth:
raise CLIError("authentication.type must be one of {}".format(valid_auth))
pk = sk = None
if auth == DeviceAuthApiType.sas.value:
pk = parameters["authentication"]["symmetricKey"]["primaryKey"]
sk = parameters["authentication"]["symmetricKey"]["secondaryKey"]
elif auth == DeviceAuthApiType.selfSigned.value:
pk = parameters["authentication"]["x509Thumbprint"]["primaryThumbprint"]
sk = parameters["authentication"]["x509Thumbprint"]["secondaryThumbprint"]
if not any([pk, sk]):
raise CLIError(
"primary + secondary Thumbprint required with selfSigned auth"
)
return auth, pk, sk
def iot_device_module_key_regenerate(
cmd,
hub_name,
device_id,
module_id,
renew_key_type,
resource_group_name=None,
login=None,
etag=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
module = service_sdk.modules.get_identity(
id=device_id, mid=module_id, raw=True
).response.json()
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
if module["authentication"]["type"] != "sas":
raise CLIError("Module authentication should be of type sas")
pk = module["authentication"]["symmetricKey"]["primaryKey"]
sk = module["authentication"]["symmetricKey"]["secondaryKey"]
if renew_key_type == RenewKeyType.primary.value:
pk = generate_key()
if renew_key_type == RenewKeyType.secondary.value:
sk = generate_key()
    if renew_key_type == RenewKeyType.swap.value:
        pk, sk = sk, pk
module["authentication"]["symmetricKey"]["primaryKey"] = pk
module["authentication"]["symmetricKey"]["secondaryKey"] = sk
try:
headers = {}
headers["If-Match"] = '"{}"'.format(etag if etag else "*")
return service_sdk.modules.create_or_update_identity(
id=device_id,
mid=module_id,
module=module,
custom_headers=headers,
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_device_module_list(
cmd,
device_id,
hub_name=None,
top=1000,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
return service_sdk.modules.get_modules_on_device(device_id)[:top]
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_device_module_show(
cmd,
device_id,
module_id,
hub_name=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
return _iot_device_module_show(target, device_id, module_id)
def _iot_device_module_show(target, device_id, module_id):
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
module = service_sdk.modules.get_identity(
id=device_id, mid=module_id, raw=True
).response.json()
module["hub"] = target.get("entity")
return module
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_device_module_delete(
cmd,
device_id,
module_id,
hub_name=None,
resource_group_name=None,
login=None,
etag=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
headers = {}
headers["If-Match"] = '"{}"'.format(etag if etag else "*")
service_sdk.modules.delete_identity(
id=device_id, mid=module_id, custom_headers=headers
)
return
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_device_module_twin_show(
cmd,
device_id,
module_id,
hub_name=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
return _iot_device_module_twin_show(
target=target, device_id=device_id, module_id=module_id
)
def _iot_device_module_twin_show(target, device_id, module_id):
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
return service_sdk.modules.get_twin(
id=device_id, mid=module_id, raw=True
).response.json()
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_device_module_twin_update(
cmd,
device_id,
module_id,
parameters,
hub_name=None,
resource_group_name=None,
login=None,
etag=None,
auth_type_dataplane=None,
):
from azext_iot.common.utility import verify_transform
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
headers = {}
headers["If-Match"] = '"{}"'.format(etag if etag else "*")
verify = {}
if parameters.get("properties"):
if parameters["properties"].get("desired"):
verify = {"properties.desired": dict}
if parameters.get("tags"):
verify["tags"] = dict
verify_transform(parameters, verify)
return service_sdk.modules.update_twin(
id=device_id,
mid=module_id,
device_twin_info=parameters,
custom_headers=headers,
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
except (AttributeError, TypeError) as err:
raise CLIError(err)
def iot_device_module_twin_replace(
cmd,
device_id,
module_id,
target_json,
hub_name=None,
resource_group_name=None,
login=None,
etag=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
target_json = process_json_arg(target_json, argument_name="json")
headers = {}
headers["If-Match"] = '"{}"'.format(etag if etag else "*")
return service_sdk.modules.replace_twin(
id=device_id,
mid=module_id,
device_twin_info=target_json,
custom_headers=headers,
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_edge_set_modules(
cmd,
device_id,
content,
hub_name=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
from azext_iot.sdk.iothub.service.models import ConfigurationContent
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
content = process_json_arg(content, argument_name="content")
processed_content = _process_config_content(
content, config_type=ConfigType.edge
)
content = ConfigurationContent(**processed_content)
service_sdk.configuration.apply_on_edge_device(id=device_id, content=content)
return iot_device_module_list(cmd, device_id, hub_name=hub_name, login=login)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_edge_deployment_create(
cmd,
config_id,
content,
hub_name=None,
target_condition="",
priority=0,
labels=None,
metrics=None,
layered=False,
no_validation=False,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
# Short-term fix for --no-validation
config_type = ConfigType.layered if layered or no_validation else ConfigType.edge
return _iot_hub_configuration_create(
cmd=cmd,
config_id=config_id,
content=content,
hub_name=hub_name,
target_condition=target_condition,
priority=priority,
labels=labels,
metrics=metrics,
resource_group_name=resource_group_name,
login=login,
config_type=config_type,
auth_type_dataplane=auth_type_dataplane,
)
def iot_hub_configuration_create(
cmd,
config_id,
content,
hub_name=None,
target_condition="",
priority=0,
labels=None,
metrics=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
return _iot_hub_configuration_create(
cmd=cmd,
config_id=config_id,
content=content,
hub_name=hub_name,
target_condition=target_condition,
priority=priority,
labels=labels,
metrics=metrics,
resource_group_name=resource_group_name,
login=login,
config_type=ConfigType.adm,
auth_type_dataplane=auth_type_dataplane,
)
def _iot_hub_configuration_create(
cmd,
config_id,
content,
config_type,
hub_name=None,
target_condition="",
priority=0,
labels=None,
metrics=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
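    """Shared implementation for creating ADM configurations and edge deployments on the target hub."""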
from azext_iot.sdk.iothub.service.models import (
Configuration,
ConfigurationContent,
ConfigurationMetrics,
)
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
logger.debug("ensuring lowercase configuration Id...")
config_id = config_id.lower()
metrics_key = "queries"
content = process_json_arg(content, argument_name="content")
processed_content = _process_config_content(content, config_type)
if "module_content" in processed_content:
required_target_prefix = "from devices.modules where"
lower_target_condition = target_condition.lower()
if not lower_target_condition.startswith(required_target_prefix):
raise CLIError(
"The target condition for a module configuration must start with '{}'".format(
required_target_prefix
)
)
if metrics:
metrics = process_json_arg(metrics, argument_name="metrics")
if "metrics" in metrics:
metrics = metrics["metrics"]
if metrics_key not in metrics:
raise CLIError(
"metrics json must include the '{}' property".format(metrics_key)
)
metrics = metrics[metrics_key]
if labels:
labels = process_json_arg(labels, argument_name="labels")
config_content = ConfigurationContent(**processed_content)
config_metrics = ConfigurationMetrics(queries=metrics)
config = Configuration(
id=config_id,
schema_version="2.0",
labels=labels,
content=config_content,
metrics=config_metrics,
target_condition=target_condition,
etag="*",
priority=priority,
)
try:
return service_sdk.configuration.create_or_update(
id=config_id, configuration=config
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def _process_config_content(content, config_type):
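    """Normalize a configuration payload into the snake_case keys expected by ConfigurationContent."""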
from knack.util import to_snake_case
# Supports scenario where configuration payload is contained in 'content' key
if "content" in content:
content = content["content"]
    # Create new config dict to remove superfluous properties
processed_content = {}
if config_type == ConfigType.adm:
valid_adm_keys = ["deviceContent", "moduleContent"]
if not all(key in content for key in valid_adm_keys):
for key in valid_adm_keys:
if key in content:
processed_content[to_snake_case(key)] = content[key]
return processed_content
raise CLIError(
"Automatic device configuration payloads require property: {}".format(
" or ".join(map(str, valid_adm_keys))
)
)
if config_type == ConfigType.edge or config_type == ConfigType.layered:
valid_edge_key = "modulesContent"
legacy_edge_key = "moduleContent"
if valid_edge_key in content:
processed_content[valid_edge_key] = content[valid_edge_key]
elif legacy_edge_key in content:
logger.warning(
"'%s' is deprecated for edge deployments. Use '%s' instead - request is still processing...",
legacy_edge_key,
valid_edge_key,
)
processed_content[valid_edge_key] = content[legacy_edge_key]
if processed_content:
# Schema based validation currently for IoT edge deployment only
if config_type == ConfigType.edge:
_validate_payload_schema(processed_content)
processed_content[to_snake_case(valid_edge_key)] = processed_content[
valid_edge_key
]
del processed_content[valid_edge_key]
return processed_content
raise CLIError(
"Edge deployment payloads require property: {}".format(valid_edge_key)
)
def _validate_payload_schema(content):
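    """Validate the $edgeAgent and $edgeHub sections of an edge deployment against bundled JSON schemas, skipping validation when a schema cannot be loaded."""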
import json
from os.path import join
from azext_iot.models.validators import JsonSchemaType, JsonSchemaValidator
from azext_iot.constants import EDGE_DEPLOYMENT_ROOT_SCHEMAS_PATH as root_schema_path
from azext_iot.common.utility import shell_safe_json_parse
EDGE_AGENT_SCHEMA_PATH = "azure-iot-edgeagent-deployment-{}.json"
EDGE_HUB_SCHEMA_PATH = "azure-iot-edgehub-deployment-{}.json"
EDGE_SCHEMA_PATH_DICT = {
"$edgeAgent": EDGE_AGENT_SCHEMA_PATH,
"$edgeHub": EDGE_HUB_SCHEMA_PATH,
}
modules_content = content["modulesContent"]
system_modules_for_validation = ["$edgeAgent", "$edgeHub"]
for sys_module in system_modules_for_validation:
if sys_module in modules_content:
if (
"properties.desired" in modules_content[sys_module]
and "schemaVersion"
in modules_content[sys_module]["properties.desired"]
):
target_schema_ver = modules_content[sys_module][
"properties.desired"
]["schemaVersion"]
target_schema_def_path = join(root_schema_path, f"{EDGE_SCHEMA_PATH_DICT[sys_module].format(target_schema_ver)}")
logger.info("Attempting to fetch schema content from %s...", target_schema_def_path)
if not exists(target_schema_def_path):
logger.info("Invalid schema path %s, skipping validation...", target_schema_def_path)
continue
try:
target_schema_def = str(read_file_content(target_schema_def_path))
target_schema_def = shell_safe_json_parse(target_schema_def)
except Exception:
logger.info(
"Unable to fetch schema content from %s skipping validation...",
target_schema_def_path,
)
continue
logger.info(f"Validating {sys_module} of deployment payload against schema...")
to_validate_content = {
sys_module: modules_content[sys_module]
}
draft_version = JsonSchemaType.draft4
if "$schema" in target_schema_def and "/draft-07/" in target_schema_def["$schema"]:
draft_version = JsonSchemaType.draft7
v = JsonSchemaValidator(target_schema_def, draft_version)
errors = v.validate(to_validate_content)
if errors:
# Pretty printing schema validation errors
raise CLIError(
json.dumps(
{"validationErrors": errors},
separators=(",", ":"),
indent=2,
)
)
def iot_hub_configuration_update(
cmd,
config_id,
parameters,
hub_name=None,
resource_group_name=None,
login=None,
etag=None,
auth_type_dataplane=None,
):
from azext_iot.sdk.iothub.service.models import Configuration
from azext_iot.common.utility import verify_transform
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
headers = {}
headers["If-Match"] = '"{}"'.format(etag if etag else "*")
verify = {"metrics": dict, "metrics.queries": dict, "content": dict}
if parameters.get("labels"):
verify["labels"] = dict
verify_transform(parameters, verify)
config = Configuration(
id=parameters["id"],
schema_version=parameters["schemaVersion"],
labels=parameters["labels"],
content=parameters["content"],
metrics=parameters.get("metrics", None),
target_condition=parameters["targetCondition"],
priority=parameters["priority"],
)
return service_sdk.configuration.create_or_update(
id=config_id, configuration=config, custom_headers=headers
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
except (AttributeError, TypeError) as err:
raise CLIError(err)
def iot_hub_configuration_show(
cmd,
config_id,
hub_name=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
return _iot_hub_configuration_show(target=target, config_id=config_id)
def _iot_hub_configuration_show(target, config_id):
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
return service_sdk.configuration.get(id=config_id, raw=True).response.json()
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_hub_configuration_list(
cmd,
hub_name=None,
top=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
result = _iot_hub_configuration_list(
cmd=cmd,
hub_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type_dataplane=auth_type_dataplane,
)
filtered = [
c
for c in result
if (
c["content"].get("deviceContent") is not None
or c["content"].get("moduleContent") is not None
)
]
return filtered[:top] # list[:None] == list[:len(list)]
def iot_edge_deployment_list(
cmd,
hub_name=None,
top=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
result = _iot_hub_configuration_list(
cmd,
hub_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type_dataplane=auth_type_dataplane,
)
filtered = [c for c in result if c["content"].get("modulesContent") is not None]
return filtered[:top] # list[:None] == list[:len(list)]
def _iot_hub_configuration_list(
cmd, hub_name=None, resource_group_name=None, login=None, auth_type_dataplane=None
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
result = service_sdk.configuration.get_configurations(raw=True).response.json()
if not result:
logger.info('No configurations found on hub "%s".', hub_name)
return result
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_hub_configuration_delete(
cmd,
config_id,
hub_name=None,
resource_group_name=None,
login=None,
etag=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
headers = {}
headers["If-Match"] = '"{}"'.format(etag if etag else "*")
service_sdk.configuration.delete(id=config_id, custom_headers=headers)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_edge_deployment_metric_show(
cmd,
config_id,
metric_id,
metric_type="user",
hub_name=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
return iot_hub_configuration_metric_show(
cmd,
config_id=config_id,
metric_id=metric_id,
metric_type=metric_type,
hub_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type_dataplane=auth_type_dataplane,
)
def iot_hub_configuration_metric_show(
cmd,
config_id,
metric_id,
metric_type="user",
hub_name=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
config = _iot_hub_configuration_show(target=target, config_id=config_id)
metric_collection = None
if metric_type == "system":
metric_collection = config["systemMetrics"].get("queries")
else:
metric_collection = config["metrics"].get("queries")
if metric_id not in metric_collection:
raise CLIError(
"The {} metric '{}' is not defined in the configuration '{}'".format(
metric_type, metric_id, config_id
)
)
metric_query = metric_collection[metric_id]
query_args = [metric_query]
query_method = service_sdk.query.get_twins
metric_result = _execute_query(query_args, query_method, None)
output = {}
output["metric"] = metric_id
output["query"] = metric_query
output["result"] = metric_result
return output
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
# Device Twin
def iot_device_twin_show(
cmd,
device_id,
hub_name=None,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
return _iot_device_twin_show(target=target, device_id=device_id)
def _iot_device_twin_show(target, device_id):
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
return service_sdk.devices.get_twin(id=device_id, raw=True).response.json()
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_twin_update_custom(instance, desired=None, tags=None):
payload = {}
is_patch = False
if desired:
is_patch = True
payload["properties"] = {"desired": process_json_arg(desired, "desired")}
if tags:
is_patch = True
payload["tags"] = process_json_arg(tags, "tags")
return payload if is_patch else instance
def iot_device_twin_update(
cmd,
device_id,
parameters,
hub_name=None,
resource_group_name=None,
login=None,
etag=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
return _iot_device_twin_update(target, device_id, parameters, etag)
def _iot_device_twin_update(
target,
device_id,
parameters,
etag=None,
):
from azext_iot.common.utility import verify_transform
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
headers = {}
headers["If-Match"] = '"{}"'.format(etag if etag else "*")
verify = {}
if parameters.get("properties"):
if parameters["properties"].get("desired"):
verify = {"properties.desired": dict}
if parameters.get("tags"):
verify["tags"] = dict
verify_transform(parameters, verify)
return service_sdk.devices.update_twin(
id=device_id, device_twin_info=parameters, custom_headers=headers
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
except (AttributeError, TypeError) as err:
raise CLIError(err)
def iot_device_twin_replace(
cmd,
device_id,
target_json,
hub_name=None,
resource_group_name=None,
login=None,
etag=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
try:
target_json = process_json_arg(target_json, argument_name="json")
headers = {}
headers["If-Match"] = '"{}"'.format(etag if etag else "*")
return service_sdk.devices.replace_twin(
id=device_id, device_twin_info=target_json, custom_headers=headers
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_device_method(
cmd,
device_id,
method_name,
hub_name=None,
method_payload="{}",
timeout=30,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
from azext_iot.constants import (
METHOD_INVOKE_MAX_TIMEOUT_SEC,
METHOD_INVOKE_MIN_TIMEOUT_SEC,
)
if timeout > METHOD_INVOKE_MAX_TIMEOUT_SEC:
raise CLIError(
"timeout must not be over {} seconds".format(METHOD_INVOKE_MAX_TIMEOUT_SEC)
)
if timeout < METHOD_INVOKE_MIN_TIMEOUT_SEC:
raise CLIError(
"timeout must be at least {} seconds".format(METHOD_INVOKE_MIN_TIMEOUT_SEC)
)
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
# Prevent msrest locking up shell
service_sdk.config.retry_policy.retries = 1
try:
if method_payload:
method_payload = process_json_arg(
method_payload, argument_name="method-payload"
)
request_body = {
"methodName": method_name,
"payload": method_payload,
"responseTimeoutInSeconds": timeout,
"connectTimeoutInSeconds": timeout,
}
return service_sdk.devices.invoke_method(
device_id=device_id,
direct_method_request=request_body,
timeout=timeout,
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
# Device Module Method Invoke
def iot_device_module_method(
cmd,
device_id,
module_id,
method_name,
hub_name=None,
method_payload="{}",
timeout=30,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
from azext_iot.constants import (
METHOD_INVOKE_MAX_TIMEOUT_SEC,
METHOD_INVOKE_MIN_TIMEOUT_SEC,
)
if timeout > METHOD_INVOKE_MAX_TIMEOUT_SEC:
raise CLIError(
"timeout must not be over {} seconds".format(METHOD_INVOKE_MAX_TIMEOUT_SEC)
)
if timeout < METHOD_INVOKE_MIN_TIMEOUT_SEC:
raise CLIError(
"timeout must not be over {} seconds".format(METHOD_INVOKE_MIN_TIMEOUT_SEC)
)
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
# Prevent msrest locking up shell
service_sdk.config.retry_policy.retries = 1
try:
if method_payload:
method_payload = process_json_arg(
method_payload, argument_name="method-payload"
)
request_body = {
"methodName": method_name,
"payload": method_payload,
"responseTimeoutInSeconds": timeout,
"connectTimeoutInSeconds": timeout,
}
return service_sdk.modules.invoke_method(
device_id=device_id,
module_id=module_id,
direct_method_request=request_body,
timeout=timeout,
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
# Utility
def iot_get_sas_token(
cmd,
hub_name=None,
device_id=None,
policy_name="iothubowner",
key_type="primary",
duration=3600,
resource_group_name=None,
login=None,
module_id=None,
auth_type_dataplane=None,
connection_string=None,
):
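    """Generate a SAS token for an IoT Hub, device, or module using the requested policy, key type, and duration."""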
key_type = key_type.lower()
policy_name = policy_name.lower()
if login and policy_name != "iothubowner":
raise CLIError(
"You are unable to change the sas policy with a hub connection string login."
)
if login and key_type != "primary" and not device_id:
raise CLIError(
"For non-device sas, you are unable to change the key type with a connection string login."
)
if module_id and not device_id:
raise CLIError(
"You are unable to get sas token for module without device information."
)
if connection_string:
return {
DeviceAuthApiType.sas.value: _iot_build_sas_token_from_cs(
connection_string,
duration,
).generate_sas_token()
}
return {
DeviceAuthApiType.sas.value: _iot_build_sas_token(
cmd,
hub_name,
device_id,
module_id,
policy_name,
key_type,
duration,
resource_group_name,
login,
auth_type_dataplane,
).generate_sas_token()
}
def _iot_build_sas_token_from_cs(connection_string, duration=3600):
uri = None
policy = None
key = None
parsed_cs = None
all_parsers = [
ConnectionStringParser.Module,
ConnectionStringParser.Device,
ConnectionStringParser.IotHub,
]
for parser in all_parsers:
try:
parsed_cs = parser(connection_string)
if "SharedAccessKeyName" in parsed_cs:
policy = parsed_cs["SharedAccessKeyName"]
key = parsed_cs["SharedAccessKey"]
if parser == ConnectionStringParser.IotHub:
uri = parsed_cs["HostName"]
elif parser == ConnectionStringParser.Module:
uri = "{}/devices/{}/modules/{}".format(
parsed_cs["HostName"], parsed_cs["DeviceId"], parsed_cs["ModuleId"]
)
elif parser == ConnectionStringParser.Device:
uri = "{}/devices/{}".format(parsed_cs["HostName"], parsed_cs["DeviceId"])
else:
raise CLIError("Given Connection String was not in a supported format.")
return SasTokenAuthentication(uri, policy, key, duration)
except ValueError:
continue
raise CLIError("Given Connection String was not in a supported format.")
def _iot_build_sas_token(
cmd,
hub_name=None,
device_id=None,
module_id=None,
policy_name="iothubowner",
key_type="primary",
duration=3600,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
from azext_iot.common._azure import (
parse_iot_device_connection_string,
parse_iot_device_module_connection_string,
)
# There is no dataplane operation for a pure IoT Hub sas token
if all([device_id is None, module_id is None]):
auth_type_dataplane = "key"
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
policy_name=policy_name,
login=login,
auth_type=auth_type_dataplane,
)
uri = None
policy = None
key = None
if device_id:
logger.info(
'Obtaining device "%s" details from registry, using IoT Hub policy "%s"',
device_id,
policy_name,
)
device = _iot_device_show(target, device_id)
if module_id:
module = _iot_device_module_show(target, device_id, module_id)
module_cs = _build_device_or_module_connection_string(
entity=module, key_type=key_type
)
uri = "{}/devices/{}/modules/{}".format(
target["entity"], device_id, module_id
)
try:
parsed_module_cs = parse_iot_device_module_connection_string(module_cs)
except ValueError as e:
logger.debug(e)
raise CLIError("This module does not support SAS auth.")
key = parsed_module_cs["SharedAccessKey"]
else:
device_cs = _build_device_or_module_connection_string(
entity=device, key_type=key_type
)
uri = "{}/devices/{}".format(target["entity"], device_id)
try:
parsed_device_cs = parse_iot_device_connection_string(device_cs)
except ValueError as e:
logger.debug(e)
raise CLIError("This device does not support SAS auth.")
key = parsed_device_cs["SharedAccessKey"]
else:
uri = target["entity"]
policy = target["policy"]
key = target["primarykey"] if key_type == "primary" else target["secondarykey"]
return SasTokenAuthentication(uri, policy, key, duration)
def _build_device_or_module_connection_string(entity, key_type="primary"):
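    """Compose a device or module connection string from its registry entry and the chosen key type."""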
is_device = entity.get("moduleId") is None
template = (
"HostName={};DeviceId={};{}"
if is_device
else "HostName={};DeviceId={};ModuleId={};{}"
)
auth = entity["authentication"]
auth_type = auth["type"].lower()
if auth_type == DeviceAuthApiType.sas.value.lower():
key = "SharedAccessKey={}".format(
auth["symmetricKey"]["primaryKey"]
if key_type == "primary"
else auth["symmetricKey"]["secondaryKey"]
)
elif auth_type in [
DeviceAuthApiType.certificateAuthority.value.lower(),
DeviceAuthApiType.selfSigned.value.lower(),
]:
key = "x509=true"
else:
raise CLIError("Unable to form target connection string")
if is_device:
return template.format(entity.get("hub"), entity.get("deviceId"), key)
else:
return template.format(
entity.get("hub"), entity.get("deviceId"), entity.get("moduleId"), key
)
def iot_get_device_connection_string(
cmd,
device_id,
hub_name=None,
key_type="primary",
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
result = {}
device = iot_device_show(
cmd,
device_id,
hub_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type_dataplane=auth_type_dataplane,
)
result["connectionString"] = _build_device_or_module_connection_string(
device, key_type
)
return result
def iot_get_module_connection_string(
cmd,
device_id,
module_id,
hub_name=None,
key_type="primary",
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
result = {}
module = iot_device_module_show(
cmd,
device_id,
module_id,
resource_group_name=resource_group_name,
hub_name=hub_name,
login=login,
auth_type_dataplane=auth_type_dataplane,
)
result["connectionString"] = _build_device_or_module_connection_string(
module, key_type
)
return result
# Messaging
def iot_device_send_message(
cmd,
device_id,
hub_name=None,
data="Ping from Az CLI IoT Extension",
properties=None,
msg_count=1,
resource_group_name=None,
login=None
):
from azext_iot.operations._mqtt import mqtt_client
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name, resource_group_name=resource_group_name, login=login
)
if properties:
properties = validate_key_value_pairs(properties)
device = _iot_device_show(target, device_id)
device_connection_string = _build_device_or_module_connection_string(device, KeyType.primary.value)
client_mqtt = mqtt_client(
target=target,
device_conn_string=device_connection_string,
device_id=device_id
)
for _ in range(msg_count):
client_mqtt.send_d2c_message(message_text=data, properties=properties)
client_mqtt.shutdown()
def iot_device_send_message_http(
cmd,
device_id,
data,
hub_name=None,
headers=None,
resource_group_name=None,
login=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name, resource_group_name=resource_group_name, login=login
)
return _iot_device_send_message_http(target, device_id, data, headers)
def _iot_device_send_message_http(target, device_id, data, headers=None):
resolver = SdkResolver(target=target, device_id=device_id)
device_sdk = resolver.get_sdk(SdkType.device_sdk)
try:
return device_sdk.device.send_device_event(
id=device_id, message=data, custom_headers=headers
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_c2d_message_complete(
cmd, device_id, etag, hub_name=None, resource_group_name=None, login=None
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name, resource_group_name=resource_group_name, login=login
)
return _iot_c2d_message_complete(target, device_id, etag)
def _iot_c2d_message_complete(target, device_id, etag):
resolver = SdkResolver(target=target, device_id=device_id)
device_sdk = resolver.get_sdk(SdkType.device_sdk)
try:
return device_sdk.device.complete_device_bound_notification(
id=device_id, etag=etag
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_c2d_message_reject(
cmd, device_id, etag, hub_name=None, resource_group_name=None, login=None
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name, resource_group_name=resource_group_name, login=login
)
return _iot_c2d_message_reject(target, device_id, etag)
def _iot_c2d_message_reject(target, device_id, etag):
resolver = SdkResolver(target=target, device_id=device_id)
device_sdk = resolver.get_sdk(SdkType.device_sdk)
try:
return device_sdk.device.complete_device_bound_notification(
id=device_id, etag=etag, reject=""
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_c2d_message_abandon(
cmd, device_id, etag, hub_name=None, resource_group_name=None, login=None
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name, resource_group_name=resource_group_name, login=login
)
return _iot_c2d_message_abandon(target, device_id, etag)
def _iot_c2d_message_abandon(target, device_id, etag):
resolver = SdkResolver(target=target, device_id=device_id)
device_sdk = resolver.get_sdk(SdkType.device_sdk)
try:
return device_sdk.device.abandon_device_bound_notification(
id=device_id, etag=etag
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_c2d_message_receive(
cmd,
device_id,
hub_name=None,
lock_timeout=60,
resource_group_name=None,
login=None,
abandon=None,
complete=None,
reject=None,
):
ack = None
ack_vals = [abandon, complete, reject]
if any(ack_vals):
if len(list(filter(lambda val: val, ack_vals))) > 1:
raise CLIError(
"Only one c2d-message ack argument can be used [--complete, --abandon, --reject]"
)
if abandon:
ack = SettleType.abandon.value
elif complete:
ack = SettleType.complete.value
elif reject:
ack = SettleType.reject.value
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name, resource_group_name=resource_group_name, login=login
)
return _iot_c2d_message_receive(target, device_id, lock_timeout, ack)
def _iot_c2d_message_receive(target, device_id, lock_timeout=60, ack=None):
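    """Receive a C2D message over HTTP and optionally settle it (complete, abandon, or reject) in the same call."""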
from azext_iot.constants import MESSAGING_HTTP_C2D_SYSTEM_PROPERTIES
resolver = SdkResolver(target=target, device_id=device_id)
device_sdk = resolver.get_sdk(SdkType.device_sdk)
request_headers = {}
if lock_timeout:
request_headers["IotHub-MessageLockTimeout"] = str(lock_timeout)
try:
result = device_sdk.device.receive_device_bound_notification(
id=device_id, custom_headers=request_headers, raw=True
).response
if result and result.status_code == 200:
payload = {"properties": {}}
if "etag" in result.headers:
eTag = result.headers["etag"].strip('"')
payload["etag"] = eTag
if ack:
ack_response = {}
if ack == SettleType.abandon.value:
logger.debug("__Abandoning message__")
ack_response = (
device_sdk.device.abandon_device_bound_notification(
id=device_id, etag=eTag, raw=True
)
)
elif ack == SettleType.reject.value:
logger.debug("__Rejecting message__")
ack_response = (
device_sdk.device.complete_device_bound_notification(
id=device_id, etag=eTag, reject="", raw=True
)
)
else:
logger.debug("__Completing message__")
ack_response = (
device_sdk.device.complete_device_bound_notification(
id=device_id, etag=eTag, raw=True
)
)
payload["ack"] = (
ack
if (ack_response and ack_response.response.status_code == 204)
else None
)
app_prop_prefix = "iothub-app-"
app_prop_keys = [
header
for header in result.headers
if header.lower().startswith(app_prop_prefix)
]
app_props = {}
for key in app_prop_keys:
app_props[key[len(app_prop_prefix) :]] = result.headers[key]
if app_props:
payload["properties"]["app"] = app_props
sys_props = {}
for key in MESSAGING_HTTP_C2D_SYSTEM_PROPERTIES:
if key in result.headers:
sys_props[key] = result.headers[key]
if sys_props:
payload["properties"]["system"] = sys_props
if result.content:
target_encoding = result.headers.get("ContentEncoding", "utf-8")
logger.info(f"Decoding message data encoded with: {target_encoding}")
payload["data"] = result.content.decode(target_encoding)
return payload
return
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
def iot_c2d_message_send(
cmd,
device_id,
hub_name=None,
data="Ping from Az CLI IoT Extension",
message_id=None,
correlation_id=None,
user_id=None,
content_encoding="utf-8",
content_type=None,
expiry_time_utc=None,
properties=None,
ack=None,
wait_on_feedback=False,
yes=False,
repair=False,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
from azext_iot.common.deps import ensure_uamqp
from azext_iot.common.utility import validate_min_python_version
validate_min_python_version(3, 4)
if wait_on_feedback and not ack:
raise CLIError(
'To wait on device feedback, ack must be "full", "negative" or "positive"'
)
config = cmd.cli_ctx.config
ensure_uamqp(config, yes, repair)
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
if properties:
properties = validate_key_value_pairs(properties)
if expiry_time_utc:
now_in_milli = int(time() * 1000)
user_msg_expiry = int(expiry_time_utc)
if user_msg_expiry < now_in_milli:
raise CLIError("Message expiry time utc is in the past!")
from azext_iot.monitor import event
msg_id, errors = event.send_c2d_message(
target=target,
device_id=device_id,
data=data,
message_id=message_id,
correlation_id=correlation_id,
user_id=user_id,
content_encoding=content_encoding,
content_type=content_type,
expiry_time_utc=expiry_time_utc,
properties=properties,
ack=ack,
)
if errors:
raise CLIError(
"C2D message error: {}, use --debug for more details.".format(errors)
)
if wait_on_feedback:
_iot_hub_monitor_feedback(target=target, device_id=device_id, wait_on_id=msg_id)
def iot_simulate_device(
cmd,
device_id,
hub_name=None,
receive_settle="complete",
data="Ping from Az CLI IoT Extension",
msg_count=100,
msg_interval=3,
protocol_type="mqtt",
properties=None,
resource_group_name=None,
login=None,
method_response_code=None,
method_response_payload=None,
init_reported_properties=None
):
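    """Simulate a device sending telemetry over MQTT or HTTPS and settling received C2D messages; direct method and reported property support is MQTT only."""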
import sys
import uuid
import datetime
import json
from azext_iot.operations._mqtt import mqtt_client
from threading import Event, Thread
from tqdm import tqdm
from azext_iot.constants import (
MIN_SIM_MSG_INTERVAL,
MIN_SIM_MSG_COUNT,
SIM_RECEIVE_SLEEP_SEC,
)
protocol_type = protocol_type.lower()
if protocol_type == ProtocolType.mqtt.name:
if receive_settle != "complete":
raise CLIError('mqtt protocol only supports settle type of "complete"')
if msg_interval < MIN_SIM_MSG_INTERVAL:
raise CLIError("msg interval must be at least {}".format(MIN_SIM_MSG_INTERVAL))
if msg_count < MIN_SIM_MSG_COUNT:
raise CLIError("msg count must be at least {}".format(MIN_SIM_MSG_COUNT))
if protocol_type != ProtocolType.mqtt.name:
if method_response_code:
raise CLIError("'method-response-code' not supported, {} doesn't allow direct methods.".format(protocol_type))
if method_response_payload:
raise CLIError("'method-response-payload' not supported, {} doesn't allow direct methods.".format(protocol_type))
if init_reported_properties:
raise CLIError("'init-reported-properties' not supported, {} doesn't allow setting twin props".format(protocol_type))
properties_to_send = _iot_simulate_get_default_properties(protocol_type)
user_properties = validate_key_value_pairs(properties) or {}
properties_to_send.update(user_properties)
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name, resource_group_name=resource_group_name, login=login
)
if method_response_payload:
method_response_payload = process_json_arg(
method_response_payload, argument_name="method-response-payload"
)
if init_reported_properties:
init_reported_properties = process_json_arg(
init_reported_properties, argument_name="init-reported-properties"
)
class generator(object):
def __init__(self):
self.calls = 0
def generate(self, jsonify=True):
self.calls += 1
payload = {
"id": str(uuid.uuid4()),
"timestamp": str(datetime.datetime.utcnow()),
"data": str(data + " #{}".format(self.calls)),
}
return json.dumps(payload) if jsonify else payload
cancellation_token = Event()
def http_wrap(target, device_id, generator, msg_interval, msg_count):
for _ in tqdm(range(0, msg_count), desc='Sending and receiving events via https', ascii=' #'):
d = generator.generate(False)
_iot_device_send_message_http(target, device_id, d, headers=properties_to_send)
if cancellation_token.wait(msg_interval):
break
try:
device = _iot_device_show(target, device_id)
if protocol_type == ProtocolType.mqtt.name:
device_connection_string = _build_device_or_module_connection_string(device, KeyType.primary.value)
if device and device.get("authentication", {}).get("type", "") != DeviceAuthApiType.sas.value:
raise CLIError('MQTT simulation is only supported for symmetric key auth (SAS) based devices')
client_mqtt = mqtt_client(
target=target,
device_conn_string=device_connection_string,
device_id=device_id,
method_response_code=method_response_code,
method_response_payload=method_response_payload,
init_reported_properties=init_reported_properties
)
client_mqtt.execute(data=generator(), properties=properties_to_send, publish_delay=msg_interval, msg_count=msg_count)
client_mqtt.shutdown()
else:
op = Thread(target=http_wrap, args=(target, device_id, generator(), msg_interval, msg_count))
op.start()
while op.is_alive():
_handle_c2d_msg(target, device_id, receive_settle)
sleep(SIM_RECEIVE_SLEEP_SEC)
except KeyboardInterrupt:
sys.exit()
except Exception as x:
raise CLIError(x)
finally:
if cancellation_token:
cancellation_token.set()
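# Illustrative sketch (not part of the original module): the simulator's inner `generator`
# class above builds one JSON payload per message, tagging it with a UUID, a UTC timestamp,
# and a running counter. A standalone equivalent, assuming only the standard library:
def _example_simulated_payload(data="Ping from Az CLI IoT Extension", call_number=1):
    import json
    import uuid
    import datetime
    return json.dumps(
        {
            "id": str(uuid.uuid4()),
            "timestamp": str(datetime.datetime.utcnow()),
            "data": "{} #{}".format(data, call_number),
        }
    )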
def iot_c2d_message_purge(
cmd,
device_id,
hub_name=None,
resource_group_name=None,
login=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
)
resolver = SdkResolver(target=target)
service_sdk = resolver.get_sdk(SdkType.service_sdk)
return service_sdk.cloud_to_device_messages.purge_cloud_to_device_message_queue(
device_id
)
def _iot_simulate_get_default_properties(protocol):
default_properties = {}
is_mqtt = protocol == ProtocolType.mqtt.name
default_properties["$.ct" if is_mqtt else "content-type"] = "application/json"
default_properties["$.ce" if is_mqtt else "content-encoding"] = "utf-8"
return default_properties
def _handle_c2d_msg(target, device_id, receive_settle, lock_timeout=60):
result = _iot_c2d_message_receive(target, device_id, lock_timeout)
if result:
print()
print("C2D Message Handler [Received C2D message]:")
printer.pprint(result)
if receive_settle == "reject":
print("C2D Message Handler [Rejecting message]")
_iot_c2d_message_reject(target, device_id, result["etag"])
elif receive_settle == "abandon":
print("C2D Message Handler [Abandoning message]")
_iot_c2d_message_abandon(target, device_id, result["etag"])
else:
print("C2D Message Handler [Completing message]")
_iot_c2d_message_complete(target, device_id, result["etag"])
return True
return False
def iot_device_export(
cmd,
hub_name,
blob_container_uri,
include_keys=False,
storage_authentication_type=None,
identity=None,
resource_group_name=None,
):
from azext_iot._factory import iot_hub_service_factory
client = iot_hub_service_factory(cmd.cli_ctx)
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name, resource_group_name=resource_group_name
)
if exists(blob_container_uri):
blob_container_uri = read_file_content(blob_container_uri)
if ensure_iothub_sdk_min_version("0.12.0"):
from azure.mgmt.iothub.models import ExportDevicesRequest
from azext_iot.common.shared import AuthenticationType
storage_authentication_type = (
AuthenticationType(storage_authentication_type).name
if storage_authentication_type
else None
)
export_request = ExportDevicesRequest(
export_blob_container_uri=blob_container_uri,
exclude_keys=not include_keys,
authentication_type=storage_authentication_type,
)
user_identity = identity not in [None, "[system]"]
if (
user_identity
and storage_authentication_type != AuthenticationType.identityBased.name
):
raise CLIError(
"Device export with user-assigned identities requires identity-based authentication [--storage-auth-type]"
)
# Track 2 CLI SDKs provide support for user-assigned identity objects
if (
ensure_iothub_sdk_min_version(IOTHUB_TRACK_2_SDK_MIN_VERSION)
and user_identity
):
from azure.mgmt.iothub.models import (
ManagedIdentity,
) # pylint: disable=no-name-in-module
export_request.identity = ManagedIdentity(user_assigned_identity=identity)
# if the user supplied a user-assigned identity, let them know they need a new CLI/SDK
elif user_identity:
raise CLIError(
"Device export with user-assigned identities requires a dependency of azure-mgmt-iothub>={}".format(
IOTHUB_TRACK_2_SDK_MIN_VERSION
)
)
return client.export_devices(
target["resourcegroup"],
hub_name,
export_devices_parameters=export_request,
)
if storage_authentication_type:
raise CLIError(
"Device export authentication-type properties require a dependency of azure-mgmt-iothub>=0.12.0"
)
return client.export_devices(
target["resourcegroup"],
hub_name,
export_blob_container_uri=blob_container_uri,
exclude_keys=not include_keys,
)
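# Illustrative sketch (not part of the original module): iot_device_export above (and
# iot_device_import below) accept either a blob-container SAS URI or a path to a local
# file containing that URI; when the path exists, the file content is used instead.
# A hypothetical helper mirroring that behaviour with only the standard library:
def _example_resolve_blob_container_uri(value):
    from os.path import exists as _exists
    if _exists(value):
        with open(value, "r") as handle:
            return handle.read().strip()
    return value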
def iot_device_import(
cmd,
hub_name,
input_blob_container_uri,
output_blob_container_uri,
storage_authentication_type=None,
resource_group_name=None,
identity=None,
):
from azext_iot._factory import iot_hub_service_factory
client = iot_hub_service_factory(cmd.cli_ctx)
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name, resource_group_name=resource_group_name
)
if exists(input_blob_container_uri):
input_blob_container_uri = read_file_content(input_blob_container_uri)
if exists(output_blob_container_uri):
output_blob_container_uri = read_file_content(output_blob_container_uri)
if ensure_iothub_sdk_min_version("0.12.0"):
from azure.mgmt.iothub.models import ImportDevicesRequest
from azext_iot.common.shared import AuthenticationType
storage_authentication_type = (
AuthenticationType(storage_authentication_type).name
if storage_authentication_type
else None
)
import_request = ImportDevicesRequest(
input_blob_container_uri=input_blob_container_uri,
output_blob_container_uri=output_blob_container_uri,
input_blob_name=None,
output_blob_name=None,
authentication_type=storage_authentication_type,
)
user_identity = identity not in [None, "[system]"]
if (
user_identity
and storage_authentication_type != AuthenticationType.identityBased.name
):
raise CLIError(
"Device import with user-assigned identities requires identity-based authentication [--storage-auth-type]"
)
# Track 2 CLI SDKs provide support for user-assigned identity objects
if (
ensure_iothub_sdk_min_version(IOTHUB_TRACK_2_SDK_MIN_VERSION)
and user_identity
):
from azure.mgmt.iothub.models import (
ManagedIdentity,
) # pylint: disable=no-name-in-module
import_request.identity = ManagedIdentity(user_assigned_identity=identity)
# if the user supplied a user-assigned identity, let them know they need a new CLI/SDK
elif user_identity:
raise CLIError(
"Device import with user-assigned identities requires a dependency of azure-mgmt-iothub>={}".format(
IOTHUB_TRACK_2_SDK_MIN_VERSION
)
)
return client.import_devices(
target["resourcegroup"],
hub_name,
import_devices_parameters=import_request,
)
if storage_authentication_type:
raise CLIError(
"Device import authentication-type properties require a dependency of azure-mgmt-iothub>=0.12.0"
)
return client.import_devices(
target["resourcegroup"],
hub_name,
input_blob_container_uri=input_blob_container_uri,
output_blob_container_uri=output_blob_container_uri,
)
def iot_device_upload_file(
cmd,
device_id,
file_path,
content_type,
hub_name=None,
resource_group_name=None,
login=None,
):
from azext_iot.sdk.iothub.device.models import FileUploadCompletionStatus
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name, resource_group_name=resource_group_name, login=login
)
resolver = SdkResolver(target=target, device_id=device_id)
device_sdk = resolver.get_sdk(SdkType.device_sdk)
if not exists(file_path):
raise CLIError('File path "{}" does not exist!'.format(file_path))
content = read_file_content(file_path)
file_name = basename(file_path)
try:
upload_meta = device_sdk.device.create_file_upload_sas_uri(
device_id=device_id, blob_name=file_name, raw=True
).response.json()
storage_endpoint = "{}/{}/{}{}".format(
upload_meta["hostName"],
upload_meta["containerName"],
upload_meta["blobName"],
upload_meta["sasToken"],
)
completion_status = FileUploadCompletionStatus(
correlation_id=upload_meta["correlationId"], is_success=True
)
upload_response = device_sdk.device.upload_file_to_container(
storage_endpoint=storage_endpoint,
content=content,
content_type=content_type,
)
completion_status.status_code = upload_response.status_code
completion_status.status_reason = upload_response.reason
return device_sdk.device.update_file_upload_status(
device_id=device_id, file_upload_completion_status=completion_status
)
except CloudError as e:
raise CLIError(unpack_msrest_error(e))
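# Illustrative sketch (not part of the original module): iot_device_upload_file above
# stitches the SAS metadata returned by create_file_upload_sas_uri into a single blob
# URL before uploading. With hypothetical placeholder values the composition looks like:
def _example_storage_endpoint():
    upload_meta = {
        "hostName": "https://myhub.blob.core.windows.net",  # placeholder values only
        "containerName": "fileuploads",
        "blobName": "device-1/readings.csv",
        "sasToken": "?sv=...&sig=...",
    }
    return "{}/{}/{}{}".format(
        upload_meta["hostName"],
        upload_meta["containerName"],
        upload_meta["blobName"],
        upload_meta["sasToken"],
    )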
def iot_hub_monitor_events(
cmd,
hub_name=None,
device_id=None,
interface=None,
module_id=None,
consumer_group="$Default",
timeout=300,
enqueued_time=None,
resource_group_name=None,
yes=False,
properties=None,
repair=False,
login=None,
content_type=None,
device_query=None,
):
try:
_iot_hub_monitor_events(
cmd,
hub_name=hub_name,
device_id=device_id,
interface_name=interface,
module_id=module_id,
consumer_group=consumer_group,
timeout=timeout,
enqueued_time=enqueued_time,
resource_group_name=resource_group_name,
yes=yes,
properties=properties,
repair=repair,
login=login,
content_type=content_type,
device_query=device_query,
)
except RuntimeError as e:
raise CLIError(e)
def iot_hub_monitor_feedback(
cmd,
hub_name=None,
device_id=None,
yes=False,
wait_on_id=None,
repair=False,
resource_group_name=None,
login=None,
auth_type_dataplane=None,
):
from azext_iot.common.deps import ensure_uamqp
from azext_iot.common.utility import validate_min_python_version
validate_min_python_version(3, 4)
config = cmd.cli_ctx.config
ensure_uamqp(config, yes, repair)
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
login=login,
auth_type=auth_type_dataplane,
)
return _iot_hub_monitor_feedback(
target=target, device_id=device_id, wait_on_id=wait_on_id
)
def iot_hub_distributed_tracing_show(
cmd,
hub_name,
device_id,
resource_group_name=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
auth_type=auth_type_dataplane,
)
device_twin = _iot_hub_distributed_tracing_show(target=target, device_id=device_id)
return _customize_device_tracing_output(
device_twin["deviceId"],
device_twin["properties"]["desired"],
device_twin["properties"]["reported"],
)
def _iot_hub_monitor_events(
cmd,
interface_name=None,
module_id=None,
hub_name=None,
device_id=None,
consumer_group="$Default",
timeout=300,
enqueued_time=None,
resource_group_name=None,
yes=False,
properties=None,
repair=False,
login=None,
content_type=None,
device_query=None,
):
(enqueued_time, properties, timeout, output) = init_monitoring(
cmd, timeout, properties, enqueued_time, repair, yes
)
device_ids = {}
if device_query:
devices_result = iot_query(
cmd, device_query, hub_name, None, resource_group_name, login=login
)
if devices_result:
for device_result in devices_result:
device_ids[device_result["deviceId"]] = True
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
include_events=True,
login=login,
)
from azext_iot.monitor.builders import hub_target_builder
from azext_iot.monitor.handlers import CommonHandler
from azext_iot.monitor.telemetry import start_single_monitor
from azext_iot.monitor.utility import generate_on_start_string
from azext_iot.monitor.models.arguments import (
CommonParserArguments,
CommonHandlerArguments,
)
target = hub_target_builder.EventTargetBuilder().build_iot_hub_target(target)
target.add_consumer_group(consumer_group)
on_start_string = generate_on_start_string(device_id=device_id)
parser_args = CommonParserArguments(
properties=properties, content_type=content_type
)
handler_args = CommonHandlerArguments(
output=output,
common_parser_args=parser_args,
devices=device_ids,
device_id=device_id,
interface_name=interface_name,
module_id=module_id,
)
handler = CommonHandler(handler_args)
start_single_monitor(
target=target,
enqueued_time_utc=enqueued_time,
on_start_string=on_start_string,
on_message_received=handler.parse_message,
timeout=timeout,
)
def iot_hub_distributed_tracing_update(
cmd,
hub_name,
device_id,
sampling_mode,
sampling_rate,
resource_group_name=None,
auth_type_dataplane=None,
):
discovery = IotHubDiscovery(cmd)
target = discovery.get_target(
resource_name=hub_name,
resource_group_name=resource_group_name,
include_events=True,
auth_type=auth_type_dataplane,
)
if int(sampling_rate) not in range(0, 101):
raise CLIError(
"Sampling rate is a percentage, So only values from 0 to 100(inclusive) are permitted."
)
device_twin = _iot_hub_distributed_tracing_show(target=target, device_id=device_id)
if TRACING_PROPERTY not in device_twin["properties"]["desired"]:
device_twin["properties"]["desired"][TRACING_PROPERTY] = {}
device_twin["properties"]["desired"][TRACING_PROPERTY]["sampling_rate"] = int(
sampling_rate
)
device_twin["properties"]["desired"][TRACING_PROPERTY]["sampling_mode"] = (
1 if sampling_mode.lower() == "on" else 2
)
result = iot_device_twin_update(
cmd, device_id, device_twin, hub_name, resource_group_name
)
return _customize_device_tracing_output(
result.device_id, result.properties.desired, result.properties.reported
)
def iot_hub_connection_string_show(
cmd,
hub_name=None,
resource_group_name=None,
policy_name="iothubowner",
key_type=KeyType.primary.value,
show_all=False,
default_eventhub=False,
):
discovery = IotHubDiscovery(cmd)
if hub_name is None:
hubs = discovery.get_resources(resource_group_name)
if hubs is None:
raise CLIError("No IoT Hub found.")
def conn_str_getter(hub):
return _get_hub_connection_string(
discovery, hub, policy_name, key_type, show_all, default_eventhub
)
connection_strings = []
for hub in hubs:
if hub.properties.state == IoTHubStateType.Active.value:
try:
connection_strings.append(
{
"name": hub.name,
"connectionString": conn_str_getter(hub)
if show_all
else conn_str_getter(hub)[0],
}
)
except Exception:
logger.warning(
f"Warning: The IoT Hub {hub.name} in resource group "
+ f"{hub.additional_properties['resourcegroup']} does "
+ f"not have the target policy {policy_name}."
)
else:
logger.warning(
f"Warning: The IoT Hub {hub.name} in resource group "
+ f"{hub.additional_properties['resourcegroup']} is skipped "
+ "because the hub is not active."
)
return connection_strings
hub = discovery.find_resource(hub_name, resource_group_name)
if hub:
conn_str = _get_hub_connection_string(
discovery, hub, policy_name, key_type, show_all, default_eventhub
)
return {"connectionString": conn_str if show_all else conn_str[0]}
def _get_hub_connection_string(
discovery, hub, policy_name, key_type, show_all, default_eventhub
):
policies = []
if show_all:
policies.extend(
discovery.get_policies(hub.name, hub.additional_properties["resourcegroup"])
)
else:
policies.append(
discovery.find_policy(
hub.name, hub.additional_properties["resourcegroup"], policy_name
)
)
if default_eventhub:
cs_template_eventhub = (
"Endpoint={};SharedAccessKeyName={};SharedAccessKey={};EntityPath={}"
)
endpoint = hub.properties.event_hub_endpoints["events"].endpoint
entityPath = hub.properties.event_hub_endpoints["events"].path
return [
cs_template_eventhub.format(
endpoint,
p.key_name,
p.secondary_key
if key_type == KeyType.secondary.value
else p.primary_key,
entityPath,
)
for p in policies
if "serviceconnect"
in (
p.rights.value.lower()
if isinstance(p.rights, (Enum, EnumMeta))
else p.rights.lower()
)
]
hostname = hub.properties.host_name
cs_template = "HostName={};SharedAccessKeyName={};SharedAccessKey={}"
return [
cs_template.format(
hostname,
p.key_name,
p.secondary_key if key_type == KeyType.secondary.value else p.primary_key,
)
for p in policies
]
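# Illustrative sketch (not part of the original module): with placeholder values, the two
# templates used above expand to connection strings of the following shape (neither the
# host names nor the keys are real):
def _example_connection_strings():
    hub_cs = "HostName={};SharedAccessKeyName={};SharedAccessKey={}".format(
        "myhub.azure-devices.net", "iothubowner", "<primary-or-secondary-key>"
    )
    eventhub_cs = "Endpoint={};SharedAccessKeyName={};SharedAccessKey={};EntityPath={}".format(
        "sb://example-ns.servicebus.windows.net/", "service", "<primary-or-secondary-key>", "myhub"
    )
    return hub_cs, eventhub_cs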
def _iot_hub_monitor_feedback(target, device_id, wait_on_id):
from azext_iot.monitor import event
event.monitor_feedback(
target=target, device_id=device_id, wait_on_id=wait_on_id, token_duration=3600
)
def _iot_hub_distributed_tracing_show(target, device_id):
device_twin = _iot_device_twin_show(target=target, device_id=device_id)
_validate_device_tracing(target, device_twin)
return device_twin
def _validate_device_tracing(target, device_twin):
if target["location"].lower() not in TRACING_ALLOWED_FOR_LOCATION:
raise CLIError(
            'Distributed tracing isn\'t supported for hubs in the "{}" location.'.format(
target["location"]
)
)
if target["sku_tier"].lower() != TRACING_ALLOWED_FOR_SKU:
raise CLIError(
            'Distributed tracing isn\'t supported for hubs in the "{}" SKU tier.'.format(
target["sku_tier"]
)
)
if device_twin["capabilities"]["iotEdge"]:
raise CLIError(
'The device "{}" should be non-edge device.'.format(device_twin["deviceId"])
)
def _customize_device_tracing_output(device_id, desired, reported):
output = {}
desired_tracing = desired.get(TRACING_PROPERTY, None)
if desired_tracing:
output["deviceId"] = device_id
output["samplingMode"] = (
"enabled" if desired_tracing.get("sampling_mode") == 1 else "disabled"
)
output["samplingRate"] = "{}%".format(desired_tracing.get("sampling_rate"))
output["isSynced"] = False
reported_tracing = reported.get(TRACING_PROPERTY, None)
if (
reported_tracing
and desired_tracing.get("sampling_mode")
== reported_tracing.get("sampling_mode").get("value", None)
and desired_tracing.get("sampling_rate")
== reported_tracing.get("sampling_rate").get("value", None)
):
output["isSynced"] = True
return output
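# Illustrative sketch (not part of the original module): for a device whose desired tracing
# fragment is {"sampling_mode": 1, "sampling_rate": 50} and whose reported fragment echoes
# the same values, _customize_device_tracing_output above yields a summary like this:
def _example_tracing_summary():
    return {
        "deviceId": "device-1",          # placeholder device id
        "samplingMode": "enabled",       # desired sampling_mode == 1
        "samplingRate": "50%",
        "isSynced": True,                # reported values match the desired ones
    }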
|
__init__.py
|
"""Client API for Camect."""
import asyncio
import base64
import json
import logging
import ssl
import sys
from threading import Thread
import time
from typing import Callable, Dict, List
import urllib3
import requests
import websockets
EMBEDDED_BUNDLE_JS = "js/embedded_bundle.min.js"
_LOGGER = logging.getLogger(__name__)
def set_log_level(level: int):
_LOGGER.setLevel(level)
def get_log_level() -> int:
return _LOGGER.getEffectiveLevel()
def log_to_console():
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s [%(name)s %(levelname)s] %(message)s')
handler.setFormatter(formatter)
_LOGGER.addHandler(handler)
EventListener = Callable[[Dict[str, str]], None]
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class Error(Exception):
pass
class Home:
"""Client talking to Camect home server.
Usage:
import camect
home = camect.Home("camect.local:9443", "admin", "xxx")
home.get_name()
home.add_event_listener(lambda evt: print(evt))
"""
def __init__(self, server_addr: str, user: str, password: str) -> None:
self._server_addr = server_addr
self._api_prefix = f"https://{server_addr}/api/"
self._ws_uri = f"wss://{server_addr}/api/event_ws"
self._user = user
self._password = password
# Make sure it connects.
self.get_info()
self._evt_listeners_ = []
self._evt_loop = asyncio.new_event_loop()
evt_thread = Thread(
target=self._evt_loop.run_until_complete, args=(self._event_handler(),))
evt_thread.daemon = True
evt_thread.start()
def get_id(self) -> str:
info = self.get_info()
if info:
return info["id"]
return ""
def get_name(self) -> str:
info = self.get_info()
if info:
return info["name"]
return ""
def get_mode(self) -> str:
info = self.get_info()
if info:
return info["mode"]
return ""
def get_cloud_url(self, path) -> str:
info = self.get_info()
if info:
return info["cloud_url"] + path
return ""
    # The returned URL needs internet access and may not work in certain network environments.
def get_local_https_url(self, path: str) -> str:
info = self.get_info()
if info:
return info["local_https_url"] + path + "?X-AUTHORIZATION=" + self._authorization()
return ""
    # The returned URL needs internet access and may not work in certain network environments.
def get_local_websocket_url(self) -> str:
return self.get_local_https_url("webrtc/ws").replace("https://", "wss://")
# The returned URL has invalid TLS certificate.
def get_unsecure_https_url(self, path: str) -> str:
return f"https://{self._server_addr}/{path}?X-AUTHORIZATION=" + self._authorization()
# The returned URL has invalid TLS certificate.
def get_unsecure_websocket_url(self) -> str:
return self.get_unsecure_https_url("webrtc/ws").replace("https://", "wss://")
def get_info(self) -> Dict[str, str]:
resp = requests.get(
self._api_prefix + "GetHomeInfo", verify=False, auth=(self._user, self._password))
json = resp.json()
if resp.status_code != 200:
raise Error("Failed to get home info: [%d](%s)" % (resp.status_code, json["err_msg"]))
return json
def set_name(self, name: str) -> None:
resp = requests.get(
self._api_prefix + "SetHomeName", verify=False, auth=(self._user, self._password),
params={"Name": name})
if resp.status_code != 200:
raise Error("Failed to set home name to '%s': [%d](%s)" % (name,
resp.status_code, resp.json()["err_msg"]))
def set_mode(self, mode: str) -> None:
resp = requests.get(
self._api_prefix + "SetOperationMode", verify=False, auth=(self._user, self._password),
params={"Mode": mode})
if resp.status_code != 200:
raise Error("Failed to set operation mode to '%s': [%d](%s)" % (mode,
resp.status_code, resp.json()["err_msg"]))
def list_cameras(self) -> List[Dict[str, str]]:
resp = requests.get(
self._api_prefix + "ListCameras", verify=False, auth=(self._user, self._password))
json = resp.json()
if resp.status_code != 200:
raise Error("Failed to get home info: [%d](%s)" % (resp.status_code, json["err_msg"]))
return json["camera"]
def snapshot_camera(self, cam_id: str, width: int = 0, height: int = 0) -> bytes:
resp = requests.get(
self._api_prefix + "SnapshotCamera", verify=False, auth=(self._user, self._password),
params={"CamId": cam_id, "Width": str(width), "Height": str(height)})
json = resp.json()
if resp.status_code != 200:
raise Error("Failed to snapshot camera: [%d](%s)" % (resp.status_code, json["err_msg"]))
return json["jpeg_data"]
def disable_alert(self, cam_ids: List[str], reason: str):
""" Disable alerts for camera(s) or the home if "cam_ids" is empty.
"""
self._enable_alert(cam_ids, False, reason)
def enable_alert(self, cam_ids: List[str], reason: str):
""" Enable alerts for camera(s) or the home if "cam_ids" is empty.
NOTE: This method can only undo disable_alert. It has no effect if disable_alert was not
called before.
Please make sure that "reason" is same as you called disable_alert.
"""
self._enable_alert(cam_ids, True, reason)
def _enable_alert(self, cam_ids: List[str], enable: bool, reason: str):
params = { "Reason": reason }
if enable:
params["Enable"] = "1"
for i in range(len(cam_ids)):
key = "CamId[%d]" % (i)
params[key] = cam_ids[i]
resp = requests.get(
self._api_prefix + "EnableAlert", verify=False, auth=(self._user, self._password),
params=params)
json = resp.json()
if resp.status_code != 200:
_LOGGER.error(
"Failed to enable/disable alert: [%d](%s)", resp.status_code, json["err_msg"])
def start_hls(self, cam_id: str) -> str:
""" Start HLS the camera. Returns the HLS URL.
The URL expires after it's been idle for 1 minute.
NOTE: This is an experimental feature, only available for pro units now.
"""
resp = requests.get(
self._api_prefix + "StartStreaming", verify=False, auth=(self._user, self._password),
params={ "Type": "1", "CamId": cam_id, "StreamingHost": self._server_addr })
json = resp.json()
if resp.status_code != 200:
_LOGGER.error(
"Failed to start HLS: [%d](%s)", resp.status_code, json["err_msg"])
return json["hls_url"]
def add_event_listener(self, cb: EventListener) -> None:
self._evt_loop.call_soon_threadsafe(self._evt_listeners_.append, cb)
def del_event_listener(self, cb: EventListener) -> None:
self._evt_loop.call_soon_threadsafe(self._evt_listeners_.remove, cb)
def _authorization(self) -> str:
return base64.b64encode(f"{self._user}:{self._password}".encode()).decode()
async def _event_handler(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_NONE
authorization = "Basic " + self._authorization()
        while True:
try:
_LOGGER.info("Connecting to Camect Home at '%s' ...", self._ws_uri)
websocket = await websockets.connect(self._ws_uri, ssl=context,
extra_headers={"Authorization": authorization})
try:
async for msg in websocket:
_LOGGER.debug("Received event: %s", msg)
try:
evt = json.loads(msg)
for cb in self._evt_listeners_:
cb(evt)
except json.decoder.JSONDecodeError as err:
_LOGGER.error("Invalid JSON '%s': %s", msg, err)
except (websockets.exceptions.ConnectionClosed, OSError):
_LOGGER.warning("Websocket to Camect Home was closed.")
await asyncio.sleep(5)
except (ConnectionRefusedError, ConnectionError):
_LOGGER.warning("Cannot connect Camect Home.")
await asyncio.sleep(10)
                except Exception as e:
_LOGGER.warning("Unexpected exception: %s", e)
await asyncio.sleep(10)
except (OSError, ConnectionError):
_LOGGER.warning("Cannot connect Camect Home.")
await asyncio.sleep(10)
            except Exception as e:
_LOGGER.warning("Unexpected exception: %s", e)
await asyncio.sleep(10)
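# Illustrative sketch (not part of the original module): a minimal end-to-end use of the
# client above. It assumes a reachable Camect Home at the given address, that ListCameras
# entries expose an "id" field, and that "jpeg_data" is base64-encoded; adjust as needed.
def _example_snapshot_first_camera(server_addr="camect.local:9443", user="admin", password="xxx"):
    home = Home(server_addr, user, password)
    home.add_event_listener(lambda evt: _LOGGER.info("event: %s", evt))
    cameras = home.list_cameras()
    if not cameras:
        return None
    return base64.b64decode(home.snapshot_camera(cameras[0]["id"], width=640, height=360))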
|
RUNThread.py
|
from NIC_package_get import NICRUN
from threading import Thread
from ScanapiCookie import RUN_COOKIE
import os
import requests
from flask import Flask, jsonify
from scanapi.v5.RepeterByRequests import RUNRepeter
import time
class spider():
def __init__(self):
self.WEBSITE=os.environ.get('WEBSITE')
self.RUNWAY=os.environ.get('RUNWAY')
self.USERNAME=os.environ.get('USERNAME')
if self.RUNWAY == 'cookie':
self.COOKIE1 = os.environ.get('USERCOOKIE1')
self.COOKIE2 = os.environ.get('USERCOOKIE2')
elif self.RUNWAY == 'userid':
self.USERID1 = os.environ.get('USERID1')
self.PASSWD1 = os.environ.get('PASSWD1')
self.USERID2 = os.environ.get('USERID2')
self.PASSWD2 = os.environ.get('PASSWD2')
def RUNThread_Cookie(self):
needpcap = 1000
thread1 = Thread(target=NICRUN, args=['eth0', needpcap, self.USERNAME])
        # Run the capture in a separate thread
thread1.start()
# thread1.join()
# print("NICRUN('eth0', {0}, {1})".format(needpcap, self.USERNAME))
# NICRUN('eth0', needpcap, self.USERNAME)
thread2 = Thread(target=RUN_COOKIE, args=[self.WEBSITE, self.COOKIE1])
thread2.start()
# thread2.join()
# RUN_COOKIE(self.WEBSITE, self.COOKIE1)
# RUN_COOKIE(self.WEBSITE, self.COOKIE1)
# RUN_COOKIE('https://jkxxcj.zjhu.edu.cn/serviceList.html','health-data-Id=MGQ0MTM0YmQtMWQ2NC00MGViLTkzMGMtODNkZDM4ODU3YjJi')
time.sleep(3)
flag = 0
while os.path.getsize('/opt/spider/{0}/{0}.txt'.format(self.USERNAME)) != 0 and flag == 0:
if flag == 0:
flag=1
thread = Thread(target=RUNRepeter, args=[self.USERNAME])
thread.start()
break
def RUNThread_Userid(self):
needpcap = 1000
thread = Thread(target=NICRUN, args=['eth0', needpcap, self.USERNAME])
thread.start()
print('userid')
print(self.USERID1, self.PASSWD1, self.USERID2, self.PASSWD2)
def get_txt_file(filename):
if '\\' in filename:
dirpath = os.path.abspath('.')
filename = os.path.join(dirpath, filename).replace('\\', '\\\\')
with open(filename) as f:
        s = f.read()
return str(s)
def nlp_file(username,COOKIE1,COOKIE2):
flag = 0
while os.path.getsize('/opt/spider/{0}/{0}.txt'.format(username)) != 0 and flag == 0:
if flag == 0:
flag=1
RUNRepeter(username,COOKIE1,COOKIE2)
# thread = Thread(target=RUNRepeter, args=[username])
# thread.start()
break
app = Flask(__name__)
@app.route("/")
def index():
filename = "/opt/spider/{0}/{0}.txt".format(os.environ.get('USERNAME'))
result = get_txt_file(filename)
if result:
nlp_file(os.environ.get('USERNAME'),os.environ.get('COOKIE1'),os.environ.get('COOKIE2'))
return jsonify({
"result": result,
"code": "200"
})
else:
return jsonify({
"result": "wating pcap",
"code": "404"
})
def app_run(host, port):
app.run(host=host, port=int(port))
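# Illustrative sketch (not part of the original script): the spider is configured entirely
# through environment variables. Setting them in-process before constructing spider()
# mirrors how the container is expected to be launched (all values below are placeholders).
def _example_configure_env():
    os.environ.setdefault('WEBSITE', 'https://example.com')
    os.environ.setdefault('RUNWAY', 'cookie')              # or 'userid'
    os.environ.setdefault('USERNAME', 'demo-user')
    os.environ.setdefault('USERCOOKIE1', 'session=...')    # used when RUNWAY == 'cookie'
    os.environ.setdefault('USERCOOKIE2', 'session=...')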
if __name__ == '__main__':
# thread = Thread(target=app_run, args=['0.0.0.0', 81])
# thread.start()
# app.run(host='0.0.0.0', port=81)
a = spider()
if a.RUNWAY=='cookie':
a.RUNThread_Cookie()
else:
a.RUNThread_Userid()
|
test_sampler.py
|
import multiprocessing
import random
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
from unittest.mock import Mock
from unittest.mock import patch
import warnings
import _pytest.capture
import numpy as np
import pytest
import optuna
from optuna import distributions
from optuna import TrialPruned
from optuna.samplers import _tpe
from optuna.samplers import TPESampler
from optuna.study.study import create_study
from optuna.trial import Trial
@pytest.mark.parametrize("use_hyperband", [False, True])
def test_hyperopt_parameters(use_hyperband: bool) -> None:
sampler = TPESampler(**TPESampler.hyperopt_parameters())
study = optuna.create_study(
sampler=sampler, pruner=optuna.pruners.HyperbandPruner() if use_hyperband else None
)
study.optimize(lambda t: t.suggest_float("x", 10, 20), n_trials=50)
def test_multivariate_experimental_warning() -> None:
with pytest.warns(optuna.exceptions.ExperimentalWarning):
optuna.samplers.TPESampler(multivariate=True)
def test_warn_independent_sampling(capsys: _pytest.capture.CaptureFixture) -> None:
def objective(trial: Trial) -> float:
x = trial.suggest_categorical("x", ["a", "b"])
if x == "a":
return trial.suggest_float("y", 0, 1)
else:
return trial.suggest_float("z", 0, 1)
# We need to reconstruct our default handler to properly capture stderr.
optuna.logging._reset_library_root_logger()
optuna.logging.enable_default_handler()
optuna.logging.set_verbosity(optuna.logging.WARNING)
sampler = TPESampler(multivariate=True, warn_independent_sampling=True, n_startup_trials=0)
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=10)
_, err = capsys.readouterr()
assert err
def test_warn_independent_sampling_group(capsys: _pytest.capture.CaptureFixture) -> None:
def objective(trial: Trial) -> float:
x = trial.suggest_categorical("x", ["a", "b"])
if x == "a":
return trial.suggest_float("y", 0, 1)
else:
return trial.suggest_float("z", 0, 1)
# We need to reconstruct our default handler to properly capture stderr.
optuna.logging._reset_library_root_logger()
optuna.logging.enable_default_handler()
optuna.logging.set_verbosity(optuna.logging.WARNING)
sampler = TPESampler(
multivariate=True, warn_independent_sampling=True, group=True, n_startup_trials=0
)
study = optuna.create_study(sampler=sampler)
study.optimize(objective, n_trials=10)
_, err = capsys.readouterr()
assert err == ""
def test_infer_relative_search_space() -> None:
sampler = TPESampler()
search_space = {
"a": distributions.FloatDistribution(1.0, 100.0),
"b": distributions.FloatDistribution(1.0, 100.0, log=True),
"c": distributions.FloatDistribution(1.0, 100.0, step=3.0),
"d": distributions.IntDistribution(1, 100),
"e": distributions.IntDistribution(0, 100, step=2),
"f": distributions.IntDistribution(1, 100, log=True),
"g": distributions.CategoricalDistribution(["x", "y", "z"]),
}
def obj(t: Trial) -> float:
t.suggest_float("a", 1.0, 100.0)
t.suggest_float("b", 1.0, 100.0, log=True)
t.suggest_float("c", 1.0, 100.0, step=3.0)
t.suggest_int("d", 1, 100)
t.suggest_int("e", 0, 100, step=2)
t.suggest_int("f", 1, 100, log=True)
t.suggest_categorical("g", ["x", "y", "z"])
return 0.0
# Study and frozen-trial are not supposed to be accessed.
study1 = Mock(spec=[])
frozen_trial = Mock(spec=[])
assert sampler.infer_relative_search_space(study1, frozen_trial) == {}
study2 = optuna.create_study(sampler=sampler)
study2.optimize(obj, n_trials=1)
assert sampler.infer_relative_search_space(study2, study2.best_trial) == {}
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(multivariate=True)
study3 = optuna.create_study(sampler=sampler)
study3.optimize(obj, n_trials=1)
assert sampler.infer_relative_search_space(study3, study3.best_trial) == search_space
@pytest.mark.parametrize("multivariate", [False, True])
def test_sample_relative_empty_input(multivariate: bool) -> None:
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(multivariate=multivariate)
# A frozen-trial is not supposed to be accessed.
study = optuna.create_study()
frozen_trial = Mock(spec=[])
assert sampler.sample_relative(study, frozen_trial, {}) == {}
def test_sample_relative_seed_fix() -> None:
study = optuna.create_study()
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
past_trials = [frozen_trial_factory(i, dist=dist) for i in range(1, 8)]
# Prepare a trial and a sample for later checks.
trial = frozen_trial_factory(8)
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(n_startup_trials=5, seed=0, multivariate=True)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
suggestion = sampler.sample_relative(study, trial, {"param-a": dist})
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(n_startup_trials=5, seed=0, multivariate=True)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
assert sampler.sample_relative(study, trial, {"param-a": dist}) == suggestion
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(n_startup_trials=5, seed=1, multivariate=True)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
assert sampler.sample_relative(study, trial, {"param-a": dist}) != suggestion
def test_sample_relative_prior() -> None:
study = optuna.create_study()
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
past_trials = [frozen_trial_factory(i, dist=dist) for i in range(1, 8)]
# Prepare a trial and a sample for later checks.
trial = frozen_trial_factory(8)
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(n_startup_trials=5, seed=0, multivariate=True)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
suggestion = sampler.sample_relative(study, trial, {"param-a": dist})
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(consider_prior=False, n_startup_trials=5, seed=0, multivariate=True)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
assert sampler.sample_relative(study, trial, {"param-a": dist}) != suggestion
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(prior_weight=0.2, n_startup_trials=5, seed=0, multivariate=True)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
assert sampler.sample_relative(study, trial, {"param-a": dist}) != suggestion
def test_sample_relative_n_startup_trial() -> None:
study = optuna.create_study()
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
past_trials = [frozen_trial_factory(i, dist=dist) for i in range(1, 8)]
trial = frozen_trial_factory(8)
# sample_relative returns {} for only 4 observations.
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(n_startup_trials=5, seed=0, multivariate=True)
with patch.object(study._storage, "get_all_trials", return_value=past_trials[:4]):
assert sampler.sample_relative(study, trial, {"param-a": dist}) == {}
# sample_relative returns some value for only 7 observations.
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
assert "param-a" in sampler.sample_relative(study, trial, {"param-a": dist}).keys()
def test_sample_relative_misc_arguments() -> None:
study = optuna.create_study()
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
past_trials = [frozen_trial_factory(i, dist=dist) for i in range(1, 40)]
# Prepare a trial and a sample for later checks.
trial = frozen_trial_factory(40)
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(n_startup_trials=5, seed=0, multivariate=True)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
suggestion = sampler.sample_relative(study, trial, {"param-a": dist})
# Test misc. parameters.
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(n_ei_candidates=13, n_startup_trials=5, seed=0, multivariate=True)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
assert sampler.sample_relative(study, trial, {"param-a": dist}) != suggestion
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(gamma=lambda _: 5, n_startup_trials=5, seed=0, multivariate=True)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
assert sampler.sample_relative(study, trial, {"param-a": dist}) != suggestion
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(
weights=lambda n: np.asarray([i**2 + 1 for i in range(n)]),
n_startup_trials=5,
seed=0,
multivariate=True,
)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
assert sampler.sample_relative(study, trial, {"param-a": dist}) != suggestion
def test_sample_relative_uniform_distributions() -> None:
study = optuna.create_study()
    # Prepare sample from uniform distribution for checking other distributions.
uni_dist = optuna.distributions.FloatDistribution(1.0, 100.0)
past_trials = [frozen_trial_factory(i, dist=uni_dist) for i in range(1, 8)]
trial = frozen_trial_factory(8)
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(n_startup_trials=5, seed=0, multivariate=True)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
uniform_suggestion = sampler.sample_relative(study, trial, {"param-a": uni_dist})
assert 1.0 <= uniform_suggestion["param-a"] < 100.0
def test_sample_relative_log_uniform_distributions() -> None:
"""Prepare sample from uniform distribution for cheking other distributions."""
study = optuna.create_study()
uni_dist = optuna.distributions.FloatDistribution(1.0, 100.0)
past_trials = [frozen_trial_factory(i, dist=uni_dist) for i in range(1, 8)]
trial = frozen_trial_factory(8)
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(n_startup_trials=5, seed=0, multivariate=True)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
uniform_suggestion = sampler.sample_relative(study, trial, {"param-a": uni_dist})
# Test sample from log-uniform is different from uniform.
log_dist = optuna.distributions.FloatDistribution(1.0, 100.0, log=True)
past_trials = [frozen_trial_factory(i, dist=log_dist) for i in range(1, 8)]
trial = frozen_trial_factory(8)
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(n_startup_trials=5, seed=0, multivariate=True)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
loguniform_suggestion = sampler.sample_relative(study, trial, {"param-a": log_dist})
assert 1.0 <= loguniform_suggestion["param-a"] < 100.0
assert uniform_suggestion["param-a"] != loguniform_suggestion["param-a"]
def test_sample_relative_disrete_uniform_distributions() -> None:
"""Test samples from discrete have expected intervals."""
study = optuna.create_study()
disc_dist = optuna.distributions.FloatDistribution(1.0, 100.0, step=0.1)
def value_fn(idx: int) -> float:
random.seed(idx)
return int(random.random() * 1000) * 0.1
past_trials = [frozen_trial_factory(i, dist=disc_dist, value_fn=value_fn) for i in range(1, 8)]
trial = frozen_trial_factory(8)
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(n_startup_trials=5, seed=0, multivariate=True)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
discrete_uniform_suggestion = sampler.sample_relative(study, trial, {"param-a": disc_dist})
assert 1.0 <= discrete_uniform_suggestion["param-a"] <= 100.0
np.testing.assert_almost_equal(
int(discrete_uniform_suggestion["param-a"] * 10),
discrete_uniform_suggestion["param-a"] * 10,
)
def test_sample_relative_categorical_distributions() -> None:
"""Test samples are drawn from the specified category."""
study = optuna.create_study()
categories = [i * 0.3 + 1.0 for i in range(330)]
def cat_value_fn(idx: int) -> float:
random.seed(idx)
return categories[random.randint(0, len(categories) - 1)]
cat_dist = optuna.distributions.CategoricalDistribution(categories)
past_trials = [
frozen_trial_factory(i, dist=cat_dist, value_fn=cat_value_fn) for i in range(1, 8)
]
trial = frozen_trial_factory(8)
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(n_startup_trials=5, seed=0, multivariate=True)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
categorical_suggestion = sampler.sample_relative(study, trial, {"param-a": cat_dist})
assert categorical_suggestion["param-a"] in categories
@pytest.mark.parametrize("step", [1, 2])
def test_sample_relative_int_uniform_distributions(step: int) -> None:
"""Test sampling from int distribution returns integer."""
study = optuna.create_study()
def int_value_fn(idx: int) -> float:
random.seed(idx)
return step * random.randint(0, 100 // step)
int_dist = optuna.distributions.IntDistribution(0, 100, step=step)
past_trials = [
frozen_trial_factory(i, dist=int_dist, value_fn=int_value_fn) for i in range(1, 8)
]
trial = frozen_trial_factory(8)
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(n_startup_trials=5, seed=0, multivariate=True)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
int_suggestion = sampler.sample_relative(study, trial, {"param-a": int_dist})
assert 1 <= int_suggestion["param-a"] <= 100
assert isinstance(int_suggestion["param-a"], int)
assert int_suggestion["param-a"] % step == 0
def test_sample_relative_int_loguniform_distributions() -> None:
"""Test sampling from int distribution returns integer."""
study = optuna.create_study()
def int_value_fn(idx: int) -> float:
random.seed(idx)
return random.randint(0, 100)
intlog_dist = optuna.distributions.IntDistribution(1, 100, log=True)
past_trials = [
frozen_trial_factory(i, dist=intlog_dist, value_fn=int_value_fn) for i in range(1, 8)
]
trial = frozen_trial_factory(8)
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(n_startup_trials=5, seed=0, multivariate=True)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
intlog_suggestion = sampler.sample_relative(study, trial, {"param-a": intlog_dist})
assert 1 <= intlog_suggestion["param-a"] <= 100
assert isinstance(intlog_suggestion["param-a"], int)
@pytest.mark.parametrize(
"state",
[
optuna.trial.TrialState.FAIL,
optuna.trial.TrialState.PRUNED,
optuna.trial.TrialState.RUNNING,
optuna.trial.TrialState.WAITING,
],
)
def test_sample_relative_handle_unsuccessful_states(
state: optuna.trial.TrialState,
) -> None:
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
# Prepare sampling result for later tests.
study = optuna.create_study()
for i in range(1, 100):
trial = frozen_trial_factory(i, dist=dist)
study._storage.create_new_trial(study._study_id, template_trial=trial)
trial = frozen_trial_factory(100)
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(n_startup_trials=5, seed=0, multivariate=True)
all_success_suggestion = sampler.sample_relative(study, trial, {"param-a": dist})
# Test unsuccessful trials are handled differently.
study = optuna.create_study()
state_fn = build_state_fn(state)
for i in range(1, 100):
trial = frozen_trial_factory(i, dist=dist, state_fn=state_fn)
study._storage.create_new_trial(study._study_id, template_trial=trial)
trial = frozen_trial_factory(100)
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(n_startup_trials=5, seed=0, multivariate=True)
partial_unsuccessful_suggestion = sampler.sample_relative(study, trial, {"param-a": dist})
assert partial_unsuccessful_suggestion != all_success_suggestion
def test_sample_relative_ignored_states() -> None:
"""Tests FAIL, RUNNING, and WAITING states are equally."""
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
suggestions = []
for state in [
optuna.trial.TrialState.FAIL,
optuna.trial.TrialState.RUNNING,
optuna.trial.TrialState.WAITING,
]:
study = optuna.create_study()
state_fn = build_state_fn(state)
for i in range(1, 30):
trial = frozen_trial_factory(i, dist=dist, state_fn=state_fn)
study._storage.create_new_trial(study._study_id, template_trial=trial)
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(n_startup_trials=5, seed=0, multivariate=True)
suggestions.append(sampler.sample_relative(study, trial, {"param-a": dist})["param-a"])
assert len(set(suggestions)) == 1
def test_sample_relative_pruned_state() -> None:
"""Tests PRUNED state is treated differently from both FAIL and COMPLETE."""
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
suggestions = []
for state in [
optuna.trial.TrialState.COMPLETE,
optuna.trial.TrialState.FAIL,
optuna.trial.TrialState.PRUNED,
]:
study = optuna.create_study()
state_fn = build_state_fn(state)
for i in range(1, 40):
trial = frozen_trial_factory(i, dist=dist, state_fn=state_fn)
study._storage.create_new_trial(study._study_id, template_trial=trial)
trial = frozen_trial_factory(40)
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(n_startup_trials=5, seed=0, multivariate=True)
suggestions.append(sampler.sample_relative(study, trial, {"param-a": dist})["param-a"])
assert len(set(suggestions)) == 3
def test_sample_independent_seed_fix() -> None:
study = optuna.create_study()
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
past_trials = [frozen_trial_factory(i, dist=dist) for i in range(1, 8)]
# Prepare a trial and a sample for later checks.
trial = frozen_trial_factory(8)
sampler = TPESampler(n_startup_trials=5, seed=0)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
suggestion = sampler.sample_independent(study, trial, "param-a", dist)
sampler = TPESampler(n_startup_trials=5, seed=0)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
assert sampler.sample_independent(study, trial, "param-a", dist) == suggestion
sampler = TPESampler(n_startup_trials=5, seed=1)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
assert sampler.sample_independent(study, trial, "param-a", dist) != suggestion
def test_sample_independent_prior() -> None:
study = optuna.create_study()
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
past_trials = [frozen_trial_factory(i, dist=dist) for i in range(1, 8)]
# Prepare a trial and a sample for later checks.
trial = frozen_trial_factory(8)
sampler = TPESampler(n_startup_trials=5, seed=0)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
suggestion = sampler.sample_independent(study, trial, "param-a", dist)
sampler = TPESampler(consider_prior=False, n_startup_trials=5, seed=0)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
assert sampler.sample_independent(study, trial, "param-a", dist) != suggestion
sampler = TPESampler(prior_weight=0.1, n_startup_trials=5, seed=0)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
assert sampler.sample_independent(study, trial, "param-a", dist) != suggestion
def test_sample_independent_n_startup_trial() -> None:
study = optuna.create_study()
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
past_trials = [frozen_trial_factory(i, dist=dist) for i in range(1, 8)]
trial = frozen_trial_factory(8)
sampler = TPESampler(n_startup_trials=5, seed=0)
with patch.object(study._storage, "get_all_trials", return_value=past_trials[:4]):
with patch.object(
optuna.samplers.RandomSampler, "sample_independent", return_value=1.0
) as sample_method:
sampler.sample_independent(study, trial, "param-a", dist)
assert sample_method.call_count == 1
sampler = TPESampler(n_startup_trials=5, seed=0)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
with patch.object(
optuna.samplers.RandomSampler, "sample_independent", return_value=1.0
) as sample_method:
sampler.sample_independent(study, trial, "param-a", dist)
assert sample_method.call_count == 0
def test_sample_independent_misc_arguments() -> None:
study = optuna.create_study()
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
past_trials = [frozen_trial_factory(i, dist=dist) for i in range(1, 8)]
# Prepare a trial and a sample for later checks.
trial = frozen_trial_factory(8)
sampler = TPESampler(n_startup_trials=5, seed=0)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
suggestion = sampler.sample_independent(study, trial, "param-a", dist)
# Test misc. parameters.
sampler = TPESampler(n_ei_candidates=13, n_startup_trials=5, seed=0)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
assert sampler.sample_independent(study, trial, "param-a", dist) != suggestion
sampler = TPESampler(gamma=lambda _: 5, n_startup_trials=5, seed=0)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
assert sampler.sample_independent(study, trial, "param-a", dist) != suggestion
sampler = TPESampler(
weights=lambda i: np.asarray([10 - j for j in range(i)]), n_startup_trials=5, seed=0
)
with patch("optuna.Study.get_trials", return_value=past_trials):
assert sampler.sample_independent(study, trial, "param-a", dist) != suggestion
def test_sample_independent_uniform_distributions() -> None:
study = optuna.create_study()
    # Prepare sample from uniform distribution for checking other distributions.
uni_dist = optuna.distributions.FloatDistribution(1.0, 100.0)
past_trials = [frozen_trial_factory(i, dist=uni_dist) for i in range(1, 8)]
trial = frozen_trial_factory(8)
sampler = TPESampler(n_startup_trials=5, seed=0)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
uniform_suggestion = sampler.sample_independent(study, trial, "param-a", uni_dist)
assert 1.0 <= uniform_suggestion < 100.0
def test_sample_independent_log_uniform_distributions() -> None:
"""Prepare sample from uniform distribution for cheking other distributions."""
study = optuna.create_study()
uni_dist = optuna.distributions.FloatDistribution(1.0, 100.0)
past_trials = [frozen_trial_factory(i, dist=uni_dist) for i in range(1, 8)]
trial = frozen_trial_factory(8)
sampler = TPESampler(n_startup_trials=5, seed=0)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
uniform_suggestion = sampler.sample_independent(study, trial, "param-a", uni_dist)
# Test sample from log-uniform is different from uniform.
log_dist = optuna.distributions.FloatDistribution(1.0, 100.0, log=True)
past_trials = [frozen_trial_factory(i, dist=log_dist) for i in range(1, 8)]
trial = frozen_trial_factory(8)
sampler = TPESampler(n_startup_trials=5, seed=0)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
loguniform_suggestion = sampler.sample_independent(study, trial, "param-a", log_dist)
assert 1.0 <= loguniform_suggestion < 100.0
assert uniform_suggestion != loguniform_suggestion
def test_sample_independent_disrete_uniform_distributions() -> None:
"""Test samples from discrete have expected intervals."""
study = optuna.create_study()
disc_dist = optuna.distributions.FloatDistribution(1.0, 100.0, step=0.1)
def value_fn(idx: int) -> float:
random.seed(idx)
return int(random.random() * 1000) * 0.1
past_trials = [frozen_trial_factory(i, dist=disc_dist, value_fn=value_fn) for i in range(1, 8)]
trial = frozen_trial_factory(8)
sampler = TPESampler(n_startup_trials=5, seed=0)
with patch("optuna.Study.get_trials", return_value=past_trials):
discrete_uniform_suggestion = sampler.sample_independent(
study, trial, "param-a", disc_dist
)
assert 1.0 <= discrete_uniform_suggestion <= 100.0
assert abs(int(discrete_uniform_suggestion * 10) - discrete_uniform_suggestion * 10) < 1e-3
def test_sample_independent_categorical_distributions() -> None:
"""Test samples are drawn from the specified category."""
study = optuna.create_study()
categories = [i * 0.3 + 1.0 for i in range(330)]
def cat_value_fn(idx: int) -> float:
random.seed(idx)
return categories[random.randint(0, len(categories) - 1)]
cat_dist = optuna.distributions.CategoricalDistribution(categories)
past_trials = [
frozen_trial_factory(i, dist=cat_dist, value_fn=cat_value_fn) for i in range(1, 8)
]
trial = frozen_trial_factory(8)
sampler = TPESampler(n_startup_trials=5, seed=0)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
categorical_suggestion = sampler.sample_independent(study, trial, "param-a", cat_dist)
assert categorical_suggestion in categories
def test_sample_independent_int_uniform_distributions() -> None:
"""Test sampling from int distribution returns integer."""
study = optuna.create_study()
def int_value_fn(idx: int) -> float:
random.seed(idx)
return random.randint(0, 100)
int_dist = optuna.distributions.IntDistribution(1, 100)
past_trials = [
frozen_trial_factory(i, dist=int_dist, value_fn=int_value_fn) for i in range(1, 8)
]
trial = frozen_trial_factory(8)
sampler = TPESampler(n_startup_trials=5, seed=0)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
int_suggestion = sampler.sample_independent(study, trial, "param-a", int_dist)
assert 1 <= int_suggestion <= 100
assert isinstance(int_suggestion, int)
def test_sample_independent_int_loguniform_distributions() -> None:
"""Test sampling from int distribution returns integer."""
study = optuna.create_study()
def int_value_fn(idx: int) -> float:
random.seed(idx)
return random.randint(0, 100)
intlog_dist = optuna.distributions.IntDistribution(1, 100, log=True)
past_trials = [
frozen_trial_factory(i, dist=intlog_dist, value_fn=int_value_fn) for i in range(1, 8)
]
trial = frozen_trial_factory(8)
sampler = TPESampler(n_startup_trials=5, seed=0)
with patch.object(study._storage, "get_all_trials", return_value=past_trials):
intlog_suggestion = sampler.sample_independent(study, trial, "param-a", intlog_dist)
assert 1 <= intlog_suggestion <= 100
assert isinstance(intlog_suggestion, int)
@pytest.mark.parametrize(
"state",
[
optuna.trial.TrialState.FAIL,
optuna.trial.TrialState.PRUNED,
optuna.trial.TrialState.RUNNING,
optuna.trial.TrialState.WAITING,
],
)
def test_sample_independent_handle_unsuccessful_states(state: optuna.trial.TrialState) -> None:
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
# Prepare sampling result for later tests.
study = optuna.create_study()
for i in range(1, 30):
trial = frozen_trial_factory(i, dist=dist)
study._storage.create_new_trial(study._study_id, template_trial=trial)
trial = frozen_trial_factory(30)
sampler = TPESampler(n_startup_trials=5, seed=2)
all_success_suggestion = sampler.sample_independent(study, trial, "param-a", dist)
# Test unsuccessful trials are handled differently.
state_fn = build_state_fn(state)
study = optuna.create_study()
for i in range(1, 30):
trial = frozen_trial_factory(i, dist=dist, state_fn=state_fn)
study._storage.create_new_trial(study._study_id, template_trial=trial)
trial = frozen_trial_factory(30)
sampler = TPESampler(n_startup_trials=5, seed=2)
partial_unsuccessful_suggestion = sampler.sample_independent(study, trial, "param-a", dist)
assert partial_unsuccessful_suggestion != all_success_suggestion
def test_sample_independent_ignored_states() -> None:
"""Tests FAIL, RUNNING, and WAITING states are equally."""
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
suggestions = []
for state in [
optuna.trial.TrialState.FAIL,
optuna.trial.TrialState.RUNNING,
optuna.trial.TrialState.WAITING,
]:
study = optuna.create_study()
state_fn = build_state_fn(state)
for i in range(1, 30):
trial = frozen_trial_factory(i, dist=dist, state_fn=state_fn)
study._storage.create_new_trial(study._study_id, template_trial=trial)
trial = frozen_trial_factory(30)
sampler = TPESampler(n_startup_trials=5, seed=0)
suggestions.append(sampler.sample_independent(study, trial, "param-a", dist))
assert len(set(suggestions)) == 1
def test_sample_independent_pruned_state() -> None:
"""Tests PRUNED state is treated differently from both FAIL and COMPLETE."""
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
suggestions = []
for state in [
optuna.trial.TrialState.COMPLETE,
optuna.trial.TrialState.FAIL,
optuna.trial.TrialState.PRUNED,
]:
study = optuna.create_study()
state_fn = build_state_fn(state)
for i in range(1, 30):
trial = frozen_trial_factory(i, dist=dist, state_fn=state_fn)
study._storage.create_new_trial(study._study_id, template_trial=trial)
trial = frozen_trial_factory(30)
sampler = TPESampler(n_startup_trials=5, seed=2)
suggestions.append(sampler.sample_independent(study, trial, "param-a", dist))
assert len(set(suggestions)) == 3
@pytest.mark.parametrize("direction", ["minimize", "maximize"])
def test_get_observation_pairs(direction: str) -> None:
def objective(trial: Trial) -> float:
x = trial.suggest_int("x", 5, 5)
z = trial.suggest_categorical("z", [None])
if trial.number == 0:
return x * int(z is None)
elif trial.number == 1:
trial.report(1, 4)
trial.report(2, 7)
raise TrialPruned()
elif trial.number == 2:
trial.report(float("nan"), 3)
raise TrialPruned()
elif trial.number == 3:
raise TrialPruned()
else:
raise RuntimeError()
study = optuna.create_study(direction=direction)
study.optimize(objective, n_trials=5, catch=(RuntimeError,))
sign = 1 if direction == "minimize" else -1
scores = [
(-float("inf"), [sign * 5.0]), # COMPLETE
(-7, [sign * 2]), # PRUNED (with intermediate values)
(-3, [float("inf")]), # PRUNED (with a NaN intermediate value; it's treated as infinity)
(float("inf"), [sign * 0.0]), # PRUNED (without intermediate values)
]
assert _tpe.sampler._get_observation_pairs(study, ["x"], False) == (
{"x": [5.0, 5.0, 5.0, 5.0]},
scores,
)
assert _tpe.sampler._get_observation_pairs(study, ["y"], False) == (
{"y": [None, None, None, None]},
scores,
)
assert _tpe.sampler._get_observation_pairs(study, ["z"], False) == (
{"z": [0, 0, 0, 0]}, # The internal representation of 'None' for z is 0
scores,
)
assert _tpe.sampler._get_observation_pairs(study, ["x"], True) == (
{"x": [5.0, 5.0, 5.0, 5.0]},
scores,
)
assert _tpe.sampler._get_observation_pairs(study, ["y"], True) == ({"y": []}, [])
assert _tpe.sampler._get_observation_pairs(study, ["z"], True) == (
{"z": [0, 0, 0, 0]}, # The internal representation of 'None' for z is 0
scores,
)
@pytest.mark.parametrize("direction", ["minimize", "maximize"])
def test_get_observation_pairs_multi(direction: str) -> None:
def objective(trial: Trial) -> float:
x = trial.suggest_int("x", 5, 5)
y = trial.suggest_int("y", 6, 6)
if trial.number == 0:
return x + y
elif trial.number == 1:
trial.report(1, 4)
trial.report(2, 7)
raise TrialPruned()
elif trial.number == 2:
trial.report(float("nan"), 3)
raise TrialPruned()
elif trial.number == 3:
raise TrialPruned()
else:
raise RuntimeError()
study = optuna.create_study(direction=direction)
study.optimize(objective, n_trials=5, catch=(RuntimeError,))
sign = 1 if direction == "minimize" else -1
assert _tpe.sampler._get_observation_pairs(study, ["x", "y"], True) == (
{"x": [5.0, 5.0, 5.0, 5.0], "y": [6.0, 6.0, 6.0, 6.0]},
[
(-float("inf"), [sign * 11.0]), # COMPLETE
(-7, [sign * 2]), # PRUNED (with intermediate values)
(
-3,
[float("inf")],
), # PRUNED (with a NaN intermediate value; it's treated as infinity)
(float("inf"), [sign * 0.0]), # PRUNED (without intermediate values)
],
)
def test_split_observation_pairs() -> None:
indices_below, indices_above = _tpe.sampler._split_observation_pairs(
[
(-7, [-2]), # PRUNED (with intermediate values)
(float("inf"), [0.0]), # PRUNED (without intermediate values)
(
-3,
[float("inf")],
), # PRUNED (with a NaN intermediate value; it's treated as infinity)
(-float("inf"), [-5.0]), # COMPLETE
],
2,
)
assert list(indices_below) == [0, 3]
assert list(indices_above) == [1, 2]
def test_build_observation_dict() -> None:
observation_dict = _tpe.sampler._build_observation_dict(
{
"x": np.asarray([1.0, 2.0, 3.0, 4.0], dtype=float),
"y": np.asarray([10.0, None, 20.0, None], dtype=float),
},
np.asarray([0, 3]),
)
np.testing.assert_array_equal(observation_dict["x"], np.asarray([1.0, 4.0]))
np.testing.assert_array_equal(observation_dict["y"], np.asarray([10.0]))
def frozen_trial_factory(
idx: int,
dist: optuna.distributions.BaseDistribution = optuna.distributions.FloatDistribution(
1.0, 100.0
),
state_fn: Callable[
[int], optuna.trial.TrialState
] = lambda _: optuna.trial.TrialState.COMPLETE,
value_fn: Optional[Callable[[int], Union[int, float]]] = None,
target_fn: Callable[[float], float] = lambda val: (val - 20.0) ** 2,
interm_val_fn: Callable[[int], Dict[int, float]] = lambda _: {},
) -> optuna.trial.FrozenTrial:
if value_fn is None:
random.seed(idx)
value = random.random() * 99.0 + 1.0
else:
value = value_fn(idx)
return optuna.trial.FrozenTrial(
number=idx,
state=state_fn(idx),
value=target_fn(value),
datetime_start=None,
datetime_complete=None,
params={"param-a": value},
distributions={"param-a": dist},
user_attrs={},
system_attrs={},
intermediate_values=interm_val_fn(idx),
trial_id=idx,
)
def build_state_fn(state: optuna.trial.TrialState) -> Callable[[int], optuna.trial.TrialState]:
def state_fn(idx: int) -> optuna.trial.TrialState:
return [optuna.trial.TrialState.COMPLETE, state][idx % 2]
return state_fn
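# Illustrative note (not part of the original tests): build_state_fn alternates the
# given state with COMPLETE based on the trial index, e.g.
#     state_fn = build_state_fn(optuna.trial.TrialState.PRUNED)
#     state_fn(2)  # -> TrialState.COMPLETE (even index)
#     state_fn(3)  # -> TrialState.PRUNED (odd index)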
def test_call_after_trial_of_random_sampler() -> None:
sampler = TPESampler()
study = optuna.create_study(sampler=sampler)
with patch.object(
sampler._random_sampler, "after_trial", wraps=sampler._random_sampler.after_trial
) as mock_object:
study.optimize(lambda _: 1.0, n_trials=1)
assert mock_object.call_count == 1
def test_mixed_relative_search_space_pruned_and_completed_trials() -> None:
def objective(trial: Trial) -> float:
if trial.number == 0:
trial.suggest_float("param1", 0, 1)
raise optuna.exceptions.TrialPruned()
if trial.number == 1:
trial.suggest_float("param2", 0, 1)
return 0
return 0
sampler = TPESampler(n_startup_trials=1, multivariate=True)
study = optuna.create_study(sampler=sampler)
study.optimize(objective, 3)
def test_group() -> None:
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(multivariate=True, group=True)
study = optuna.create_study(sampler=sampler)
with patch.object(sampler, "_sample_relative", wraps=sampler._sample_relative) as mock:
study.optimize(lambda t: t.suggest_int("x", 0, 10), n_trials=2)
assert mock.call_count == 1
assert study.trials[-1].distributions == {"x": distributions.IntDistribution(low=0, high=10)}
with patch.object(sampler, "_sample_relative", wraps=sampler._sample_relative) as mock:
study.optimize(
lambda t: t.suggest_int("y", 0, 10) + t.suggest_float("z", -3, 3), n_trials=1
)
assert mock.call_count == 1
assert study.trials[-1].distributions == {
"y": distributions.IntDistribution(low=0, high=10),
"z": distributions.FloatDistribution(low=-3, high=3),
}
with patch.object(sampler, "_sample_relative", wraps=sampler._sample_relative) as mock:
study.optimize(
lambda t: t.suggest_int("y", 0, 10)
+ t.suggest_float("z", -3, 3)
+ t.suggest_float("u", 1e-2, 1e2, log=True)
+ bool(t.suggest_categorical("v", ["A", "B", "C"])),
n_trials=1,
)
assert mock.call_count == 2
assert study.trials[-1].distributions == {
"u": distributions.FloatDistribution(low=1e-2, high=1e2, log=True),
"v": distributions.CategoricalDistribution(choices=["A", "B", "C"]),
"y": distributions.IntDistribution(low=0, high=10),
"z": distributions.FloatDistribution(low=-3, high=3),
}
with patch.object(sampler, "_sample_relative", wraps=sampler._sample_relative) as mock:
study.optimize(lambda t: t.suggest_float("u", 1e-2, 1e2, log=True), n_trials=1)
assert mock.call_count == 3
assert study.trials[-1].distributions == {
"u": distributions.FloatDistribution(low=1e-2, high=1e2, log=True)
}
with patch.object(sampler, "_sample_relative", wraps=sampler._sample_relative) as mock:
study.optimize(
lambda t: t.suggest_int("y", 0, 10) + t.suggest_int("w", 2, 8, log=True), n_trials=1
)
assert mock.call_count == 4
assert study.trials[-1].distributions == {
"y": distributions.IntDistribution(low=0, high=10),
"w": distributions.IntDistribution(low=2, high=8, log=True),
}
with patch.object(sampler, "_sample_relative", wraps=sampler._sample_relative) as mock:
study.optimize(lambda t: t.suggest_int("x", 0, 10), n_trials=1)
assert mock.call_count == 6
assert study.trials[-1].distributions == {"x": distributions.IntDistribution(low=0, high=10)}
def test_invalid_multivariate_and_group() -> None:
with pytest.raises(ValueError):
_ = TPESampler(multivariate=False, group=True)
def test_group_experimental_warning() -> None:
with pytest.warns(optuna.exceptions.ExperimentalWarning):
_ = TPESampler(multivariate=True, group=True)
# This function is used only in test_group_deterministic_iteration, but declared at top-level
# because local function cannot be pickled, which occurs within multiprocessing.
def run_tpe(k: int, sequence_dict: Dict[int, List[int]], hash_dict: Dict[int, int]) -> None:
hash_dict[k] = hash("nondeterministic hash")
sampler = TPESampler(n_startup_trials=1, seed=2, multivariate=True, group=True)
study = create_study(sampler=sampler)
study.optimize(
lambda t: np.sum([t.suggest_int(f"x{i}", 0, 10) for i in range(10)]), n_trials=2
)
sequence_dict[k] = list(study.trials[-1].params.values())
def test_group_deterministic_iteration() -> None:
# Multiprocessing supports three ways to start a process.
# We use the `spawn` option to create a child process as a fresh Python process.
# For more detail, see https://github.com/optuna/optuna/pull/3187#issuecomment-997673037.
multiprocessing.set_start_method("spawn", force=True)
manager = multiprocessing.Manager()
sequence_dict: Dict[int, List[int]] = manager.dict()
hash_dict: Dict[int, int] = manager.dict()
for i in range(3):
p = multiprocessing.Process(target=run_tpe, args=(i, sequence_dict, hash_dict))
p.start()
p.join()
# Hashes are expected to be different because string hashing is nondeterministic per process.
assert not (hash_dict[0] == hash_dict[1] == hash_dict[2])
# But the sequences are expected to be the same.
assert sequence_dict[0] == sequence_dict[1] == sequence_dict[2]
@pytest.mark.parametrize("direction", ["minimize", "maximize"])
@pytest.mark.parametrize("multivariate", [True, False])
def test_constant_liar_observation_pairs(direction: str, multivariate: bool) -> None:
with warnings.catch_warnings():
warnings.simplefilter("ignore", optuna.exceptions.ExperimentalWarning)
sampler = TPESampler(constant_liar=True)
study = optuna.create_study(sampler=sampler, direction=direction)
trial = study.ask()
trial.suggest_int("x", 2, 2)
assert (
len(study.trials) == 1 and study.trials[0].state == optuna.trial.TrialState.RUNNING
), "Precondition"
# The value of the constant liar should be penalizing, i.e. `float("inf")` during minimization
# and `-float("inf")` during maximization.
expected_values = [(-float("inf"), [float("inf") * (-1 if direction == "maximize" else 1)])]
assert _tpe.sampler._get_observation_pairs(
study, ["x"], multivariate, constant_liar=False
) == (
{"x": []},
[],
)
assert _tpe.sampler._get_observation_pairs(study, ["x"], multivariate, constant_liar=True) == (
{"x": [2]},
expected_values,
)
def test_constant_liar_experimental_warning() -> None:
with pytest.warns(optuna.exceptions.ExperimentalWarning):
_ = TPESampler(constant_liar=True)
|
main.py
|
from werkzeug.exceptions import BadRequestKeyError
from flask import Flask, request
import dataclasses
import json
from typing import Any
import urllib.parse
import requests
import mwparserfromhell
from mwparserfromhell.nodes import Wikilink, Text, Template
import time
import threading
default_chest_costs = dict(
Wood={7: 0},
Gold={1: 25_000, 2: 50_000, 7: 100_000},
Diamond={1: 50_000, 2: 100_000, 7: 250_000},
Emerald={1: 100_000, 2: 250_000, 7: 500_000},
Obsidian={1: 250_000, 2: 500_000, 7: 1_000_000},
Bedrock={4: 4, 7: 2_000_000}
)
@dataclasses.dataclass
class DungeonDrop:
item: str
floor: int
chest: str
cost: int
drop_chances: dict
def get_drop_chance(self, has_s_plus: bool, talisman_level: int, boss_luck: int):
drop_identifier = "S" + ("+" if has_s_plus else "") + "ABCD"[
talisman_level] + str(len([i for i in [0, 1, 3, 5, 10] if i >= boss_luck]))
return self.drop_chances.get(drop_identifier)
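# Illustrative sketch (values assumed for illustration only): the lookup key combines
# the score tier, a talisman letter and a boss-luck bucket count. With an S+ score,
# talisman_level=1 ("B") and boss_luck=5, two thresholds (5 and 10) are >= 5, so the
# key is "S+B2":
#     DungeonDrop("item", 1, "Wood", 0, {"S+B2": "25%"}).get_drop_chance(True, 1, 5)  # -> "25%"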
class ObjectEncoder(json.JSONEncoder):
def default(self, o: Any) -> Any:
if dataclasses.is_dataclass(o):
return o.__dict__
return super().default(o)
def romanToInt(num):
roman_numerals = {'I': 1, 'V': 5, 'X': 10}
result = 0
for i, c in enumerate(num):
if (i+1) == len(num) or roman_numerals[c] >= roman_numerals[num[i+1]]:
result += roman_numerals[c]
else:
result -= roman_numerals[c]
return result
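# Illustrative check (not part of the original module): subtractive notation is handled
# by comparing each numeral against the one that follows it, e.g.
#     romanToInt("VII")  # -> 7
#     romanToInt("IV")   # -> 4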
def fetch_dungeon_loot():
titles = [f"Template:Catacombs Floor {f} Loot" for f in [
"I", "II", "III", "IV", "V", "VI", "VII"]]
titles.extend([f"Template:Catacombs Floor {f} Loot Master" for f in [
"I", "II", "III", "IV", "V", "VI", "VII"]])
items = {}
for title, floor in get_wiki_sources_by_title(*titles).items():
floor_num = -1
floor_data = {}
for template in floor.filter_templates():
if template.name.strip() == "Dungeon Chest Table/Row":
item = None
ifloor = None
chest = None
cost = 0
drop_chances = {}
for param in template.params:
attr_name = param.name.nodes[0].strip()
attr_value = param.value.nodes[0].strip()
if attr_name == "item":
if item is None:
item = attr_value.replace(
"Ultimate_Jerry", "Ultimate Jerry").replace("’", "'")
if item.startswith("Wise "):
item = "Ultimate " + item
elif item.endswith(" Pet"):
item = item.split(" Pet")[0]
elif attr_name == "chest":
chest = attr_value
elif attr_name == "cost":
cost = int(attr_value)
elif attr_name == "floor":
ifloor = int(attr_value)
if title.endswith("Master"):
ifloor += 7
floor_num = ifloor
elif attr_name.startswith("S"):
drop_chances[attr_name] = attr_value
if item is None or ifloor is None or chest is None:
print("WARNING: Missing data for item: " + str(template))
else:
if cost == 0:
defaults = default_chest_costs[chest]
cost = defaults[min(
f for f in defaults.keys() if f >= (ifloor-7 if title.endswith("Master") else ifloor))]
if item == "Spirit Wing":
cost = 2000000
if chest not in floor_data:
floor_data[chest] = []
floor_data[chest].append(DungeonDrop(
item, ifloor, chest, cost, drop_chances))
items[floor_num] = floor_data
return items
def fetch_dragon_loot():
titles = [f"Template:Dragon loot tables {f}" for f in [
"superior", "strong", "unstable", "young", "wise", "old", "protector"]]
items = {}
for title, dragon in get_wiki_sources_by_title(*titles).items():
cur_floor = {}
cur_name = ""
cur_item = {}
for counter, template in enumerate(dragon.nodes):
if type(template) == Wikilink:
if not template.title.startswith("File"):
if cur_item != {}:
cur_floor[cur_name] = cur_item
cur_name = template.title.strip()
if dragon.nodes[counter-2] == "{{Legendary}}":
cur_name = f"Legendary {cur_name}"
elif dragon.nodes[counter-2] == "{{Epic}}":
cur_name = f"Epic {cur_name}"
if cur_name.endswith(" Pet"):
cur_name = cur_name.split(" Pet")[0]
cur_item = {}
elif template.title.startswith("File:SkyBlock items summoning eye.png"):
cur_item["eye"] = True
elif type(template) == Text:
if template.value.strip() == "Unique":
cur_item["unique"] = True
else:
try:
cur_item["quality"] = int(template.strip())
except ValueError:
pass
elif type(template) == Template:
if len(template.params) == 2 and template.params[0] == "green":
cur_item["drop_chance"] = template.params[1].value.strip()
items[title.split("tables ")[1]] = cur_floor
return items
def get_wiki_sources_by_title(*page_titles: str, wiki_host: str = "wiki.hypixel.net"):
prepared_titles = "|".join(map(urllib.parse.quote, page_titles))
api_data = requests.get(
f"https://{wiki_host}/api.php?action=query&prop=revisions&titles={prepared_titles}&rvprop=content&format=json&rvslots=main").json()
if "batchcomplete" not in api_data:
print(f"Batch data not present in wiki response for: {page_titles}")
return {
page["title"]: mwparserfromhell.parse(
page["revisions"][0]["slots"]["main"]["*"])
for _, page in api_data["query"]["pages"].items()
}
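# Illustrative usage sketch (the page title shown is just an example): fetch the wikitext
# of a single page and inspect the templates it contains.
#     parsed = get_wiki_sources_by_title("Template:Dragon loot tables superior")
#     for title, wikicode in parsed.items():
#         print(title, len(wikicode.filter_templates()))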
def update_data():
dungeon_loot_data = fetch_dungeon_loot()
with open("dungeon_loot.json", "w", encoding="utf-8") as f:
json.dump(dungeon_loot_data, f, ensure_ascii=False,
indent=4, cls=ObjectEncoder)
dragon_loot_data = fetch_dragon_loot()
with open("dragon_loot.json", "w", encoding="utf-8") as f:
json.dump(dragon_loot_data, f, ensure_ascii=False,
indent=4, cls=ObjectEncoder)
app = Flask("app")
@app.route("/")
def home():
return {"deez": "nuts"}
@app.route("/dungeon_loot")
def dungeon_loot():
args = request.args
floor = 0
try:
floor = int(args["floor"])
except (ValueError, BadRequestKeyError):
pass
if floor < 1 or floor > 14:
return {"cause": "Invalid"}
with open("dungeon_loot.json") as file:
return json.load(file)[str(floor)]
@app.route("/dragon_loot")
def dragon_loot():
with open("dragon_loot.json") as file:
return json.load(file)
@app.before_first_request
def activate_job():
def run_job():
while True:
print("Updating data")
update_data()
print("Data updated")
time.sleep(3600)
thread = threading.Thread(target=run_job)
thread.start()
app.run(host="0.0.0.0", port=8081)
|
background_helper.py
|
'''
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2018 Pupil Labs
Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
'''
from ctypes import c_bool
import multiprocessing as mp
# mp = mp.get_context('fork')
import logging
logger = logging.getLogger(__name__)
class EarlyCancellationError(Exception):
pass
class Task_Proxy(object):
'''Future-like object that runs a given generator in the background and is able to return the results incrementally'''
def __init__(self, name, generator, args=(), kwargs={}):
super().__init__()
self._should_terminate_flag = mp.Value(c_bool, 0)
self._completed = False
self._canceled = False
pipe_recv, pipe_send = mp.Pipe(False)
wrapper_args = [pipe_send, self._should_terminate_flag, generator]
wrapper_args.extend(args)
self.process = mp.Process(target=self._wrapper, name=name, args=wrapper_args, kwargs=kwargs)
self.process.daemon = True
self.process.start()
self.pipe = pipe_recv
def _wrapper(self, pipe, _should_terminate_flag, generator, *args, **kwargs):
'''Executed in background, pipes generator results to foreground'''
logger.debug('Entering _wrapper')
try:
for datum in generator(*args, **kwargs):
if _should_terminate_flag.value:
raise EarlyCancellationError('Task was cancelled')
pipe.send(datum)
except Exception as e:
pipe.send(e)
if not isinstance(e, EarlyCancellationError):
import traceback
print(traceback.format_exc())
else:
pipe.send(StopIteration())
finally:
pipe.close()
logger.debug('Exiting _wrapper')
def fetch(self):
'''Fetches progress and available results from background'''
if self.completed or self.canceled:
return
while self.pipe.poll(0):
try:
datum = self.pipe.recv()
except EOFError:
logger.debug("Process canceled be user.")
self._canceled = True
return
else:
if isinstance(datum, StopIteration):
self._completed = True
return
elif isinstance(datum, EarlyCancellationError):
self._canceled = True
return
elif isinstance(datum, Exception):
raise datum
else:
yield datum
def cancel(self, timeout=1):
if not (self.completed or self.canceled):
self._should_terminate_flag.value = True
for x in self.fetch():
# fetch to flush pipe to allow process to react to cancel command.
pass
if self.process is not None:
self.process.join(timeout)
self.process = None
@property
def completed(self):
return self._completed
@property
def canceled(self):
return self._canceled
def __del__(self):
self.cancel(timeout=.1)
self.process = None
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(processName)s - [%(levelname)s] %(name)s: %(message)s')
def example_generator(mu=0., sigma=1., steps=100):
r'''samples `N(\mu, \sigma^2)`'''
import numpy as np
from time import sleep
for i in range(steps):
# yield progress, datum
yield (i + 1) / steps, sigma * np.random.randn() + mu
sleep(np.random.rand() * .1)
# initialize task proxy
task = Task_Proxy('Background', example_generator, args=(5., 3.), kwargs={'steps': 100})
from time import time, sleep
start = time()
maximal_duration = 2.
while time() - start < maximal_duration:
# fetch all available results
for progress, random_number in task.fetch():
logger.debug('[{:3.0f}%] {:0.2f}'.format(progress * 100, random_number))
# test if task is completed
if task.completed:
break
sleep(1.)
logger.debug('Canceling task')
task.cancel(timeout=1)
logger.debug('Task done')
|
main.py
|
#!/bin/python3
from __future__ import annotations # because raspberry pi is on Python 3.7 and annotations are 3.9
import datetime
from enum import IntEnum
# https://github.com/danielorf/pyhubitat
from pyhubitat import MakerAPI
import logging
from pathlib import Path
import pytz # timezones
import re
import threading
# https://github.com/python-telegram-bot/python-telegram-bot
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, Update, ParseMode
from telegram.ext import CallbackContext, CallbackQueryHandler, CommandHandler, MessageHandler, Filters, Updater
from typing import Union
import yaml
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO)
class AccessLevel(IntEnum):
NONE = 0
DEVICE = 1
SECURITY = 2
ADMIN = 3
class BotUser:
def __init__(self, id: int, access_level: AccessLevel, user_group: str, device_groups: list) -> None:
self.id = id
self.access_level = access_level
self.user_group = user_group
self.device_groups = device_groups
logging.debug(f"User={id}; AccessLevel:={access_level}; UserGroup={self.user_group}.")
def has_access(self, requested: AccessLevel) -> bool:
return self.access_level >= requested
class Telegram:
def __init__(self, conf: dict, hubitat):
self.hubitat = hubitat
self.users = {}
self.rejected_message = conf["rejected_message"]
for group_name, group_data in conf["user_groups"].items():
access_level = AccessLevel[group_data["access_level"]]
device_groups = [hubitat.get_device_group(name) for name in group_data["device_groups"]]
for id in map(int, group_data["ids"]):
if id in self.users:
raise ValueError(f"User id {id} is referenced in both groups '{group_name}' and '{self.users[id].user_group}'.")
self.users[id] = BotUser(id, access_level, group_name, device_groups)
self.updater = Updater(token=conf["token"], use_context=True)
self.dispatcher = self.updater.dispatcher
def get_user(self, id: int) -> BotUser:
return self.users[id]
class Device:
def __init__(self, device: dict):
self.id: int = int(device["id"])
self.label: str = device["label"]
self.type: str = device["type"]
self.commands: list[str] = device["commands"]
self.description: str = None
self.supported_commands: list[str] = []
class DeviceGroup:
def __init__(self, name: str, conf: dict, hubitat):
self.hubitat = hubitat
self.name = name
self.allowed_device_ids = set(map(int, conf["allowed_device_ids"]))
self.rejected_device_ids = set(map(int, conf["rejected_device_ids"]))
self._devices = None
logging.debug(f"DeviceGroup: {name}. AllowedDeviceIds: {self.allowed_device_ids}. RejectedDeviceIds: {self.rejected_device_ids}.")
def refresh_devices(self) -> None:
self._devices = None
def get_devices(self) -> dict[str, Device]:
def is_allowed_device(device: Device) -> bool:
name = f"{device.label}:{device.id}"
if self.allowed_device_ids and not device.id in self.allowed_device_ids:
logging.debug(f"Removing device '{name}' because not in allowed list.")
return False
if self.rejected_device_ids and device.id in self.rejected_device_ids:
logging.debug(f"Removing device '{name}' because in rejected list.")
return False
commands = [c["command"] for c in device.commands]
supported_commands = set()
for command in commands:
if command in self.hubitat.he_to_bot_commands:
bot_command = self.hubitat.he_to_bot_commands[command] or "/" + command
supported_commands.add(bot_command)
device.supported_commands = supported_commands
return True
if self._devices is None:
logging.debug(f"Refreshing device cache for device group '{self.name}'.")
self._devices = {self.hubitat.case_hack(device.label): device for device in self.hubitat.get_all_devices() if is_allowed_device(device)}
return self._devices
def get_device(self, name: str) -> Union[Device, None]:
return self.get_devices().get(self.hubitat.case_hack(name), None)
class Hubitat:
def __init__(self, conf: dict):
hub = f"{conf['url']}apps/api/{conf['appid']}"
logging.info(f"Connecting to hubitat Maker API app {hub}")
self.api = MakerAPI(conf["token"], hub)
self.device_groups = {}
self._devices_cache = None
self.case_insensitive: bool = bool(conf["case_insensitive"])
self.device_aliases: list[list[str]] = conf["device_aliases"]
self._device_descriptions: dict[int, str] = conf["device_descriptions"]
self.he_to_bot_commands = {"on": None, "off": None, "setLevel": "/dim", "open": None, "close": None, "lock": None, "unlock": None}
# because Python doesn't support case insensitive searches
# and Hubitat requires exact case, we create a dict of {lowercase: requested case}
self.hsm_arm = {x.lower(): x for x in conf["hsm_arm_values"]}
for name, data in conf["device_groups"].items():
self.device_groups[name] = DeviceGroup(name, data, self)
if not self.device_groups:
raise Exception("At least one device group must be specified in the config file.")
def case_hack(self, name: str) -> str:
# Gross Hack (tm) because Python doesn't support case comparers for dictionaries
if self.case_insensitive:
name = name.lower()
return name
def refresh_devices(self) -> None:
self._devices_cache = None
for g in self.device_groups.values():
g.refresh_devices()
def get_device_group(self, name: str) -> DeviceGroup:
return self.device_groups[name]
def get_device_groups(self) -> list[str]:
return self.device_groups.values()
def get_all_devices(self) -> list[Device]:
if self._devices_cache is None:
logging.info("Refreshing all devices cache")
self._devices_cache = [Device(x) for x in self.api.list_devices_detailed()]
for device in self._devices_cache:
device.description = self._device_descriptions.get(device.id)
return self._devices_cache
def get_device(self, name: str, groups: list[DeviceGroup]) -> Union[Device, None]:
for group in groups:
ret = group.get_device(name)
if ret:
return ret
return None
class Homebot:
def __init__(self, telegram: Telegram, hubitat: Hubitat):
self.telegram = telegram
self.hubitat = hubitat
self.list_commands = {AccessLevel.NONE: [], AccessLevel.DEVICE: ["*Device commands*:"], AccessLevel.ADMIN: ["*Admin commands*:"], AccessLevel.SECURITY: ["*Security commands*:"]}
def send_text(self, update: Update, context: CallbackContext, text: Union[str, list[str]]) -> None:
self.send_text_or_list(update, context, text, None)
def send_md(self, update: Update, context: CallbackContext, text: Union[str, list[str]]) -> None:
self.send_text_or_list(update, context, text, ParseMode.MARKDOWN)
def send_text_or_list(self, update: Update, context: CallbackContext, text: Union[str, list[str]], parse_mode: ParseMode) -> None:
if not text:
return
if isinstance(text, list):
text = "\n".join(text)
context.bot.send_message(chat_id=update.effective_chat.id, text=text, parse_mode=parse_mode)
def add_command(self, cmd: list, hlp: str, fn, access_level: AccessLevel, params: str = None) -> None:
helptxt = ""
for alias in cmd:
if helptxt:
helptxt = helptxt + ", "
helptxt = helptxt + "/" + alias
self.telegram.dispatcher.add_handler(CommandHandler(alias, fn, Filters.user(self.telegram.users)))
if params:
helptxt = helptxt + " `" + params + "`"
helptxt = helptxt + ": " + hlp
self.list_commands[access_level].append(helptxt)
def get_single_arg(self, context: CallbackContext) -> str:
return self.hubitat.case_hack(" ".join(context.args))
def get_device(self, update: Update, context: CallbackContext) -> Device:
device_name = self.get_single_arg(context)
if not device_name:
self.send_text(update, context, "Device name not specified.")
return None
device_groups = self.get_user(update).device_groups
device = self.hubitat.get_device(device_name, device_groups)
if device:
return device
for alias in self.hubitat.device_aliases:
pattern = self.hubitat.case_hack(alias[0])
sub = alias[1]
new_device_name = re.sub(pattern, sub, device_name)
logging.debug(f"Trying regex s/{pattern}/{sub}/ => {new_device_name}")
device = self.hubitat.get_device(new_device_name, device_groups)
if device:
return device
self.send_text(update, context, "Device not found. '/l' to get list of devices.")
return None
def markdown_escape(self, text: str) -> str:
text = re.sub(r"([_*\[\]()~`>\#\+\-=|\.!])", r"\\\1", text)
text = re.sub(r"\\\\([_*\[\]()~`>\#\+\-=|\.!])", r"\1", text)
return text
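# Illustrative sketch (input assumed): Markdown special characters receive a backslash
# prefix, and the second substitution collapses accidental double escaping, e.g.
#     self.markdown_escape("a_b (c)")  # -> r"a\_b \(c\)"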
def get_timezone(self, context: CallbackContext) -> str:
return context.user_data.get("tz", None)
def set_timezone(self, context: CallbackContext, value: str) -> None:
context.user_data["tz"] = value
def get_user(self, update: Update) -> BotUser:
return self.telegram.get_user(update.effective_user.id)
def has_access(self, update: Update, access_level: AccessLevel) -> bool:
return self.get_user(update).has_access(access_level)
def get_user_info(self, update: Update) -> str:
if not update.effective_user:
return "None" # e.g., channel_post
return f"UserId {update.effective_user.id} ({update.effective_user.name})"
def log_command(self, update: Update, command: str, device: Device = None) -> None:
if device:
command = command + " " + device.label
logging.info(f"{self.get_user_info(update)} is sending command: {command}")
def request_access(self, update: Update, context: CallbackContext, access_level: AccessLevel) -> None:
if not self.has_access(update, access_level):
# user attempting to use admin/device/security command without perm, pretend it doesn't exist
self.command_unknown(update, context)
raise PermissionError(f"{self.get_user_info(update)} is attempting level {access_level} command without permission.")
def device_actuator(self, update: Update, context: CallbackContext, command: Union[str, list], bot_command: str, message: str, access_level=AccessLevel.DEVICE) -> None:
self.request_access(update, context, access_level)
device = self.get_device(update, context)
if device:
supported_commands = device.supported_commands
if bot_command not in supported_commands:
self.send_md(update, context, f"Command {bot_command} not supported by device `{device.label}`.")
return
self.log_command(update, bot_command, device)
if isinstance(command, list):
self.hubitat.api.send_command(device.id, command[0], command[1])
else:
self.hubitat.api.send_command(device.id, command)
self.send_text(update, context, message.format(device.label))
def command_device_info(self, update: Update, context: CallbackContext) -> None:
self.request_access(update, context, AccessLevel.DEVICE)
device = self.get_device(update, context)
if device:
info = self.hubitat.api.get_device_info(device.id)
self.log_command(update, "/info", device)
info["supported_commands"] = ", ".join(device.supported_commands)
if not self.has_access(update, AccessLevel.ADMIN):
info = {"label": info["label"], "supported_commands": info["supported_commands"]}
if device.description:
info["description"] = device.description
self.send_md(update, context, [f"*{k}*: `{v}`" for k, v in info.items()])
def command_refresh(self, update: Update, context: CallbackContext) -> None:
self.request_access(update, context, AccessLevel.ADMIN)
self.hubitat.refresh_devices()
self.send_text(update, context, "Refresh completed.")
def command_text(self, update: Update, context: CallbackContext) -> None:
# TODO: make it more interesting by consuming update.message.text
self.command_help(update, context)
def command_device_status(self, update: Update, context: CallbackContext) -> None:
self.request_access(update, context, AccessLevel.DEVICE)
device = self.get_device(update, context)
if device:
self.log_command(update, "/status", device)
status = self.hubitat.api.device_status(device.id)
text = [f"Status for device *{device.label}*:"]
if self.has_access(update, AccessLevel.ADMIN):
text += [f"*{k}*: `{v['currentValue']}` ({v['dataType']})" for k, v in status.items() if v["dataType"] != "JSON_OBJECT"]
else:
text += [f"*{k}*: `{v['currentValue']}`" for k, v in status.items() if v["dataType"] != "JSON_OBJECT"]
self.send_md(update, context, text)
def get_matching_timezones(self, input: str) -> list[str]:
input = input.lower()
return [v for v in pytz.common_timezones if input in v.lower()]
def command_timezone(self, update: Update, context: CallbackContext) -> None:
timezone = " ".join(context.args)
if timezone:
if timezone in pytz.all_timezones_set:
self.set_timezone(context, timezone)
self.send_text(update, context, "Timezone set")
else:
hits = self.get_matching_timezones(timezone)
if not hits:
hits = pytz.common_timezones
hits = hits[0:10]
self.send_text(update, context, "Invalid timezone. Valid timezones are: " + ", ".join(hits) + ", ...")
else:
timezone = self.get_timezone(context)
if timezone:
self.send_text(update, context, f"User timezone is: {timezone}.")
else:
self.send_text(update, context, "No timezone set for current user. Using UTC.")
def command_device_last_event(self, update: Update, context: CallbackContext) -> None:
self.get_device_events(update, context, True)
def command_device_events(self, update: Update, context: CallbackContext) -> None:
self.get_device_events(update, context, False)
def get_device_events(self, update: Update, context: CallbackContext, last_only: bool) -> None:
self.request_access(update, context, AccessLevel.SECURITY)
device = self.get_device(update, context)
if device:
self.log_command(update, "/events", device)
events = self.hubitat.api.get_device_events(device.id)
if len(events) == 0:
self.send_md(update, context, f"No events for device *{device.label}*")
return
tz = self.get_timezone(context)
tz_text = "UTC"
if tz:
tz_text = self.markdown_escape(tz)
tz = pytz.timezone(tz)
def convert_date(event_date: str) -> str:
# event_date is a string in ISO 8601 format
# e.g. 2022-02-03T04:02:32+0000
# start by transforming into a real datetime
event_date = datetime.datetime.strptime(event_date, "%Y-%m-%dT%H:%M:%S%z")
if tz:
# now transform it to the proper tz
event_date = event_date.astimezone(tz)
# and ... convert back to string.
event_date = event_date.strftime("%Y-%m-%d %H:%M:%S")
return event_date
if last_only:
event = events[0]
text = [f"Last event for device *{device.label}*:", f"Time: `{convert_date(event['date'])}` ({tz_text})", f"Name: {event['name']}", f"Value: {self.markdown_escape(event['value'])}"]
self.send_md(update, context, text)
return
def row(date, name, value) -> str:
return f"{date :20}|{name :12}|{value:10}"
text = [f"Events for device *{device.label}*, timezone {tz_text}:", "```", row("date", "name", "value")]
for event in events:
event_date = convert_date(event["date"])
text.append(row(event_date, event["name"], event["value"]))
text.append("```")
self.send_md(update, context, text)
def command_unknown(self, update: Update, context: CallbackContext) -> None:
self.send_text(update, context, "Unknown command.")
self.command_help(update, context)
def list_devices(self, update: Update, context: CallbackContext, devices: dict[str, Device], title: str):
self.request_access(update, context, AccessLevel.DEVICE)
devices_text = []
def get_description(device: Device) -> str:
if device.description:
return ": " + device.description
else:
return ""
if title:
devices_text.append(title)
if not devices:
devices_text.append("No devices.")
else:
if self.has_access(update, AccessLevel.ADMIN):
devices_text += [f"{info.label}: `{info.id}` ({info.type}) {info.description or ''}" for name, info in sorted(devices.items())]
else:
devices_text += [f"{info.label} {get_description(info)}" for name, info in sorted(devices.items())]
self.send_md(update, context, devices_text)
def command_list_devices(self, update: Update, context: CallbackContext) -> None:
self.request_access(update, context, AccessLevel.DEVICE)
device_groups = self.get_user(update).device_groups
device_filter = self.get_single_arg(context)
devices = {}
for device_group in device_groups:
for device in device_group.get_devices().values():
if device_filter in self.hubitat.case_hack(device.label):
devices[device.label] = device
self.list_devices(update, context, devices, None)
def command_list_groups(self, update: Update, context: CallbackContext) -> None:
self.request_access(update, context, AccessLevel.ADMIN)
group_filter = self.get_single_arg(context)
for group in self.hubitat.get_device_groups():
if group_filter in self.hubitat.case_hack(group.name):
self.list_devices(update, context, group.get_devices(), f"Devices in *{group.name}*:")
def command_help(self, update: Update, context: CallbackContext) -> None:
self.request_access(update, context, AccessLevel.NONE) # Technically not needed
self.send_md(update, context, self.list_commands[self.get_user(update).access_level])
def command_unknown_user(self, update: Update, context: CallbackContext) -> None:
logging.warning(f"Unknown {self.get_user_info(update)} is attempting to use the bot.")
self.send_text(update, context, self.telegram.rejected_message)
def command_device_on(self, update: Update, context: CallbackContext) -> None:
self.device_actuator(update, context, "on", "/on", "Turned on {}.")
def command_device_off(self, update: Update, context: CallbackContext) -> None:
self.device_actuator(update, context, "off", "/off", "Turned off {}.")
def command_device_open(self, update: Update, context: CallbackContext) -> None:
self.device_actuator(update, context, "open", "/open", "Opened {}.")
def command_device_close(self, update: Update, context: CallbackContext) -> None:
self.device_actuator(update, context, "close", "/close", "Closed {}.")
def command_device_lock(self, update: Update, context: CallbackContext) -> None:
self.device_actuator(update, context, "lock", "/lock", "Locked {}.", access_level=AccessLevel.SECURITY)
def command_device_unlock(self, update: Update, context: CallbackContext) -> None:
self.device_actuator(update, context, "unlock", "/unlock", "Unlocked {}.", access_level=AccessLevel.SECURITY)
def command_list_users(self, update: Update, context: CallbackContext) -> None:
self.request_access(update, context, AccessLevel.ADMIN)
def row(id, isAdmin, userGroup, deviceGroup) -> str:
return f"{id :10}|{isAdmin :5}|{userGroup :10}|{deviceGroup}"
text = ["```", row("Id", "Level", "UserGroup", "DeviceGroups"), "----------|-----|----------|-----------"]
text += [row(u.id, u.access_level, u.user_group, [group.name for group in u.device_groups]) for u in self.telegram.users.values()]
text.append("```")
self.send_md(update, context, text)
def get_percent(self, input: str) -> int:
percent = -1
try:
percent = int(input)
except ValueError:
return None
return percent if 100 >= percent >= 0 else None
def command_device_dim(self, update: Update, context: CallbackContext) -> None:
self.request_access(update, context, AccessLevel.DEVICE)
if len(context.args) < 2:
self.send_text(update, context, "Dim level and device name must be specified.")
return
percent = self.get_percent(context.args[0])
if percent is None:
self.send_text(update, context, "Invalid dim level specified: must be an int between 0 and 100.")
return
context.args = context.args[1:]
self.device_actuator(update, context, ["setLevel", percent], "/dim", "Dimmed {} to " + str(percent) + "%")
def command_mode(self, update: Update, context: CallbackContext) -> None:
self.request_access(update, context, AccessLevel.SECURITY)
modes = self.hubitat.api._request_sender("modes").json()
if len(context.args) > 0:
# mode change requested
mode_requested = self.get_single_arg(context)
for mode in modes:
if mode["name"].lower() == mode_requested:
self.log_command(update, f"/mode {mode['name']}")
self.hubitat.api._request_sender(f"modes/{mode['id']}")
self.send_text(update, context, "Mode change completed.")
return
self.send_text(update, context, "Unknown mode.")
text = []
for mode in modes:
if mode["active"]:
text.append(mode["name"] + " (*)")
else:
text.append(mode["name"])
self.send_text(update, context, ", ".join(text))
def command_hsm(self, update: Update, context: CallbackContext) -> None:
self.request_access(update, context, AccessLevel.SECURITY)
if len(context.args) > 0:
# mode change requested
hsm_requested = self.get_single_arg(context)
if hsm_requested in self.hubitat.hsm_arm:
hsm = self.hubitat.hsm_arm[hsm_requested]
self.log_command(update, f"/arm {hsm}")
self.hubitat.api._request_sender(f"hsm/{hsm}")
self.send_text(update, context, "Arm request sent.")
else:
self.send_text(update, context, f"Invalid arm state. Supported values: {', '.join(self.hubitat.hsm_arm.values())}.")
else:
state = self.hubitat.api._request_sender("hsm").json()
self.send_text(update, context, f"State: {state['hsm']}")
def command_exit(self, update: Update, context: CallbackContext) -> None:
self.request_access(update, context, AccessLevel.SECURITY)
keyboard = [
[
InlineKeyboardButton("Yes", callback_data='Exit_Yes'),
InlineKeyboardButton("No", callback_data='Exit_No'),
],
[InlineKeyboardButton("More information", callback_data='Exit_Help')],
]
reply_markup = InlineKeyboardMarkup(keyboard)
update.message.reply_text('Are you sure you want to exit the bot?', reply_markup=reply_markup)
def shutdown_hack(self):
# this needs to be on a separate thread because otherwise updater.stop() deadlocks
self.telegram.updater.stop()
self.telegram.updater.is_idle = False
def button_press(self, update: Update, context: CallbackContext) -> None:
self.request_access(update, context, AccessLevel.SECURITY)
query = update.callback_query
query.answer()
if query.data == "Exit_Help":
query.edit_message_text(text="This will terminate the bot process. To autorestart, use forever if started from command line or '--restart=always' if started in a Docker container.")
return
if query.data == "Exit_Yes":
query.edit_message_text(text="Terminating the bot.")
threading.Thread(target=self.shutdown_hack).start()
return
if query.data == "Exit_No":
query.edit_message_text(text="Not terminating the bot.")
return
def configure(self) -> None:
dispatcher = self.telegram.dispatcher
# Reject anyone we don't know
dispatcher.add_handler(MessageHandler(~Filters.user(self.telegram.users.keys()), self.command_unknown_user))
self.add_command(["close"], "close device `name`", self.command_device_close, AccessLevel.DEVICE, params="name")
self.add_command(["dim", "d", "level"], "set device `name` to `number` percent", self.command_device_dim, AccessLevel.DEVICE, params="number name")
self.add_command(["events", "e"], "get recent events for device `name`", self.command_device_events, AccessLevel.SECURITY, params="name")
self.add_command(["exit", "x"], "terminates the robot", self.command_exit, AccessLevel.ADMIN)
self.add_command(["groups", "g"], "get device groups, optionally filtering name by `filter`", self.command_list_groups, AccessLevel.ADMIN, params="filter")
self.add_command(["help", "h"], "display help", self.command_help, AccessLevel.NONE) # sadly '/?' is not a valid command
self.add_command(["arm", "a"], "get hsm arm status or arm to `value`", self.command_hsm, AccessLevel.SECURITY, "value")
self.add_command(["info", "i"], "get info of device `name`", self.command_device_info, AccessLevel.DEVICE, params="name")
self.add_command(["lastevent", "le"], "get the last event for device `name`", self.command_device_last_event, AccessLevel.SECURITY, params="name")
self.add_command(["list", "l"], "get devices, optionally filtering name by `filter`", self.command_list_devices, AccessLevel.DEVICE, params="filter")
self.add_command(["lock"], "lock device `name`", self.command_device_lock, AccessLevel.SECURITY, params="name")
self.add_command(["mode", "m"], "lists modes or set mode to `value`", self.command_mode, AccessLevel.SECURITY, params="value")
self.add_command(["off"], "turn off device `name`", self.command_device_off, AccessLevel.DEVICE, params="name")
self.add_command(["on"], "turn on device `name`", self.command_device_on, AccessLevel.DEVICE, params="name")
self.add_command(["open"], "open device `name`", self.command_device_open, AccessLevel.DEVICE, params="name")
self.add_command(["refresh", "r"], "refresh list of devices", self.command_refresh, AccessLevel.ADMIN)
self.add_command(["status", "s"], "get status of device `name`", self.command_device_status, AccessLevel.DEVICE, params="name")
self.add_command(["timezone", "tz"], "get timezone or set it to `value`", self.command_timezone, AccessLevel.SECURITY, params="value")
self.add_command(["unlock"], "lock device `name`", self.command_device_unlock, AccessLevel.SECURITY, params="name")
self.add_command(["users", "u"], "get users", self.command_list_users, AccessLevel.ADMIN)
dispatcher.add_handler(MessageHandler(Filters.command, self.command_unknown))
dispatcher.add_handler(MessageHandler(Filters.text & (~Filters.command), self.command_text))
dispatcher.add_handler(CallbackQueryHandler(self.button_press))
self.list_commands[AccessLevel.DEVICE] += self.list_commands[AccessLevel.NONE]
self.list_commands[AccessLevel.SECURITY] += self.list_commands[AccessLevel.DEVICE]
self.list_commands[AccessLevel.ADMIN] += self.list_commands[AccessLevel.SECURITY]
def run(self) -> None:
self.telegram.updater.start_polling()
self.telegram.updater.idle()
try:
with open(Path(__file__).with_name("config.yaml")) as config_file:
config = yaml.safe_load(config_file)
if "telegram" not in config:
raise ValueError("Invalid config.yaml. Section telegram required.")
if "hubitat" not in config:
raise ValueError("Invalid config.yaml. Section hubitat required.")
if "main" in config:
conf = config["main"]
logging.getLogger().setLevel(logging.getLevelName(conf["logverbosity"]))
hubitat = Hubitat(config["hubitat"])
telegram = Telegram(config["telegram"], hubitat)
hal = Homebot(telegram, hubitat)
hal.configure()
hal.run()
exit(0)
except FileNotFoundError as e:
logging.error("Missing config.yaml file.")
exit(2)
|
smart_explainer.py
|
"""
Smart explainer module
"""
import logging
import copy
import pandas as pd
from shapash.webapp.smart_app import SmartApp
from shapash.utils.io import save_pickle
from shapash.utils.io import load_pickle
from shapash.utils.transform import inverse_transform, apply_postprocessing
from shapash.utils.transform import adapt_contributions
from shapash.utils.utils import get_host_name
from shapash.utils.threading import CustomThread
from shapash.utils.shap_backend import shap_contributions, check_explainer
from shapash.utils.check import check_model, check_label_dict, check_ypred, check_contribution_object,\
check_postprocessing, check_features_name
from shapash.manipulation.select_lines import keep_right_contributions
from .smart_state import SmartState
from .multi_decorator import MultiDecorator
from .smart_plotter import SmartPlotter
from .smart_predictor import SmartPredictor
from shapash.utils.model import predict_proba
logging.basicConfig(level=logging.INFO)
class SmartExplainer:
"""
The SmartExplainer class is the main object of the Shapash library.
It allows Data Scientists to perform many operations to make the
results more understandable:
linking encoders, models, predictions, label dict and datasets.
SmartExplainer users have several methods which are described below.
The SmartExplainer Attributes :
data: dict
Data dictionary has 3 entries. Each key returns a pd.DataFrame (regression) or a list of pd.DataFrame
(classification - The length of the lists is equivalent to the number of labels).
All pd.DataFrame have the same shape (n_samples, n_features).
For the regression case, data should be regarded as a single array
of size (n_samples, n_features, 3).
data['contrib_sorted']: pandas.DataFrame (regression) or list of pandas.DataFrame (classification)
Contains local contributions of the prediction set, with common line index.
Columns are 'contrib_1', 'contrib_2', ... and contains the top contributions
for each line from left to right. In multi-class problems, this is a list of
contributions, one for each class.
data['var_dict']: pandas.DataFrame (regression) or list of pandas.DataFrame (classification)
Must contain only ints. It gives, for each line, the list of most important features
regarding the local decomposition. In order to save space, columns are denoted by
integers, the conversion being done with the columns_dict member. In multi-class
problems, this is a list of dataframes, one for each class.
data['x_sorted']: pandas.DataFrame (regression) or list of pandas.DataFrame (classification)
It gives, for each line, the list of most important features values regarding the local
decomposition. These values can only be understood with respect to data['var_dict']
x_init: pandas.DataFrame
preprocessed dataset used by the model to perform the prediction.
x_pred: pandas.DataFrame
x_init dataset after inverse transformation, with possible postprocessing modifications.
x_contrib_plot: pandas.DataFrame
x_init dataset after inverse transformation, without postprocessing; used for contribution_plot.
y_pred: pandas.DataFrame
User-specified prediction values.
contributions: pandas.DataFrame (regression) or list (classification)
local contributions aggregated if the preprocessing part requires it (e.g. one-hot encoding).
features_dict: dict
Dictionary mapping technical feature names to domain names.
inv_features_dict: dict
Inverse features_dict mapping.
label_dict: dict
Dictionary mapping integer labels to domain names (classification - target values).
inv_label_dict: dict
Inverse label_dict mapping.
columns_dict: dict
Dictionary mapping integer column number to technical feature names.
inv_columns_dict: dict
Inverse columns_dict mapping.
plot: object
Helper object containing all plotting functions (Bridge pattern).
model: model object
model used for consistency checks; it can also be used to compute predict and predict_proba values
features_desc: dict
Dictionary that references the numbers of feature values in the x_pred
features_imp: pandas.Series (regression) or list (classification)
Features importance values
preprocessing : category_encoders, ColumnTransformer, list or dict
The preprocessing applied to the original data.
postprocessing : dict
Dictionary of postprocessing modifications to apply to the x_pred dataframe.
How to declare a new SmartExplainer object?
Example
--------
>>> xpl = SmartExplainer(features_dict=featd,label_dict=labeld)
features_dict & label_dict are both optional.
features_dict maps technical feature names to domain names.
label_dict specifies the labels of the target (classification).
"""
def __init__(self, features_dict={}, label_dict=None):
if not isinstance(features_dict, dict):
raise ValueError(
"""
features_dict must be a dict
"""
)
if label_dict is not None and not isinstance(label_dict, dict):
raise ValueError(
"""
label_dict must be a dict
"""
)
self.features_dict = features_dict
self.label_dict = label_dict
self.plot = SmartPlotter(self)
def compile(self, x, model, explainer=None, contributions=None, y_pred=None, preprocessing=None, postprocessing=None):
"""
The compile method is the first step to understanding the model and predictions. It sorts
the contributions, reverses the preprocessing steps and performs all the calculations necessary
for a quick display of plots and an efficient summary of the explanation.
Most of the parameters are optional, but all help to display results that can be understood.
This step can take a few moments with large datasets.
Parameters
----------
x : pandas.DataFrame
Prediction set.
IMPORTANT: this should be the raw prediction set, whose values are seen by the end user.
x is a preprocessed dataset: Shapash can apply the model to it
model : model object
model used for consistency checks. The model object can also be used by some methods to compute
predict and predict_proba values
explainer : explainer object
explainer must be a shap object
contributions : pandas.DataFrame, np.ndarray or list
single or multiple contributions (multi-class) to handle.
if pandas.DataFrame, the index and columns should be shared with the prediction set.
if np.ndarray, index and columns will be generated according to the x dataset.
y_pred : pandas.Series, optional (default: None)
Prediction values (1 column only).
The index must be identical to the index of x_pred.
This is an interesting parameter for more explicit outputs. Shapash lets users define their own predict,
as they may wish to set their own threshold (classification)
preprocessing : category_encoders, ColumnTransformer, list, dict, optional (default: None)
--> Different types of preprocessing are available:
- A single category_encoders (OrdinalEncoder/OnehotEncoder/BaseNEncoder/BinaryEncoder/TargetEncoder)
- A single ColumnTransformer with scikit-learn encoding or category_encoders transformers
- A list with multiple category_encoders with optional (dict, list of dict)
- A list with a single ColumnTransformer with optional (dict, list of dict)
- A dict
- A list of dict
postprocessing : dict, optional (default: None)
Dictionary of postprocessing modifications to apply to the x_pred dataframe.
Dictionary with feature names as keys (or column numbers, or domain labels referencing feature names),
which modifies the dataset feature by feature.
--> Different types of postprocessing are available, but the syntax is this one:
One key per feature, 5 different types of modifications:
>>> {
'feature1': {'type': 'prefix', 'rule': 'age: '},
'feature2': {'type': 'suffix', 'rule': '$/week '},
'feature3': {'type': 'transcoding', 'rule': {'code1': 'single', 'code2': 'married'}},
'feature4': {'type': 'regex', 'rule': {'in': 'AND', 'out': ' & '}},
'feature5': {'type': 'case', 'rule': 'lower'}
}
Only one transformation per feature is possible.
Example
--------
>>> xpl.compile(x=xtest_df,model=my_model)
"""
self.x_init = x
self.x_pred = inverse_transform(self.x_init, preprocessing)
self.preprocessing = preprocessing
self.model = model
self._case, self._classes = self.check_model()
self.check_label_dict()
if self.label_dict:
self.inv_label_dict = {v: k for k, v in self.label_dict.items()}
if explainer is not None and contributions is not None:
raise ValueError("You have to specify just one of these arguments: explainer, contributions")
if contributions is None:
contributions, explainer = shap_contributions(model, self.x_init, self.check_explainer(explainer))
adapt_contrib = self.adapt_contributions(contributions)
self.state = self.choose_state(adapt_contrib)
self.contributions = self.apply_preprocessing(self.validate_contributions(adapt_contrib), preprocessing)
self.check_contributions()
self.explainer = explainer
self.y_pred = self.check_y_pred(y_pred)
self.columns_dict = {i: col for i, col in enumerate(self.x_pred.columns)}
self.inv_columns_dict = {v: k for k, v in self.columns_dict.items()}
self.check_features_dict()
self.inv_features_dict = {v: k for k, v in self.features_dict.items()}
postprocessing = self.modify_postprocessing(postprocessing)
self.check_postprocessing(postprocessing)
self.postprocessing_modifications = self.check_postprocessing_modif_strings(postprocessing)
self.postprocessing = postprocessing
if self.postprocessing_modifications:
self.x_contrib_plot = copy.deepcopy(self.x_pred)
self.x_pred = self.apply_postprocessing(postprocessing)
self.data = self.state.assign_contributions(
self.state.rank_contributions(
self.contributions,
self.x_pred
)
)
self.features_imp = None
self.features_desc = self.check_features_desc()
def add(self, y_pred=None, label_dict=None, features_dict=None):
"""
The add method allows the user to add a label_dict, features_dict
or y_pred without compiling again (which can take a few moments).
y_pred can be used in the plot to color the scatter.
y_pred is needed in the to_pandas method.
label_dict and features_dict allow clearer displays of the results.
Parameters
----------
y_pred : pandas.Series, optional (default: None)
Prediction values (1 column only).
The index must be identical to the index of x_pred.
label_dict: dict, optional (default: None)
Dictionary mapping integer labels to domain names.
features_dict: dict, optional (default: None)
Dictionary mapping technical feature names to domain names.
"""
if y_pred is not None:
self.y_pred = self.check_y_pred(y_pred)
if label_dict is not None:
if not isinstance(label_dict, dict):
raise ValueError(
"""
label_dict must be a dict
"""
)
self.label_dict = label_dict
self.check_label_dict()
self.inv_label_dict = {v: k for k, v in self.label_dict.items()}
if features_dict is not None:
if not isinstance(features_dict, dict):
raise ValueError(
"""
features_dict must be a dict
"""
)
self.features_dict = features_dict
self.check_features_dict()
self.inv_features_dict = {v: k for k, v in self.features_dict.items()}
def choose_state(self, contributions):
"""
Select implementation of the smart explainer. Typically check if it is a
multi-class problem, in which case the implementation should be adapted
to lists of contributions.
Parameters
----------
contributions : object
Local contributions. Could also be a list of local contributions.
Returns
-------
object
SmartState or SmartMultiState, depending on the nature of the input.
"""
if isinstance(contributions, list):
return MultiDecorator(SmartState())
else:
return SmartState()
def adapt_contributions(self, contributions):
"""
If _case is "classification" and contributions is a np.array or pd.DataFrame,
this function transforms the contributions matrix into a list of 2 contributions
matrices: the opposite contributions matrix and the contributions matrix.
Parameters
----------
contributions : pandas.DataFrame, np.ndarray or list
Returns
-------
pandas.DataFrame, np.ndarray or list
contributions object modified
"""
return adapt_contributions(self._case, contributions)
def validate_contributions(self, contributions):
"""
Check len of list if _case is "classification"
Check contributions object type if _case is "regression"
Check type of contributions and transform into (list of) pd.Dataframe if necessary
Parameters
----------
contributions : pandas.DataFrame, np.ndarray or list
Returns
-------
pandas.DataFrame or list
"""
check_contribution_object(self._case, self._classes, contributions)
return self.state.validate_contributions(contributions, self.x_init)
def apply_preprocessing(self, contributions, preprocessing=None):
"""
Reconstruct contributions for the original features, taking a preprocessing step into account.
Parameters
----------
contributions : object
Local contributions, or list of local contributions.
preprocessing : object
Encoder taken from scikit-learn or category_encoders
Returns
-------
object
Reconstructed local contributions in the original space. Can be a list.
"""
if preprocessing:
return self.state.inverse_transform_contributions(
contributions,
preprocessing
)
else:
return contributions
def check_postprocessing_modif_strings(self, postprocessing=None):
"""
Check if any modification of postprocessing will convert numeric values into string values.
If so, return True, otherwise False.
Parameters
----------
postprocessing: dict
Dict of postprocessing modifications to apply.
Returns
-------
modif: bool
Boolean which is True if any numerical variable will be converted into string.
"""
modif = False
if postprocessing is not None:
for key in postprocessing.keys():
dict_postprocess = postprocessing[key]
if dict_postprocess['type'] in {'prefix', 'suffix'} \
and pd.api.types.is_numeric_dtype(self.x_pred[key]):
modif = True
return modif
def modify_postprocessing(self, postprocessing=None):
"""
Modifies the postprocessing parameter so that its keys are real feature names,
in case the given keys are not (resolving them through columns_dict
or inv_features_dict).
Parameters
----------
postprocessing : Dict
Dictionary of postprocessing to modify.
Returns
-------
Dict
Modified dictionary, with the same values but keys directly referencing feature names.
"""
if postprocessing:
new_dic = dict()
for key in postprocessing.keys():
if key in self.features_dict:
new_dic[key] = postprocessing[key]
elif key in self.columns_dict.keys():
new_dic[self.columns_dict[key]] = postprocessing[key]
elif key in self.inv_features_dict:
new_dic[self.inv_features_dict[key]] = postprocessing[key]
else:
raise ValueError(f"Feature name '{key}' not found in the dataset.")
return new_dic
def check_postprocessing(self, postprocessing):
"""
Check that the postprocessing parameter has valid attributes.
Check that postprocessing is a dictionary, and that its parameters are valid.
Parameters
----------
postprocessing : dict
Dictionary of postprocessing that needs to be checked.
"""
check_postprocessing(self.x_pred, postprocessing)
def apply_postprocessing(self, postprocessing=None):
"""
Modifies the x_pred DataFrame according to postprocessing modifications, if any.
Parameters
----------
postprocessing: Dict
Dictionary of postprocessing modifications to apply to x_pred.
Returns
-------
pandas.DataFrame
Returns x_pred if postprocessing is empty, the modified dataframe otherwise.
"""
if postprocessing:
return apply_postprocessing(self.x_pred, postprocessing)
else:
return self.x_pred
def check_y_pred(self, ypred=None):
"""
Check if y_pred is a one column dataframe of integer or float
and if y_pred index matches x_pred index
Parameters
----------
ypred: pandas.DataFrame (optional)
User-specified prediction values.
"""
return check_ypred(self.x_pred, ypred)
def check_model(self):
"""
Check if the model has a predict_proba method and determine
whether the problem is a regression or a classification.
Returns
-------
string:
'regression' or 'classification' according to the attributes of the model
"""
_case, _classes = check_model(self.model)
return _case, _classes
def check_label_dict(self):
"""
Check if label_dict and model _classes match
"""
if self._case != "regression":
return check_label_dict(self.label_dict, self._case, self._classes)
def check_features_dict(self):
"""
Check the features_dict and add any missing keys so that all
input X columns are covered.
"""
for feature in (set(list(self.columns_dict.values())) - set(list(self.features_dict))):
self.features_dict[feature] = feature
def check_contributions(self):
"""
Check if contributions and prediction set match in terms of shape and index.
"""
if not self.state.check_contributions(self.contributions, self.x_pred):
raise ValueError(
"""
Prediction set and contributions should have exactly the same number of lines
and number of columns. The order of the columns must be the same.
Please check x, contributions and preprocessing arguments.
"""
)
def check_label_name(self, label, origin=None):
"""
Convert a string label into an integer id. If the label is already
an integer, nothing is done. In all other cases an error is raised.
Parameters
----------
label: int or string
Integer (id) or string (business names)
origin: None, 'num', 'code', 'value' (default: None)
Kind of label passed as parameter.
Returns
-------
tuple
label num, label code (class of the model), label value
"""
if origin is None:
if label in self._classes:
origin = 'code'
elif self.label_dict is not None and label in self.label_dict.values():
origin = 'value'
elif isinstance(label, int) and label in range(-1, len(self._classes)):
origin = 'num'
try:
if origin == 'num':
label_num = label
label_code = self._classes[label]
label_value = self.label_dict[label_code] if self.label_dict else label_code
elif origin == 'code':
label_code = label
label_num = self._classes.index(label)
label_value = self.label_dict[label_code] if self.label_dict else label_code
elif origin == 'value':
label_code = self.inv_label_dict[label]
label_num = self._classes.index(label_code)
label_value = label
else:
raise ValueError
except ValueError:
raise Exception({"message": "Origin must be 'num', 'code' or 'value'."})
except Exception:
raise Exception({"message": f"Label ({label}) not found for origin ({origin})"})
return label_num, label_code, label_value
def check_features_name(self, features):
"""
Convert a list of feature names (strings) or feature ids into feature ids.
Features names can be part of columns_dict or features_dict.
Parameters
----------
features : List
List of ints (columns ids) or of strings (business names)
Returns
-------
list of ints
Columns ids compatible with var_dict
"""
return check_features_name(self.columns_dict, self.features_dict, features)
def check_features_desc(self):
"""
Check the x_pred dataframe and compute the number of unique values of each feature,
used in the plot part.
Returns
-------
dict
Number of unique values in x_pred
"""
return dict(self.x_pred.nunique())
def check_attributes(self, attribute):
"""
Check that the explainer has the specified attribute.
Parameters
----------
attribute: string
the label of the attribute to test
Returns
-------
Content of the specified attribute from the SmartExplainer instance.
"""
if not hasattr(self, attribute):
raise ValueError(
"""
attribute {0} isn't an attribute of this explainer.
""".format(attribute))
return self.__dict__[attribute]
def filter(
self,
features_to_hide=None,
threshold=None,
positive=None,
max_contrib=None
):
"""
The filter method is an important method which allows the user to summarize
local explainability using parameters that fit their use case.
The filter method is used with the local_plot method of SmartPlotter to see the concrete result of this summary
with a local contribution bar chart.
Please see the local_plot tutorial for how these two methods are combined in a concrete example.
Parameters
----------
features_to_hide : list, optional (default: None)
List of strings, containing features to hide.
threshold : float, optional (default: None)
Absolute threshold below which any contribution is hidden.
positive: bool, optional (default: None)
If True, hide negative values; if False, hide positive values.
If None, hide nothing.
max_contrib : int, optional (default: None)
Maximum number of contributions to show.
"""
mask = [self.state.init_mask(self.data['contrib_sorted'], True)]
if features_to_hide:
mask.append(
self.state.hide_contributions(
self.data['var_dict'],
features_list=self.check_features_name(features_to_hide)
)
)
if threshold:
mask.append(
self.state.cap_contributions(
self.data['contrib_sorted'],
threshold=threshold
)
)
if positive is not None:
mask.append(
self.state.sign_contributions(
self.data['contrib_sorted'],
positive=positive
)
)
self.mask = self.state.combine_masks(mask)
if max_contrib:
self.mask = self.state.cutoff_contributions(self.mask, max_contrib=max_contrib)
self.masked_contributions = self.state.compute_masked_contributions(
self.data['contrib_sorted'],
self.mask
)
self.mask_params = {
'features_to_hide': features_to_hide,
'threshold': threshold,
'positive': positive,
'max_contrib': max_contrib
}
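# Illustrative usage sketch (hypothetical parameter values; assumes `xpl` is a compiled
# SmartExplainer): a typical filter call, whose mask_params are then reused by to_pandas.
# >>> xpl.filter(max_contrib=3, threshold=0.05, positive=True)
# >>> summary_df = xpl.to_pandas()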
def save(self, path):
"""
The save method allows the user to save the SmartExplainer object on disk
as a pickle file.
The save method can be useful: you don't have to recompile to display
results later.
Parameters
----------
path : str
File path to store the pickle file
Example
--------
>>> xpl.save('path_to_pkl/xpl.pkl')
"""
dict_to_save = {}
for att in self.__dict__.keys():
if isinstance(getattr(self, att), (list, dict, pd.DataFrame, pd.Series, type(None))) or att == "model":
dict_to_save.update({att: getattr(self, att)})
save_pickle(dict_to_save, path)
def load(self, path):
"""
The load method allows a Shapash user to reuse a pickled SmartExplainer.
To use this method you must first declare your SmartExplainer object;
see the following example.
Parameters
----------
path : str
File path of the pickle file.
Example
--------
>>> xpl = SmartExplainer()
>>> xpl.load('path_to_pkl/xpl.pkl')
"""
dict_to_load = load_pickle(path)
if isinstance(dict_to_load, dict):
for elem in dict_to_load.keys():
setattr(self, elem, dict_to_load[elem])
self._case, self._classes = self.check_model()
self.state = self.choose_state(self.contributions)
else:
raise ValueError(
"pickle file must contain dictionary"
)
def predict_proba(self):
"""
The predict_proba method computes the probability values for each x_init row.
"""
self.proba_values = predict_proba(self.model, self.x_init, self._classes)
def to_pandas(
self,
features_to_hide=None,
threshold=None,
positive=None,
max_contrib=None,
proba=False
):
"""
The to_pandas method allows the user to export the summary of local explainability.
This method proposes a set of parameters to summarize the explainability of each point.
If the user does not specify any, the to_pandas method uses the parameters specified during
the last execution of the filter method.
In the classification case, the to_pandas method summarizes the explainability which corresponds
to the predicted values specified by the user (with the compile or add method).
The proba parameter adds the corresponding predict_proba value for each point.
In the classification case, there are two ways to use this to_pandas method:
- Provide a real prediction set to explain
- Focus on a constant target value and look at the proba and explainability corresponding to each point
(in that case, specify a constant pd.Series with the add or compile method)
Examples are presented in the local_plot tutorial (please check the tutorial part of this doc)
Parameters
----------
features_to_hide : list, optional (default: None)
List of strings, containing features to hide.
threshold : float, optional (default: None)
Absolute threshold below which any contribution is hidden.
positive: bool, optional (default: None)
If True, hide negative values. Hide positive values otherwise. If None, hide nothing.
max_contrib : int, optional (default: 5)
Number of contributions to show in the pandas df
proba : bool, optional (default: False)
adding proba in output df
Returns
-------
pandas.DataFrame
- selected explanation of each row for classification case
Examples
--------
>>> summary_df = xpl.to_pandas(max_contrib=2,proba=True)
>>> summary_df
pred proba feature_1 value_1 contribution_1 feature_2 value_2 contribution_2
0 0 0.756416 Sex 1.0 0.322308 Pclass 3.0 0.155069
1 3 0.628911 Sex 2.0 0.585475 Pclass 1.0 0.370504
2 0 0.543308 Sex 2.0 -0.486667 Pclass 3.0 0.255072
"""
# Classification: y_pred is needed
if self.y_pred is None:
raise ValueError(
"You have to specify y_pred argument. Please use add() or compile() method"
)
# Apply filter method if necessary
if all(var is None for var in [features_to_hide, threshold, positive, max_contrib]) \
and hasattr(self, 'mask_params'):
print('to_pandas params: ' + str(self.mask_params))
else:
self.filter(features_to_hide=features_to_hide,
threshold=threshold,
positive=positive,
max_contrib=max_contrib)
# Summarize information
self.data['summary'] = self.state.summarize(
self.data['contrib_sorted'],
self.data['var_dict'],
self.data['x_sorted'],
self.mask,
self.columns_dict,
self.features_dict
)
# Matching with y_pred
if proba:
self.predict_proba()
proba_values = self.proba_values
else:
proba_values = None
y_pred, summary = keep_right_contributions(self.y_pred, self.data['summary'],
self._case, self._classes,
self.label_dict, proba_values)
return pd.concat([y_pred,summary], axis=1)
def compute_features_import(self, force=False):
"""
Compute relative feature importance: the sum of the absolute values
of the contributions for each feature.
Feature importance is computed in base 100.
Parameters
----------
force: bool (default: False)
True to force the computation even if feature importance
has already been calculated.
Returns
-------
pd.Series (Regression)
or list of pd.Series (Classification: one Series for each target modality)
Each Series: feature importance, one row per feature,
index of the Series = contributions.columns
"""
if self.features_imp is None or force:
self.features_imp = self.state.compute_features_import(self.contributions)
def init_app(self):
"""
Simple init of SmartApp, in case the smart app is hosted some other way.
"""
self.smartapp = SmartApp(self)
def run_app(self, port: int = None, host: str = None) -> CustomThread:
"""
The run_app method launches the interpretability web app associated with the shapash object.
It can be used directly in a Jupyter notebook;
the link to the web app is shown directly in the Jupyter output.
Use the object.kill() method to kill the current instance.
Examples are presented in the web_app tutorial (please check the tutorial part of this doc).
Parameters
----------
port: int (default: None)
The port is by default on 8050. You can specify a custom port
for your webapp.
host: str (default: None)
The default host is '0.0.0.0'. You can specify a custom
ip address for your app
Returns
-------
CustomThread
Return the thread instance of your server.
Example
--------
>>> app = xpl.run_app()
>>> app.kill()
"""
if self.y_pred is None:
raise ValueError(
"You have to specify y_pred argument. Please use add() or compile() method"
)
if hasattr(self, '_case'):
self.smartapp = SmartApp(self)
if host is None:
host = "0.0.0.0"
if port is None:
port = 8050
host_name = get_host_name()
server_instance = CustomThread(
target=lambda: self.smartapp.app.run_server(debug=False, host=host, port=port))
if host_name is None:
host_name = host
elif host != "0.0.0.0":
host_name = host
server_instance.start()
logging.info(f"Your Shapash application run on http://{host_name}:{port}/")
logging.info("Use the method .kill() to down your app.")
return server_instance
else:
raise ValueError("Explainer must be compiled before running app.")
def to_smartpredictor(self):
"""
Create a SmartPredictor object designed from the following attributes
needed from the SmartExplainer Object :
features_dict: dict
Dictionary mapping technical feature names to domain names.
label_dict: dict
Dictionary mapping integer labels to domain names (classification - target values).
columns_dict: dict
Dictionary mapping integer column number to technical feature names.
features_types: dict
Dictionary mapping features to the required types.
model: model object
model used to check the different values of the target and estimate predict_proba
explainer : explainer object
explainer must be a shap object
preprocessing: category_encoders, ColumnTransformer, list or dict
The preprocessing applied to the original data.
postprocessing: dict
Dictionary of postprocessing modifications to apply to the x_pred dataframe.
_case: string
String that informs if the model used is for a classification or a regression problem.
_classes: list, None
List of labels if the model used is for a classification problem, None otherwise.
mask_params: dict (optional)
Dictionary allowing the user to define and apply a filter to summarize the local explainability.
"""
if self.explainer is None:
raise ValueError("""SmartPredictor need an explainer, please compile without contributions or specify the
explainer used. Make change in compile() step""")
self.features_types = {features : str(self.x_pred[features].dtypes) for features in self.x_pred.columns}
listattributes = ["features_dict", "model", "columns_dict", "explainer", "features_types",
"label_dict", "preprocessing", "postprocessing"]
params_smartpredictor = [self.check_attributes(attribute) for attribute in listattributes]
if not hasattr(self,"mask_params"):
self.mask_params = {
"features_to_hide": None,
"threshold": None,
"positive": None,
"max_contrib": None
}
params_smartpredictor.append(self.mask_params)
return SmartPredictor(*params_smartpredictor)
def check_x_y_attributes(self, x_str, y_str):
"""
Check if x_str and y_str are attributes of the SmartExplainer
Parameters
----------
x_str: string
label of the attribute x
y_str: string
label of the attribute y
Returns
-------
list of objects held by attributes x and y.
"""
if not (isinstance(x_str, str) and isinstance(y_str, str)):
raise ValueError(
"""
x and y must be strings.
"""
)
params_checkypred = []
attributs_explainer = [x_str, y_str]
for attribut in attributs_explainer:
if hasattr(self, attribut):
params_checkypred.append(self.__dict__[attribut])
else:
params_checkypred.append(None)
return params_checkypred
def check_explainer(self, explainer):
"""
Check if the explainer class corresponds to a shap explainer object.
"""
return check_explainer(explainer)
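# Illustrative end-to-end sketch (hypothetical names; assumes a fitted model `my_model`,
# an encoder `encoder`, a test set `xtest_df` and predictions `ypred`):
# >>> xpl = SmartExplainer(features_dict={'Pclass': 'Ticket class'})
# >>> xpl.compile(x=xtest_df, model=my_model, preprocessing=encoder, y_pred=ypred)
# >>> xpl.filter(max_contrib=5)
# >>> summary_df = xpl.to_pandas(proba=True)
# >>> predictor = xpl.to_smartpredictor()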
|
crypto_partial_book_fetch.py
|
import asyncio
from mimetypes import init
import time
import traceback
from datetime import datetime
from binance.client import Client
from binance import ThreadedDepthCacheManager
from binance import ThreadedWebsocketManager
from binance import AsyncClient, BinanceSocketManager
from multiprocessing import ProcessError, Queue, Process
class binance_book():
def __init__(self) -> None:
api_key = "5fNyacwdzx7QRXfsoPq1kYs6wCNvX2Ijsk3VPmqrcwA72FMQ9d6PqfXXyifyFPaJ"
api_secret = "IVPmECK6pHUsUhSQ6IP5BLtrxUkIz3K6YS1HJEeSeFsCY5Do7CTAkokf2z7Sfc2E"
# self.client = Client(api_key, api_secret, testnet=False)
# print("Client connected.")
self.symbol='SHIBUSDT'
self.stat = True
self.cum_upd = 0
# Get Market Depth
def book_construct(self, q:Queue):
symbol = self.symbol
depth = q.get()
self.book_bid = {x[0]:float(x[1]) for x in depth['bids']}
self.book_ask = {x[0]:float(x[1]) for x in depth['asks']}
initUpdId = depth['lastUpdateId']
lstUpdId = depth['lastUpdateId']
reorder_cache = {}
f = open(f'data/{symbol}_partial.txt', 'w')
f.writelines(list(map(lambda x: 'a A '+x[0]+' '+str(x[1])+'\n', self.book_ask.items())))
f.writelines(list(map(lambda x: 'b A '+x[0]+' '+str(x[1])+'\n', self.book_bid.items())))
try:
while self.stat:
res = q.get()
if lstUpdId+1 <= res['lastUpdateId']:
self.update_book(res, f)
lstUpdId = res['lastUpdateId']
self.cum_upd = res['lastUpdateId'] - initUpdId
self.stat = self.cum_upd <= 1000
except Exception as e:
print(e)
track = traceback.format_exc()
print(track)
else:
self.snapshot()
finally:
f.close()
def snapshot(self):
symbol = self.symbol
f = open(f'data/{symbol}_partial_a_{self.cum_upd}.txt', 'w')
f.writelines(list(map(lambda x: x[0]+' '+str(x[1])+'\n', self.book_ask.items())))
f.close()
f = open(f'data/{symbol}_partial_b_{self.cum_upd}.txt', 'w')
f.writelines(list(map(lambda x: x[0]+' '+str(x[1])+'\n', self.book_bid.items())))
f.close()
def update_book(self, res, f):
if len(res['asks']):
latest = [x[0] for x in res['asks']]
for price in list(self.book_ask.keys()):
if price not in latest:
diff = -self.book_ask.pop(price, None)
f.write('a R '+price+' '+str(diff)+'\n')
for price, qty in res['asks']:
qty = float(qty)
pre_qty = 0 if self.book_ask.get(price) is None else self.book_ask[price]
diff = qty - pre_qty
if diff == 0:
continue
elif pre_qty == 0:
self.book_ask[price] = qty
f.write('a A '+price+' '+str(diff)+'\n')
else:
self.book_ask[price] = qty
f.write('a E '+price+' '+str(diff)+'\n')
if len(res['bids']):
latest = [x[0] for x in res['bids']]
for price in list(self.book_bid.keys()):
if price not in latest:
diff = -self.book_bid.pop(price, None)
f.write('b R '+price+' '+str(diff)+'\n')
for price, qty in res['bids']:
qty = float(qty)
pre_qty = 0 if self.book_bid.get(price) is None else self.book_bid[price]
diff = qty - pre_qty
if diff == 0:
continue
elif pre_qty == 0:
self.book_bid[price] = qty
f.write('b A '+price+' '+str(diff)+'\n')
else:
self.book_bid[price] = qty
f.write('b E '+price+' '+str(diff)+'\n')
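# Format note (illustrative): update_book writes one change per line as
# "<side> <op> <price> <qty_delta>", where side is 'a' (ask) or 'b' (bid) and op is
# 'A' (level added, previous qty was 0), 'E' (existing level changed) or 'R' (level removed).
# Worked example: an ask level at 0.00001234 growing from 100.0 to 250.0 is logged as
#   a E 0.00001234 150.0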
async def SocketThread(self, q:Queue):
symbol = self.symbol
client = await AsyncClient.create()
bm = BinanceSocketManager(client)
# ds = bm.depth_socket(symbol)
ds = bm.multiplex_socket(['bnbbtc@depth5@100ms'])
# start any sockets here, i.e a trade socket
# ts = bm.trade_socket('BNBBTC')
# then start receiving messages
print("WebSocket connected.")
async with ds as tscm:
while self.stat:
res = await tscm.recv()
q.put(res['data'])
print(res['data']['lastUpdateId'])
await client.close_connection()
def run_socket(self, q):
loop = asyncio.get_event_loop()
loop.run_until_complete(self.SocketThread(q))
def run_book_manage(self):
q = Queue()
p1 = Process(target=self.run_socket, args=(q,))
p2 = Process(target=self.book_construct, args=(q,))
p1.start()
p2.start()
p2.join()
p1.kill()
'''
def webSocket():
symbol = 'BNBBTC'
twm = ThreadedWebsocketManager(api_key=api_key, api_secret=api_secret)
# start is required to initialise its internal loop
twm.start()
def handle_socket_message(msg):
print(f"message type: {msg['e']}")
print(msg)
twm.start_kline_socket(callback=handle_socket_message, symbol=symbol)
# multiple sockets can be started
# twm.start_depth_socket(callback=handle_socket_message, symbol=symbol)
# or a multiplex socket can be started like this
# see Binance docs for stream names
# streams = ['bnbbtc@miniTicker', 'bnbbtc@bookTicker']
# streams = ['BNBBTC@depth20@100ms']
# twm.start_multiplex_socket(callback=handle_socket_message, streams=streams)
twm.join()
def depthCache():
dcm = ThreadedDepthCacheManager(api_key=api_key, api_secret=api_secret)
# start is required to initialise its internal loop
dcm.start()
def handle_depth_cache(depth_cache):
print(f"symbol {depth_cache.symbol}")
print("top 5 bids")
print(depth_cache.get_bids()[:5])
print("top 5 asks")
print(depth_cache.get_asks()[:5])
print("last update time {}".format(depth_cache.update_time))
dcm_name = dcm.start_depth_cache(handle_depth_cache, symbol='BNBBTC')
# multiple depth caches can be started
dcm_name = dcm.start_depth_cache(handle_depth_cache, symbol='ETHBTC')
dcm.join()
# dcm.stop_socket(dcm_name) # stop individual stream
# dcm.stop() # stop all
'''
if __name__ == "__main__":
bb = binance_book()
bb.run_book_manage()
|
mail.py
|
# If this file were named email.py, it would conflict with the standard library email package
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
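# Illustrative usage sketch (hypothetical names; assumes an active Flask app/request context,
# MAIL_SUBJECT_PREFIX and MAIL_SENDER set in the app config, and email/confirm.txt / .html templates):
# >>> thr = send_email(user.email, 'Confirm Your Account', 'email/confirm', user=user, token=token)
# >>> thr.join()  # optional: block until the background send completes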
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
from ._helpers import _populate_api_server_access_profile, _set_load_balancer_sku, _set_vm_set_type
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterServicePrincipalProfile
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.v2019_08_01.models import ContainerServiceStorageProfileTypes
from azure.mgmt.containerservice.v2019_08_01.models import ManagedCluster
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfile
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfileManagedOutboundIPs
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfileOutboundIPPrefixes
from azure.mgmt.containerservice.v2019_08_01.models import ManagedClusterLoadBalancerProfileOutboundIPs
from azure.mgmt.containerservice.v2019_08_01.models import AgentPool
from azure.mgmt.containerservice.v2019_08_01.models import ResourceReference
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftAgentPoolProfileRole
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAADIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedCluster
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftRouterProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAuthProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import NetworkProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterMonitorProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
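# Illustrative usage sketch: `which` mirrors the shell `which`/`where` lookup and returns the
# first executable match on PATH, or None, e.g.:
# >>> if not which('kubectl'):
# ...     raise CLIError('Can not find kubectl executable in PATH')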
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
except URLError:
time.sleep(1)
break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
if orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
raise CLIError("Unsupported platform '{}'.".format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None):
"""Install kubectl, a command-line interface for Kubernetes clusters."""
source_url = "https://storage.googleapis.com/kubernetes-release/release"
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
if client_version == 'latest':
context = _ssl_context()
version = urlopen(source_url + '/stable.txt', context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = source_url + '/{}/bin/{}/amd64/{}'
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
raise CLIError("Unsupported platform '{}'.".format(system))
logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def k8s_install_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("install", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def k8s_upgrade_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("upgrade", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def _k8s_install_or_upgrade_connector(helm_cmd, cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group):
from subprocess import PIPE, Popen
instance = client.get(resource_group_name, name)
helm_not_installed = 'Helm not detected, please verify if it is installed.'
url_chart = chart_url
if image_tag is None:
image_tag = 'latest'
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# If SPN is specified, the secret should also be specified
if service_principal is not None and client_secret is None:
raise CLIError('--client-secret must be specified when --service-principal is specified')
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, aci_resource_group or resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
# Validate the location against the available ACI regions
_validate_aci_location(norm_location)
# Get the credentials from an AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
subscription_id = get_subscription_id(cmd.cli_ctx)
# Get the TenantID
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, tenant_id = profile.get_login_credentials()
# Check if we want the linux connector
if os_type.lower() in ['linux', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Linux', instance.enable_rbac, instance.fqdn)
# Check if we want the windows connector
if os_type.lower() in ['windows', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Windows', instance.enable_rbac, instance.fqdn)
def _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, os_type, use_rbac, masterFqdn):
rbac_install = "true" if use_rbac else "false"
node_taint = 'azure.com/aci'
helm_release_name = connector_name.lower() + '-' + os_type.lower() + '-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
k8s_master = 'https://{}'.format(masterFqdn)
logger.warning("Deploying the ACI connector for '%s' using Helm", os_type)
try:
values = 'env.nodeName={},env.nodeTaint={},env.nodeOsType={},image.tag={},rbac.install={}'.format(
node_name, node_taint, os_type, image_tag, rbac_install)
if service_principal:
values += ",env.azureClientId=" + service_principal
if client_secret:
values += ",env.azureClientKey=" + client_secret
if subscription_id:
values += ",env.azureSubscriptionId=" + subscription_id
if tenant_id:
values += ",env.azureTenantId=" + tenant_id
if aci_resource_group:
values += ",env.aciResourceGroup=" + aci_resource_group
if norm_location:
values += ",env.aciRegion=" + norm_location
# Currently, we need to set the master FQDN.
# This is temporary and we should remove it when possible
values += ",env.masterUri=" + k8s_master
if helm_cmd == "install":
subprocess.call(["helm", "install", url_chart, "--name", helm_release_name, "--set", values])
elif helm_cmd == "upgrade":
subprocess.call(["helm", "upgrade", helm_release_name, url_chart, "--set", values])
except subprocess.CalledProcessError as err:
raise CLIError('Could not deploy the ACI connector Chart: {}'.format(err))
def k8s_uninstall_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, graceful=False, os_type='Linux'):
from subprocess import PIPE, Popen
helm_not_installed = "Error : Helm not detected, please verify if it is installed."
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# Get the credentials from an AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
if os_type.lower() in ['linux', 'both']:
helm_release_name = connector_name.lower() + '-linux-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
if os_type.lower() in ['windows', 'both']:
helm_release_name = connector_name.lower() + '-windows-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
def _undeploy_connector(graceful, node_name, helm_release_name):
if graceful:
logger.warning('Graceful option selected, will try to drain the node first')
from subprocess import PIPE, Popen
kubectl_not_installed = 'Kubectl not detected, please verify if it is installed.'
try:
Popen(["kubectl"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(kubectl_not_installed)
try:
drain_node = subprocess.check_output(
['kubectl', 'drain', node_name, '--force', '--delete-local-data'],
universal_newlines=True)
if not drain_node:
raise CLIError('Could not find the node, make sure you' +
' are using the correct --os-type')
except subprocess.CalledProcessError as err:
raise CLIError('Could not find the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
logger.warning("Undeploying the '%s' using Helm", helm_release_name)
try:
subprocess.call(['helm', 'del', helm_release_name, '--purge'])
except subprocess.CalledProcessError as err:
raise CLIError('Could not undeploy the ACI connector Chart: {}'.format(err))
try:
subprocess.check_output(
['kubectl', 'delete', 'node', node_name],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not delete the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _add_role_assignment(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
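# Worked example (illustrative): with the default delay of 2 seconds, the retry loop above
# sleeps 2, 4, 6, ..., 20 seconds across its 10 attempts, i.e. up to about 110 seconds in
# total, before giving up and returning False.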
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError('When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
if not any([ids, assignee, role, resource_group_name, scope, yes]):
from knack.prompting import prompt_y_n
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
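# Worked example (illustrative, hypothetical inputs): for name='myCluster!',
# resource_group_name='my-rg' and subscription_id='abcdef01-0000-0000-0000-000000000000',
# the sanitized parts are 'myCluster' (max 10 chars), 'my-rg' (max 16 chars) and 'abcdef'
# (first 6 chars of the subscription id), giving 'myCluster-my-rg-abcdef'.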
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all linux machines with the SSH RSA
public key string. Your key should include three parts, for example
'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'
:type ssh_key_value: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
Possible values are StorageAccount and ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
:param agent_vnet_subnet_id: The vnet subnet id for agent pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
Possible values are StorageAccount and ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
:param windows: If true, the cluster will be built for running Windows container.
:type windows: bool
:param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# if api-version is not specified, or specified in a version not supported
# override based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
# set location if void
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
# set agent_ports if void
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
# The resources.properties fields should match the ContainerService API model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd.cli_ctx, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
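# Illustrative on-disk layout of acsServicePrincipal.json (values made up), keyed by
# subscription id so credentials for multiple subscriptions can coexist:
#   {
#     "00000000-0000-0000-0000-000000000000": {
#       "client_secret": "<secret>",
#       "service_principal": "<appId>"
#     }
#   }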
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.cli.core.profiles import ResourceType, get_sdk
DeploymentProperties = get_sdk(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, 'DeploymentProperties', mod='models')
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
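# Note: the deployment is always submitted in 'incremental' mode, so resources already in the
# resource group that are not part of the generated template are left untouched; when
# validate is set, the template is only validated (and logged) and nothing is deployed.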
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge the downloaded kubeconfig into the existing file when we wrote to a separate candidate path
if path_candidate != path:
try:
merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning('Failed to merge credentials to kube config file: %s', exc)
logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if not addition.get(key, False):
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n, NoTTYException
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
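# Illustrative behaviour of _handle_merge (kubeconfig entries made up): merging
#   addition = {'clusters': [{'name': 'c1', 'cluster': {'server': 'https://new'}}]}
# into an existing config that already holds a different cluster named 'c1' removes the old
# entry when replace=True (or when the entries are identical); otherwise the user is asked
# whether to overwrite, and a CLIError is raised if no TTY is available to prompt.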
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
# guard against an empty or unparsable kubeconfig before it is dereferenced below
if addition is None:
raise CLIError('failed to load additional configuration from {}'.format(addition_file))
if context_name is not None:
addition['contexts'][0]['name'] = context_name
addition['contexts'][0]['context']['cluster'] = context_name
addition['clusters'][0]['name'] = context_name
addition['current-context'] = context_name
# rename the admin context so it doesn't overwrite the user context
for ctx in addition.get('contexts', []):
try:
if ctx['context']['user'].startswith('clusterAdmin'):
admin_name = ctx['name'] + '-admin'
addition['current-context'] = ctx['name'] = admin_name
break
except (KeyError, TypeError):
continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
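# Example (illustrative): _rand_str(6) could return 'k3x9qa' -- characters are drawn from
# lowercase letters and digits via random.SystemRandom, so the result is suitable for
# hard-to-guess name suffixes.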
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
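# Note: on Python 3 this is roughly equivalent to os.makedirs(path, exist_ok=True); the
# explicit EEXIST/isdir check mirrors the Stack Overflow recipe referenced above.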
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds,
required_resource_access=required_resource_accesses)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
try:
if key_creds:
client.update_key_credentials(object_id, key_creds)
if password_creds:
client.update_password_credentials(object_id, password_creds)
if reply_urls:
client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls))
return
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx, role, assignee, resource_group_name, scope)
def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
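# Illustrative results (subscription id made up):
#   _build_role_scope(None, None, '0000-sub')   -> '/subscriptions/0000-sub'
#   _build_role_scope('myRG', None, '0000-sub') -> '/subscriptions/0000-sub/resourceGroups/myRG'
# Passing an explicit scope returns it unchanged, and supplying both a scope and a resource
# group raises a CLIError because the resource group would be redundant.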
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
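# Example (illustrative): _update_dict({'count': 1, 'vmSize': 'Standard_D2_v2'}, {'count': 3})
# returns {'count': 3, 'vmSize': 'Standard_D2_v2'}; keys from the second dict win and neither
# input is mutated, since a shallow copy of dict1 is updated in place.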
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
# pylint: disable=too-many-statements
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
listen_address='127.0.0.1', listen_port='8001'):
if not which('kubectl'):
raise CLIError('Cannot find the kubectl executable in PATH')
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
addon_profile = addon_profiles.get("kubeDashboard", ManagedClusterAddonProfile(enabled=True))
if not addon_profile.enabled:
raise CLIError('The kube-dashboard addon was disabled for this managed cluster.\n'
'To use "az aks browse" first enable the add-on\n'
'by running "az aks enable-addons --addons kube-dashboard".')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = '{0}://{1}:{2}/'.format(protocol, listen_address, listen_port)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{}'.format(term_id),
json={"url": result['url']})
logger.warning('To view the console, please open %s in a new tab', result['url'])
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(proxy_url)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
"port-forward", "--address", listen_address, dashboard_pod,
"{0}:{1}".format(listen_port, dashboard_port)], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
# bytes.find returns -1 when the flag error is absent, so compare explicitly
if err.output.find(b'unknown flag: --address') != -1:
if listen_address != '127.0.0.1':
logger.warning('"--address" is only supported in kubectl v1.13 and later.')
logger.warning('The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
"port-forward", dashboard_pod, "{0}:{1}".format(listen_port, dashboard_port)])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
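# Examples (illustrative): _trim_nodepoolname('') -> 'nodepool1', while
# _trim_nodepoolname('averylongnodepoolname') -> 'averylongnod' (truncated to 12 characters).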
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_size=0,
node_count=3,
nodepool_name="nodepool1",
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
network_plugin=None,
network_policy=None,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
max_pods=0,
min_count=None,
max_count=None,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
zones=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
api_server_authorized_ip_ranges=None,
attach_acr=None,
no_wait=False):
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = _set_load_balancer_sku(load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
availability_zones=zones,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
scope = vnet_subnet_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
service_principal_profile.client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = _get_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes)
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address, network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id
)
monitoring = False
if 'omsagent' in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
aad_profile = None
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
api_server_access_profile = None
if api_server_authorized_ip_ranges:
api_server_access_profile = _populate_api_server_access_profile(api_server_authorized_ip_ranges)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
mc = ManagedCluster(
location=location,
tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=not disable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
api_server_access_profile=api_server_access_profile
)
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
result = sdk_no_wait(no_wait,
client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name, parameters=mc)
# add cluster spn with Monitoring Metrics Publisher role assignment to the cluster resource
# MDM metrics are supported only in the Azure public cloud, so add the role assignment only in that cloud
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurecloud' and monitoring:
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_profile.client_id, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for monitoring addon. '
'Are you an Owner on this subscription?')
return result
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
service_principal_client_id = instance.service_principal_profile.client_id
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait)
if 'omsagent' in instance.addon_profiles:
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
cloud_name = cmd.cli_ctx.cloud.name
# MDM metrics are supported only in the Azure public cloud, so add the role assignment only in that cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_client_id, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_get_versions(cmd, client, location):
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False, context_name=None):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
ADDONS = {
'http_application_routing': 'httpApplicationRouting',
'monitoring': 'omsagent',
'virtual-node': 'aciConnector',
'kube-dashboard': 'kubeDashboard'
}
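# The keys above are the addon names accepted on the command line (e.g. via --enable-addons);
# the values are the corresponding addon profile names stored on the managed cluster, so
# "--enable-addons monitoring" ends up toggling the 'omsagent' profile.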
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
reset_service_principal=False,
reset_aad=False,
service_principal=None,
client_secret=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_client_app_id=None,
aad_tenant_id=None,
no_wait=False):
if bool(reset_service_principal) == bool(reset_aad):
raise CLIError('usage error: --reset-service-principal | --reset-aad')
if reset_service_principal:
if service_principal is None or client_secret is None:
raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
return sdk_no_wait(no_wait,
client.reset_service_principal_profile,
resource_group_name,
name, service_principal, client_secret)
if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
'--aad-server-app-secret SECRET [--aad-tenant-id ID]')
parameters = {
'clientAppID': aad_client_app_id,
'serverAppID': aad_server_app_id,
'serverAppSecret': aad_server_app_secret,
'tenantID': aad_tenant_id
}
return sdk_no_wait(no_wait,
client.reset_aad_profile,
resource_group_name,
name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
raise CLIError('There is more than one node pool in the cluster. '
'Please specify the node pool name, or use the "az aks nodepool" command to scale a node pool')
if node_count == 0:
raise CLIError("Can't scale down to 0 nodes.")
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
attach_acr=None,
detach_acr=None,
api_server_authorized_ip_ranges=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
update_lb_profile = load_balancer_managed_outbound_ip_count is not None or \
load_balancer_outbound_ips is not None or load_balancer_outbound_ip_prefixes is not None
if (update_autoscaler != 1 and not update_lb_profile and
not attach_acr and
not detach_acr and
api_server_authorized_ip_ranges is None):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler", '
'"--disable-cluster-autoscaler", '
'"--update-cluster-autoscaler", '
'"--load-balancer-managed-outbound-ip-count", '
'"--load-balancer-outbound-ips", '
'"--load-balancer-outbound-ip-prefixes", '
'"--attach-acr", "--detach-acr" or '
'"--api-server-authorized-ip-ranges"')
instance = client.get(resource_group_name, name)
# For multi-agent pool, use the az aks nodepool command
if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
raise CLIError('There is more than one node pool in the cluster. Please use the "az aks nodepool" command '
'to update autoscaler settings per node pool')
node_count = instance.agent_pool_profiles[0].count
_validate_autoscaler_update_counts(min_count, max_count, node_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
'Please run "az aks --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already disabled for this node pool.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
load_balancer_profile = _get_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes)
if load_balancer_profile:
instance.network_profile.load_balancer_profile = load_balancer_profile
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(api_server_authorized_ip_ranges, instance)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
# pylint: disable=unused-argument,inconsistent-return-statements
def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, control_plane_only=False,
no_wait=False, **kwargs):
instance = client.get(resource_group_name, name)
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
from knack.prompting import prompt_y_n
upgrade_all = False
instance.kubernetes_version = kubernetes_version
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
# for legacy clusters, we always upgrade node pools together with the control plane (CCP).
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None, prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an interactive selection experience.
:type space_name: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, prompt)
except TypeError:
raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
subnet_name=None, no_wait=False):
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
if 'kube-dashboard' in addon_args and 'kubeDashboard' not in addon_profiles:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
addon = ADDONS[addon_arg]
if addon == 'aciConnector':
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# addon name is case insensitive
addon = next((x for x in addon_profiles.keys() if x.lower() == addon.lower()), addon)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == 'omsagent':
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
elif addon.lower() == ('aciConnector' + os_type).lower():
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
'To change virtual-node configuration, run '
'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
'before enabling it again.')
if not subnet_name:
raise CLIError('The aci-connector addon requires setting a subnet name.')
addon_profile.config = {'SubnetName': subnet_name}
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles['kubeDashboard'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use the default workspace if it exists, otherwise create one
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles['omsagent'] = ManagedClusterAddonProfile(
enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
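# Illustrative example: addons_str='http_application_routing,kube-dashboard' returns
#   {'httpApplicationRouting': ManagedClusterAddonProfile(enabled=True),
#    'kubeDashboard': ManagedClusterAddonProfile(enabled=True)}
# while including 'monitoring' also resolves (or creates) a default Log Analytics workspace
# and records its resource ID in the omsagent profile config.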
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
except Exception: # noqa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
from azure.cli.core.extension import ExtensionNotInstalledException
try:
from azure.cli.core.extension import operations
operations.update_extension(cmd=cmd, extension_name=extension_name)
operations.reload_extension(extension_name=extension_name)
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(cmd, extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(cmd, extension_name)
return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "southcentralus",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "eastus",
"northeurope": "northeurope",
"southafricanorth": "westeurope",
"southafricawest": "westeurope",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "uksouth",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2"
}
# mapping for azure china cloud
    # currently Log Analytics is supported only in the China East 2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
    # mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
workspace_region = "eastus"
workspace_region_code = "EUS"
# sanity check that locations and clouds match.
if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
else:
logger.error("AKS Monitoring addon not supported in cloud : %s", cloud_name)
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
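    # Deploys the ContainerInsights solution into the Log Analytics workspace referenced by the
    # omsagent addon config, via an inline ARM deployment scoped to the workspace's
    # subscription and resource group.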
# Workaround for this addon key which has been seen lowercased in the wild.
if 'loganalyticsworkspaceresourceid' in addon.config:
addon.config['logAnalyticsWorkspaceResourceID'] = addon.config.pop('loganalyticsworkspaceresourceid')
workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID']
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id,
detach=False):
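    # Grant (or, with detach=True, revoke) the 'acrpull' role on the target ACR for the
    # cluster's service principal. Accepts either a full ACR resource ID or a registry name.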
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
kubernetes_version=None,
zones=None,
node_vm_size=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
max_pods=0,
os_type="Linux",
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type.lower() == "windows":
raise CLIError('Windows nodepool is not supported')
node_vm_size = "Standard_DS2_v2"
agent_pool = AgentPool(
name=nodepool_name,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=zones,
node_taints=taints_array
)
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if new_node_count == 0:
raise CLIError("Can't scale down to 0 nodes.")
if new_node_count == instance.count:
raise CLIError("The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
kubernetes_version,
nodepool_name,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
no_wait=False):
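    # Exactly one of the three autoscaler flags may be set; summing the booleans makes the
    # check below a simple integer comparison.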
update_flags = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
if update_flags != 1:
raise CLIError('Please specify "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
node_count = instance.count
_validate_autoscaler_update_counts(min_count, max_count, node_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning('Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _ensure_osa_aad(cli_ctx,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
identifier=None,
name=None, create=False,
customer_admin_group_id=None):
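    # Creates or updates the AAD application used by the OpenShift cluster (when create=True)
    # and returns an OpenShiftManagedClusterAADIdentityProvider describing it.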
rbac_client = get_graph_rbac_management_client(cli_ctx)
if create:
        # This reply_url is set temporarily since Azure needs one to create the AAD application.
app_id_name = 'https://{}'.format(name)
if not aad_client_app_secret:
aad_client_app_secret = _create_client_secret()
# Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
additional_properties=None, type="Scope")
# Read directory permissions on Windows Azure Active Directory API
directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
additional_properties=None, type="Role")
required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
additional_properties=None,
resource_app_id="00000002-0000-0000-c000-000000000000")
list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
.format(app_id_name)))
if list_aad_filtered:
aad_client_app_id = list_aad_filtered[0].app_id
# Updating reply_url with the correct FQDN information returned by the RP
reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier)
update_application(client=rbac_client.applications,
object_id=list_aad_filtered[0].object_id,
display_name=name,
identifier_uris=[app_id_name],
reply_urls=[reply_url],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
logger.info('Updated AAD: %s', aad_client_app_id)
else:
result = create_application(client=rbac_client.applications,
display_name=name,
identifier_uris=[app_id_name],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
aad_client_app_id = result.app_id
logger.info('Created an AAD: %s', aad_client_app_id)
# Get the TenantID
if aad_tenant_id is None:
profile = Profile(cli_ctx=cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
return OpenShiftManagedClusterAADIdentityProvider(
client_id=aad_client_app_id,
secret=aad_client_app_secret,
tenant_id=aad_tenant_id,
kind='AADIdentityProvider',
customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
            # add the role assignment first, before saving it
if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
logger.warning('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal)
return load_acs_service_principal(subscription_id)
def _create_client_secret():
    # Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Just do the get, we don't need the result, it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
            raise CLIError('min-count and max-count are only allowed when --enable-cluster-autoscaler is set, please use the flag')
def _validate_autoscaler_update_counts(min_count, max_count, node_count, is_enable_or_update):
"""
Validates the min, max, and node count when performing an update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count.')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError("Current node count '{}' is not in the range of min-count and max-count.".format(node_count))
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_osa_nulls(list(managed_clusters))
def _format_workspace_id(workspace_id):
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
return workspace_id
def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals
location=None,
compute_vm_size="Standard_D4s_v3",
compute_count=3,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
vnet_prefix="10.0.0.0/8",
subnet_prefix="10.0.0.0/24",
vnet_peer=None,
tags=None,
no_wait=False,
workspace_id=None,
customer_admin_group_id=None):
if location is None:
location = _get_rg_location(cmd.cli_ctx, resource_group_name)
agent_pool_profiles = []
agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='compute', # Must be 12 chars or less before ACS RP adds to it
count=int(compute_count),
vm_size=compute_vm_size,
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.compute,
subnet_cidr=subnet_prefix
)
agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='infra', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.infra,
subnet_cidr=subnet_prefix
)
agent_pool_profiles.append(agent_node_pool_profile)
agent_pool_profiles.append(agent_infra_pool_profile)
agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='master', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
subnet_cidr=subnet_prefix
)
identity_providers = []
create_aad = False
    # Check that the cluster does not already exist, since AAD rotation on OSA is not supported for now
try:
client.get(resource_group_name, name)
except CloudError:
        # Check whether aad_client_app_id, aad_client_app_secret and aad_tenant_id are set
if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
create_aad = True
osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=aad_client_app_id,
aad_client_app_secret=aad_client_app_secret,
aad_tenant_id=aad_tenant_id, identifier=None,
name=name, create=create_aad,
customer_admin_group_id=customer_admin_group_id)
identity_providers.append(
OpenShiftManagedClusterIdentityProvider(
name='Azure AD',
provider=osa_aad_identity
)
)
auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers)
default_router_profile = OpenShiftRouterProfile(name='default')
if vnet_peer is not None:
from msrestazure.tools import is_valid_resource_id, resource_id
if not is_valid_resource_id(vnet_peer):
vnet_peer = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network', type='virtualNetwork',
name=vnet_peer
)
if workspace_id is not None:
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
else:
monitor_profile = None
network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
osamc = OpenShiftManagedCluster(
location=location, tags=tags,
open_shift_version="v3.11",
network_profile=network_profile,
auth_profile=auth_profile,
agent_pool_profiles=agent_pool_profiles,
master_pool_profile=agent_master_pool_profile,
router_profiles=[default_router_profile],
monitor_profile=monitor_profile)
try:
# long_running_operation_timeout=300
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
result = LongRunningOperation(cmd.cli_ctx)(result)
instance = client.get(resource_group_name, name)
_ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=osa_aad_identity.client_id,
aad_client_app_secret=osa_aad_identity.secret,
aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
name=name, create=create_aad)
except CloudError as ex:
if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
if "No registered resource provider found for location" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
raise ex
def openshift_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_osa_nulls([mc])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
idx = 0
for i in range(len(instance.agent_pool_profiles)):
if instance.agent_pool_profiles[i].name.lower() == "compute":
idx = i
break
instance.agent_pool_profiles[idx].count = int(compute_count) # pylint: disable=no-member
    # null out the AAD profile and manually add the master agent pool name, because validation complains otherwise
instance.master_pool_profile.name = "master"
instance.auth_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
instance = client.get(resource_group_name, name)
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
instance = client.get(resource_group_name, name)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=False, workspace_resource_id=None) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _get_load_balancer_outbound_ips(load_balancer_outbound_ips):
"""parse load balancer profile outbound IP ids and return an array of references to the outbound IP resources"""
load_balancer_outbound_ip_resources = None
if load_balancer_outbound_ips:
load_balancer_outbound_ip_resources = \
[ResourceReference(id=x.strip()) for x in load_balancer_outbound_ips.split(',')]
return load_balancer_outbound_ip_resources
def _get_load_balancer_outbound_ip_prefixes(load_balancer_outbound_ip_prefixes):
"""parse load balancer profile outbound IP prefix ids and return an array \
of references to the outbound IP prefix resources"""
load_balancer_outbound_ip_prefix_resources = None
if load_balancer_outbound_ip_prefixes:
load_balancer_outbound_ip_prefix_resources = \
[ResourceReference(id=x.strip()) for x in load_balancer_outbound_ip_prefixes.split(',')]
return load_balancer_outbound_ip_prefix_resources
def _get_load_balancer_profile(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes):
"""parse and build load balancer profile"""
load_balancer_outbound_ip_resources = _get_load_balancer_outbound_ips(load_balancer_outbound_ips)
load_balancer_outbound_ip_prefix_resources = _get_load_balancer_outbound_ip_prefixes(
load_balancer_outbound_ip_prefixes)
load_balancer_profile = None
if any([load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ip_resources,
load_balancer_outbound_ip_prefix_resources]):
load_balancer_profile = ManagedClusterLoadBalancerProfile()
if load_balancer_managed_outbound_ip_count:
load_balancer_profile.managed_outbound_ips = ManagedClusterLoadBalancerProfileManagedOutboundIPs(
count=load_balancer_managed_outbound_ip_count
)
if load_balancer_outbound_ip_resources:
load_balancer_profile.outbound_ips = ManagedClusterLoadBalancerProfileOutboundIPs(
public_ips=load_balancer_outbound_ip_resources
)
if load_balancer_outbound_ip_prefix_resources:
load_balancer_profile.outbound_ip_prefixes = ManagedClusterLoadBalancerProfileOutboundIPPrefixes(
public_ip_prefixes=load_balancer_outbound_ip_prefix_resources
)
return load_balancer_profile
|
discovery.py
|
"""
Copyright 2019 InfAI (CC SES)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ('Monitor', )
from .logger import root_logger
from .device_manager import DeviceManager
from .device import device_type_map
from .configuration import config
from subprocess import call, check_output, DEVNULL
from socket import gethostbyname, getfqdn
from threading import Thread
from platform import system
from os import getenv
from requests import get, exceptions
import time, cc_lib
logger = root_logger.getChild(__name__.split(".", 1)[-1])
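# NOTE: '-c 2' sends two echo requests; '-t 2' is a two-second timeout on BSD/macOS ping,
# whereas on Linux '-t' sets the TTL instead.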
def ping(host) -> bool:
return call(['ping', '-c', '2', '-t', '2', host], stdout=DEVNULL, stderr=DEVNULL) == 0
def getLocalIP() -> str:
try:
if config.RuntimeEnv.container:
host_ip = getenv("HOST_IP")
if not host_ip:
raise Exception("environment variable 'HOST_IP' not set")
return host_ip
else:
sys_type = system().lower()
if 'linux' in sys_type:
local_ip = check_output(['hostname', '-I']).decode()
local_ip = local_ip.replace(' ', '')
local_ip = local_ip.replace('\n', '')
return local_ip
elif 'darwin' in sys_type:
local_ip = gethostbyname(getfqdn())
if type(local_ip) is str and local_ip.count('.') == 3:
return local_ip
else:
raise Exception("platform not supported")
except Exception as ex:
logger.critical("could not get local ip - {}".format(ex))
exit()
def getIpRange(local_ip) -> list:
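    # Builds the rest of the /24 neighborhood (x.y.z.2 through x.y.z.254) and drops the local IP.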
split_ip = local_ip.rsplit('.', 1)
base_ip = split_ip[0] + '.'
if len(split_ip) > 1:
ip_range = [str(base_ip) + str(i) for i in range(2,255)]
ip_range.remove(local_ip)
return ip_range
return list()
def discoverHostsWorker(ip_range, alive_hosts):
for ip in ip_range:
if ping(ip):
alive_hosts.append(ip)
def discoverHosts() -> list:
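    # Pings the local /24 range in batches of bin_size addresses, one worker thread per batch.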
ip_range = getIpRange(getLocalIP())
alive_hosts = list()
workers = list()
bin = 0
bin_size = 3
if ip_range:
for i in range(int(len(ip_range) / bin_size)):
worker = Thread(target=discoverHostsWorker, name='discoverHostsWorker', args=(ip_range[bin:bin+bin_size], alive_hosts))
workers.append(worker)
worker.start()
bin = bin + bin_size
if ip_range[bin:]:
worker = Thread(target=discoverHostsWorker, name='discoverHostsWorker', args=(ip_range[bin:], alive_hosts))
workers.append(worker)
worker.start()
for worker in workers:
worker.join()
return alive_hosts
class Monitor(Thread):
def __init__(self, device_manager: DeviceManager, client: cc_lib.client.Client):
super().__init__(name="monitor", daemon=True)
self.__device_manager = device_manager
self.__client = client
def _validateHostsWorker(self, hosts, valid_hosts):
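        # Probe each host over HTTP and keep only those that identify themselves as blebox
        # devices, recording device id -> ({name, ip}, {type, reachable}).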
for host in hosts:
try:
response = get(url="http://{}/{}".format(host, config.Api.air_sensor_device), timeout=5)
if response.status_code == 200 and 'blebox' in response.headers.get('Server'):
host_info = response.json()
if "device" in host_info.keys():
host_info = host_info.get("device")
valid_hosts[host_info.get('id')] = (
{
"name": host_info.get("deviceName"),
"ip": host
},
{
"type": host_info.get("type"),
"reachable": True
}
)
except exceptions.RequestException:
pass
def _validateHosts(self, hosts) -> dict:
valid_hosts = dict()
workers = list()
bin = 0
bin_size = 2
if len(hosts) <= bin_size:
worker = Thread(target=self._validateHostsWorker, name='validateHostsWorker', args=(hosts, valid_hosts))
workers.append(worker)
worker.start()
else:
for i in range(int(len(hosts) / bin_size)):
worker = Thread(target=self._validateHostsWorker, name='validateHostsWorker',
args=(hosts[bin:bin + bin_size], valid_hosts))
workers.append(worker)
worker.start()
bin = bin + bin_size
if hosts[bin:]:
worker = Thread(target=self._validateHostsWorker, name='validateHostsWorker', args=(hosts[bin:], valid_hosts))
workers.append(worker)
worker.start()
for worker in workers:
worker.join()
return valid_hosts
def _diff(self, known, unknown) -> tuple:
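        # Compare known device IDs with freshly discovered ones: 'missing' disappeared,
        # 'new' just appeared, 'changed' exist in both but with differing attributes.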
known_set = set(known)
unknown_set = set(unknown)
missing = known_set - unknown_set
new = unknown_set - known_set
changed = {key for key in known_set & unknown_set if dict(known[key]) != unknown[key][0]}
return missing, new, changed
def _evaluate(self, queried_devices):
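        # Disconnect devices that vanished, register and connect newly discovered ones, push
        # name/reachability updates for changed ones, then sync the hub if anything moved.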
missing_devices, new_devices, changed_devices = self._diff(self.__device_manager.devices, queried_devices)
updated_devices = list()
if missing_devices:
for device_id in missing_devices:
logger.info("can't find '{}' with id '{}'".format(
self.__device_manager.get(device_id).name, device_id)
)
try:
self.__client.disconnectDevice(device_id)
self.__device_manager.get(device_id).reachable = False
except (cc_lib.client.DeviceDisconnectError, cc_lib.client.NotConnectedError):
pass
if new_devices:
futures = list()
for device_id in new_devices:
device = device_type_map[queried_devices[device_id][1]["type"]](device_id, **queried_devices[device_id][0])
logger.info("found '{}' with id '{}'".format(device.name, device.id))
futures.append((device, self.__client.addDevice(device, asynchronous=True)))
for device, future in futures:
future.wait()
try:
future.result()
self.__device_manager.add(device, queried_devices[device.id][1]["type"])
self.__client.connectDevice(device, asynchronous=True)
device.reachable = True
except (cc_lib.client.DeviceAddError, cc_lib.client.DeviceUpdateError):
pass
if changed_devices:
futures = list()
for device_id in changed_devices:
device = self.__device_manager.get(device_id)
prev_device_name = device.name
prev_device_reachable_state = device.reachable
device.name = queried_devices[device_id][0]["name"]
device.ip = queried_devices[device_id][0]["ip"]
device.reachable = queried_devices[device_id][1]["reachable"]
if device.reachable != prev_device_reachable_state:
if device.reachable:
self.__client.connectDevice(device, asynchronous=True)
else:
self.__client.disconnectDevice(device, asynchronous=True)
if device.name != prev_device_name:
futures.append((device, prev_device_name, self.__client.updateDevice(device, asynchronous=True)))
for device, prev_device_name, future in futures:
future.wait()
try:
future.result()
updated_devices.append(device.id)
self.__device_manager.update(device)
except cc_lib.client.DeviceUpdateError:
device.name = prev_device_name
if any((missing_devices, new_devices, updated_devices)):
try:
self.__client.syncHub(list(self.__device_manager.devices.values()), asynchronous=True)
except cc_lib.client.HubError:
pass
def run(self):
while True:
unknown_devices = self._validateHosts(discoverHosts())
self._evaluate(unknown_devices)
time.sleep(120)
|
server.py
|
import socket
import threading
import struct
class Server(object):
def __init__(self):
self.host = "0.0.0.0"
self.port = 9001
self.server_name = (self.host, self.port)
self.recv_size = 11
def start_server(self):
        # Create a TCP socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow the local address to be reused
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Bind to the address
        sock.bind((self.host, self.port))
        # Listen for clients
        sock.listen(128)
        print("Listening...")
        # Accept client connection requests
        while True:
            client_sock, client_addr = sock.accept()
            print('Connection established with client %s' % str(client_addr))
            # print("client_sock: {}".format(client_sock))
            # Handle this client in a child thread
            t = threading.Thread(target=self._handler, args=(client_sock, client_addr))
            # Start the child thread
            t.start()
def _handler(self, client_sock, client_addr):
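        # Assumed wire format: a 4-byte big-endian length prefix (total message size),
        # the payload, and a trailing "\r\n"; data is read in fixed recv_size chunks.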
recv_data = b''
        # Receive in a loop, one fixed-size chunk at a time
while True:
msg = client_sock.recv(self.recv_size)
print(len(msg))
recv_data += msg
if len(msg) < self.recv_size:
break
elif len(msg) == self.recv_size:
count = recv_data[:4]
count_value = struct.unpack("!I", count)[0]
print(count_value)
end_string = recv_data[-2:].decode("utf-8")
print(end_string)
                # the 4-byte header carries the total message length; stop once everything has arrived
                if (len(recv_data) == count_value) and (end_string == "\r\n"):
break
else:
pass
recv_data = recv_data[4:-2]
        # Decode the received data
strData = recv_data.decode("utf-8")
print(client_addr)
print(strData)
print(self.server_name)
print(strData)
client_sock.send(strData.encode("utf-8"))
if __name__ == '__main__':
server = Server()
server.start_server()
|
test_threading.py
|
"""
Tests for the threading module.
"""
import test.support
from test.support import verbose, import_module, cpython_only
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
import textwrap
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('netbsd5', 'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if hasattr(threading, 'get_native_id'):
native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
self.assertNotIn(None, native_ids)
self.assertEqual(len(native_ids), NUMTASKS + 1)
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=support.SHORT_TIMEOUT)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'needs os.fork()')
def test_fork_at_exit(self):
# bpo-42350: Calling os.fork() after threading._shutdown() must
# not log an error.
code = textwrap.dedent("""
import atexit
import os
import sys
from test.support import wait_process
# Import the threading module to register its "at fork" callback
import threading
def exit_handler():
pid = os.fork()
if not pid:
print("child process ok", file=sys.stderr, flush=True)
# child process
sys.exit()
else:
wait_process(pid, exitcode=0)
# exit_handler() will be called after threading._shutdown()
atexit.register(exit_handler)
""")
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err.rstrip(), b'child process ok')
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
support.wait_process(pid, exitcode=10)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
from test import support
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
support.wait_process(pid, exitcode=0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
from test import support
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
support.wait_process(pid, exitcode=0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_finalization_shutdown(self):
# bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
# until Python thread states of all non-daemon threads get deleted.
#
# Test similar to SubinterpThreadingTests.test_threads_join_2(), but
# test the finalization of the main interpreter.
code = """if 1:
import os
import threading
import time
import random
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_Finalize() is called.
random_sleep()
tls.x = Sleeper()
random_sleep()
threading.Thread(target=f).start()
random_sleep()
"""
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(err, b"")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=support.SHORT_TIMEOUT), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
@cpython_only
def test_shutdown_locks(self):
for daemon in (False, True):
with self.subTest(daemon=daemon):
event = threading.Event()
thread = threading.Thread(target=event.wait, daemon=daemon)
# Thread.start() must add lock to _shutdown_locks,
# but only for non-daemon thread
thread.start()
tstate_lock = thread._tstate_lock
if not daemon:
self.assertIn(tstate_lock, threading._shutdown_locks)
else:
self.assertNotIn(tstate_lock, threading._shutdown_locks)
# unblock the thread and join it
event.set()
thread.join()
# Thread._stop() must remove tstate_lock from _shutdown_locks.
# Daemon threads must never add it to _shutdown_locks.
self.assertNotIn(tstate_lock, threading._shutdown_locks)
def test_locals_at_exit(self):
# bpo-19466: thread locals must not be deleted before destructors
# are called
rc, out, err = assert_python_ok("-c", """if 1:
import threading
class Atexit:
def __del__(self):
print("thread_dict.atexit = %r" % thread_dict.atexit)
thread_dict = threading.local()
thread_dict.atexit = "value"
atexit = Atexit()
""")
self.assertEqual(out.rstrip(), b"thread_dict.atexit = 'value'")
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
from test import support
childpid = os.fork()
if childpid != 0:
# parent process
support.wait_process(childpid, exitcode=0)
sys.exit(0)
# child process
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
from test import support
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
# parent process
support.wait_process(childpid, exitcode=0)
sys.exit(0)
# child process
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
with open(os.__file__, 'rb') as in_f:
stuff = in_f.read(200)
with open(os.devnull, 'wb') as null_f:
null_f.write(stuff)
time.sleep(random.random() / 1995)
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
support.wait_process(pid, exitcode=50)
else:
os._exit(50)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(51)
else:
os._exit(52)
else:
support.wait_process(pid, exitcode=51)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def pipe(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
if hasattr(os, 'set_blocking'):
os.set_blocking(r, False)
return (r, w)
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = f"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep({test.support.SHORT_TIMEOUT})
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
thread.join()
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
thread.join()
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
# explicitly break the reference cycle to not leak a dangling thread
thread.exc = None
class ThreadRunFail(threading.Thread):
def run(self):
raise ValueError("run failed")
class ExceptHookTests(BaseTestCase):
def test_excepthook(self):
with support.captured_output("stderr") as stderr:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {thread.name}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("run failed")', stderr)
self.assertIn('ValueError: run failed', stderr)
@support.cpython_only
def test_excepthook_thread_None(self):
# threading.excepthook called with thread=None: log the thread
# identifier in this case.
with support.captured_output("stderr") as stderr:
try:
raise ValueError("bug")
except Exception as exc:
args = threading.ExceptHookArgs([*sys.exc_info(), None])
try:
threading.excepthook(args)
finally:
# Explicitly break a reference cycle
args = None
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {threading.get_ident()}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("bug")', stderr)
self.assertIn('ValueError: bug', stderr)
def test_system_exit(self):
class ThreadExit(threading.Thread):
def run(self):
sys.exit(1)
# threading.excepthook() silently ignores SystemExit
with support.captured_output("stderr") as stderr:
thread = ThreadExit()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(), '')
def test_custom_excepthook(self):
args = None
def hook(hook_args):
nonlocal args
args = hook_args
try:
with support.swap_attr(threading, 'excepthook', hook):
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(args.exc_type, ValueError)
self.assertEqual(str(args.exc_value), 'run failed')
self.assertEqual(args.exc_traceback, args.exc_value.__traceback__)
self.assertIs(args.thread, thread)
finally:
# Break reference cycle
args = None
def test_custom_excepthook_fail(self):
def threading_hook(args):
raise ValueError("threading_hook failed")
err_str = None
def sys_hook(exc_type, exc_value, exc_traceback):
nonlocal err_str
err_str = str(exc_value)
with support.swap_attr(threading, 'excepthook', threading_hook), \
support.swap_attr(sys, 'excepthook', sys_hook), \
support.captured_output('stderr') as stderr:
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(),
'Exception in threading.excepthook:\n')
self.assertEqual(err_str, 'threading_hook failed')
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
timer1.join()
timer2.join()
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
extra = {"ThreadError"}
blacklist = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, blacklist=blacklist)
class InterruptMainTests(unittest.TestCase):
def test_interrupt_main_subthread(self):
# Calling start_new_thread with a function that executes interrupt_main
# should raise KeyboardInterrupt upon completion.
def call_interrupt():
_thread.interrupt_main()
t = threading.Thread(target=call_interrupt)
with self.assertRaises(KeyboardInterrupt):
t.start()
t.join()
t.join()
def test_interrupt_main_mainthread(self):
# Make sure that if interrupt_main is called in the main thread,
# KeyboardInterrupt is raised instantly.
with self.assertRaises(KeyboardInterrupt):
_thread.interrupt_main()
def test_interrupt_main_noerror(self):
handler = signal.getsignal(signal.SIGINT)
try:
# No exception should arise.
signal.signal(signal.SIGINT, signal.SIG_IGN)
_thread.interrupt_main()
signal.signal(signal.SIGINT, signal.SIG_DFL)
_thread.interrupt_main()
finally:
# Restore original handler
signal.signal(signal.SIGINT, handler)
class AtexitTests(unittest.TestCase):
def test_atexit_output(self):
rc, out, err = assert_python_ok("-c", """if True:
import threading
def run_last():
print('parrot')
threading._register_atexit(run_last)
""")
self.assertFalse(err)
self.assertEqual(out.strip(), b'parrot')
def test_atexit_called_once(self):
rc, out, err = assert_python_ok("-c", """if True:
import threading
from unittest.mock import Mock
mock = Mock()
threading._register_atexit(mock)
mock.assert_not_called()
# force early shutdown to ensure it was called once
threading._shutdown()
mock.assert_called_once()
""")
self.assertFalse(err)
def test_atexit_after_shutdown(self):
# The only way to do this is by registering an atexit within
# an atexit, which is intended to raise an exception.
rc, out, err = assert_python_ok("-c", """if True:
import threading
def func():
pass
def run_last():
threading._register_atexit(func)
threading._register_atexit(run_last)
""")
self.assertTrue(err)
self.assertIn("RuntimeError: can't register atexit after shutdown",
err.decode())
if __name__ == "__main__":
unittest.main()
|
avnet_face_detection_mt.py
|
'''
Copyright 2020 Avnet Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# USAGE
# python avnet_face_detection_mt.py [--input 0] [--detthreshold 0.55] [--nmsthreshold 0.35] [--threads 4]
import numpy as np
import argparse
import imutils
import time
import cv2
import os, errno
import sys
import threading
import queue
from imutils.video import FPS
from vitis_ai_vart.facedetect import FaceDetect
import runner
import xir.graph
import pathlib
import xir.subgraph
def get_subgraph (g):
sub = []
root = g.get_root_subgraph()
sub = [ s for s in root.children
if s.metadata.get_attr_str ("device") == "DPU"]
return sub
global bQuit
def taskCapture(inputId,queueIn):
global bQuit
#print("[INFO] taskCapture : starting thread ...")
# Start the FPS counter
fpsIn = FPS().start()
# Initialize the camera input
print("[INFO] taskCapture : starting camera input ...")
cam = cv2.VideoCapture(inputId)
cam.set(cv2.CAP_PROP_FRAME_WIDTH,640)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT,480)
if not (cam.isOpened()):
print("[ERROR] taskCapture : Failed to open camera ", inputId )
exit()
while not bQuit:
# Capture image from camera
ret,frame = cam.read()
# Update the FPS counter
fpsIn.update()
# Push captured image to input queue
queueIn.put(frame)
# Stop the timer and display FPS information
fpsIn.stop()
print("[INFO] taskCapture : elapsed time: {:.2f}".format(fpsIn.elapsed()))
print("[INFO] taskCapture : elapsed FPS: {:.2f}".format(fpsIn.fps()))
#print("[INFO] taskCapture : exiting thread ...")
def taskWorker(worker,dpu,detThreshold,nmsThreshold,queueIn,queueOut):
global bQuit
#print("[INFO] taskWorker[",worker,"] : starting thread ...")
# Start the face detector
dpu_face_detector = FaceDetect(dpu,detThreshold,nmsThreshold)
dpu_face_detector.start()
while not bQuit:
# Pop captured image from input queue
frame = queueIn.get()
# Vitis-AI/DPU based face detector
faces = dpu_face_detector.process(frame)
# loop over the faces
for i,(left,top,right,bottom) in enumerate(faces):
# draw a bounding box surrounding the object so we can
# visualize it
cv2.rectangle( frame, (left,top), (right,bottom), (0,255,0), 2)
# Push processed image to output queue
queueOut.put(frame)
# Stop the face detector
dpu_face_detector.stop()
# workaround : to ensure other worker threads stop,
# make sure input queue is not empty
queueIn.put(frame)
#print("[INFO] taskWorker[",worker,"] : exiting thread ...")
def taskDisplay(queueOut):
global bQuit
#print("[INFO] taskDisplay : starting thread ...")
# Start the FPS counter
fpsOut = FPS().start()
while not bQuit:
# Pop processed image from output queue
frame = queueOut.get()
# Display the processed image
cv2.imshow("Face Detection", frame)
# Update the FPS counter
fpsOut.update()
# if the `q` key was pressed, break from the loop
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
# Trigger all threads to stop
bQuit = True
# Stop the timer and display FPS information
fpsOut.stop()
print("[INFO] taskDisplay : elapsed time: {:.2f}".format(fpsOut.elapsed()))
print("[INFO] taskDisplay : elapsed FPS: {:.2f}".format(fpsOut.fps()))
# Cleanup
cv2.destroyAllWindows()
#print("[INFO] taskDisplay : exiting thread ...")
def main(argv):
global bQuit
bQuit = False
# Construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=False,
help = "input camera identifier (default = 0)")
ap.add_argument("-d", "--detthreshold", required=False,
help = "face detector softmax threshold (default = 0.55)")
ap.add_argument("-n", "--nmsthreshold", required=False,
help = "face detector NMS threshold (default = 0.35)")
ap.add_argument("-t", "--threads", required=False,
help = "number of worker threads (default = 4)")
args = vars(ap.parse_args())
if not args.get("input",False):
inputId = 0
else:
inputId = int(args["input"])
print('[INFO] input camera identifier = ',inputId)
if not args.get("detthreshold",False):
detThreshold = 0.55
else:
detThreshold = float(args["detthreshold"])
print('[INFO] face detector - softmax threshold = ',detThreshold)
if not args.get("nmsthreshold",False):
nmsThreshold = 0.35
else:
nmsThreshold = float(args["nmsthreshold"])
print('[INFO] face detector - NMS threshold = ',nmsThreshold)
if not args.get("threads",False):
threads = 4
else:
threads = int(args["threads"])
print('[INFO] number of worker threads = ', threads )
# Initialize VART API
densebox_elf = "/usr/share/vitis_ai_library/models/densebox_640_360/densebox_640_360.elf"
densebox_graph = xir.graph.Graph.deserialize(pathlib.Path(densebox_elf))
densebox_subgraphs = get_subgraph(densebox_graph)
assert len(densebox_subgraphs) == 1 # only one DPU kernel
all_dpu_runners = []
for i in range(int(threads)):
all_dpu_runners.append(runner.Runner(densebox_subgraphs[0], "run"))
# Init synchronous queues for inter-thread communication
queueIn = queue.Queue()
queueOut = queue.Queue()
# Launch threads
threadAll = []
tc = threading.Thread(target=taskCapture, args=(inputId,queueIn))
threadAll.append(tc)
for i in range(threads):
tw = threading.Thread(target=taskWorker, args=(i,all_dpu_runners[i],detThreshold,nmsThreshold,queueIn,queueOut))
threadAll.append(tw)
td = threading.Thread(target=taskDisplay, args=(queueOut,))
threadAll.append(td)
for x in threadAll:
x.start()
# Wait for all threads to stop
for x in threadAll:
x.join()
# Cleanup VART API
del all_dpu_runners
if __name__ == "__main__":
main(sys.argv)
|
movementControl.py
|
import time
from threading import Thread
from templates.workerprocess import WorkerProcess
class MovementControl(WorkerProcess):
# ===================================== Worker process =========================================
def __init__(self, inPs, outPs):
"""Controls the speed and steering of the vehicle
Parameters
------------
inPs : list(Pipe)
List of input pipes (0 - steering angle, 1 - stop/sign commands)
outPs : list(Pipe)
List of output pipes (order does not matter)
"""
# Initialize parameters
self.angle = 0.0
self.speed = 21.0
super(MovementControl,self).__init__(inPs, outPs)
def _init_threads(self):
"""Initialize the a thread for initial start and a thread for listening for the steering angle.
"""
startTh = Thread(name='InitialStart', target = self._singleUpdate, args=(self.outPs, ))
self.threads.append(startTh)
sendTh = Thread(name='SteeringListen',target = self._listen_for_steering, args = (self.inPs[0], self.outPs, ))
self.threads.append(sendTh)
signTh = Thread(name='SignListen',target = self._listen_for_stop, args = (self.inPs[1], self.outPs, ))
self.threads.append(signTh)
def run(self):
"""Apply the initializing methods and start the threads
"""
super(MovementControl,self).run()
def stop(self):
"""Apply the stopping methods and stops the threads
"""
# Make a reset before stop
self.speed = 0.0
self.angle = 0.0
self._singleUpdate(self.outPs)
super(MovementControl, self).stop()
# ===================================== Custom methods =========================================
def _listen_for_steering(self, inP, outPs):
"""Get the current needed value for the steering angle
"""
while True:
try:
# Get the value through the pipe
value = inP.recv()
# Write the value
self.angle = float(value)
# Update the value on Nucleo
self._singleUpdate(outPs)
except Exception as e:
print("Listening error:")
print(e)
def _listen_for_stop(self, inP, outPs):
while True:
try:
value = inP.recv()
if value == 0:
self.speed = 0.0
if value == 1:
self.speed = 0.0
self._singleUpdate(outPs)
time.sleep(2)
self.speed = 21.0
self._singleUpdate(outPs)
except Exception as e:
print(e)
def _singleUpdate(self, outPs):
"""Update the state of the controls
"""
# Initialize the data array to be sent
data = {}
# Set longitudinal control
if(self.speed != 0):
data['action'] = 'MCTL'
data['speed'] = float(self.speed/100.0)
else:
data['action'] = 'BRAK'
# Set lateral control
data['steerAngle'] = self.angle
# Send data
try:
for outP in outPs:
outP.send(data)
except Exception as e:
print(e)
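# ---------------------------------------------------------------------------
# Editor's sketch, not part of the original file: _singleUpdate() above sends
# a plain dict through each output pipe. The guarded example below only
# restates the two message shapes that the method builds (speed is scaled by
# 1/100 for 'MCTL'; 'BRAK' carries no speed field); the concrete numbers are
# illustrative.
if __name__ == "__main__":
    moving = {'action': 'MCTL', 'speed': 21.0 / 100.0, 'steerAngle': -5.0}
    braking = {'action': 'BRAK', 'steerAngle': 0.0}
    print(moving)
    print(braking)
# ---------------------------------------------------------------------------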
|
test_decimal.py
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former tests
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
tests the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
www2.hursley.ibm.com/decimal/dectest.zip
This test module can be called from command line with one parameter (Arithmetic
or Behaviour) to test each part, or without parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
import glob
import math
import os, sys
import pickle, copy
import unittest
from decimal import *
import numbers
from test.support import run_unittest, run_doctest, is_resource_enabled
import random
try:
import threading
except ImportError:
threading = None
# Useful Test Constant
Signals = tuple(getcontext().flags.keys())
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = (Clamped, Rounded, Inexact, Subnormal,
Underflow, Overflow, DivisionByZero, InvalidOperation)
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
def init():
global ORIGINAL_CONTEXT
ORIGINAL_CONTEXT = getcontext().copy()
DefaultTestContext = Context(
prec = 9,
rounding = ROUND_HALF_EVEN,
traps = dict.fromkeys(Signals, 0)
)
setcontext(DefaultTestContext)
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)
# list of individual .decTest test ids that correspond to tests that
# we're skipping for one reason or another.
skipped_test_ids = set([
# Skip implementation-specific scaleb tests.
'scbx164',
'scbx165',
# For some operations (currently exp, ln, log10, power), the decNumber
# reference implementation imposes additional restrictions on the context
# and operands. These restrictions are not part of the specification;
# however, the effect of these restrictions does show up in some of the
# testcases. We skip testcases that violate these restrictions, since
# Decimal behaves differently from decNumber for these testcases so these
# testcases would otherwise fail.
'expx901',
'expx902',
'expx903',
'expx905',
'lnx901',
'lnx902',
'lnx903',
'lnx905',
'logx901',
'logx902',
'logx903',
'logx905',
'powx1183',
'powx1184',
'powx4001',
'powx4002',
'powx4003',
'powx4005',
'powx4008',
'powx4010',
'powx4012',
'powx4014',
])
# Make sure it actually raises errors when not expected and caught in flags
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False
#Map the test cases' error names to the actual errors
ErrorNames = {'clamped' : Clamped,
'conversion_syntax' : InvalidOperation,
'division_by_zero' : DivisionByZero,
'division_impossible' : InvalidOperation,
'division_undefined' : InvalidOperation,
'inexact' : Inexact,
'invalid_context' : InvalidOperation,
'invalid_operation' : InvalidOperation,
'overflow' : Overflow,
'rounded' : Rounded,
'subnormal' : Subnormal,
'underflow' : Underflow}
def Nonfunction(*args):
"""Doesn't do anything."""
return None
RoundingDict = {'ceiling' : ROUND_CEILING, #Maps test-case names to roundings.
'down' : ROUND_DOWN,
'floor' : ROUND_FLOOR,
'half_down' : ROUND_HALF_DOWN,
'half_even' : ROUND_HALF_EVEN,
'half_up' : ROUND_HALF_UP,
'up' : ROUND_UP,
'05up' : ROUND_05UP}
# Name adapter to be able to change the Decimal and Context
# interface without changing the test files from Cowlishaw
nameAdapter = {'and':'logical_and',
'apply':'_apply',
'class':'number_class',
'comparesig':'compare_signal',
'comparetotal':'compare_total',
'comparetotmag':'compare_total_mag',
'copy':'copy_decimal',
'copyabs':'copy_abs',
'copynegate':'copy_negate',
'copysign':'copy_sign',
'divideint':'divide_int',
'invert':'logical_invert',
'iscanonical':'is_canonical',
'isfinite':'is_finite',
'isinfinite':'is_infinite',
'isnan':'is_nan',
'isnormal':'is_normal',
'isqnan':'is_qnan',
'issigned':'is_signed',
'issnan':'is_snan',
'issubnormal':'is_subnormal',
'iszero':'is_zero',
'maxmag':'max_mag',
'minmag':'min_mag',
'nextminus':'next_minus',
'nextplus':'next_plus',
'nexttoward':'next_toward',
'or':'logical_or',
'reduce':'normalize',
'remaindernear':'remainder_near',
'samequantum':'same_quantum',
'squareroot':'sqrt',
'toeng':'to_eng_string',
'tointegral':'to_integral_value',
'tointegralx':'to_integral_exact',
'tosci':'to_sci_string',
'xor':'logical_xor',
}
# The following functions return True/False rather than a Decimal instance
LOGICAL_FUNCTIONS = (
'is_canonical',
'is_finite',
'is_infinite',
'is_nan',
'is_normal',
'is_qnan',
'is_signed',
'is_snan',
'is_subnormal',
'is_zero',
'same_quantum',
)
class DecimalTest(unittest.TestCase):
"""Class which tests the Decimal class against the test cases.
Changed for unittest.
"""
def setUp(self):
self.context = Context()
self.ignore_list = ['#']
# Basically, a # means return NaN InvalidOperation.
# Different from a sNaN in trim
self.ChangeDict = {'precision' : self.change_precision,
'rounding' : self.change_rounding_method,
'maxexponent' : self.change_max_exponent,
'minexponent' : self.change_min_exponent,
'clamp' : self.change_clamp}
def eval_file(self, file):
global skip_expected
if skip_expected:
raise unittest.SkipTest
return
with open(file) as f:
for line in f:
line = line.replace('\r\n', '').replace('\n', '')
#print line
try:
t = self.eval_line(line)
except DecimalException as exception:
#Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
return
def eval_line(self, s):
if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
s = (s.split('->')[0] + '->' +
s.split('->')[1].split('--')[0]).strip()
else:
s = s.split('--')[0].strip()
for ignore in self.ignore_list:
if s.find(ignore) >= 0:
#print s.split()[0], 'NotImplemented--', ignore
return
if not s:
return
elif ':' in s:
return self.eval_directive(s)
else:
return self.eval_equation(s)
def eval_directive(self, s):
funct, value = (x.strip().lower() for x in s.split(':'))
if funct == 'rounding':
value = RoundingDict[value]
else:
try:
value = int(value)
except ValueError:
pass
funct = self.ChangeDict.get(funct, Nonfunction)
funct(value)
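# Editor's note, not part of the original test file: eval_line() routes two
# kinds of lines from Cowlishaw's .decTest data. Directive lines such as
#     precision: 9
#     rounding:  half_even
# are handled by eval_directive() above, while test lines of the form
#     addx001 add 1 1 -> 2
#     divx201 divide 1 3 -> 0.333333333 Inexact Rounded
# (id, operation, operands, '->', expected result, expected conditions) fall
# through to eval_equation() below. The test ids shown here are illustrative.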
def eval_equation(self, s):
#global DEFAULT_PRECISION
#print DEFAULT_PRECISION
if not TEST_ALL and random.random() < 0.90:
return
try:
Sides = s.split('->')
L = Sides[0].strip().split()
id = L[0]
if DEBUG:
print("Test ", id, end=" ")
funct = L[1].lower()
valstemp = L[2:]
L = Sides[1].strip().split()
ans = L[0]
exceptions = L[1:]
except (TypeError, AttributeError, IndexError):
raise InvalidOperation
def FixQuotes(val):
val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
val = val.replace("'", '').replace('"', '')
val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
return val
if id in skipped_test_ids:
return
fname = nameAdapter.get(funct, funct)
if fname == 'rescale':
return
funct = getattr(self.context, fname)
vals = []
conglomerate = ''
quote = 0
theirexceptions = [ErrorNames[x.lower()] for x in exceptions]
for exception in Signals:
self.context.traps[exception] = 1 #Catch these bugs...
for exception in theirexceptions:
self.context.traps[exception] = 0
for i, val in enumerate(valstemp):
if val.count("'") % 2 == 1:
quote = 1 - quote
if quote:
conglomerate = conglomerate + ' ' + val
continue
else:
val = conglomerate + val
conglomerate = ''
v = FixQuotes(val)
if fname in ('to_sci_string', 'to_eng_string'):
if EXTENDEDERRORTEST:
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(self.context.create_decimal(v))
except error:
pass
except Signals as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
v = self.context.create_decimal(v)
else:
v = Decimal(v, self.context)
vals.append(v)
ans = FixQuotes(ans)
if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
# as above, but add traps cumulatively, to check precedence
ordered_errors = [e for e in OrderedSignals if e in theirexceptions]
for error in ordered_errors:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals as e:
self.fail("Raised %s in %s; expected %s" %
(type(e), s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
# reset traps
for error in ordered_errors:
self.context.traps[error] = 0
if DEBUG:
print("--", self.context)
try:
result = str(funct(*vals))
if fname in LOGICAL_FUNCTIONS:
result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
except Signals as error:
self.fail("Raised %s in %s" % (error, s))
except: #Catch any error long enough to state the test case.
print("ERROR:", s)
raise
myexceptions = self.getexceptions()
self.context.clear_flags()
myexceptions.sort(key=repr)
theirexceptions.sort(key=repr)
self.assertEqual(result, ans,
'Incorrect answer for ' + s + ' -- got ' + result)
self.assertEqual(myexceptions, theirexceptions,
'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))
return
def getexceptions(self):
return [e for e in Signals if self.context.flags[e]]
def change_precision(self, prec):
self.context.prec = prec
def change_rounding_method(self, rounding):
self.context.rounding = rounding
def change_min_exponent(self, exp):
self.context.Emin = exp
def change_max_exponent(self, exp):
self.context.Emax = exp
def change_clamp(self, clamp):
self.context._clamp = clamp
# The following classes test the behaviour of Decimal according to PEP 327
class DecimalExplicitConstructionTest(unittest.TestCase):
'''Unit tests for Explicit Construction cases of Decimal.'''
def test_explicit_empty(self):
self.assertEqual(Decimal(), Decimal("0"))
def test_explicit_from_None(self):
self.assertRaises(TypeError, Decimal, None)
def test_explicit_from_int(self):
#positive
d = Decimal(45)
self.assertEqual(str(d), '45')
#very large positive
d = Decimal(500000123)
self.assertEqual(str(d), '500000123')
#negative
d = Decimal(-45)
self.assertEqual(str(d), '-45')
#zero
d = Decimal(0)
self.assertEqual(str(d), '0')
def test_explicit_from_string(self):
#empty
self.assertEqual(str(Decimal('')), 'NaN')
#int
self.assertEqual(str(Decimal('45')), '45')
#float
self.assertEqual(str(Decimal('45.34')), '45.34')
#engineering notation
self.assertEqual(str(Decimal('45e2')), '4.5E+3')
#just not a number
self.assertEqual(str(Decimal('ugly')), 'NaN')
#leading and trailing whitespace permitted
self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
self.assertEqual(str(Decimal(' -7.89')), '-7.89')
def test_explicit_from_tuples(self):
#zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(str(d), '0')
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(str(d), '-45')
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(str(d), '45.34')
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
#wrong number of items
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )
#bad sign
self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))
#bad exp
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )
#bad coefficients
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )
def test_explicit_from_Decimal(self):
#positive
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
self.assertNotEqual(id(d), id(e))
#very large positive
d = Decimal(500000123)
e = Decimal(d)
self.assertEqual(str(e), '500000123')
self.assertNotEqual(id(d), id(e))
#negative
d = Decimal(-45)
e = Decimal(d)
self.assertEqual(str(e), '-45')
self.assertNotEqual(id(d), id(e))
#zero
d = Decimal(0)
e = Decimal(d)
self.assertEqual(str(e), '0')
self.assertNotEqual(id(d), id(e))
def test_explicit_context_create_decimal(self):
nc = copy.copy(getcontext())
nc.prec = 3
# empty
d = Decimal()
self.assertEqual(str(d), '0')
d = nc.create_decimal()
self.assertEqual(str(d), '0')
# from None
self.assertRaises(TypeError, nc.create_decimal, None)
# from int
d = nc.create_decimal(456)
self.assertTrue(isinstance(d, Decimal))
self.assertEqual(nc.create_decimal(45678),
nc.create_decimal('457E+2'))
# from string
d = Decimal('456789')
self.assertEqual(str(d), '456789')
d = nc.create_decimal('456789')
self.assertEqual(str(d), '4.57E+5')
# leading and trailing whitespace should result in a NaN;
# spaces are already checked in Cowlishaw's test-suite, so
# here we just check that a trailing newline results in a NaN
self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')
# from tuples
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.35E-17')
# from Decimal
prevdec = Decimal(500000123)
d = Decimal(prevdec)
self.assertEqual(str(d), '500000123')
d = nc.create_decimal(prevdec)
self.assertEqual(str(d), '5.00E+8')
def test_unicode_digits(self):
test_values = {
'\uff11': '1',
'\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
'-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
}
for input, expected in test_values.items():
self.assertEqual(str(Decimal(input)), expected)
class DecimalImplicitConstructionTest(unittest.TestCase):
'''Unit tests for Implicit Construction cases of Decimal.'''
def test_implicit_from_None(self):
self.assertRaises(TypeError, eval, 'Decimal(5) + None', globals())
def test_implicit_from_int(self):
#normal
self.assertEqual(str(Decimal(5) + 45), '50')
#exceeding precision
self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))
def test_implicit_from_string(self):
self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', globals())
def test_implicit_from_float(self):
self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', globals())
def test_implicit_from_Decimal(self):
self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))
def test_rop(self):
# Allow other classes to be trained to interact with Decimals
class E:
def __divmod__(self, other):
return 'divmod ' + str(other)
def __rdivmod__(self, other):
return str(other) + ' rdivmod'
def __lt__(self, other):
return 'lt ' + str(other)
def __gt__(self, other):
return 'gt ' + str(other)
def __le__(self, other):
return 'le ' + str(other)
def __ge__(self, other):
return 'ge ' + str(other)
def __eq__(self, other):
return 'eq ' + str(other)
def __ne__(self, other):
return 'ne ' + str(other)
self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')
# insert operator methods and then exercise them
oplist = [
('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__'),
('%', '__mod__', '__rmod__'),
('//', '__floordiv__', '__rfloordiv__'),
('**', '__pow__', '__rpow__')
]
for sym, lop, rop in oplist:
setattr(E, lop, lambda self, other: 'str' + lop + str(other))
setattr(E, rop, lambda self, other: str(other) + rop + 'str')
self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
'str' + lop + '10')
self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
'10' + rop + 'str')
class DecimalFormatTest(unittest.TestCase):
'''Unit tests for the format function.'''
def test_formatting(self):
# triples giving a format, a Decimal, and the expected result
test_values = [
('e', '0E-15', '0e-15'),
('e', '2.3E-15', '2.3e-15'),
('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
('e', '2.30000E-15', '2.30000e-15'),
('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
('e', '1.5', '1.5e+0'),
('e', '0.15', '1.5e-1'),
('e', '0.015', '1.5e-2'),
('e', '0.0000000000015', '1.5e-12'),
('e', '15.0', '1.50e+1'),
('e', '-15', '-1.5e+1'),
('e', '0', '0e+0'),
('e', '0E1', '0e+1'),
('e', '0.0', '0e-1'),
('e', '0.00', '0e-2'),
('.6e', '0E-15', '0.000000e-9'),
('.6e', '0', '0.000000e+6'),
('.6e', '9.999999', '9.999999e+0'),
('.6e', '9.9999999', '1.000000e+1'),
('.6e', '-1.23e5', '-1.230000e+5'),
('.6e', '1.23456789e-3', '1.234568e-3'),
('f', '0', '0'),
('f', '0.0', '0.0'),
('f', '0E-2', '0.00'),
('f', '0.00E-8', '0.0000000000'),
('f', '0E1', '0'), # loses exponent information
('f', '3.2E1', '32'),
('f', '3.2E2', '320'),
('f', '3.20E2', '320'),
('f', '3.200E2', '320.0'),
('f', '3.2E-6', '0.0000032'),
('.6f', '0E-15', '0.000000'), # all zeros treated equally
('.6f', '0E1', '0.000000'),
('.6f', '0', '0.000000'),
('.0f', '0', '0'), # no decimal point
('.0f', '0e-2', '0'),
('.0f', '3.14159265', '3'),
('.1f', '3.14159265', '3.1'),
('.4f', '3.14159265', '3.1416'),
('.6f', '3.14159265', '3.141593'),
('.7f', '3.14159265', '3.1415926'), # round-half-even!
('.8f', '3.14159265', '3.14159265'),
('.9f', '3.14159265', '3.141592650'),
('g', '0', '0'),
('g', '0.0', '0.0'),
('g', '0E1', '0e+1'),
('G', '0E1', '0E+1'),
('g', '0E-5', '0.00000'),
('g', '0E-6', '0.000000'),
('g', '0E-7', '0e-7'),
('g', '-0E2', '-0e+2'),
('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
('.1g', '3.14159265', '3'),
('.2g', '3.14159265', '3.1'),
('.5g', '3.14159265', '3.1416'),
('.7g', '3.14159265', '3.141593'),
('.8g', '3.14159265', '3.1415926'), # round-half-even!
('.9g', '3.14159265', '3.14159265'),
('.10g', '3.14159265', '3.14159265'), # don't pad
('%', '0E1', '0%'),
('%', '0E0', '0%'),
('%', '0E-1', '0%'),
('%', '0E-2', '0%'),
('%', '0E-3', '0.0%'),
('%', '0E-4', '0.00%'),
('.3%', '0', '0.000%'), # all zeros treated equally
('.3%', '0E10', '0.000%'),
('.3%', '0E-10', '0.000%'),
('.3%', '2.34', '234.000%'),
('.3%', '1.234567', '123.457%'),
('.0%', '1.23', '123%'),
('e', 'NaN', 'NaN'),
('f', '-NaN123', '-NaN123'),
('+g', 'NaN456', '+NaN456'),
('.3e', 'Inf', 'Infinity'),
('.16f', '-Inf', '-Infinity'),
('.0g', '-sNaN', '-sNaN'),
('', '1.00', '1.00'),
# test alignment and padding
('<6', '123', '123 '),
('>6', '123', ' 123'),
('^6', '123', ' 123 '),
('=+6', '123', '+ 123'),
('#<10', 'NaN', 'NaN#######'),
('#<10', '-4.3', '-4.3######'),
('#<+10', '0.0130', '+0.0130###'),
('#< 10', '0.0130', ' 0.0130###'),
('@>10', '-Inf', '@-Infinity'),
('#>5', '-Inf', '-Infinity'),
('?^5', '123', '?123?'),
('%^6', '123', '%123%%'),
(' ^6', '-45.6', '-45.6 '),
('/=10', '-45.6', '-/////45.6'),
('/=+10', '45.6', '+/////45.6'),
('/= 10', '45.6', ' /////45.6'),
# thousands separator
(',', '1234567', '1,234,567'),
(',', '123456', '123,456'),
(',', '12345', '12,345'),
(',', '1234', '1,234'),
(',', '123', '123'),
(',', '12', '12'),
(',', '1', '1'),
(',', '0', '0'),
(',', '-1234567', '-1,234,567'),
(',', '-123456', '-123,456'),
('7,', '123456', '123,456'),
('8,', '123456', '123,456 '),
('08,', '123456', '0,123,456'), # special case: extra 0 needed
('+08,', '123456', '+123,456'), # but not if there's a sign
(' 08,', '123456', ' 123,456'),
('08,', '-123456', '-123,456'),
('+09,', '123456', '+0,123,456'),
# ... with fractional part...
('07,', '1234.56', '1,234.56'),
('08,', '1234.56', '1,234.56'),
('09,', '1234.56', '01,234.56'),
('010,', '1234.56', '001,234.56'),
('011,', '1234.56', '0,001,234.56'),
('012,', '1234.56', '0,001,234.56'),
('08,.1f', '1234.5', '01,234.5'),
# no thousands separators in fraction part
(',', '1.23456789', '1.23456789'),
(',%', '123.456789', '12,345.6789%'),
(',e', '123456', '1.23456e+5'),
(',E', '123456', '1.23456E+5'),
# issue 6850
('a=-7.0', '0.12345', 'aaaa0.1'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
def test_n_format(self):
try:
from locale import CHAR_MAX
except ImportError:
return
# Set up some localeconv-like dictionaries
en_US = {
'decimal_point' : '.',
'grouping' : [3, 3, 0],
'thousands_sep': ','
}
fr_FR = {
'decimal_point' : ',',
'grouping' : [CHAR_MAX],
'thousands_sep' : ''
}
ru_RU = {
'decimal_point' : ',',
'grouping' : [3, 3, 0],
'thousands_sep' : ' '
}
crazy = {
'decimal_point' : '&',
'grouping' : [1, 4, 2, CHAR_MAX],
'thousands_sep' : '-'
}
def get_fmt(x, locale, fmt='n'):
return Decimal.__format__(Decimal(x), fmt, _localeconv=locale)
self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')
self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')
self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')
# zero padding
self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')
self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')
self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')
class DecimalArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests for all arithmetic operators, binary and unary.'''
def test_addition(self):
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
def test_subtraction(self):
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
def test_multiplication(self):
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
d1 = Decimal('-5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1/d2, Decimal('-2.5'))
self.assertEqual(d2/d1, Decimal('-0.4'))
#with other type, left
c = d1 / 4
self.assertEqual(c, Decimal('-1.25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 4 / d1
self.assertEqual(c, Decimal('-0.8'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 /= d2
self.assertEqual(d1, Decimal('-2.5'))
#inline with other type
d1 /= 4
self.assertEqual(d1, Decimal('-0.625'))
def test_floor_division(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1//d2, Decimal('2'))
self.assertEqual(d2//d1, Decimal('0'))
#with other type, left
c = d1 // 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 // d1
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 //= d2
self.assertEqual(d1, Decimal('2'))
#inline with other type
d1 //= 2
self.assertEqual(d1, Decimal('1'))
def test_powering(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1**d2, Decimal('25'))
self.assertEqual(d2**d1, Decimal('32'))
#with other type, left
c = d1 ** 4
self.assertEqual(c, Decimal('625'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 ** d1
self.assertEqual(c, Decimal('16807'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 **= d2
self.assertEqual(d1, Decimal('25'))
#inline with other type
d1 **= 4
self.assertEqual(d1, Decimal('390625'))
def test_module(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1%d2, Decimal('1'))
self.assertEqual(d2%d1, Decimal('2'))
#with other type, left
c = d1 % 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 % d1
self.assertEqual(c, Decimal('2'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 %= d2
self.assertEqual(d1, Decimal('1'))
#inline with other type
d1 %= 4
self.assertEqual(d1, Decimal('1'))
def test_floor_div_module(self):
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
(p, q) = divmod(d1, d2)
self.assertEqual(p, Decimal('2'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, left
(p, q) = divmod(d1, 4)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, right
(p, q) = divmod(7, d1)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('2'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
def test_unary_operators(self):
self.assertEqual(+Decimal(45), Decimal(+45)) # +
self.assertEqual(-Decimal(45), Decimal(-45)) # -
self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs
def test_nan_comparisons(self):
n = Decimal('NaN')
s = Decimal('sNaN')
i = Decimal('Inf')
f = Decimal('2')
for x, y in [(n, n), (n, i), (i, n), (n, f), (f, n),
(s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)]:
self.assertTrue(x != y)
self.assertTrue(not (x == y))
self.assertTrue(not (x < y))
self.assertTrue(not (x <= y))
self.assertTrue(not (x > y))
self.assertTrue(not (x >= y))
# The following are two functions used to test threading in the next class
def thfunc1(cls):
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
cls.synchro.wait()
test2 = d1/d3
cls.finish1.set()
cls.assertEqual(test1, Decimal('0.3333333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.3333333333333333333333333333'))
return
def thfunc2(cls):
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
thiscontext = getcontext()
thiscontext.prec = 18
test2 = d1/d3
cls.synchro.set()
cls.finish2.set()
cls.assertEqual(test1, Decimal('0.3333333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333'))
return
class DecimalUseOfContextTest(unittest.TestCase):
'''Unit tests for Use of Context cases in Decimal.'''
try:
import threading
except ImportError:
threading = None
# Take care when executing this test from IDLE; there's an issue in threading
# that hangs IDLE and I couldn't track it down
def test_threading(self):
#Test the "threading isolation" of a Context.
self.synchro = threading.Event()
self.finish1 = threading.Event()
self.finish2 = threading.Event()
th1 = threading.Thread(target=thfunc1, args=(self,))
th2 = threading.Thread(target=thfunc2, args=(self,))
th1.start()
th2.start()
self.finish1.wait()
self.finish2.wait()
return
if threading is None:
del test_threading
class DecimalUsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.assertTrue(dc > da)
self.assertTrue(dc >= da)
self.assertTrue(da < dc)
self.assertTrue(da <= dc)
self.assertEqual(da, db)
self.assertTrue(da != dc)
self.assertTrue(da <= db)
self.assertTrue(da >= db)
#a Decimal and an int
self.assertTrue(dc > 23)
self.assertTrue(23 < dc)
self.assertEqual(dc, 45)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = list(map(Decimal, range(100)))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
def test_copy_and_deepcopy_methods(self):
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
#just that it's hashable
hash(Decimal(23))
test_values = [Decimal(sign*(2**m + n))
for m in [0, 14, 15, 16, 17, 30, 31,
32, 33, 62, 63, 64, 65, 66]
for n in range(-10, 10)
for sign in [-1, 1]]
test_values.extend([
Decimal("-0"), # zeros
Decimal("0.00"),
Decimal("-0.000"),
Decimal("0E10"),
Decimal("-0E12"),
Decimal("10.0"), # negative exponent
Decimal("-23.00000"),
Decimal("1230E100"), # positive exponent
Decimal("-4.5678E50"),
# a value for which hash(n) != hash(n % (2**64-1))
# in Python pre-2.6
Decimal(2**64 + 2**32 - 1),
# selection of values which fail with the old (before
# version 2.6) long.__hash__
Decimal("1.634E100"),
Decimal("90.697E100"),
Decimal("188.83E100"),
Decimal("1652.9E100"),
Decimal("56531E100"),
])
# check that hash(d) == hash(int(d)) for integral values
for value in test_values:
self.assertEqual(hash(value), hash(int(value)))
#the same hash as an int
self.assertEqual(hash(Decimal(23)), hash(23))
self.assertRaises(TypeError, hash, Decimal('NaN'))
self.assertTrue(hash(Decimal('Inf')))
self.assertTrue(hash(Decimal('-Inf')))
# check that the value of the hash doesn't depend on the
# current context (issue #1757)
c = getcontext()
old_precision = c.prec
x = Decimal("123456789.1")
c.prec = 6
h1 = hash(x)
c.prec = 10
h2 = hash(x)
c.prec = 16
h3 = hash(x)
self.assertEqual(h1, h2)
self.assertEqual(h1, h3)
c.prec = old_precision
def test_min_and_max_methods(self):
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.assertTrue(min(d1,d2) is d1)
self.assertTrue(min(d2,d1) is d1)
self.assertTrue(max(d1,d2) is d2)
self.assertTrue(max(d2,d1) is d2)
#between Decimal and int
self.assertTrue(min(d1,l2) is d1)
self.assertTrue(min(l2,d1) is d1)
self.assertTrue(max(l1,d2) is d2)
self.assertTrue(max(d2,l1) is d2)
def test_as_nonzero(self):
#as false
self.assertFalse(Decimal(0))
#as true
self.assertTrue(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
def test_tonum_methods(self):
#Test the float and int conversion methods.
d1 = Decimal('66')
d2 = Decimal('15.32')
#int
self.assertEqual(int(d1), 66)
self.assertEqual(int(d2), 15)
#long (int and long are unified in Python 3)
self.assertEqual(int(d1), 66)
self.assertEqual(int(d2), 15)
#float
self.assertEqual(float(d1), 66)
self.assertEqual(float(d2), 15.32)
#floor
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 3),
('3.899', 3),
('-2.3', -3),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
]
for d, i in test_pairs:
self.assertEqual(math.floor(Decimal(d)), i)
self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))
#ceiling
test_pairs = [
('123.00', 123),
('3.2', 4),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
]
for d, i in test_pairs:
self.assertEqual(math.ceil(Decimal(d)), i)
self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))
#round, single argument
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('-3.5', -4),
('-2.5', -2),
('-1.5', -2),
('-0.5', 0),
('0.5', 0),
('1.5', 2),
('2.5', 2),
('3.5', 4),
]
for d, i in test_pairs:
self.assertEqual(round(Decimal(d)), i)
self.assertRaises(ValueError, round, Decimal('-NaN'))
self.assertRaises(ValueError, round, Decimal('sNaN'))
self.assertRaises(ValueError, round, Decimal('NaN123'))
self.assertRaises(OverflowError, round, Decimal('Inf'))
self.assertRaises(OverflowError, round, Decimal('-Inf'))
#round, two arguments; this is essentially equivalent
#to quantize, which is already extensively tested
test_triples = [
('123.456', -4, '0E+4'),
('123.456', -3, '0E+3'),
('123.456', -2, '1E+2'),
('123.456', -1, '1.2E+2'),
('123.456', 0, '123'),
('123.456', 1, '123.5'),
('123.456', 2, '123.46'),
('123.456', 3, '123.456'),
('123.456', 4, '123.4560'),
('123.455', 2, '123.46'),
('123.445', 2, '123.44'),
('Inf', 4, 'NaN'),
('-Inf', -23, 'NaN'),
('sNaN314', 3, 'NaN314'),
]
for d, n, r in test_triples:
self.assertEqual(str(round(Decimal(d), n)), r)
def test_eval_round_trip(self):
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
#with zero
d = Decimal(0)
self.assertEqual(d.as_tuple(), (0, (0,), 0) )
#int
d = Decimal(-45)
self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )
#complicated string
d = Decimal("-4.34913534E-17")
self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
#inf
d = Decimal("Infinity")
self.assertEqual(d.as_tuple(), (0, (0,), 'F') )
#leading zeros in coefficient should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
d = Decimal( (1, (0, 0, 0), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
d = Decimal( (1, (), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
#leading zeros in NaN diagnostic info should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
d = Decimal( (1, (0, 0, 0), 'N') )
self.assertEqual(d.as_tuple(), (1, (), 'N') )
d = Decimal( (1, (), 'n') )
self.assertEqual(d.as_tuple(), (1, (), 'n') )
#coefficient in infinity should be ignored
d = Decimal( (0, (4, 5, 3, 4), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (1, (0, 2, 7, 1), 'F') )
self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_immutability_operations(self):
# Do operations and check that they didn't change the internal objects.
d1 = Decimal('-25e55')
b1 = Decimal('-25e55')
d2 = Decimal('33e+33')
b2 = Decimal('33e+33')
def checkSameDec(operation, useOther=False):
if useOther:
eval("d1." + operation + "(d2)")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
self.assertEqual(d2._sign, b2._sign)
self.assertEqual(d2._int, b2._int)
self.assertEqual(d2._exp, b2._exp)
else:
eval("d1." + operation + "()")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
return
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
checkSameDec("__abs__")
checkSameDec("__add__", True)
checkSameDec("__divmod__", True)
checkSameDec("__eq__", True)
checkSameDec("__ne__", True)
checkSameDec("__le__", True)
checkSameDec("__lt__", True)
checkSameDec("__ge__", True)
checkSameDec("__gt__", True)
checkSameDec("__float__")
checkSameDec("__floordiv__", True)
checkSameDec("__hash__")
checkSameDec("__int__")
checkSameDec("__trunc__")
checkSameDec("__mod__", True)
checkSameDec("__mul__", True)
checkSameDec("__neg__")
checkSameDec("__bool__")
checkSameDec("__pos__")
checkSameDec("__pow__", True)
checkSameDec("__radd__", True)
checkSameDec("__rdivmod__", True)
checkSameDec("__repr__")
checkSameDec("__rfloordiv__", True)
checkSameDec("__rmod__", True)
checkSameDec("__rmul__", True)
checkSameDec("__rpow__", True)
checkSameDec("__rsub__", True)
checkSameDec("__str__")
checkSameDec("__sub__", True)
checkSameDec("__truediv__", True)
checkSameDec("adjusted")
checkSameDec("as_tuple")
checkSameDec("compare", True)
checkSameDec("max", True)
checkSameDec("min", True)
checkSameDec("normalize")
checkSameDec("quantize", True)
checkSameDec("remainder_near", True)
checkSameDec("same_quantum", True)
checkSameDec("sqrt")
checkSameDec("to_eng_string")
checkSameDec("to_integral")
def test_subclassing(self):
# Different behaviours when subclassing Decimal
class MyDecimal(Decimal):
pass
d1 = MyDecimal(1)
d2 = MyDecimal(2)
d = d1 + d2
self.assertTrue(type(d) is Decimal)
d = d1.max(d2)
self.assertTrue(type(d) is Decimal)
def test_implicit_context(self):
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
def test_conversions_from_int(self):
# Check that methods taking a second Decimal argument will
# always accept an integer in place of a Decimal.
self.assertEqual(Decimal(4).compare(3),
Decimal(4).compare(Decimal(3)))
self.assertEqual(Decimal(4).compare_signal(3),
Decimal(4).compare_signal(Decimal(3)))
self.assertEqual(Decimal(4).compare_total(3),
Decimal(4).compare_total(Decimal(3)))
self.assertEqual(Decimal(4).compare_total_mag(3),
Decimal(4).compare_total_mag(Decimal(3)))
self.assertEqual(Decimal(10101).logical_and(1001),
Decimal(10101).logical_and(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_or(1001),
Decimal(10101).logical_or(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_xor(1001),
Decimal(10101).logical_xor(Decimal(1001)))
self.assertEqual(Decimal(567).max(123),
Decimal(567).max(Decimal(123)))
self.assertEqual(Decimal(567).max_mag(123),
Decimal(567).max_mag(Decimal(123)))
self.assertEqual(Decimal(567).min(123),
Decimal(567).min(Decimal(123)))
self.assertEqual(Decimal(567).min_mag(123),
Decimal(567).min_mag(Decimal(123)))
self.assertEqual(Decimal(567).next_toward(123),
Decimal(567).next_toward(Decimal(123)))
self.assertEqual(Decimal(1234).quantize(100),
Decimal(1234).quantize(Decimal(100)))
self.assertEqual(Decimal(768).remainder_near(1234),
Decimal(768).remainder_near(Decimal(1234)))
self.assertEqual(Decimal(123).rotate(1),
Decimal(123).rotate(Decimal(1)))
self.assertEqual(Decimal(1234).same_quantum(1000),
Decimal(1234).same_quantum(Decimal(1000)))
self.assertEqual(Decimal('9.123').scaleb(-100),
Decimal('9.123').scaleb(Decimal(-100)))
self.assertEqual(Decimal(456).shift(-1),
Decimal(456).shift(Decimal(-1)))
self.assertEqual(Decimal(-12).fma(Decimal(45), 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, Decimal(67)),
Decimal(-12).fma(Decimal(45), Decimal(67)))
class DecimalPythonAPItests(unittest.TestCase):
def test_abc(self):
self.assertTrue(issubclass(Decimal, numbers.Number))
self.assertTrue(not issubclass(Decimal, numbers.Real))
self.assertTrue(isinstance(Decimal(0), numbers.Number))
self.assertTrue(not isinstance(Decimal(0), numbers.Real))
def test_pickle(self):
d = Decimal('-3.141590000')
p = pickle.dumps(d)
e = pickle.loads(p)
self.assertEqual(d, e)
def test_int(self):
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
self.assertRaises(ValueError, int, Decimal('-nan'))
self.assertRaises(ValueError, int, Decimal('snan'))
self.assertRaises(OverflowError, int, Decimal('inf'))
self.assertRaises(OverflowError, int, Decimal('-inf'))
def test_trunc(self):
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
def test_from_float(self):
class MyDecimal(Decimal):
pass
r = MyDecimal.from_float(0.1)
self.assertEqual(type(r), MyDecimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
bigint = 12345678901234567890123456789
self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan())
self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite())
self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite())
self.assertEqual(str(MyDecimal.from_float(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(MyDecimal.from_float(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
str(Decimal('-Infinity')))
self.assertRaises(TypeError, MyDecimal.from_float, 'abc')
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip
def test_create_decimal_from_float(self):
context = Context(prec=5, rounding=ROUND_DOWN)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1415')
)
context = Context(prec=5, rounding=ROUND_UP)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1416')
)
context = Context(prec=5, traps=[Inexact])
self.assertRaises(
Inexact,
context.create_decimal_from_float,
math.pi
)
self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
"Decimal('-0')")
self.assertEqual(repr(context.create_decimal_from_float(1.0)),
"Decimal('1')")
self.assertEqual(repr(context.create_decimal_from_float(10)),
"Decimal('10')")
class ContextAPItests(unittest.TestCase):
def test_pickle(self):
c = Context()
e = pickle.loads(pickle.dumps(c))
for k in vars(c):
v1 = vars(c)[k]
v2 = vars(e)[k]
self.assertEqual(v1, v2)
def test_equality_with_other_types(self):
self.assertTrue(Decimal(10) in ['a', 1.0, Decimal(10), (1,2), {}])
self.assertTrue(Decimal(10) not in ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
class WithStatementTest(unittest.TestCase):
# Can't do these as docstrings until Python 2.6
# as doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context in the block
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertTrue(orig_ctx is final_ctx, 'did not restore context correctly')
self.assertTrue(orig_ctx is not set_ctx, 'did not copy the context')
self.assertTrue(set_ctx is enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertTrue(orig_ctx is final_ctx, 'did not restore context correctly')
self.assertTrue(set_ctx.prec == new_ctx.prec, 'did not set correct context')
self.assertTrue(new_ctx is not set_ctx, 'did not copy the context')
self.assertTrue(set_ctx is enter_ctx, '__enter__ returned wrong context')
class ContextFlags(unittest.TestCase):
def test_flags_irrelevant(self):
# check that the result (numeric result + flags raised) of an
# arithmetic operation doesn't depend on the current flags
context = Context(prec=9, Emin = -999999999, Emax = 999999999,
rounding=ROUND_HALF_EVEN, traps=[], flags=[])
# operations that raise various flags, in the form (function, arglist)
operations = [
(context._apply, [Decimal("100E-1000000009")]),
(context.sqrt, [Decimal(2)]),
(context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
]
# try various flags individually, then a whole lot at once
flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
[Inexact, Rounded, Underflow, Clamped, Subnormal]]
for fn, args in operations:
# find answer and flags raised using a clean context
context.clear_flags()
ans = fn(*args)
flags = [k for k, v in context.flags.items() if v]
for extra_flags in flagsets:
# set flags, before calling operation
context.clear_flags()
for flag in extra_flags:
context._raise_error(flag)
new_ans = fn(*args)
# flags that we expect to be set after the operation
expected_flags = list(flags)
for flag in extra_flags:
if flag not in expected_flags:
expected_flags.append(flag)
expected_flags.sort(key=id)
# flags we actually got
new_flags = [k for k,v in context.flags.items() if v]
new_flags.sort(key=id)
self.assertEqual(ans, new_ans,
"operation produces different answers depending on flags set: " +
"expected %s, got %s." % (ans, new_ans))
self.assertEqual(new_flags, expected_flags,
"operation raises different flags depending on flags set: " +
"expected %s, got %s" % (expected_flags, new_flags))
def test_main(arith=False, verbose=None, todo_tests=None, debug=None):
""" Execute the tests.
Runs all arithmetic tests if arith is True or if the "decimal" resource
is enabled in regrtest.py
"""
init()
global TEST_ALL, DEBUG
TEST_ALL = arith or is_resource_enabled('decimal')
DEBUG = debug
if todo_tests is None:
test_classes = [
DecimalExplicitConstructionTest,
DecimalImplicitConstructionTest,
DecimalArithmeticOperatorsTest,
DecimalFormatTest,
DecimalUseOfContextTest,
DecimalUsabilityTest,
DecimalPythonAPItests,
ContextAPItests,
DecimalTest,
WithStatementTest,
ContextFlags
]
else:
test_classes = [DecimalTest]
# Dynamically build custom test definition for each file in the test
# directory and add the definitions to the DecimalTest class. This
# procedure ensures that new files do not get skipped.
for filename in os.listdir(directory):
if '.decTest' not in filename or filename.startswith("."):
continue
head, tail = filename.split('.')
if todo_tests is not None and head not in todo_tests:
continue
tester = lambda self, f=filename: self.eval_file(directory + f)
setattr(DecimalTest, 'test_' + head, tester)
del filename, head, tail, tester
try:
run_unittest(*test_classes)
if todo_tests is None:
import decimal as DecimalModule
run_doctest(DecimalModule, verbose)
finally:
setcontext(ORIGINAL_CONTEXT)
if __name__ == '__main__':
import optparse
p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
(opt, args) = p.parse_args()
if opt.skip:
test_main(arith=False, verbose=True)
elif args:
test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
else:
test_main(arith=True, verbose=True)
test_logging.py
# Copyright 2001-2019 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2019 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import copy
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok, assert_python_failure
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support.logging_helper import TestHandler
import textwrap
import threading
import time
import unittest
import warnings
import weakref
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
self._threading_key = threading_helper.threading_setup()
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Create two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
manager = logging.getLogger().manager
manager.disable = 0
loggerDict = manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
self.doCleanups()
threading_helper.threading_cleanup(*self._threading_key)
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
# Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
# Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
def test_issue27935(self):
fatal = logging.getLevelName('FATAL')
self.assertEqual(fatal, logging.FATAL)
def test_regression_29220(self):
"""See issue #29220 for more information."""
logging.addLevelName(logging.INFO, '')
self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
self.assertEqual(logging.getLevelName(logging.INFO), '')
self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't have to do this - if you
# don't, the system will use "Level n" as the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
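#
# The comment above mentions that an application whose own verbosity values do
# not fit these constraints can use a mapping dictionary to translate them into
# logging levels. The short sketch below is illustrative only and is not used
# by any test in this module; APP_TO_LOGGING and app_log are hypothetical names.
#
APP_TO_LOGGING = {1: BORING, 2: VERBOSE, 3: TERSE, 4: SILENT}

def app_log(logger, app_verbosity, msg):
    # Translate the application's own verbosity value into one of the custom
    # levels defined above, falling back to VERBOSE for unknown values.
    logger.log(APP_TO_LOGGING.get(app_verbosity, VERBOSE), msg)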
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only is 'Garrulous' still missing, but 'Sociable' and
# 'Taciturn' are missing as well
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, encoding='utf-8', delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
def test_path_objects(self):
"""
Test that Path objects are accepted as filename arguments to handlers.
See Issue #27493.
"""
fd, fn = tempfile.mkstemp()
os.close(fd)
os.unlink(fn)
pfn = pathlib.Path(fn)
cases = (
(logging.FileHandler, (pfn, 'w')),
(logging.handlers.RotatingFileHandler, (pfn, 'a')),
(logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
)
if sys.platform in ('linux', 'darwin'):
cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
for cls, args in cases:
h = cls(*args, encoding="utf-8")
self.assertTrue(os.path.exists(fn))
h.close()
os.unlink(fn)
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, encoding='utf-8', delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
# The implementation relies on os.register_at_fork existing, but we test
# based on os.fork existing because that is what users and this test use.
# This helps ensure that when fork exists (the important concept) that the
# register_at_fork mechanism is also present and used.
@unittest.skipIf(not hasattr(os, 'fork'), 'Test requires os.fork().')
def test_post_fork_child_no_deadlock(self):
"""Ensure child logging locks are not held; bpo-6721 & bpo-36533."""
class _OurHandler(logging.Handler):
def __init__(self):
super().__init__()
self.sub_handler = logging.StreamHandler(
stream=open('/dev/null', 'wt', encoding='utf-8'))
def emit(self, record):
self.sub_handler.acquire()
try:
self.sub_handler.emit(record)
finally:
self.sub_handler.release()
self.assertEqual(len(logging._handlers), 0)
refed_h = _OurHandler()
self.addCleanup(refed_h.sub_handler.stream.close)
refed_h.name = 'because we need at least one for this test'
self.assertGreater(len(logging._handlers), 0)
self.assertGreater(len(logging._at_fork_reinit_lock_weakset), 1)
test_logger = logging.getLogger('test_post_fork_child_no_deadlock')
test_logger.addHandler(refed_h)
test_logger.setLevel(logging.DEBUG)
locks_held__ready_to_fork = threading.Event()
fork_happened__release_locks_and_end_thread = threading.Event()
def lock_holder_thread_fn():
logging._acquireLock()
try:
refed_h.acquire()
try:
# Tell the main thread to do the fork.
locks_held__ready_to_fork.set()
# If the deadlock bug exists, the fork will happen
# without dealing with the locks we hold, deadlocking
# the child.
# Wait for a successful fork or an unreasonable amount of
# time before releasing our locks. To avoid a timing based
# test we'd need communication from os.fork() as to when it
# has actually happened. Given this is a regression test
# for a fixed issue, potentially less reliably detecting
# regression via timing is acceptable for simplicity.
# The test will always take at least this long. :(
fork_happened__release_locks_and_end_thread.wait(0.5)
finally:
refed_h.release()
finally:
logging._releaseLock()
lock_holder_thread = threading.Thread(
target=lock_holder_thread_fn,
name='test_post_fork_child_no_deadlock lock holder')
lock_holder_thread.start()
locks_held__ready_to_fork.wait()
pid = os.fork()
if pid == 0:
# Child process
try:
test_logger.info(r'Child process did not deadlock. \o/')
finally:
os._exit(0)
else:
# Parent process
test_logger.info(r'Parent process returned from fork. \o/')
fork_happened__release_locks_and_end_thread.set()
lock_holder_thread.join()
support.wait_process(pid, exitcode=0)
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamWithIntName(object):
level = logging.NOTSET
name = 2
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
def test_stream_setting(self):
"""
Test setting the handler's stream
"""
h = logging.StreamHandler()
stream = io.StringIO()
old = h.setStream(stream)
self.assertIs(old, sys.stderr)
actual = h.setStream(old)
self.assertIs(actual, stream)
# test that setting to existing value returns None
actual = h.setStream(old)
self.assertIsNone(actual)
def test_can_represent_stream_with_int_name(self):
h = logging.StreamHandler(StreamWithIntName())
self.assertEqual(repr(h), '<StreamHandler 2 (NOTSET)>')
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self._quit = False
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
while not self._quit:
asyncore.loop(poll_interval, map=self._map, count=1)
def stop(self):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
"""
self._quit = True
threading_helper.join_thread(self._thread)
self._thread = None
self.close()
asyncore.close_all(map=self._map, ignore_all=True)
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self):
"""
Tell the server thread to stop, and wait for it to do so.
"""
self.shutdown()
if self._thread is not None:
threading_helper.join_thread(self._thread)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name, default=None):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
:param bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
the server will set up the socket and listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
:param bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
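# Illustrative usage sketch (an assumption, not part of the test suite): the
# helpers above are driven with a plain callable rather than a RequestHandler
# subclass, roughly like this --
#
#     def handler(request):            # called with the request handler object
#         data = request.rfile.readline()
#         ...
#     server = TestTCPServer(('localhost', 0), handler)
#     server.start()
#     server.ready.wait()
#     ...                              # connect to ('localhost', server.port)
#     server.stop()
#
# SMTPHandlerTest below applies the same pattern to TestSMTPServer.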
class SMTPHandlerTest(BaseTest):
# bpo-14314, bpo-19665, bpo-34092: don't wait forever
TIMEOUT = support.LONG_TIMEOUT
def test_basic(self):
sockmap = {}
server = TestSMTPServer((socket_helper.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (socket_helper.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT)
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
def test_flush_on_close(self):
"""
Test that the flush-on-close configuration works as expected.
"""
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
self.mem_logger.removeHandler(self.mem_hdlr)
# Default behaviour is to flush on close. Check that it happens.
self.mem_hdlr.close()
lines = [
('DEBUG', '1'),
('INFO', '2'),
]
self.assert_log_lines(lines)
# Now configure for flushing not to be done on close.
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr,
False)
self.mem_logger.addHandler(self.mem_hdlr)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.info(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.removeHandler(self.mem_hdlr)
self.mem_hdlr.close()
# assert that no new lines have been added
self.assert_log_lines(lines) # no change
def test_race_between_set_target_and_flush(self):
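# Exercise the race between flush() and setTarget(None): each handled record
# spawns a thread that clears the target, and flushing must tolerate the
# target disappearing mid-flush without raising.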
class MockRaceConditionHandler:
def __init__(self, mem_hdlr):
self.mem_hdlr = mem_hdlr
self.threads = []
def removeTarget(self):
self.mem_hdlr.setTarget(None)
def handle(self, msg):
thread = threading.Thread(target=self.removeTarget)
self.threads.append(thread)
thread.start()
target = MockRaceConditionHandler(self.mem_hdlr)
try:
self.mem_hdlr.setTarget(target)
for _ in range(10):
time.sleep(0.005)
self.mem_logger.info("not flushed")
self.mem_logger.warning("flushed")
finally:
for thread in target.threads:
threading_helper.join_thread(thread)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
check_no_resource_warning = warnings_helper.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger, and uses kwargs instead of args.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
kwargs={'stream': sys.stdout,}
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config 8, check for resource warning
config8 = r"""
[loggers]
keys=root
[handlers]
keys=file
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=file
[handler_file]
class=FileHandler
level=DEBUG
args=("{tempfile}",)
kwargs={{"encoding": "utf-8"}}
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, encoding="utf-8", **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config8_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
# On Windows, replace single backslashes with double backslashes
# to avoid a unicode escape error during string formatting.
if os.name == "nt":
fn = fn.replace("\\", "\\\\")
config8 = self.config8.format(tempfile=fn)
self.apply_config(config8)
self.apply_config(config8)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
def test_config_set_handler_names(self):
test_config = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
handlers=hand1
[handler_hand1]
class=StreamHandler
formatter=form1
[formatter_form1]
format=%(levelname)s ++ %(message)s
"""
self.apply_config(test_config)
self.assertEqual(logging.getLogger().handlers[0].name, 'hand1')
def test_defaults_do_no_interpolation(self):
"""bpo-33802 defaults should not get interpolated"""
ini = textwrap.dedent("""
[formatters]
keys=default
[formatter_default]
[handlers]
keys=console
[handler_console]
class=logging.StreamHandler
args=tuple()
[loggers]
keys=root
[logger_root]
formatter=default
handlers=console
""").strip()
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
try:
os.write(fd, ini.encode('ascii'))
os.close(fd)
logging.config.fileConfig(
fn,
encoding="utf-8",
defaults=dict(
version=1,
disable_existing_loggers=False,
formatters={
"generic": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter"
},
},
)
)
finally:
os.unlink(fn)
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
if self.server:
self.server.stop()
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
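# SocketHandler frames each record as a 4-byte big-endian length prefix
# followed by a pickled dict of LogRecord attributes.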
conn = request.connection
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
if self.server_exception:
self.skipTest(self.server_exception)
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop()
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
os_helper.unlink(self.address)
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
if self.server:
self.server.stop()
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
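# DatagramHandler sends the same pickled payload as SocketHandler, still
# prefixed with a 4-byte length, which is skipped here.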
slen = struct.pack('>L', 0) # length of prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
os_helper.unlink(self.address)
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sl_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls((server.server_address[0], server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the server."""
try:
if self.server:
self.server.stop()
if self.sl_hdlr:
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
if self.server_exception:
self.skipTest(self.server_exception)
# The log message sent to the SysLogHandler is properly received.
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
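# '<11>' is the syslog PRI field: facility LOG_USER (1) * 8 + severity LOG_ERR (3).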
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
os_helper.unlink(self.address)
@unittest.skipUnless(socket_helper.IPV6_ENABLED,
'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with IPv6 host."""
server_class = TestUDPServer
address = ('::1', 0)
def setUp(self):
self.server_class.address_family = socket.AF_INET6
super(IPv6SysLogHandlerTest, self).setUp()
def tearDown(self):
self.server_class.address_family = socket.AF_INET
super(IPv6SysLogHandlerTest, self).tearDown()
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
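# Capture the method and parsed URL; for POST, read the body so the test
# can inspect the urlencoded LogRecord attributes.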
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop()
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assertTruesurvival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assertTruesurvival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# Historical note: in Python 2.x a plain file object was treated as having
# no encoding; the handler below specifies utf-8 explicitly.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
# Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
# Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
# Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
# See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
# confirm our assumption: the 'py.warnings' logger has no handlers yet
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
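# Module-level factories and classes below are referenced from the dictConfig
# test configurations, either by dotted name or as real callables via the '()' key.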
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
class myCustomFormatter:
def __init__(self, fmt, datefmt=None):
pass
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
check_no_resource_warning = warnings_helper.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config 7 does not define compiler.parser but defines compiler.lexer
# so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
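# config9 sets handler and logger levels to WARNING; config9a and config9b use
# 'incremental': True to adjust the levels of the existing handler and logger
# in place rather than replacing the configuration.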
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
# As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
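# out_of_order pairs a %-style format string with the '$' style, which dictConfig
# rejects unless the format is rewritten (see the test_out_of_order* tests), and
# its MemoryHandler names another handler in the same dict as its target.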
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as '()' key and 'validate' set to False
custom_formatter_class_validate = {
'version': 1,
'formatters': {
'form1': {
'()': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom logging.Formatter subclass as 'class' key and 'validate' set to False
custom_formatter_class_validate2 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.ExceptionFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom class that is not inherited from logging.Formatter
custom_formatter_class_validate3 = {
'version': 1,
'formatters': {
'form1': {
'class': __name__ + '.myCustomFormatter',
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
# Configuration with custom function and 'validate' set to False
custom_formatter_with_function = {
'version': 1,
'formatters': {
'form1': {
'()': formatFunc,
'format': '%(levelname)s:%(name)s:%(message)s',
'validate': False,
},
},
'handlers' : {
'hand1' : {
'class': 'logging.StreamHandler',
'formatter': 'form1',
'level': 'NOTSET',
'stream': 'ext://sys.stdout',
},
},
"loggers": {
"my_test_logger_custom_formatter": {
"level": "DEBUG",
"handlers": ["hand1"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
# Same as test_config7_ok but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
# Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
# Nothing will be output since handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
# Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
def test_config15_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
config = {
"version": 1,
"handlers": {
"file": {
"class": "logging.FileHandler",
"filename": fn,
"encoding": "utf-8",
}
},
"root": {
"handlers": ["file"]
}
}
self.apply_config(config)
self.apply_config(config)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
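# The config socket listener expects a 4-byte big-endian length prefix
# followed by the configuration bytes.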
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
threading_helper.join_thread(t)
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.assertRaises(ValueError, self.apply_config, self.out_of_order)
def test_out_of_order_with_dollar_style(self):
config = copy.deepcopy(self.out_of_order)
config['formatters']['mySimpleFormatter']['format'] = "${asctime} (${name}) ${levelname}: ${message}"
self.apply_config(config)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_custom_formatter_class_with_validate(self):
self.apply_config(self.custom_formatter_class_validate)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2(self):
self.apply_config(self.custom_formatter_class_validate2)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate2_with_wrong_fmt(self):
config = copy.deepcopy(self.custom_formatter_class_validate)  # deep copy so the shared class-level config is not mutated
config['formatters']['form1']['style'] = "$"
# No exception should be raised, as 'validate' is set to False
self.apply_config(config)
handler = logging.getLogger("my_test_logger_custom_formatter").handlers[0]
self.assertIsInstance(handler.formatter, ExceptionFormatter)
def test_custom_formatter_class_with_validate3(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_class_validate3)
def test_custom_formatter_function_with_validate(self):
self.assertRaises(ValueError, self.apply_config, self.custom_formatter_with_function)
def test_baseconfig(self):
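# BaseConfigurator.convert() resolves 'cfg://' references via dict keys,
# attribute access, and integer or string indexing.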
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
def test_namedtuple(self):
# see bpo-39142
from collections import namedtuple
class MyHandler(logging.StreamHandler):
def __init__(self, resource, *args, **kwargs):
super().__init__(*args, **kwargs)
self.resource: namedtuple = resource
def emit(self, record):
record.msg += f' {self.resource.type}'
return super().emit(record)
Resource = namedtuple('Resource', ['type', 'labels'])
resource = Resource(type='my_type', labels=['a'])
config = {
'version': 1,
'handlers': {
'myhandler': {
'()': MyHandler,
'resource': resource
}
},
'root': {'level': 'INFO', 'handlers': ['myhandler']},
}
with support.captured_stderr() as stderr:
self.apply_config(config)
logging.info('some log')
self.assertEqual(stderr.getvalue(), 'some log my_type\n')
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.name = 'que'
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
def test_formatting(self):
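# QueueHandler.prepare() formats the record before enqueueing, so both
# msg and message carry the formatted text.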
msg = self.next_message()
levelname = logging.getLevelName(logging.WARNING)
log_format_str = '{name} -> {levelname}: {message}'
formatted_msg = log_format_str.format(name=self.name,
levelname=levelname, message=msg)
formatter = logging.Formatter(self.log_format)
self.que_hdlr.setFormatter(formatter)
self.que_logger.warning(msg)
log_record = self.queue.get_nowait()
self.assertEqual(formatted_msg, log_record.msg)
self.assertEqual(formatted_msg, log_record.message)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
handler.close()
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_StreamHandler(self):
# Test that the traceback is appended only once (bpo-34334).
listener = logging.handlers.QueueListener(self.queue, self.root_hdlr)
listener.start()
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.que_logger.exception(self.next_message(), exc_info=exc)
listener.stop()
self.assertEqual(self.stream.getvalue().strip().count('Traceback'), 1)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener_with_multiple_handlers(self):
# Test that queue handler format doesn't affect other handler formats (bpo-35726).
self.que_hdlr.setFormatter(self.root_formatter)
self.que_logger.addHandler(self.root_hdlr)
listener = logging.handlers.QueueListener(self.queue, self.que_hdlr)
listener.start()
self.que_logger.error("error")
listener.stop()
self.assertEqual(self.stream.getvalue().strip(), "que -> ERROR: error")
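# A minimal usage sketch (not part of the test suite) of the pattern the tests
# above and below exercise: a QueueHandler enqueues records, and a
# QueueListener drains the queue on a background thread and dispatches the
# records to the real handlers.
#
#     q = queue.Queue(-1)
#     logging.getLogger(__name__).addHandler(logging.handlers.QueueHandler(q))
#     listener = logging.handlers.QueueListener(q, logging.StreamHandler())
#     listener.start()
#     ...  # log as usual from any thread
#     listener.stop()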
if hasattr(logging.handlers, 'QueueListener'):
import multiprocessing
from unittest.mock import patch
class QueueListenerTest(BaseTest):
"""
Tests based on patch submitted for issue #27930. Ensure that
QueueListener handles all log messages.
"""
repeat = 20
@staticmethod
def setup_and_log(log_queue, ident):
"""
Creates a logger with a QueueHandler that logs to a queue read by a
QueueListener. Starts the listener, logs five messages, and stops
the listener.
"""
logger = logging.getLogger('test_logger_with_id_%s' % ident)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(handler)
listener = logging.handlers.QueueListener(log_queue)
listener.start()
logger.info('one')
logger.info('two')
logger.info('three')
logger.info('four')
logger.info('five')
listener.stop()
logger.removeHandler(handler)
handler.close()
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_queue_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = queue.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_mp_queue(self, mock_handle):
# bpo-28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
for i in range(self.repeat):
log_queue = multiprocessing.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
log_queue.close()
log_queue.join_thread()
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@staticmethod
def get_all_from_queue(log_queue):
try:
while True:
yield log_queue.get_nowait()
except queue.Empty:
return []
def test_no_messages_in_queue_after_stop(self):
"""
            Five messages are logged, then the QueueListener is stopped. This
test then gets everything off the queue. Failure of this test
indicates that messages were not registered on the queue until
_after_ the QueueListener stopped.
"""
# bpo-28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.skip_if_broken_multiprocessing_synchronize()
for i in range(self.repeat):
queue = multiprocessing.Queue()
self.setup_and_log(queue, '%s_%s' %(self.id(), i))
# time.sleep(1)
items = list(self.get_all_from_queue(queue))
queue.close()
queue.join_thread()
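                # After stop(), the queue should be empty or hold only the
                # listener's internal stop sentinel.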
expected = [[], [logging.handlers.QueueListener._sentinel]]
self.assertIn(items, expected,
'Found unexpected messages in queue: %s' % (
[m.msg if isinstance(m, logging.LogRecord)
else m for m in items]))
def test_calls_task_done_after_stop(self):
# Issue 36813: Make sure queue.join does not deadlock.
log_queue = queue.Queue()
listener = logging.handlers.QueueListener(log_queue)
listener.start()
listener.stop()
with self.assertRaises(ValueError):
# Make sure all tasks are done and .join won't block.
log_queue.task_done()
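# Minimal fixed-offset tzinfo used by the formatter time tests below.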
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class AssertErrorMessage:
def assert_error_message(self, exception, message, *args, **kwargs):
try:
self.assertRaises((), *args, **kwargs)
except exception as e:
self.assertEqual(message, str(e))
class FormatterTest(unittest.TestCase, AssertErrorMessage):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
'custom': {
'custom': 1234
}
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)#15s')
self.assertTrue(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(ValueError, f.format, r)
f = logging.Formatter("{message}", style='{')
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('${message}', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(ValueError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${message}', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}--', style='$')
self.assertTrue(f.usesTime())
def test_format_validate(self):
# Check correct formatting
# Percentage style
f = logging.Formatter("%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
self.assertEqual(f._fmt, "%(levelname)-15s - %(message) 5s - %(process)03d - %(module) - %(asctime)*.3s")
f = logging.Formatter("%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
self.assertEqual(f._fmt, "%(asctime)*s - %(asctime)*.3s - %(process)-34.33o")
f = logging.Formatter("%(process)#+027.23X")
self.assertEqual(f._fmt, "%(process)#+027.23X")
f = logging.Formatter("%(foo)#.*g")
self.assertEqual(f._fmt, "%(foo)#.*g")
# StrFormat Style
f = logging.Formatter("$%{message}%$ - {asctime!a:15} - {customfield['key']}", style="{")
self.assertEqual(f._fmt, "$%{message}%$ - {asctime!a:15} - {customfield['key']}")
f = logging.Formatter("{process:.2f} - {custom.f:.4f}", style="{")
self.assertEqual(f._fmt, "{process:.2f} - {custom.f:.4f}")
f = logging.Formatter("{customfield!s:#<30}", style="{")
self.assertEqual(f._fmt, "{customfield!s:#<30}")
f = logging.Formatter("{message!r}", style="{")
self.assertEqual(f._fmt, "{message!r}")
f = logging.Formatter("{message!s}", style="{")
self.assertEqual(f._fmt, "{message!s}")
f = logging.Formatter("{message!a}", style="{")
self.assertEqual(f._fmt, "{message!a}")
f = logging.Formatter("{process!r:4.2}", style="{")
self.assertEqual(f._fmt, "{process!r:4.2}")
f = logging.Formatter("{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}", style="{")
self.assertEqual(f._fmt, "{process!s:<#30,.12f}- {custom:=+#30,.1d} - {module:^30}")
f = logging.Formatter("{process!s:{w},.{p}}", style="{")
self.assertEqual(f._fmt, "{process!s:{w},.{p}}")
f = logging.Formatter("{foo:12.{p}}", style="{")
self.assertEqual(f._fmt, "{foo:12.{p}}")
f = logging.Formatter("{foo:{w}.6}", style="{")
self.assertEqual(f._fmt, "{foo:{w}.6}")
f = logging.Formatter("{foo[0].bar[1].baz}", style="{")
self.assertEqual(f._fmt, "{foo[0].bar[1].baz}")
f = logging.Formatter("{foo[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{foo[k1].bar[k2].baz}")
f = logging.Formatter("{12[k1].bar[k2].baz}", style="{")
self.assertEqual(f._fmt, "{12[k1].bar[k2].baz}")
# Dollar style
f = logging.Formatter("${asctime} - $message", style="$")
self.assertEqual(f._fmt, "${asctime} - $message")
f = logging.Formatter("$bar $$", style="$")
self.assertEqual(f._fmt, "$bar $$")
f = logging.Formatter("$bar $$$$", style="$")
        self.assertEqual(f._fmt, "$bar $$$$")  # '$$$$' escapes to two literal '$' characters
# Testing when ValueError being raised from incorrect format
# Percentage Style
self.assertRaises(ValueError, logging.Formatter, "%(asctime)Z")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)b")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)*3s")
self.assertRaises(ValueError, logging.Formatter, "%(asctime)_")
self.assertRaises(ValueError, logging.Formatter, '{asctime}')
self.assertRaises(ValueError, logging.Formatter, '${message}')
self.assertRaises(ValueError, logging.Formatter, '%(foo)#12.3*f') # with both * and decimal number as precision
self.assertRaises(ValueError, logging.Formatter, '%(foo)0*.8*f')
# StrFormat Style
# Testing failure for '-' in field name
self.assert_error_message(
ValueError,
"invalid format: invalid field name/expression: 'name-thing'",
logging.Formatter, "{name-thing}", style="{"
)
# Testing failure for style mismatch
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '%(asctime)s', style='{'
)
# Testing failure for invalid conversion
self.assert_error_message(
ValueError,
"invalid conversion: 'Z'"
)
self.assertRaises(ValueError, logging.Formatter, '{asctime!s:#30,15f}', style='{')
self.assert_error_message(
ValueError,
"invalid format: expected ':' after conversion specifier",
logging.Formatter, '{asctime!aa:15}', style='{'
)
# Testing failure for invalid spec
self.assert_error_message(
ValueError,
"invalid format: bad specifier: '.2ff'",
logging.Formatter, '{process:.2ff}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{process:.2Z}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<##30,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:<#30#,12g}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{process!s:{{w}},{{p}}}', style='{')
# Testing failure for mismatch braces
self.assert_error_message(
ValueError,
"invalid format: expected '}' before end of string",
logging.Formatter, '{process', style='{'
)
self.assert_error_message(
ValueError,
"invalid format: Single '}' encountered in format string",
logging.Formatter, 'process}', style='{'
)
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{{foo!r:4.2}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo/bar}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo:{{w}}.{{p}}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!X:{{w}}.{{p}}}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:random}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{dom}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo!a:ran{d}om}', style='{')
self.assertRaises(ValueError, logging.Formatter, '{foo.!a:d}', style='{')
# Dollar style
# Testing failure for mismatch bare $
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, '$bar $$$', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'bar $', style='$'
)
self.assert_error_message(
ValueError,
"invalid format: bare \'$\' not allowed",
logging.Formatter, 'foo $.', style='$'
)
# Testing failure for mismatch style
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, '{asctime}', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '%(asctime)s', style='$')
# Testing failure for incorrect fields
self.assert_error_message(
ValueError,
"invalid format: no fields",
logging.Formatter, 'foo', style='$'
)
self.assertRaises(ValueError, logging.Formatter, '${asctime', style='$')
def test_defaults_parameter(self):
fmts = ['%(custom)s %(message)s', '{custom} {message}', '$custom $message']
styles = ['%', '{', '$']
for fmt, style in zip(fmts, styles):
f = logging.Formatter(fmt, style=style, defaults={'custom': 'Default'})
r = self.get_record()
self.assertEqual(f.format(r), 'Default Message with 2 placeholders')
r = self.get_record("custom")
self.assertEqual(f.format(r), '1234 Message with 2 placeholders')
# Without default
f = logging.Formatter(fmt, style=style)
r = self.get_record()
self.assertRaises(ValueError, f.format, r)
# Non-existing default is ignored
f = logging.Formatter(fmt, style=style, defaults={'Non-existing': 'Default'})
r = self.get_record("custom")
self.assertEqual(f.format(r), '1234 Message with 2 placeholders')
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
def test_default_msec_format_none(self):
class NoMsecFormatter(logging.Formatter):
default_msec_format = None
default_time_format = '%d/%m/%Y %H:%M:%S'
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 123, utc)
r.created = time.mktime(dt.astimezone(None).timetuple())
f = NoMsecFormatter()
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '21/04/1993 08:03:00')
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
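# Stub handler used by ShutdownTest: it records the order in which shutdown()
# invokes acquire/flush/close/release on each handler.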
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
self.assertRaises(ValueError, logging.disable, "doesnotexists")
class _NotAnIntOrString:
pass
self.assertRaises(TypeError, logging.disable, _NotAnIntOrString())
logging.disable("WARN")
# test the default value introduced in 3.7
# (Issue #28524)
logging.disable()
self.assertEqual(logging.root.manager.disable, logging.CRITICAL)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
def test_subclass_logger_cache(self):
# bpo-37258
message = []
class MyLogger(logging.getLoggerClass()):
def __init__(self, name='MyLogger', level=logging.NOTSET):
super().__init__(name, level)
message.append('initialized')
logging.setLoggerClass(MyLogger)
logger = logging.getLogger('just_some_logger')
self.assertEqual(message, ['initialized'])
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger.addHandler(h)
try:
logger.setLevel(logging.DEBUG)
logger.debug("hello")
self.assertEqual(stream.getvalue().strip(), "hello")
stream.truncate(0)
stream.seek(0)
logger.setLevel(logging.INFO)
logger.debug("hello")
self.assertEqual(stream.getvalue(), "")
finally:
logger.removeHandler(h)
h.close()
logging.setLoggerClass(logging.Logger)
def test_logging_at_shutdown(self):
# bpo-20037: Doing text I/O late at interpreter shutdown must not crash
code = textwrap.dedent("""
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()
""")
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
def test_logging_at_shutdown_open(self):
# bpo-26789: FileHandler keeps a reference to the builtin open()
# function to be able to open or reopen the file during Python
# finalization.
filename = os_helper.TESTFN
self.addCleanup(os_helper.unlink, filename)
code = textwrap.dedent(f"""
import builtins
import logging
class A:
def __del__(self):
logging.error("log in __del__")
# basicConfig() opens the file, but logging.shutdown() closes
# it at Python exit. When A.__del__() is called,
# FileHandler._open() must be called again to re-open the file.
logging.basicConfig(filename={filename!r}, encoding="utf-8")
a = A()
# Simulate the Python finalization which removes the builtin
# open() function.
del builtins.open
""")
assert_python_ok("-c", code)
with open(filename, encoding="utf-8") as fp:
self.assertEqual(fp.read().rstrip(), "ERROR:root:log in __del__")
def test_recursion_error(self):
# Issue 36272
code = textwrap.dedent("""
import logging
def rec():
logging.error("foo")
rec()
rec()
""")
rc, out, err = assert_python_failure("-c", code)
err = err.decode()
self.assertNotIn("Cannot recover from stack overflow.", err)
self.assertEqual(rc, 1)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
@staticmethod # pickled as target of child process in the following test
def _extract_logrecord_process_name(key, logMultiprocessing, conn=None):
prev_logMultiprocessing = logging.logMultiprocessing
logging.logMultiprocessing = logMultiprocessing
try:
import multiprocessing as mp
name = mp.current_process().name
r1 = logging.makeLogRecord({'msg': f'msg1_{key}'})
del sys.modules['multiprocessing']
r2 = logging.makeLogRecord({'msg': f'msg2_{key}'})
results = {'processName' : name,
'r1.processName': r1.processName,
'r2.processName': r2.processName,
}
finally:
logging.logMultiprocessing = prev_logMultiprocessing
if conn:
conn.send(results)
else:
return results
def test_multiprocessing(self):
multiprocessing_imported = 'multiprocessing' in sys.modules
try:
# logMultiprocessing is True by default
self.assertEqual(logging.logMultiprocessing, True)
LOG_MULTI_PROCESSING = True
# When logMultiprocessing == True:
# In the main process processName = 'MainProcess'
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
results = self._extract_logrecord_process_name(1, LOG_MULTI_PROCESSING)
self.assertEqual('MainProcess', results['processName'])
self.assertEqual('MainProcess', results['r1.processName'])
self.assertEqual('MainProcess', results['r2.processName'])
            # In other processes, processName is correct when multiprocessing is imported,
# but it is (incorrectly) defaulted to 'MainProcess' otherwise (bpo-38762).
import multiprocessing
parent_conn, child_conn = multiprocessing.Pipe()
p = multiprocessing.Process(
target=self._extract_logrecord_process_name,
args=(2, LOG_MULTI_PROCESSING, child_conn,)
)
p.start()
results = parent_conn.recv()
self.assertNotEqual('MainProcess', results['processName'])
self.assertEqual(results['processName'], results['r1.processName'])
self.assertEqual('MainProcess', results['r2.processName'])
p.join()
finally:
if multiprocessing_imported:
import multiprocessing
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.setLevel(self.original_logging_level)
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', encoding='utf-8')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a', encoding='utf-8')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='%(asctime)s - %(message)s')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, '%(asctime)s - %(message)s')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def test_force(self):
old_string_io = io.StringIO()
new_string_io = io.StringIO()
old_handlers = [logging.StreamHandler(old_string_io)]
new_handlers = [logging.StreamHandler(new_string_io)]
logging.basicConfig(level=logging.WARNING, handlers=old_handlers)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
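        # force=True removes and closes the existing root handlers, so only the
        # new stream receives the records that follow.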
logging.basicConfig(level=logging.INFO, handlers=new_handlers,
force=True)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
self.assertEqual(old_string_io.getvalue().strip(),
'WARNING:root:warn')
self.assertEqual(new_string_io.getvalue().strip(),
'WARNING:root:warn\nINFO:root:info')
def test_encoding(self):
try:
encoding = 'utf-8'
logging.basicConfig(filename='test.log', encoding=encoding,
errors='strict',
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data,
'The Øresund Bridge joins Copenhagen to Malmö')
def test_encoding_errors(self):
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
errors='ignore',
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data, 'The resund Bridge joins Copenhagen to Malm')
def test_encoding_errors_default(self):
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
self.assertEqual(handler.errors, 'backslashreplace')
logging.debug('😂: ☃️: The Øresund Bridge joins Copenhagen to Malmö')
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
self.assertEqual(data, r'\U0001f602: \u2603\ufe0f: The \xd8resund '
r'Bridge joins Copenhagen to Malm\xf6')
def test_encoding_errors_none(self):
# Specifying None should behave as 'strict'
try:
encoding = 'ascii'
logging.basicConfig(filename='test.log', encoding=encoding,
errors=None,
format='%(message)s', level=logging.DEBUG)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
self.assertEqual(handler.encoding, encoding)
self.assertIsNone(handler.errors)
message = []
def dummy_handle_error(record):
_, v, _ = sys.exc_info()
message.append(str(v))
handler.handleError = dummy_handle_error
logging.debug('The Øresund Bridge joins Copenhagen to Malmö')
self.assertTrue(message)
self.assertIn("'ascii' codec can't encode "
"character '\\xd8' in position 4:", message[0])
finally:
handler.close()
with open('test.log', encoding='utf-8') as f:
data = f.read().strip()
os.remove('test.log')
# didn't write anything due to the encoding error
self.assertEqual(data, r'')
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
def test_nested(self):
class Adapter(logging.LoggerAdapter):
prefix = 'Adapter'
def process(self, msg, kwargs):
return f"{self.prefix} {msg}", kwargs
msg = 'Adapters can be nested, yo.'
adapter = Adapter(logger=self.logger, extra=None)
adapter_adapter = Adapter(logger=adapter, extra=None)
adapter_adapter.prefix = 'AdapterAdapter'
self.assertEqual(repr(adapter), repr(adapter_adapter))
adapter_adapter.log(logging.CRITICAL, msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
self.assertEqual(record.args, (self.recording,))
orig_manager = adapter_adapter.manager
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
temp_manager = object()
try:
adapter_adapter.manager = temp_manager
self.assertIs(adapter_adapter.manager, temp_manager)
self.assertIs(adapter.manager, temp_manager)
self.assertIs(self.logger.manager, temp_manager)
finally:
adapter_adapter.manager = orig_manager
self.assertIs(adapter_adapter.manager, orig_manager)
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
class LoggerTest(BaseTest, AssertErrorMessage):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assert_error_message(
TypeError, 'Level not an integer or a valid string: None',
self.logger.setLevel, None)
self.assert_error_message(
TypeError, 'Level not an integer or a valid string: (0, 0)',
self.logger.setLevel, (0, 0))
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_find_caller_with_stacklevel(self):
the_level = 1
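        # Each increase of stacklevel attributes the record to a frame one step
        # further up the innermost -> inner -> outer -> test method chain.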
def innermost():
self.logger.warning('test', stacklevel=the_level)
def inner():
innermost()
def outer():
inner()
records = self.recording.records
outer()
self.assertEqual(records[-1].funcName, 'innermost')
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'inner')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'outer')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'test_find_caller_with_stacklevel')
self.assertGreater(records[-1].lineno, lineno)
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_is_enabled_for_disabled_logger(self):
old_disabled = self.logger.disabled
old_disable = self.logger.manager.disable
self.logger.disabled = True
self.logger.manager.disable = 21
self.addCleanup(setattr, self.logger, 'disabled', old_disabled)
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('root'))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
logger = logging.getLogger(name)
s = pickle.dumps(logger, proto)
unpickled = pickle.loads(s)
self.assertIs(unpickled, logger)
def test_caching(self):
root = self.root_logger
logger1 = logging.getLogger("abc")
logger2 = logging.getLogger("abc.def")
# Set root logger level and ensure cache is empty
root.setLevel(logging.ERROR)
self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
self.assertEqual(logger2._cache, {})
# Ensure cache is populated and calls are consistent
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
self.assertEqual(root._cache, {})
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
# Ensure root cache gets populated
self.assertEqual(root._cache, {})
self.assertTrue(root.isEnabledFor(logging.ERROR))
self.assertEqual(root._cache, {logging.ERROR: True})
# Set parent logger level and ensure caches are emptied
logger1.setLevel(logging.CRITICAL)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
# Ensure logger2 uses parent logger's effective level
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
# Set level to NOTSET and ensure caches are empty
logger2.setLevel(logging.NOTSET)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Verify logger2 follows parent and not root
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger1.isEnabledFor(logging.ERROR))
self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
self.assertTrue(root.isEnabledFor(logging.ERROR))
# Disable logging in manager and ensure caches are clear
logging.disable()
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Ensure no loggers are enabled
self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(root.isEnabledFor(logging.CRITICAL))
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, encoding='utf-8', delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
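# A minimal sketch (not part of the tests) of the rotation hooks exercised
# below: RotatingFileHandler rolls over once a record would push the file past
# maxBytes, and the optional namer/rotator callables control how the rotated
# file is named and transformed.
#
#     rh = logging.handlers.RotatingFileHandler("app.log", maxBytes=1_000_000,
#                                               backupCount=3)
#     rh.namer = lambda name: name + ".gz"
#     rh.rotator = my_compress_and_remove  # hypothetical callable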
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
        # If maxBytes is zero, rollover never occurs
rh = logging.handlers.RotatingFileHandler(
self.fn, encoding="utf-8", maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, encoding="utf-8", maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn, encoding="utf-8")
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
def test_namer_rotator_inheritance(self):
class HandlerWithNamerAndRotator(logging.handlers.RotatingFileHandler):
def namer(self, name):
return name + ".test"
def rotator(self, source, dest):
if os.path.exists(source):
os.replace(source, dest + ".rotated")
rh = HandlerWithNamerAndRotator(
self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
self.assertEqual(rh.namer(self.fn), self.fn + ".test")
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(rh.namer(self.fn + ".1") + ".rotated")
self.assertFalse(os.path.exists(rh.namer(self.fn + ".1")))
rh.close()
@support.requires_zlib()
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, encoding="utf-8", backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
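        # Emitting the second record triggers rollover: the file holding m1 is
        # compressed by the custom rotator into namer(self.fn + ".1").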
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(
self.fn, 'S', encoding="utf-8", backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
# print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', encoding="utf-8", delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', encoding="utf-8", delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', encoding="utf-8", delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, encoding="utf-8", when='MIDNIGHT', interval=1, backupCount=0,
utc=True, atTime=atTime)
try:
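            # currentTime == 0 is midnight UTC, so the 12:00 rollover is 12 hours
            # away; once past 13:00, the next rollover is at the 36-hour mark.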
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, encoding="utf-8", when='W%d' % day, interval=1, backupCount=0,
utc=True, atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
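# Attach one test_compute_rollover_<when> method per interval type; exp is the
# expected number of seconds from the epoch (currentTime == 0) to the first rollover.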
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, encoding="utf-8", when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
not_exported = {
'logThreads', 'logMultiprocessing', 'logProcesses', 'currentframe',
'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
'Filterer', 'PlaceHolder', 'Manager', 'RootLogger', 'root',
'threading'}
support.check__all__(self, logging, not_exported=not_exported)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
tests = [
BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
RotatingFileHandlerTest, LastResortTest, LogRecordTest,
ExceptionTest, SysLogHandlerTest, IPv6SysLogHandlerTest, HTTPHandlerTest,
NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
MiscTestCase
]
if hasattr(logging.handlers, 'QueueListener'):
tests.append(QueueListenerTest)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
command_line.py
|
import sklearn # to load libgomp early to solve problems with static TLS on some systems like bioconda mulled tests
import matplotlib.pyplot as plt # also to solve import ordering problems in bioconda mulled tests
from deepaclive.receiver import Receiver
from deepaclive.sender import Sender
from deepaclive.refilter import Refilterer
from deepaclive.tests import run_tests
import argparse
from deepaclive import __version__
from multiprocessing import Process
from deepac.command_line import add_global_parser, global_setup
from deepac.utils import config_cpus, config_gpus
import numpy as np
import random as rn
def main():
seed = 0
np.random.seed(seed)
rn.seed(seed)
parse()
def run_tester(args):
tpu_resolver = global_setup(args)
if args.tpu is None:
n_cpus = config_cpus(args.n_cpus_rec)
config_gpus(args.gpus)
else:
n_cpus = args.n_cpus_rec
if args.custom:
args.command = None
run_tests(args.command, args.model, n_cpus, args.keep, args.scale, tpu_resolver)
def run_sender(args):
sender = Sender(read_length=args.read_length, input_dir=args.in_dir, output_dir=args.send_out_dir,
user_hostname=args.remote, key=args.key, port=args.port,
n_cpus=args.n_cpus_send, do_all=args.all, do_mapped=args.mapped)
barcodes = args.barcodes.split(',')
cycles = [int(c) for c in args.cycle_list.split(',')]
sender.run(cycles=cycles, barcodes=barcodes, mode=args.format)
def run_receiver(args):
tpu_resolver = global_setup(args)
if args.tpu is None:
n_cpus = config_cpus(args.n_cpus_rec)
config_gpus(args.gpus)
else:
n_cpus = args.n_cpus_rec
if args.custom:
args.command = None
receiver = Receiver(args.command, model=args.model, read_length=args.read_length, input_dir=args.rec_in_dir,
output_dir=args.rec_out_dir, n_cpus=n_cpus, threshold=args.threshold,
tpu_resolver=tpu_resolver)
cycles = [int(c) for c in args.cycle_list.split(',')]
barcodes = args.barcodes.split(',')
receiver.run(cycles=cycles, barcodes=barcodes, mode=args.format, discard_neg=args.discard_neg)
def run_refilter(args):
preds_input_dirs = args.preds_in_dir.split(',')
refilterer = Refilterer(read_length=args.read_length, input_fasta_dir=args.fasta_in_dir,
input_npy_dirs=preds_input_dirs, output_dir=args.ref_out_dir,
threshold=args.threshold)
cycles = [int(c) for c in args.cycle_list.split(',')]
barcodes = args.barcodes.split(',')
refilterer.run(cycles=cycles, barcodes=barcodes, discard_neg=args.discard_neg)
def run_local(args):
pr = Process(target=run_receiver, args=(args,))
pr.start()
ps = Process(target=run_sender, args=(args,))
ps.start()
pr.join()
ps.join()
def add_base_parser(bparser):
bparser.add_argument('-l', '--read-length', dest='read_length', type=int, required=True,
help='Expected read length')
bparser.add_argument('-s', '--seq-cycles', dest='cycle_list', required=True,
help='Comma-separated list of sequencing cycles to analyze.')
bparser.add_argument('-f', '--format', default="bam",
help='Format of temp files. bam or fasta.')
bparser.add_argument('-B', '--barcodes', default="undetermined",
help='Comma-separated list of barcodes of samples to analyze. Default: "undetermined"')
return bparser
def add_receiver_parser(rparser):
tparser = add_tester_parser(rparser)
tparser.add_argument('-t', '--threshold', dest='threshold', type=float, default=0.5,
help='Classification threshold.')
tparser.add_argument('-I', '--receiver-input', dest='rec_in_dir', required=True, help="Receiver input directory.")
tparser.add_argument('-O', '--receiver-output', dest='rec_out_dir', required=True,
help="Receiver output directory.")
tparser.add_argument('-d', '--discard-neg', dest='discard_neg', action='store_true',
help="Don't save predictions for nonpathogenic reads.")
return tparser
def add_tester_parser(tparser):
command_group = tparser.add_mutually_exclusive_group()
command_group.add_argument('-c', '--command', default='deepac', help='DeePaC command to use '
'(switches builtin models).')
command_group.add_argument('-C', '--custom', action='store_true', help='Use a custom model.')
tparser.add_argument('-m', '--model', default='rapid', help='Model to use. "rapid", "sensitive" '
'or custom .h5 file.')
tparser.add_argument('-N', '--n-cpus-rec', dest='n_cpus_rec', type=int,
help='Number of cores used by the receiver. Default: all')
tparser.add_argument('-g', '--gpus', dest="gpus", nargs='+', type=int,
help="GPU devices to use (comma-separated). Default: all")
return tparser
def add_sender_parser(sparser):
sparser.add_argument('-i', '--sender-input', dest='in_dir', required=True, help='Sender input directory.')
sparser.add_argument('-o', '--sender-output', dest='send_out_dir', required=True, help='Sender output directory.')
sparser.add_argument('-n', '--n-cpus-send', dest='n_cpus_send', type=int,
help='Number of cores used by the sender. Default: all.')
mapped_group = sparser.add_mutually_exclusive_group()
mapped_group.add_argument('-A', '--all', action='store_true', help="Analyze all reads (default: unmapped only).")
mapped_group.add_argument('-M', '--mapped', action='store_true', help="Analyze only MAPPED reads "
"(default: unmapped only).")
sparser.add_argument('-r', '--remote', help='Remote host and path (with username).')
sparser.add_argument('-k', '--key', help='SSH key.')
sparser.add_argument('-p', '--port', default=22, help='Port for SFTP connection.')
return sparser
def add_refilter_parser(rparser):
rparser.add_argument('-t', '--threshold', dest='threshold', type=float, default=0.5,
help='Classification threshold.')
    rparser.add_argument('-i', '--fasta-input', dest='fasta_in_dir', required=True, help="Fasta input directory.")
rparser.add_argument('-I', '--preds-input', dest='preds_in_dir', required=True,
help="Comma-separated list of receiver output directories.")
rparser.add_argument('-O', '--refilter-output', dest='ref_out_dir', required=True,
help="Refilter output directory.")
rparser.add_argument('-d', '--discard-neg', dest='discard_neg', action='store_true',
help="Don't save predictions for nonpathogenic reads.")
return rparser
def parse():
"""Parse DeePaC-live CLI arguments."""
parser = argparse.ArgumentParser(prog='deepac-live', description="Running DeePaC in real time.")
parser = add_global_parser(parser)
subparsers = parser.add_subparsers(help='DeePaC-live subcommands. See command --help for details.',
dest='subparser')
parser_sender = subparsers.add_parser('sender', help='Prepare and send data.')
parser_sender = add_base_parser(parser_sender)
parser_sender = add_sender_parser(parser_sender)
parser_sender.set_defaults(func=run_sender)
parser_receiver = subparsers.add_parser('receiver', help='Receive and analyze data.')
parser_receiver = add_base_parser(parser_receiver)
parser_receiver = add_receiver_parser(parser_receiver)
parser_receiver.set_defaults(func=run_receiver)
parser_refilter = subparsers.add_parser('refilter', help='Refilter data with ensembles or alternative thresholds.')
parser_refilter = add_base_parser(parser_refilter)
parser_refilter = add_refilter_parser(parser_refilter)
parser_refilter.set_defaults(func=run_refilter)
parser_local = subparsers.add_parser('local', help='Process data locally.')
parser_local = add_base_parser(parser_local)
parser_local = add_receiver_parser(parser_local)
parser_local = add_sender_parser(parser_local)
parser_local.set_defaults(func=run_local)
parser_test = subparsers.add_parser('test', help='Test locally.')
parser_test = add_tester_parser(parser_test)
parser_test.add_argument('-k', '--keep', help="Don't delete previous test output.",
default=False, action="store_true")
parser_test.add_argument('-s', '--scale', help="Generate s*1024 reads for testing (Default: s=1).",
default=1, type=int)
parser_test.set_defaults(func=run_tester)
args = parser.parse_args()
if args.version:
print(__version__)
elif hasattr(args, 'func'):
args.func(args)
else:
print(__version__)
parser.print_help()
if __name__ == "__main__":
main()
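# Illustrative invocations only (paths, hosts and cycle lists below are placeholders,
# not part of the package); they follow the subcommands and flags defined above:
#   deepac-live sender -l 100 -s 25,50,75,100 -i /path/to/run -o /tmp/deepac-send -r user@gpu-host:/remote/in
#   deepac-live receiver -l 100 -s 25,50,75,100 -I /remote/in -O /path/to/results -m rapid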
|
controller.py
|
import argparse
import os
import time
import re
from multiprocessing import cpu_count, Pool
from multiprocessing.pool import ThreadPool
from threading import Thread, Lock, Event
import socket
from io import BytesIO
import math
import ast
import traceback
import chainer
import chainer.functions as F
try:
import cupy as cp
from chainer.backends import cuda
except Exception:
    pass
import numpy as np
import gym
from scipy.misc import imresize
import imageio
from lib.utils import log, mkdir, pre_process_image_tensor, post_process_image_tensor
try:
from lib.env_wrappers import ViZDoomWrapper
except Exception:
    pass
from lib.constants import DOOM_GAMES
from model import MDN_RNN
from vision import CVAE
from lib.data import ModelDataset
ID = "controller"
CLUSTER_WORKERS = ['machine01','machine02','machine03','machine04','machine05','machine06',
'machine07','machine08','machine09','machine10','machine11','machine12']
CLUSTER_DISPATCHER = 'machine01'
CLUSTER_DISPATCHER_PORT = 9955
CLUSTER_WORKER_PORT = 9956
cluster_cumulative_rewards = {}
lock = Lock()
initial_z_t = None
def action(args, W_c, b_c, z_t, h_t, c_t, gpu):
if args.weights_type == 1:
input = F.concat((z_t, h_t), axis=0).data
action = F.tanh(W_c.dot(input) + b_c).data
elif args.weights_type == 2:
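        # weights_type 2: a single weight vector yields one tanh scalar in (-1, 1); the
        # interval is split into (args.action_dim + 1) equal bins, the matching bin becomes
        # a one-hot action, and the middle "no action" bin is dropped again below.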
input = F.concat((z_t, h_t, c_t), axis=0).data
dot = W_c.dot(input)
if gpu is not None:
dot = cp.asarray(dot)
else:
dot = np.asarray(dot)
output = F.tanh(dot).data
if output == 1.:
output = 0.999
action_dim = args.action_dim + 1
action_range = 2 / action_dim
action = [0. for i in range(action_dim)]
start = -1.
for i in range(action_dim):
if start <= output and output <= (start + action_range):
action[i] = 1.
break
start += action_range
mid = action_dim // 2 # reserve action[mid] for no action
action = action[0:mid] + action[mid + 1:action_dim]
if gpu is not None:
action = cp.asarray(action).astype(cp.float32)
else:
action = np.asarray(action).astype(np.float32)
return action
def transform_to_weights(args, parameters):
if args.weights_type == 1:
W_c = parameters[0:args.action_dim * (args.z_dim + args.hidden_dim)].reshape(args.action_dim,
args.z_dim + args.hidden_dim)
b_c = parameters[args.action_dim * (args.z_dim + args.hidden_dim):]
elif args.weights_type == 2:
W_c = parameters
b_c = None
return W_c, b_c
def rollout(rollout_arg_tuple):
try:
global initial_z_t
generation, mutation_idx, trial, args, vision, model, gpu, W_c, b_c, max_timesteps, with_frames = rollout_arg_tuple
# The same starting seed gets passed in multiprocessing, need to reset it for each process:
np.random.seed()
if not with_frames:
log(ID, ">>> Starting generation #" + str(generation) + ", mutation #" + str(
mutation_idx + 1) + ", trial #" + str(trial + 1))
else:
frames_array = []
start_time = time.time()
model.reset_state()
if args.in_dream:
z_t, _, _, _ = initial_z_t[np.random.randint(len(initial_z_t))]
z_t = z_t[0]
if gpu is not None:
z_t = cuda.to_gpu(z_t)
if with_frames:
observation = vision.decode(z_t).data
if gpu is not None:
observation = cp.asnumpy(observation)
observation = post_process_image_tensor(observation)[0]
else:
# free up precious GPU memory:
if gpu is not None:
vision.to_cpu()
vision = None
if args.initial_z_noise > 0.:
if gpu is not None:
z_t += cp.random.normal(0., args.initial_z_noise, z_t.shape).astype(cp.float32)
else:
z_t += np.random.normal(0., args.initial_z_noise, z_t.shape).astype(np.float32)
else:
if args.game in DOOM_GAMES:
env = ViZDoomWrapper(args.game)
else:
env = gym.make(args.game)
observation = env.reset()
if with_frames:
frames_array.append(observation)
if gpu is not None:
h_t = cp.zeros(args.hidden_dim).astype(cp.float32)
c_t = cp.zeros(args.hidden_dim).astype(cp.float32)
else:
h_t = np.zeros(args.hidden_dim).astype(np.float32)
c_t = np.zeros(args.hidden_dim).astype(np.float32)
done = False
cumulative_reward = 0
t = 0
while not done:
if not args.in_dream:
observation = imresize(observation, (args.frame_resize, args.frame_resize))
observation = pre_process_image_tensor(np.expand_dims(observation, 0))
if gpu is not None:
observation = cuda.to_gpu(observation)
z_t = vision.encode(observation, return_z=True).data[0]
a_t = action(args, W_c, b_c, z_t, h_t, c_t, gpu)
if args.in_dream:
z_t, done = model(z_t, a_t, temperature=args.temperature)
done = done.data[0]
if with_frames:
observation = post_process_image_tensor(vision.decode(z_t).data)[0]
reward = 1
if done >= args.done_threshold:
done = True
else:
done = False
else:
observation, reward, done, _ = env.step(a_t if gpu is None else cp.asnumpy(a_t))
model(z_t, a_t, temperature=args.temperature)
if with_frames:
frames_array.append(observation)
cumulative_reward += reward
h_t = model.get_h().data[0]
c_t = model.get_c().data[0]
t += 1
if max_timesteps is not None and t == max_timesteps:
break
elif args.in_dream and t == args.dream_max_len:
log(ID,
">>> generation #{}, mutation #{}, trial #{}: maximum length of {} timesteps reached in dream!"
.format(generation, str(mutation_idx + 1), str(trial + 1), t))
break
if not args.in_dream:
env.close()
if not with_frames:
log(ID,
">>> Finished generation #{}, mutation #{}, trial #{} in {} timesteps in {:.2f}s with cumulative reward {:.2f}"
.format(generation, str(mutation_idx + 1), str(trial + 1), t, (time.time() - start_time),
cumulative_reward))
return cumulative_reward
else:
frames_array = np.asarray(frames_array)
if args.game in DOOM_GAMES and not args.in_dream:
frames_array = post_process_image_tensor(frames_array)
return cumulative_reward, np.asarray(frames_array)
except Exception:
print(traceback.format_exc())
return 0.
def rollout_worker(worker_arg_tuple):
generation, mutation_idx, args, vision, model, mutation, max_timesteps, in_parallel = worker_arg_tuple
W_c, b_c = transform_to_weights(args, mutation)
log(ID, ">> Starting generation #" + str(generation) + ", mutation #" + str(mutation_idx + 1))
start_time = time.time()
rollout_arg_tuples = []
cumulative_rewards = []
for trial in range(args.trials):
this_vision = vision.copy()
this_model = model.copy()
gpu = None
if isinstance(args.gpus, (list,)):
gpu = args.gpus[mutation_idx % len(args.gpus)]
elif args.gpu >= 0:
gpu = args.gpu
if gpu is not None:
# log(ID,"Assigning GPU "+str(gpu))
cp.cuda.Device(gpu).use()
this_vision.to_gpu()
this_model.to_gpu()
W_c = cuda.to_gpu(W_c)
if b_c is not None:
b_c = cuda.to_gpu(b_c)
if in_parallel:
rollout_arg_tuples.append(
(generation, mutation_idx, trial, args, this_vision, this_model, gpu, W_c, b_c, max_timesteps, False))
else:
cumulative_reward = rollout(
(generation, mutation_idx, trial, args, this_vision, this_model, gpu, W_c, b_c, max_timesteps, False))
cumulative_rewards.append(cumulative_reward)
if in_parallel:
pool = Pool(args.trials)
cumulative_rewards = pool.map(rollout, rollout_arg_tuples)
pool.close()
pool.join()
avg_cumulative_reward = np.mean(cumulative_rewards)
log(ID, ">> Finished generation #{}, mutation #{}, in {:.2f}s with averge cumulative reward {:.2f} over {} trials"
.format(generation, (mutation_idx + 1), (time.time() - start_time), avg_cumulative_reward, args.trials))
return avg_cumulative_reward
class WorkerServer(object):
def __init__(self, port, args, vision, model):
self.args = args
self.vision = vision
self.model = model
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(('', port))
self.listen()
def listen(self):
self.sock.listen(10)
while True:
client, address = self.sock.accept()
client.settimeout(10)
Thread(target=self.listenToClient, args=(client, address)).start()
def listenToClient(self, client, address):
data = b''
while True:
input = client.recv(1024)
data += input
if input.endswith(b"\r\n"):
data = data.strip()
break
if not input: break
npz = np.load(BytesIO(data))
chunked_mutations = npz['chunked_mutations']
indices = npz['indices']
generation = npz['generation']
max_timesteps = npz['max_timesteps']
npz.close()
client.send(b"OK")
client.close()
log(ID, "> Received " + str(len(chunked_mutations)) + " mutations from dispatcher")
length = len(chunked_mutations)
cores = cpu_count()
if cores < self.args.trials:
splits = length
else:
splits = math.ceil((length * self.args.trials) / cores)
chunked_mutations = np.array_split(chunked_mutations, splits)
indices = np.array_split(indices, splits)
cumulative_rewards = {}
for i, this_chunked_mutations in enumerate(chunked_mutations):
this_indices = indices[i]
worker_arg_tuples = []
for i, mutation in enumerate(this_chunked_mutations):
worker_arg_tuples.append(
(generation, this_indices[i], self.args, self.vision, self.model, mutation, max_timesteps, True))
pool = ThreadPool(len(this_chunked_mutations))
this_cumulative_rewards = pool.map(rollout_worker, worker_arg_tuples)
for i, index in enumerate(this_indices):
cumulative_rewards[index] = this_cumulative_rewards[i]
log(ID, "> Sending results back to dispatcher: " + str(cumulative_rewards))
succeeded = False
for retries in range(3):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(10)
sock.connect((CLUSTER_DISPATCHER, CLUSTER_DISPATCHER_PORT))
sock.sendall(str(cumulative_rewards).encode())
sock.sendall(b"\r\n")
data = sock.recv(1024).decode("utf-8")
sock.close()
if data == "OK":
succeeded = True
break
except Exception as e:
log(ID, e)
log(ID, "Unable to send results back to dispatcher. Retrying after sleeping for 30s")
time.sleep(30)
if not succeeded:
log(ID, "Unable to send results back to dispatcher!")
class DispatcherServer(object):
def __init__(self, port, args, cluster_event):
self.args = args
self.cluster_event = cluster_event
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(('', port))
self.listen()
def listen(self):
try:
count = 10 * len(CLUSTER_WORKERS)
self.sock.listen(count)
while True:
client, address = self.sock.accept()
client.settimeout(10)
Thread(target=self.listenToClient, args=(client, address)).start()
except Exception as e:
print(e)
def listenToClient(self, client, address):
global cluster_cumulative_rewards
data = b''
while True:
input = client.recv(1024)
data += input
if input.endswith(b"\r\n"):
data = data.strip()
break
if not input: break
cumulative_rewards = ast.literal_eval(data.decode("utf-8"))
client.send(b"OK")
client.close()
log(ID, "> DispatcherServer received results: " + str(cumulative_rewards))
with lock:
for index in cumulative_rewards:
cluster_cumulative_rewards[index] = cumulative_rewards[index]
if len(cluster_cumulative_rewards) == self.args.lambda_:
log(ID, "> All results received. Waking up CMA-ES loop")
self.cluster_event.set()
def main():
parser = argparse.ArgumentParser(description='World Models ' + ID)
parser.add_argument('--data_dir', '-d', default="/data/wm", help='The base data/output directory')
parser.add_argument('--game', default='CarRacing-v0',
help='Game to use') # https://gym.openai.com/envs/CarRacing-v0/
parser.add_argument('--experiment_name', default='experiment_1', help='To isolate its files from others')
parser.add_argument('--model', '-m', default='', help='Initialize the model from given file')
    parser.add_argument('--no_resume', action='store_true', help="Don't auto resume from the latest snapshot")
parser.add_argument('--resume_from', '-r', default='', help='Resume the optimization from a specific snapshot')
parser.add_argument('--hidden_dim', default=256, type=int, help='LSTM hidden units')
parser.add_argument('--z_dim', '-z', default=32, type=int, help='dimension of encoded vector')
parser.add_argument('--mixtures', default=5, type=int, help='number of gaussian mixtures for MDN')
parser.add_argument('--lambda_', "-l", default=7, type=int, help='Population size for CMA-ES')
parser.add_argument('--mu', default=0.5, type=float, help='Keep this percent of fittest mutations for CMA-ES')
parser.add_argument('--trials', default=3, type=int,
help='The number of trials per mutation for CMA-ES, to average fitness score over')
parser.add_argument('--target_cumulative_reward', default=900, type=int, help='Target cumulative reward')
parser.add_argument('--frame_resize', default=64, type=int, help='h x w resize of each observation frame')
parser.add_argument('--temperature', '-t', default=1.0, type=float, help='Temperature (tau) for MDN-RNN (model)')
parser.add_argument('--snapshot_interval', '-s', default=5, type=int,
help='snapshot every x generations of evolution')
parser.add_argument('--cluster_mode', action='store_true',
help='If in a distributed cpu cluster. Set CLUSTER_ variables accordingly.')
parser.add_argument('--test', action='store_true',
help='Generate a rollout gif only (must have access to saved snapshot or model)')
parser.add_argument('--gpu', '-g', default=-1, type=int, help='GPU ID (negative value indicates CPU)')
parser.add_argument('--gpus', default="", help='A list of gpus to use, i.e. "0,1,2,3"')
parser.add_argument('--curriculum', default="", help='initial,step e.g. 50,5 starts at 50 steps and adds 5 steps')
parser.add_argument('--predict_done', action='store_true', help='Whether MDN-RNN should also predict done state')
parser.add_argument('--done_threshold', default=0.5, type=float, help='What done probability really means done')
parser.add_argument('--weights_type', default=1, type=int,
help="1=action_dim*(z_dim+hidden_dim), 2=z_dim+2*hidden_dim")
parser.add_argument('--in_dream', action='store_true', help='Whether to train in dream, or real environment')
parser.add_argument('--dream_max_len', default=2100, type=int, help="Maximum timesteps for dream to avoid runaway")
parser.add_argument('--cores', default=0, type=int,
help='# CPU cores for main CMA-ES loop in non-cluster_mode. 0=all cores')
parser.add_argument('--initial_z_size', default=10000, type=int,
help="How many real initial frames to load for dream training")
parser.add_argument('--initial_z_noise', default=0., type=float,
help="Gaussian noise std for initial z for dream training")
parser.add_argument('--cluster_max_wait', default=5400, type=int,
help="Move on after this many seconds of no response from worker(s)")
args = parser.parse_args()
log(ID, "args =\n " + str(vars(args)).replace(",", ",\n "))
hostname = socket.gethostname().split(".")[0]
if args.gpus:
args.gpus = [int(item) for item in args.gpus.split(',')]
if args.curriculum:
curriculum_start = int(args.curriculum.split(',')[0])
curriculum_step = int(args.curriculum.split(',')[1])
output_dir = os.path.join(args.data_dir, args.game, args.experiment_name, ID)
mkdir(output_dir)
model_dir = os.path.join(args.data_dir, args.game, args.experiment_name, 'model')
vision_dir = os.path.join(args.data_dir, args.game, args.experiment_name, 'vision')
random_rollouts_dir = os.path.join(args.data_dir, args.game, args.experiment_name, 'random_rollouts')
model = MDN_RNN(args.hidden_dim, args.z_dim, args.mixtures, args.predict_done)
chainer.serializers.load_npz(os.path.join(model_dir, "model.model"), model)
vision = CVAE(args.z_dim)
chainer.serializers.load_npz(os.path.join(vision_dir, "vision.model"), vision)
global initial_z_t
if args.in_dream:
log(ID,"Loading random rollouts for initial frames for dream training")
initial_z_t = ModelDataset(dir=random_rollouts_dir,
load_batch_size=args.initial_z_size,
verbose=False)
if args.game in DOOM_GAMES:
env = ViZDoomWrapper(args.game)
else:
env = gym.make(args.game)
action_dim = len(env.action_space.low)
args.action_dim = action_dim
env = None
auto_resume_file = None
if not args.cluster_mode or (args.cluster_mode and hostname == CLUSTER_DISPATCHER):
max_iter = 0
files = os.listdir(output_dir)
for file in files:
if re.match(r'^snapshot_iter_', file):
iter = int(re.search(r'\d+', file).group())
if (iter > max_iter):
max_iter = iter
if max_iter > 0:
auto_resume_file = os.path.join(output_dir, "snapshot_iter_{}.npz".format(max_iter))
resume = None
if args.model:
if args.model == 'default':
args.model = os.path.join(output_dir, ID + ".model")
log(ID, "Loading saved model from: " + args.model)
resume = args.model
elif args.resume_from:
log(ID, "Resuming manually from snapshot: " + args.resume_from)
resume = args.resume_from
elif not args.no_resume and auto_resume_file is not None:
log(ID, "Auto resuming from last snapshot: " + auto_resume_file)
resume = auto_resume_file
if resume is not None:
npz = np.load(resume)
pc = npz['pc']
ps = npz['ps']
B = npz['B']
D = npz['D']
C = npz['C']
invsqrtC = npz['invsqrtC']
eigeneval = npz['eigeneval']
xmean = npz['xmean']
sigma = npz['sigma']
counteval = npz['counteval']
generation = npz['generation'] + 1
cumulative_rewards_over_generations = npz['cumulative_rewards_over_generations']
if args.curriculum:
if 'max_timesteps' in npz and npz['max_timesteps'] is not None:
max_timesteps = npz['max_timesteps']
else:
max_timesteps = curriculum_start
last_highest_avg_cumulative_reward = max(cumulative_rewards_over_generations.mean(axis=1))
else:
max_timesteps = None
npz.close()
log(ID, "Starting")
if args.cluster_mode and hostname != CLUSTER_DISPATCHER and not args.test:
log(ID, "Starting cluster worker")
WorkerServer(CLUSTER_WORKER_PORT, args, vision, model)
elif not args.test:
if args.cluster_mode:
global cluster_cumulative_rewards
cluster_event = Event()
log(ID, "Starting cluster dispatcher")
dispatcher_thread = Thread(target=DispatcherServer, args=(CLUSTER_DISPATCHER_PORT, args, cluster_event))
dispatcher_thread.start()
# Make the dispatcher a worker too
log(ID, "Starting cluster worker")
worker_thread = Thread(target=WorkerServer, args=(CLUSTER_WORKER_PORT, args, vision, model))
worker_thread.start()
if args.weights_type == 1:
N = action_dim * (args.z_dim + args.hidden_dim) + action_dim
elif args.weights_type == 2:
N = args.z_dim + 2 * args.hidden_dim
stopeval = 1e3 * N ** 2
stopfitness = args.target_cumulative_reward
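        # Standard CMA-ES strategy parameters (cf. Hansen's tutorial): `weights` are the
        # recombination weights over the mu fittest mutations, `cc`/`cs` set the time
        # constants of the covariance and step-size evolution paths, `c1`/`cmu` are the
        # rank-one and rank-mu learning rates, `damps` damps sigma updates, and `chiN`
        # approximates E||N(0, I)|| for the step-size control rule.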
lambda_ = args.lambda_ # 4+int(3*np.log(N))
mu = int(lambda_ * args.mu) # //2
weights = np.log(mu + 1 / 2) - np.log(np.asarray(range(1, mu + 1))).astype(np.float32)
weights = weights / np.sum(weights)
mueff = (np.sum(weights) ** 2) / np.sum(weights ** 2)
cc = (4 + mueff / N) / (N + 4 + 2 * mueff / N)
cs = (mueff + 2) / (N + mueff + 5)
c1 = 2 / ((N + 1.3) ** 2 + mueff)
cmu = min(1 - c1, 2 * (mueff - 2 + 1 / mueff) / ((N + 2) ** 2 + mueff))
damps = 1 + 2 * max(0, ((mueff - 1) / (N + 1)) ** 0.5 - 1) + cs
chiN = N ** 0.5 * (1 - 1 / (4 * N) + 1 / (21 * N ** 2))
if resume is None:
pc = np.zeros(N).astype(np.float32)
ps = np.zeros(N).astype(np.float32)
B = np.eye(N, N).astype(np.float32)
D = np.ones(N).astype(np.float32)
C = B * np.diag(D ** 2) * B.T
invsqrtC = B * np.diag(D ** -1) * B.T
eigeneval = 0
xmean = np.random.randn(N).astype(np.float32)
sigma = 0.3
counteval = 0
generation = 1
cumulative_rewards_over_generations = None
if args.curriculum:
max_timesteps = curriculum_start
last_highest_avg_cumulative_reward = None
else:
max_timesteps = None
solution_found = False
while counteval < stopeval:
log(ID, "> Starting evolution generation #" + str(generation))
arfitness = np.zeros(lambda_).astype(np.float32)
arx = np.zeros((lambda_, N)).astype(np.float32)
for k in range(lambda_):
arx[k] = xmean + sigma * B.dot(D * np.random.randn(N).astype(np.float32))
counteval += 1
if not args.cluster_mode:
if args.cores == 0:
cores = cpu_count()
else:
cores = args.cores
pool = Pool(cores)
worker_arg_tuples = []
for k in range(lambda_):
worker_arg_tuples.append((generation, k, args, vision, model, arx[k], max_timesteps, False))
cumulative_rewards = pool.map(rollout_worker, worker_arg_tuples)
pool.close()
pool.join()
for k, cumulative_reward in enumerate(cumulative_rewards):
arfitness[k] = cumulative_reward
else:
arx_splits = np.array_split(arx, len(CLUSTER_WORKERS))
indices = np.array_split(np.arange(lambda_), len(CLUSTER_WORKERS))
cluster_cumulative_rewards = {}
for i, chunked_mutations in enumerate(arx_splits):
log(ID, "> Dispatching " + str(len(chunked_mutations)) + " mutations to " + CLUSTER_WORKERS[i])
compressed_array = BytesIO()
np.savez_compressed(compressed_array,
chunked_mutations=chunked_mutations,
indices=indices[i],
generation=generation,
max_timesteps=max_timesteps)
compressed_array.seek(0)
out = compressed_array.read()
succeeded = False
for retries in range(3):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(10)
sock.connect((CLUSTER_WORKERS[i], CLUSTER_WORKER_PORT))
sock.sendall(out)
sock.sendall(b"\r\n")
data = sock.recv(1024).decode("utf-8")
sock.close()
if data == "OK":
succeeded = True
break
except Exception as e:
log(ID, e)
log(ID, "Unable to dispatch mutations to " + CLUSTER_WORKERS[i] + ". Retrying after sleeping for 30s")
time.sleep(30)
if not succeeded:
log(ID, "Unable to dispatch mutations to " + CLUSTER_WORKERS[i] + "!")
log(ID, "> Dispatched all mutations to cluster. Waiting for results.")
cluster_event.clear()
cluster_event.wait(args.cluster_max_wait) # Cut our losses if some results never get returned
for k in range(lambda_):
if k in cluster_cumulative_rewards:
arfitness[k] = cluster_cumulative_rewards[k]
else:
arfitness[k] = 0.
if cumulative_rewards_over_generations is None:
cumulative_rewards_over_generations = np.expand_dims(arfitness, 0)
else:
cumulative_rewards_over_generations = np.concatenate(
(cumulative_rewards_over_generations, np.expand_dims(arfitness, 0)),
axis=0)
arindex = np.argsort(-arfitness)
# arfitness = arfitness[arindex]
xold = xmean
xmean = weights.dot(arx[arindex[0:mu]])
avg_cumulative_reward = np.mean(arfitness)
log(ID, "> Finished evolution generation #{}, average cumulative reward = {:.2f}"
.format(generation, avg_cumulative_reward))
if generation > 1 and args.curriculum:
if last_highest_avg_cumulative_reward is None:
last_highest_avg_cumulative_reward = np.mean(cumulative_rewards_over_generations[-2])
log(ID, "> Highest average cumulative reward from previous generations = {:.2f}".format(
last_highest_avg_cumulative_reward))
                if avg_cumulative_reward > (last_highest_avg_cumulative_reward * 0.99):  # let it pass if within 1% of the old average
max_timesteps += curriculum_step
log(ID, "> Average cumulative reward increased. Increasing max timesteps to " + str(max_timesteps))
last_highest_avg_cumulative_reward = None
else:
log(ID,
"> Average cumulative reward did not increase. Keeping max timesteps at " + str(max_timesteps))
# Average over the whole population, but breaking here means we use only the
# top x% of the mutations as the calculation for the final mean
if avg_cumulative_reward >= stopfitness:
solution_found = True
break
ps = (1 - cs) * ps + np.sqrt(cs * (2 - cs) * mueff) * invsqrtC.dot((xmean - xold) / sigma)
hsig = np.linalg.norm(ps) / np.sqrt(1 - (1 - cs) ** (2 * counteval / lambda_)) / chiN < 1.4 + 2 / (N + 1)
pc = (1 - cc) * pc + hsig * np.sqrt(cc * (2 - cc) * mueff) * ((xmean - xold) / sigma)
artmp = (1 / sigma) * (arx[arindex[0:mu]] - xold)
C = (1 - c1 - cmu) * C + c1 * (pc.dot(pc.T) + (1 - hsig) * cc * (2 - cc) * C) + cmu * artmp.T.dot(
np.diag(weights)).dot(artmp)
sigma = sigma * np.exp((cs / damps) * (np.linalg.norm(ps) / chiN - 1))
if counteval - eigeneval > lambda_ / (c1 + cmu) / N / 10:
eigeneval = counteval
C = np.triu(C) + np.triu(C, 1).T
D, B = np.linalg.eig(C)
D = np.sqrt(D)
invsqrtC = B.dot(np.diag(D ** -1).dot(B.T))
if generation % args.snapshot_interval == 0:
snapshot_file = os.path.join(output_dir, "snapshot_iter_" + str(generation) + ".npz")
log(ID, "> Saving snapshot to " + str(snapshot_file))
np.savez_compressed(snapshot_file,
pc=pc,
ps=ps,
B=B,
D=D,
C=C,
invsqrtC=invsqrtC,
eigeneval=eigeneval,
xmean=xmean,
sigma=sigma,
counteval=counteval,
generation=generation,
cumulative_rewards_over_generations=cumulative_rewards_over_generations,
max_timesteps=max_timesteps)
generation += 1
if solution_found:
log(ID, "Evolution Complete!")
log(ID, "Solution found at generation #" + str(generation) + ", with average cumulative reward = " +
str(avg_cumulative_reward) + " over " + str(args.lambda_ * args.trials) + " rollouts")
else:
log(ID, "Solution not found")
controller_model_file = os.path.join(output_dir, ID + ".model")
if os.path.exists(controller_model_file):
os.remove(controller_model_file)
log(ID, "Saving model to: " + controller_model_file)
np.savez_compressed(controller_model_file,
pc=pc,
ps=ps,
B=B,
D=D,
C=C,
invsqrtC=invsqrtC,
eigeneval=eigeneval,
xmean=xmean,
sigma=sigma,
counteval=counteval,
generation=generation,
cumulative_rewards_over_generations=cumulative_rewards_over_generations,
max_timesteps=max_timesteps)
os.rename(os.path.join(output_dir, ID + ".model.npz"), controller_model_file)
# xmean = np.random.randn(action_dim * (args.z_dim + args.hidden_dim) + action_dim).astype(np.float32)
# xmean = np.random.randn(args.z_dim + 2 * args.hidden_dim).astype(np.float32)
parameters = xmean
if args.in_dream:
log(ID, "Generating a rollout gif with the controller model in a dream")
W_c, b_c = transform_to_weights(args, parameters)
cumulative_reward, frames = rollout(
(0, 0, 0, args, vision.to_cpu(), model.to_cpu(), None, W_c, b_c, None, True))
imageio.mimsave(os.path.join(output_dir, 'dream_rollout.gif'), frames, fps=20)
log(ID, "Final cumulative reward in dream: " + str(cumulative_reward))
args.in_dream = False
log(ID, "Generating a rollout gif with the controller model in the environment")
W_c, b_c = transform_to_weights(args, parameters)
cumulative_reward, frames = rollout((0, 0, 0, args, vision.to_cpu(), model.to_cpu(), None, W_c, b_c, None, True))
imageio.mimsave(os.path.join(output_dir, 'env_rollout.gif'), frames, fps=20)
log(ID, "Final cumulative reward in environment: " + str(cumulative_reward))
log(ID, "Done")
if __name__ == '__main__':
main()
|
main.py
|
from threading import Thread
from dl import Downloader
from utils.query import download_state, print_download_state
if __name__ == '__main__':
while True:
pass
# download = Downloader(url, args.output if args.output else url.split('/')[-1])
# Thread(target=download.download).start()
# thread = Thread(target=print_download_state, args=(download,))
# thread.start()
# thread.join()
|
Server.py
|
from random import randint
import sys, traceback, threading, socket
from RtpPacket import RtpPacket
# server state
STATE = {
'INIT': 0,
'OK': 1,
'PLAYING': 2,
}
# process video stream
class Streaming:
count = 1
def __init__(self, path):
self.path = path
try:
self.file = open(path, 'rb')
except:
raise IOError
self.currentFrame = 0
def getCurrentFrame(self):
return self.currentFrame
def getPath(self):
return self.path
    def getFile(self):
        return self.file
def getNextFrame(self):
self.count += 1
path = 'img/' + str(self.count) + '.png'
print(path)
try:
self.file = open(path, 'rb')
except:
raise IOError
data = self.file.read()
self.currentFrame += 1
return data
# process server
class Server:
clientInfo = {}
state = STATE['INIT']
def __init__(self, clientInfo):
self.clientInfo = clientInfo
    # handle multiple client connections
def processThreads(self):
threading.Thread(target=self.getRequest).start()
def getRequest(self):
sk = self.clientInfo['rtspSocket'][0]
while 1:
data = sk.recv(1024).decode()
if data:
print('Recv:', data)
self.processRequest(data)
def processRequest(self, data):
dataList = data.split('\n')
param = dataList[0]
seq = dataList[1]
addr = dataList[2]
paramList = param.split(' ')
seqList = seq.split(' ')
addrList = addr.split(' ')
kind = paramList[0]
path = paramList[1]
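        # Expected request layout (an assumption inferred from the indexing below), e.g.:
        #   SETUP img/1.png RTSP/1.0
        #   CSeq: 1
        #   Transport: RTP/UDP; client_port= 25000
        # so paramList gives the kind and path, seqList[1] the CSeq number, and
        # addrList[3] the client's RTP port.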
# state setup
if kind == 'SETUP':
# msg
print ('SETUP...')
# get videostream
try:
self.clientInfo['videoStream'] = Streaming(path)
# set state
self.state = STATE['OK']
except IOError:
print ("Error: 404 not found.")
# set session
self.clientInfo['session'] = randint(100000, 999999)
# response msg
response = 'RTSP/1.0 200 OK\nCSeq: ' + \
seqList[1] + \
'\nSession: ' + \
str(self.clientInfo['session'])
sk = self.clientInfo['rtspSocket'][0]
sk.send(response.encode())
self.clientInfo['rtpPort'] = addrList[3]
# state play
elif kind == 'PLAY':
if self.state == STATE['OK']:
# msg
print ('PLAY...')
# set state
self.state = STATE['PLAYING']
# create sk
self.clientInfo['rtpSocket'] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# response msg
response = 'RTSP/1.0 200 OK\nCSeq: ' + \
seqList[1] + \
'\nSession: ' + \
str(self.clientInfo['session'])
sk = self.clientInfo['rtspSocket'][0]
sk.send(response.encode())
# process thread
self.clientInfo['event'] = threading.Event()
self.clientInfo['worker'] = threading.Thread(target=self.sendPacket)
self.clientInfo['worker'].start()
# state pause
elif kind == 'PAUSE':
if self.state == STATE['PLAYING']:
# msg
print ('PAUSE...')
# set state
self.state = STATE['OK']
# process
self.clientInfo['event'].set()
# response msg
response = 'RTSP/1.0 200 OK\nCSeq: ' + \
seqList[1] + \
'\nSession: ' + \
str(self.clientInfo['session'])
sk = self.clientInfo['rtspSocket'][0]
sk.send(response.encode())
elif kind == 'TEARDOWN':
# msg
print ('TEARDOWN...')
# process
self.clientInfo['event'].set()
# response msg
response = 'RTSP/1.0 200 OK\nCSeq: ' + \
seqList[1] + \
'\nSession: ' + \
str(self.clientInfo['session'])
sk = self.clientInfo['rtspSocket'][0]
sk.send(response.encode())
# close sk
self.clientInfo['rtpSocket'].close()
else:
print ('Error: Wrong kind.')
def sendPacket(self):
while 1:
# set interval
self.clientInfo['event'].wait(0.5)
# break if pause or teardown
            if self.clientInfo['event'].is_set():
break
# get next frame
frame = self.clientInfo['videoStream'].getNextFrame()
if frame:
# get current frame
currentFrame = self.clientInfo['videoStream'].getCurrentFrame()
try:
# get addr
ip = self.clientInfo['rtspSocket'][1][0]
port = int(self.clientInfo['rtpPort'])
self.clientInfo['rtpSocket'].sendto(self.setPacket(frame, currentFrame), (ip, port))
except:
print('Error: Connecting failed.')
def setPacket(self, frame, currentFrame):
# set packet
rtpPacket = RtpPacket()
rtpPacket.encode(2, 0, 0, 0, currentFrame, 0, 26, 0, frame)
return rtpPacket.getPacket()
if __name__ == "__main__":
try:
port = int(sys.argv[1])
    except (IndexError, ValueError):
        print('Please add param [port]')
        sys.exit(1)
sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sk.bind(('', port))
sk.listen(5)
while 1:
clientInfo = {}
clientInfo['rtspSocket'] = sk.accept()
Server(clientInfo).processThreads()
|
app_test.py
|
from __future__ import unicode_literals
import os
import threading
import time
import pexpect
import serial
from tiny_test_fw import Utility
import ttfw_idf
class SerialThread(object):
def run(self, log_path, exit_event):
with serial.Serial('/dev/ttyUSB1', 115200) as ser, open(log_path, 'wb') as f:
while True:
f.write(ser.read(ser.in_waiting))
if exit_event.is_set():
break
time.sleep(1)
def __init__(self, log_path):
self.exit_event = threading.Event()
self.t = threading.Thread(target=self.run, args=(log_path, self.exit_event,))
self.t.start()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.exit_event.set()
self.t.join(60)
if self.t.is_alive():
Utility.console_log('The pyserial thread is still alive', 'O')
@ttfw_idf.idf_custom_test(env_tag="test_jtag_arm", group="test-apps")
def test_app_loadable_elf(env, extra_data):
rel_project_path = os.path.join('tools', 'test_apps', 'system', 'gdb_loadable_elf')
app_files = ['gdb_loadable_elf.elf', 'partition_table/partition-table.bin']
example = ttfw_idf.LoadableElfTestApp(rel_project_path, app_files, target="esp32")
idf_path = example.get_sdk_path()
proj_path = os.path.join(idf_path, rel_project_path)
elf_path = os.path.join(example.binary_path, 'gdb_loadable_elf.elf')
esp_log_path = os.path.join(proj_path, 'esp.log')
with SerialThread(esp_log_path):
openocd_log = os.path.join(proj_path, 'openocd.log')
gdb_log = os.path.join(proj_path, 'gdb.log')
gdb_args = '-x {} --directory={}'.format(os.path.join(proj_path, '.gdbinit.ci'),
os.path.join(proj_path, 'main'))
with ttfw_idf.OCDProcess(openocd_log), ttfw_idf.GDBProcess(gdb_log, elf_path, gdb_args) as gdb:
gdb.pexpect_proc.sendline('') # it is for "---Type <return> to continue, or q <return> to quit---"
i = gdb.pexpect_proc.expect_exact(['Thread 1 hit Temporary breakpoint 2, app_main ()',
'Load failed'])
if i == 0:
Utility.console_log('gdb is at breakpoint')
elif i == 1:
raise RuntimeError('Load has failed. Please examine the logs.')
else:
Utility.console_log('i = {}'.format(i))
Utility.console_log(str(gdb.pexpect_proc))
# This really should not happen. TIMEOUT and EOF failures are exceptions.
raise RuntimeError('An unknown error has occurred. Please examine the logs.')
gdb.pexpect_proc.expect_exact('(gdb)')
gdb.pexpect_proc.sendline('b esp_restart')
gdb.pexpect_proc.sendline('c')
gdb.pexpect_proc.expect_exact('Thread 1 hit Breakpoint 3, esp_restart ()')
if pexpect.run('grep "Restarting now." {}'.format(esp_log_path), withexitstatus=True)[1]:
raise RuntimeError('Expected output from ESP was not received')
if __name__ == '__main__':
test_app_loadable_elf()
|
rabbit.py
|
from __future__ import annotations
import logging
import os
import time
from contextlib import contextmanager
from dataclasses import dataclass
from ssl import SSLContext
from threading import Thread
from typing import Callable, Type, Iterator
import pika
from pika import PlainCredentials, BasicProperties
from pika.adapters.blocking_connection import BlockingChannel
from pika.connection import Parameters, SSLOptions
from cancer.message import Message, Topic
from cancer.port.publisher import Publisher
from cancer.port.subscriber import Subscriber, T
_LOG = logging.getLogger(__name__)
@dataclass
class RabbitConfig:
host: str
virtual_host: str
port: int
use_ssl: bool
exchange: str
user: str
password: str
@property
def parameters(self) -> Parameters:
params = Parameters()
params.host = self.host
params.port = self.port
params.virtual_host = self.virtual_host
params.ssl_options = SSLOptions(SSLContext()) if self.use_ssl else None
params.credentials = PlainCredentials(self.user, self.password)
params.heartbeat = 30
return params
@staticmethod
def _get_required(key: str, allow_empty: bool = False) -> str:
result = os.getenv(key)
if result or (allow_empty and result is not None):
return result
raise ValueError(f"Missing key: {key}")
@classmethod
def from_env(cls) -> RabbitConfig:
return cls(
host=cls._get_required("RABBITMQ_HOST"),
virtual_host=cls._get_required("RABBITMQ_VIRTUAL_HOST"),
port=int(cls._get_required("RABBITMQ_PORT")),
exchange=cls._get_required("RABBITMQ_EXCHANGE", allow_empty=True),
use_ssl=cls._get_required("RABBITMQ_USE_TLS") == "true",
user=cls._get_required("RABBITMQ_USER"),
password=cls._get_required("RABBITMQ_PASSWORD"),
)
class RabbitPublisher(Publisher):
def __init__(self, config: RabbitConfig):
self.config = config
@contextmanager
def _connect(self) -> Iterator[pika.BlockingConnection]:
connection = pika.BlockingConnection(self.config.parameters)
try:
yield connection
finally:
connection.close()
@contextmanager
def _channel(self):
with self._connect() as connection:
channel = connection.channel()
try:
yield channel
finally:
channel.close()
def publish(self, topic: Topic, message: Message):
with self._channel() as channel:
channel.queue_declare(
topic.value,
durable=True,
auto_delete=False,
arguments={"x-queue-type": "quorum"},
)
channel.basic_publish(
exchange=self.config.exchange,
routing_key=topic.value,
body=message.serialize(),
properties=BasicProperties(
content_type="application/json",
delivery_mode=pika.spec.PERSISTENT_DELIVERY_MODE,
),
)
_LOG.info("Published message to RabbitMQ queue %s", topic.value)
class _Heartbeat:
def __init__(self, connection):
self._connection = connection
self._is_running = False
def start(self):
if self._is_running:
raise ValueError("Already running")
self._is_running = True
Thread(target=self._run, daemon=True).start()
def _run(self):
while self._is_running:
self._connection.process_data_events()
time.sleep(10)
def stop(self):
self._is_running = False
class RabbitSubscriber(Subscriber):
def __init__(self, config: RabbitConfig):
self.config = config
def subscribe(
self,
topic: Topic,
message_type: Type[T],
handle: Callable[[T], Subscriber.Result],
):
def _callback(channel: BlockingChannel, method, _, message: bytes):
try:
deserialized = message_type.deserialize(message)
except ValueError as e:
_LOG.error("Could not deserialize message", exc_info=e)
channel.basic_reject(delivery_tag=method.delivery_tag, requeue=False)
return
heartbeat = _Heartbeat(channel.connection)
heartbeat.start()
try:
result = handle(deserialized)
except Exception as e:
_LOG.error("Unexpected exception", exc_info=e)
channel.basic_nack(method.delivery_tag, requeue=True)
return
heartbeat.stop()
if result == Subscriber.Result.Ack:
channel.basic_ack(delivery_tag=method.delivery_tag)
elif result == Subscriber.Result.Drop:
channel.basic_nack(delivery_tag=method.delivery_tag, requeue=False)
            else:
                # any outcome other than Ack/Drop is requeued for another delivery attempt
                channel.basic_nack(delivery_tag=method.delivery_tag, requeue=True)
connection = pika.BlockingConnection(self.config.parameters)
channel = connection.channel()
try:
channel.queue_declare(
topic.value,
durable=True,
auto_delete=False,
arguments={"x-queue-type": "quorum"},
)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(
queue=topic.value,
on_message_callback=_callback,
auto_ack=False,
)
channel.start_consuming()
finally:
if not channel.is_closed:
channel.close()
if not connection.is_closed:
connection.close()
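# Minimal wiring sketch (comments only; assumes the RABBITMQ_* variables read by
# RabbitConfig.from_env are set and that `topic`, `message`, `MessageType` and
# `handler` come from the surrounding application):
#   config = RabbitConfig.from_env()
#   RabbitPublisher(config).publish(topic, message)
#   RabbitSubscriber(config).subscribe(topic, MessageType, handler)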
|
helper.py
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import functools
import multiprocessing as mp
from collections import defaultdict
from typing import Callable
from weakref import WeakSet
import numpy as np
from megengine.autodiff.grad_manager import GradManager, get_backwarding_grad_manager
from megengine.device import get_default_device, get_device_count
from ..core._imperative_rt.core2 import apply
from ..core.ops.builtin import ParamPackConcat, ParamPackSplit
from ..functional.tensor import copy
from ..tensor import Tensor
from ..utils.future import Future
from .functional import all_reduce_sum, broadcast
from .group import WORLD, Group, group_barrier, is_distributed
def param_pack_split(inp: Tensor, offsets: list, shapes: list):
r"""
Returns split tensor to tensor list as offsets and shapes described,
only used for ``parampack``.
:param inp: input tensor.
:param offsets: offsets of outputs, length of `2 * n`,
        where n is the number of tensors you want to split,
format `[begin0, end0, begin1, end1]`.
:param shapes: tensor shapes of outputs.
    :return: split tensors.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
from megengine.distributed.helper import param_pack_split
a = tensor(np.ones((10,), np.int32))
b, c = param_pack_split(a, [0, 1, 1, 10], [(1,), (3, 3)])
print(b.numpy())
print(c.numpy())
Outputs:
.. testoutput::
[1]
[[1 1 1]
[1 1 1]
[1 1 1]]
"""
op = ParamPackSplit()
op.offsets = offsets
op.shapes = [s or (1,) for s in shapes]
outputs = apply(op, inp)
for s, x in zip(shapes, outputs):
if not s:
x._setscalar()
return outputs
def param_pack_concat(inps: list, offsets: Tensor, offsets_val: list):
r"""
Returns concated tensor, only used for ``parampack``.
:param inps: input tensors.
:param offsets: device value of offsets.
:param offsets_val: offsets of inputs, length of `2 * n`,
format `[begin0, end0, begin1, end1]`.
:return: concated tensor.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
from megengine.distributed.helper import param_pack_concat
a = tensor(np.ones((1,), np.int32))
b = tensor(np.ones((3, 3), np.int32))
offsets_val = [0, 1, 1, 10]
offsets = tensor(offsets_val, np.int32)
c = param_pack_concat([a, b], offsets, offsets_val)
print(c.numpy())
Outputs:
.. testoutput::
[1 1 1 1 1 1 1 1 1 1]
"""
op = ParamPackConcat()
op.offsets = offsets_val
return apply(op, *inps, offsets)[0]
def get_offsets(shapes):
offsets = []
offset = 0
for shape in shapes:
offsets.append(offset)
offset += int(np.prod(shape))
offsets.append(offset)
return offsets
def pack_allreduce_split(pack_list, shapes, group, reduce_method):
offsets_val = get_offsets(shapes)
offsets = Tensor(offsets_val)
packed_grads = param_pack_concat(pack_list, offsets, offsets_val)
packed_grads = all_reduce_sum(packed_grads, group, group.comp_node)
if reduce_method == "mean":
packed_grads /= group.size
grads = param_pack_split(packed_grads, offsets_val, shapes)
return grads
class TensorFuture(Future):
    def device(self):
        raise RuntimeError("Sorry, this tensor is not ready")
    def numpy(self):
        raise RuntimeError("Sorry, this tensor is not ready")
    def shape(self):
        raise RuntimeError("Sorry, this tensor is not ready")
    def dtype(self):
        raise RuntimeError("Sorry, this tensor is not ready")
def synchronized(func: Callable):
"""
Decorator. Decorated function will synchronize when finished.
Specifically, we use this to prevent data race during hub.load"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not is_distributed():
return func(*args, **kwargs)
ret = func(*args, **kwargs)
group_barrier()
return ret
return wrapper
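# Usage sketch (hypothetical function name): wrap any step that writes shared files so
# that every process reaches the barrier before anyone reads the result.
#   @synchronized
#   def fetch_pretrained(url):
#       ...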
def _get_device_count_worker(queue, device_type):
num = get_device_count(device_type)
queue.put(num)
def get_device_count_by_fork(device_type: str):
"""
Get device count in fork thread.
See https://stackoverflow.com/questions/22950047/cuda-initialization-error-after-fork
for more information.
"""
q = mp.Queue()
p = mp.Process(target=_get_device_count_worker, args=(q, device_type))
p.start()
p.join()
return q.get()
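# Example (assumes a CUDA-capable machine): query the GPU count without initializing
# CUDA in the calling process.
#   n_gpus = get_device_count_by_fork("gpu")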
def bcast_list_(inps: list, group: Group = WORLD):
"""
Broadcast tensors between given group.
:param inps: input tensors.
:param group: communication group.
"""
for inp in inps:
inp._reset(broadcast(inp, group))
class AllreduceCallback:
"""
Allreduce Callback with tensor fusion optimization.
    :param reduce_method: the method to reduce gradients.
:param group: communication group.
"""
def __init__(self, reduce_method: str, group: Group = WORLD):
reduce_method = reduce_method.lower()
assert reduce_method in ["sum", "mean"], "reduce_method should be sum or mean"
self._reduce_method = reduce_method
self._group = group
self._marked_gm = WeakSet()
self._param_pack_thd = 10 * 1024 * 1024
self._reset()
def _reset(self):
self._params = []
self._gradients_dict = dict()
self._futures_dict = dict()
self._packing_list = defaultdict(list)
self._packing_size = defaultdict(int)
self._grad_origin_device = dict()
def _pack(self, dtype):
if len(self._packing_list[dtype]) == 0:
return
grad_list = [self._gradients_dict[p] for p in self._packing_list[dtype]]
shapes = [p._tuple_shape for p in self._packing_list[dtype]]
reduced_grads = pack_allreduce_split(
grad_list, shapes, self._group, self._reduce_method
)
for param, grad in zip(self._packing_list[dtype], reduced_grads):
self._gradients_dict[param] = grad
self._packing_list[dtype] = []
self._packing_size[dtype] = 0
def __call__(self, param, grad):
gm = get_backwarding_grad_manager()
assert isinstance(gm, GradManager)
if gm not in self._marked_gm:
gm._register_after_backward_callback(self._flush)
self._marked_gm.add(gm)
self._params.append(param)
self._futures_dict[param] = TensorFuture(ack=False)
self._gradients_dict[param] = grad
self._grad_origin_device[param] = str(grad.device)
dtype_str = str(np.dtype(param.dtype))
dtype_size = np.dtype(param.dtype).itemsize
self._packing_list[dtype_str].append(param)
self._packing_size[dtype_str] += int(np.prod(param._tuple_shape)) * dtype_size
if self._packing_size[dtype_str] > self._param_pack_thd:
self._pack(dtype_str)
return self._futures_dict[param]
def _flush(self):
for dtype in sorted(self._packing_list.keys()):
self._pack(dtype)
for param in self._params:
grad = self._gradients_dict[param]
grad = copy(grad, self._grad_origin_device[param])
self._futures_dict[param].set(grad)
self._reset()
make_allreduce_cb = AllreduceCallback
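# Usage sketch (assumes a distributed launch; `net` is a hypothetical module): register
# the callback so gradients are fused and all-reduced during the backward pass.
#   gm = GradManager()
#   gm.attach(net.parameters(), callbacks=[make_allreduce_cb("mean")])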
|
subproc_vec_env.py
|
import numpy as np
from multiprocessing import Process, Pipe
from baselines.common.vec_env import VecEnv
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.action_space, env.observation_space))
elif cmd == 'render':
remote.send(env.render())
else:
raise NotImplementedError
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns):
"""
envs: list of gym environments to run in subprocesses
"""
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
self.action_space, self.observation_space = self.remotes[0].recv()
def step(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
results = [remote.recv() for remote in self.remotes]
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def render(self, num):
for remote in self.remotes[:num]:
remote.send(('render', None))
        results = [remote.recv() for remote in self.remotes[:num]]
        return results
@property
def num_envs(self):
return len(self.remotes)
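# Usage sketch (comments only; assumes an older gym whose step() returns 4-tuples, to
# match the worker above, plus env factory callables such as `make_env`):
#   venv = SubprocVecEnv([make_env for _ in range(8)])
#   obs = venv.reset()
#   obs, rews, dones, infos = venv.step([venv.action_space.sample() for _ in range(8)])
#   venv.close()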
|
subproc_vec_env.py
|
import numpy as np
from multiprocessing import Process, Pipe
from .vec_env import VecEnv, CloudpickleWrapper
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
try:
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
info["end_ob"] = ob
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'call':
fn_name = data[0]
args = data[1]
kwargs = data[2]
remote.send(getattr(env, fn_name)(*args, **kwargs))
elif cmd == 'property':
property_name = data
remote.send(getattr(env, property_name))
else:
raise NotImplementedError
finally:
env.close()
class SubprocVecEnv(VecEnv):
"""
VecEnv that runs multiple environments in parallel in subprocesses and communicates with them via pipes.
Recommended to use when num_envs > 1 and step() can be a bottleneck.
"""
def __init__(self, env_fns, **kwargs):
"""
Arguments:
env_fns: iterable of callables - functions that create environments to run in subprocesses. Need to be cloud-pickleable
"""
self.exp_id = kwargs["exp_id"]
self.waiting = False
self.closed = False
self.num_envs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(self.num_envs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
def call_any(func_name, *args, **kwargs):
self.remotes[0].send(("call", [func_name, args, kwargs]))
return self.remotes[0].recv()
def call_all(func_name, *args, **kwargs):
for remote in self.remotes:
remote.send(("call", [func_name, args, kwargs]))
results = []
for remote in self.remotes:
results.append(remote.recv())
return results
def property_any(property_name):
self.remotes[0].send(("property", property_name))
return self.remotes[0].recv()
def property_all(property_name):
for remote in self.remotes:
remote.send(("property", property_name))
results = []
for remote in self.remotes:
results.append(remote.recv())
return results
from munch import munchify
self.any = munchify({
"call": call_any,
"property": property_any
})
self.all = munchify({
"call": call_all,
"property": property_all
})
VecEnv.__init__(self, len(env_fns))
self.observation_space = self.any.property("observation_space")
self.action_space = self.any.property("action_space")
self.state_normalization_exclusion_list = self.any.call("get_state_normalization_exclusion_list")
self.any.call("start_recorder", 'results/%s/records.npy' % self.exp_id)
def step_async(self, actions):
self._assert_not_closed()
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
self._assert_not_closed()
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return _flatten_obs(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(('reset', None))
return _flatten_obs([remote.recv() for remote in self.remotes])
def close_extras(self):
self.closed = True
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
def _assert_not_closed(self):
assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"
def _flatten_obs(obs):
assert isinstance(obs, (list, tuple))
assert len(obs) > 0
if isinstance(obs[0], dict):
    assert all(isinstance(o, dict) for o in obs)
keys = obs[0].keys()
return {k: np.stack([o[k] for o in obs]) for k in keys}
else:
return np.stack(obs)
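# A minimal sketch (not part of the original file) of how _flatten_obs stacks
# per-environment observations; the sample arrays are made up.
if __name__ == '__main__':
    flat = _flatten_obs([np.array([0.0, 1.0]), np.array([2.0, 3.0])])
    print(flat.shape)  # (2, 2)
    stacked = _flatten_obs([{'pos': np.zeros(3), 'vel': np.ones(3)},
                            {'pos': np.ones(3), 'vel': np.zeros(3)}])
    print(stacked['pos'].shape, stacked['vel'].shape)  # (2, 3) (2, 3)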
|
supervisor.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training helper that checkpoints models and computes summaries.
The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
and a `SessionManager` that takes care of common needs of Tensorflow
training programs.
Use for a single program:
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
sv = Supervisor(logdir='/tmp/mydir')
# Get a Tensorflow session.
sess = sv.prepare_or_wait_for_session(FLAGS.master)
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
# Ask for all the services to stop.
sv.stop()
```
After the call to `prepare_or_wait_for_session()`, all `Variables` in the `Graph`
have been initialized. In addition, a few services have been started
to checkpoint the model and fetch summaries.
If the program crashes and you restart it, the call to
`prepare_or_wait_for_session()` automatically reinitializes the Variables
from the most recent checkpoint.
If any of the services raises an exception, it will ask the Supervisor to stop.
In that case `should_stop()` will return True and you should stop your
training loop.
Finish by calling `stop()` to cleanly wait for the services to complete.
If a service thread raised an exception, it is re-raised in the `stop()`
call so your program can easily report it.
Use for multiple replicas:
To train with replicas you deploy the same program in a `Cluster`.
One of the tasks must be identified as the *chief*: the task that handles
initialization, checkpoints, summaries, and recovery. The other tasks
depend on the *chief* for these services.
The only change you have to do to the single program code is to indicate
if the program is running as the *chief*.
```python
# Choose a task as the chief. This could be based on server_def.task_index, or
# job_def.name, or job_def.tasks. It's entirely up to the end user. But there
# can be only one *chief*.
is_chief = (server_def.task_index == 0)
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that uses log directory on a shared file system.
# Indicate if you are the 'chief'
sv = Supervisor(logdir='/shared_directory/...', is_chief=is_chief)
# Get a Session in a TensorFlow server on the cluster.
sess = sv.prepare_or_wait_for_session(FLAGS.master)
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
# Ask for all the services to stop.
sv.stop()
```
In the *chief* task, the `Supervisor` works exactly as in the first example
above. In the other tasks `prepare_or_wait_for_session()` waits for the Model to
have been initialized before returning a session to the training code.
If one of the tasks crashes and restarts, `prepare_or_wait_for_session()` checks
if the Model is initialized. If yes, it just creates a session and
returns it to the training code that proceeds normally. If the model
needs to be initialized, the chief task takes care of reinitializing it;
the other tasks just wait for the model to have been initialized.
NOTE: This modified program still works fine as a single program.
The single program marks itself as the chief.
What *master* string to use:
Whether you are running on your machine or in the cluster you can use the
following values for the --master flag:
Specifying 'local' requests a Session that uses the proto-based "Master
interface" to run TensorFlow programs. It does not use an RPC subsystem to
communicate within the process, and cannot communicate with remote TensorFlow
workers.
Specifying 'localhost:port' requests a Session that uses the loopback RPC
interface, and also allows the in-process master to access remote tensorflow
workers.
Advanced use.
Launching additional services.
`prepare_or_wait_for_session()` launches the Checkpoint and Summary
services (threads). If you need more services to run you can simply
launch them after `prepare_or_wait_for_session()` returns. The Supervisor
uses a Coordinator to help multiple threads stop together, so pass that
coordinator ('sv.coord') to the threads you launch.
Example: Start a QueueRunner to prefetch inputs.
```python
...build the model with a QueueRunner to prefetch inputs...
qr = QueueRunner(input_queue, [enqueue_op])
...
sv = Supervisor(logdir='/tmp/mydir')
sess = sv.prepare_or_wait_for_session(FLAGS.master)
# Start the queue runner threads.
threads = qr.create_threads(sess, sv.coord, start=True)
# Catch OutOfRangeError, which signals that your input queue is exhausted.
try:
while not sv.should_stop():
sess.run(my_train_op)
except tf.errors.OutOfRangeError:
pass
# Wait for the QueueRunner and service threads to complete.
sv.stop(threads)
```
Note: Starting `QueueRunner` threads is very common, so the Supervisor
provides a convenience method named `start_queue_runners()`. If you use
that method you do not have to keep track of the started threads and
can just call `stop()` normally:
```python
...build the model with a QueueRunner to prefetch inputs...
qr = QueueRunner(input_queue, [enqueue_op])
...
sv = Supervisor(logdir='/tmp/mydir')
sess = sv.prepare_or_wait_for_session(FLAGS.master)
# Start the queue runner threads.
sv.start_queue_runners(sess, [qr])
# Catch OutOfRangeError, which signals that your input queue is exhausted.
try:
while not sv.should_stop():
sess.run(my_train_op)
except tf.errors.OutOfRangeError:
pass
# Wait for the QueueRunner and service threads to complete.
sv.stop()
```
Launching fewer services.
`prepare_or_wait_for_session()` launches the `Summary` and `Checkpoint`
services (threads), which use either the optional `summary_op`
and `saver` passed to the constructor, or default ones created
automatically by the `Supervisor`. If you want to run your own summary
and checkpointing logic, disable these services by passing `None` to the
`summary_op` and `saver` parameters.
Example: Create summaries manually every 100 steps in the chief.
```python
# Create a Supervisor with no automatic summaries.
sv = Supervisor(logdir='/tmp/mydir', is_chief=is_chief, summary_op=None)
# As summary_op was None, prepare_or_wait_for_session() does not start the
# summary thread.
sess = sv.prepare_or_wait_for_session(FLAGS.master)
for step in xrange(1000000):
if is_chief and step % 100 == 0:
# Create the summary every 100 chief steps.
sv.summary_computed(sess, sess.run(my_summary_op))
else:
# Train normally
sess.run(my_train_op)
```
Custom Model Initialization.
`prepare_or_wait_for_session()` only supports initializing the model by running an
`init_op`. If you have special initialization needs, use `local_init_op`.
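A minimal sketch (illustrative only; `my_table` stands for any resource with
an initializer created elsewhere in your graph):
```python
# Run extra per-instance initialization through local_init_op.
local_init = tf.group(tf.initialize_local_variables(), my_table.initializer)
sv = Supervisor(logdir='/tmp/mydir', local_init_op=local_init)
sess = sv.prepare_or_wait_for_session(FLAGS.master)
```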
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import time
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import saver as saver_mod
from tensorflow.python.training import summary_io
from tensorflow.python.training import training_util
from tensorflow.python.training.session_manager import SessionManager
class Supervisor(object):
"""Training helper that checkpoints and computes summaries."""
# Value to pass for the 'ready_op', 'init_op', 'summary_op', 'saver',
# and 'global_step' parameters of Supervisor.__init__() to indicate that
# the default behavior should be used.
USE_DEFAULT = 0
# Protects _TENSORFLOW_LAUNCHED
_launch_lock = threading.Lock()
# True if we have already launched the tensorflow in-process server.
_TENSORFLOW_LAUNCHED = False
def __init__(self, graph=None, ready_op=USE_DEFAULT, is_chief=True,
init_op=USE_DEFAULT, init_feed_dict=None,
local_init_op=USE_DEFAULT, logdir=None,
summary_op=USE_DEFAULT, saver=USE_DEFAULT,
global_step=USE_DEFAULT, save_summaries_secs=120,
save_model_secs=600, recovery_wait_secs=30,
checkpoint_basename="model.ckpt", session_manager=None):
"""Create a `Supervisor`.
Args:
graph: A `Graph`. The graph that the model will use. Defaults to the
default `Graph`. The supervisor may add operations to the graph before
creating a session, but the graph should not be modified by the caller
after passing it to the supervisor.
ready_op: `Operation` to check if the model is initialized. This
operation is run by supervisors in `prepare_or_create_session()` to
check if the model is ready to use. The model is considered ready if
that operation succeeds. Defaults to the operation returned from
`tf.assert_variables_initialized()`. If `None`, the model is not checked
for readiness.
is_chief: If True, create a chief supervisor in charge of initializing
and restoring the model. If False, create a supervisor that relies
on a chief supervisor for inits and restore.
init_op: `Operation`. Used by chief supervisors to initialize the model
when it can not be recovered. Defaults to an `Operation` that
initializes all variables. If `None`, no initialization is done
automatically.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
local_init_op: `Operation`. Used by all supervisors to run initializations
that should run for every new supervisor instance. By default these
are table initializers and initializers for local variables.
If `None`, no further per supervisor-instance initialization is
done automatically.
logdir: A string. Optional path to a directory where to checkpoint the
model and log events for the visualizer. Used by chief supervisors.
The directory will be created if it does not exist.
summary_op: An `Operation` that returns a Summary for the event logs.
Used by chief supervisors if a `logdir` was specified. Defaults to the
operation returned from merge_all_summaries(). If `None`, summaries are
not computed automatically.
saver: A Saver object. Used by chief supervisors if a `logdir` was
specified. Defaults to the saver returned by Saver().
If `None`, the model is not saved automatically.
global_step: An integer Tensor of size 1 that counts steps. The value
from 'global_step' is used in summaries and checkpoint filenames.
Default to the op named 'global_step' in the graph if it exists, is of
rank 1, size 1, and of type tf.int32 or tf.int64. If `None` the global
step is not recorded in summaries and checkpoint files. Used by chief
supervisors if a `logdir` was specified.
save_summaries_secs: Number of seconds between the computation of
summaries for the event log. Defaults to 120 seconds. Pass 0 to
disable summaries.
save_model_secs: Number of seconds between the creation of model
checkpoints. Defaults to 600 seconds. Pass 0 to disable checkpoints.
recovery_wait_secs: Number of seconds between checks that the model
is ready. Used by supervisors when waiting for a chief supervisor
to initialize or restore the model. Defaults to 30 seconds.
checkpoint_basename: The basename for checkpoint saving.
session_manager: `SessionManager`, which manages Session creation and
recovery. If it is `None`, a default `SessionManager` will be created
with the set of arguments passed in for backwards compatibility.
Returns:
A `Supervisor`.
"""
# Set default values of arguments.
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
self._init_ready_op(ready_op=ready_op)
self._init_init_op(init_op=init_op, init_feed_dict=init_feed_dict)
self._init_local_init_op(local_init_op=local_init_op)
self._init_saver(saver=saver)
self._init_summary_op(summary_op=summary_op)
self._init_global_step(global_step=global_step)
self._graph = graph
self._is_chief = is_chief
self._logdir = logdir
self._save_summaries_secs = save_summaries_secs
self._save_model_secs = save_model_secs
self._recovery_wait_secs = recovery_wait_secs
self._coord = coordinator.Coordinator()
if logdir:
self._save_path = os.path.join(self._logdir, checkpoint_basename)
self._summary_writer = summary_io.SummaryWriter(self._logdir)
else:
self._save_path = None
self._summary_writer = None
self._init_session_manager(session_manager=session_manager)
self._started_threads = []
self._verify_setup()
# The graph is not allowed to change anymore.
graph.finalize()
def _init_session_manager(self, session_manager=None):
if session_manager is None:
self._session_manager = SessionManager(
local_init_op=self._local_init_op,
ready_op=self._ready_op, graph=self._graph,
recovery_wait_secs=self._recovery_wait_secs)
else:
self._session_manager = session_manager
def _get_first_op_from_collection(self, key):
"""Returns the first `Operation` from a collection.
Args:
key: A string collection key.
Returns:
The first Op found in a collection, or `None` if the collection is empty.
"""
try:
op_list = ops.get_collection(key)
if len(op_list) > 1:
logging.info("Found %d %s operations. Returning the first one.",
len(op_list), key)
if op_list:
return op_list[0]
except LookupError:
pass
return None
def _init_ready_op(self, ready_op=USE_DEFAULT):
"""Initializes ready_op.
Args:
ready_op: `Operation` to check if the model is initialized.
If it's set to USE_DEFAULT, creates an op that checks all
the variables are initialized.
"""
if ready_op is Supervisor.USE_DEFAULT:
ready_op = self._get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.assert_variables_initialized()
if ready_op is not None:
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
self._ready_op = ready_op
def _init_init_op(self, init_op=USE_DEFAULT, init_feed_dict=None):
"""Initializes init_op.
Args:
init_op: `Operation` to initialize the variables. If set to USE_DEFAULT,
create an op that initializes all variables and tables.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
"""
if init_op is Supervisor.USE_DEFAULT:
init_op = self._get_first_op_from_collection(ops.GraphKeys.INIT_OP)
if init_op is None:
init_op = variables.initialize_all_variables()
ops.add_to_collection(ops.GraphKeys.INIT_OP, init_op)
self._init_op = init_op
self._init_feed_dict = init_feed_dict
def _init_local_init_op(self, local_init_op=USE_DEFAULT):
"""Initializes local_init_op.
Args:
local_init_op: `Operation` run for every new supervisor instance. If set
to USE_DEFAULT create an op based on the `LOCAL_INITIALIZERS` graph
collection.
"""
if local_init_op is Supervisor.USE_DEFAULT:
local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [variables.initialize_local_variables(),
data_flow_ops.initialize_all_tables()]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
self._local_init_op = local_init_op
def _init_saver(self, saver=USE_DEFAULT):
"""Initializes saver.
Args:
saver: A `Saver` object. If set to USE_DEFAULT, create one that
saves all the variables.
"""
if saver is Supervisor.USE_DEFAULT:
saver = self._get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is None and variables.all_variables():
saver = saver_mod.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
self._saver = saver
def _init_summary_op(self, summary_op=USE_DEFAULT):
"""Initilizes summary_op.
Args:
summary_op: An Operation that returns a Summary for the event logs.
If set to USE_DEFAULT, create an op that merges all the summaries.
"""
if summary_op is Supervisor.USE_DEFAULT:
summary_op = self._get_first_op_from_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is None:
summary_op = logging_ops.merge_all_summaries()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
self._summary_op = summary_op
def _init_global_step(self, global_step=USE_DEFAULT):
"""Initializes global_step.
Args:
global_step: An integer Tensor of size 1 that counts steps. If
set to USE_DEFAULT, creates global_step tensor.
"""
if global_step is Supervisor.USE_DEFAULT:
global_step = self._get_first_op_from_collection(
ops.GraphKeys.GLOBAL_STEP)
if global_step is None:
global_step = self._default_global_step_tensor()
if global_step is not None:
ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, global_step)
self._global_step = global_step
@property
def session_manager(self):
"""Return the SessionManager used by the Supervisor.
Returns:
A SessionManager object.
"""
return self._session_manager
@property
def coord(self):
"""Return the Coordinator used by the Supervisor.
The Coordinator can be useful if you want to run multiple threads
during your training.
Returns:
A Coordinator object.
"""
return self._coord
@property
def init_op(self):
"""Return the Init Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._init_op
@property
def init_feed_dict(self):
"""Return the feed dictionary used when evaluating the `init_op`.
Returns:
A feed dictionary or `None`.
"""
return self._init_feed_dict
@property
def ready_op(self):
"""Return the Ready Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._ready_op
@property
def summary_writer(self):
"""Return the SummaryWriter used by the supervisor.
Returns:
A SummaryWriter.
"""
return self._summary_writer
@property
def summary_op(self):
"""Return the Summary Tensor used by the supervisor.
Returns:
A string Tensor for the summary or `None`.
"""
return self._summary_op
@property
def save_summaries_secs(self):
"""Return the delay between summary computations.
Returns:
A timestamp.
"""
return self._save_summaries_secs
@property
def global_step(self):
"""Return the global_step Tensor used by the supervisor.
Returns:
An integer Tensor for the global_step.
"""
return self._global_step
@property
def saver(self):
"""Return the Saver used by the supervisor.
Returns:
A Saver object.
"""
return self._saver
@property
def save_model_secs(self):
"""Return the delay between checkpoints.
Returns:
A timestamp.
"""
return self._save_model_secs
@property
def save_path(self):
"""Return the save path used by the supervisor.
Returns:
A string.
"""
return self._save_path
def _write_graph(self):
"""Writes graph_def to `logdir` and adds it to summary if applicable."""
if not self._is_chief:
return
if self._logdir:
training_util.write_graph(self._graph.as_graph_def(),
self._logdir, "graph.pbtxt")
if self._summary_writer:
self._summary_writer.add_graph(self._graph)
def start_standard_services(self, sess):
"""Start the standard services for 'sess'.
This starts services in the background. The services started depend
on the parameters to the constructor and may include:
- A Summary thread computing summaries every save_summaries_secs.
- A Checkpoint thread saving the model every save_model_secs.
- A StepCounter thread that measures step time.
Args:
sess: A Session.
Returns:
A list of threads that are running the standard services. You can use
the Supervisor's Coordinator to join these threads with:
sv.coord.join(<list of threads>)
Raises:
ValueError: If no `logdir` was passed to the constructor, as the
services need a log directory.
"""
if not self._is_chief:
return
if not self._logdir:
logging.warning("Standard services need a 'logdir' "
"passed to the SessionManager")
return
if self._global_step is not None:
# Only add the session log if we keep track of global step.
# TensorBoard cannot use START message for purging expired events
# if there is no step value.
current_step = training_util.global_step(sess, self._global_step)
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START),
current_step)
threads = []
if self._summary_op is not None and self._save_summaries_secs:
threads.append(SVSummaryThread(self, sess))
if self._global_step is not None and self._save_summaries_secs:
threads.append(SVStepCounterThread(self, sess))
if self.saver and self._save_model_secs:
threads.append(SVTimerCheckpointThread(self, sess))
for t in threads:
t.start()
self._started_threads.extend(threads)
return threads
def prepare_or_wait_for_session(self, master="", config=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
start_standard_services=True):
"""Make sure the model is ready to be used.
Create a session on 'master', recovering or initializing the model as
needed, or wait for a session to be ready. If running as the chief
and `start_standard_services` is set to True, also call the session
manager to start the standard services.
Args:
master: name of the TensorFlow `master` to use. If not specified or
empty a 'Direct Session' is created.
config: Optional ConfigProto proto used to configure the session,
which is passed as-is to create the session.
wait_for_checkpoint: Whether we should wait for the availability of a
checkpoint before creating Session. Defaults to False.
max_wait_secs: Maximum time to wait for the session to become available.
start_standard_services: Whether to start the standard services,
such as checkpoint, summary and step counter.
Returns:
A Session object that can be used to drive the model.
"""
if self._is_chief:
sess = self._session_manager.prepare_session(
master, self.init_op, self.saver, self._logdir,
wait_for_checkpoint=wait_for_checkpoint, max_wait_secs=max_wait_secs,
config=config, init_feed_dict=self._init_feed_dict)
self._write_graph()
# For users who recreate the session with prepare_or_wait_for_session(), we
# need to clear the coordinator's stop_event so that threads managed by
# the coordinator can run.
self._coord.clear_stop()
if start_standard_services:
self.start_standard_services(sess)
else:
sess = self._session_manager.wait_for_session(master,
config=config,
max_wait_secs=max_wait_secs)
return sess
def start_queue_runners(self, sess, queue_runners=None):
"""Start threads for `QueueRunners`.
Args:
sess: A `Session`.
queue_runners: A list of `QueueRunners`. If not specified, we'll use the
list of queue runners gathered in the graph under the key
`GraphKeys.QUEUE_RUNNERS`.
Returns:
The list of threads started for the `QueueRunners`.
"""
if queue_runners is None:
queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)
threads = []
for qr in queue_runners:
threads.extend(qr.create_threads(sess, coord=self._coord, daemon=True,
start=True))
self._started_threads.extend(threads)
return threads
def loop(self, timer_interval_secs, target, args=None):
"""Start a LooperThread that calls a function periodically.
If `timer_interval_secs` is None the thread calls `target(args)`
repeatedly. Otherwise `target(args)` is called every `timer_interval_secs`
seconds. The thread terminates when a stop is requested.
The started thread is added to the list of threads managed by the supervisor
so it does not need to be passed to the `stop()` method.
Args:
timer_interval_secs: Number. Time boundaries at which to call `target`.
target: A callable object.
args: Optional arguments to pass to `target` when calling it.
Returns:
The started thread.
"""
looper = coordinator.LooperThread(self._coord, timer_interval_secs,
target=target, args=args)
looper.start()
self._started_threads.append(looper)
return looper
def stop(self, threads=None, close_summary_writer=True):
"""Stop the services and the coordinator.
This does not close the session.
Args:
threads: Optional list of threads to join with the coordinator. If
`None`, defaults to the threads running the standard services plus the
threads started for `QueueRunners` if `start_queue_runners()` was
called. To wait on an additional set of threads, pass the list in this
parameter and they will be merged with the internal list of running
services.
close_summary_writer: Whether to close the `summary_writer`. Defaults to
`True`.
"""
join_threads = []
join_threads.extend(self._started_threads)
if threads is not None:
join_threads.extend(threads)
self._coord.request_stop()
self._coord.join(join_threads)
# Close the writer last, in case one of the running threads was using it.
if close_summary_writer and self._summary_writer:
# Stop messages are not logged with event.step,
# since the session may have already terminated.
self._summary_writer.add_session_log(SessionLog(status=SessionLog.STOP))
self._summary_writer.close()
self._started_threads = []
def request_stop(self, ex=None):
"""Request that the coordinator stop the threads.
See `Coordinator.request_stop()`.
Args:
ex: Optional `Exception`, or Python `exc_info` tuple as returned by
`sys.exc_info()`. If this is the first call to `request_stop()` the
corresponding exception is recorded and re-raised from `join()`.
"""
self._coord.request_stop(ex=ex)
def should_stop(self):
"""Check if the coordinator was told to stop.
See `Coordinator.should_stop()`.
Returns:
True if the coordinator was told to stop, False otherwise.
"""
return self._coord.should_stop()
def stop_on_exception(self):
"""Context handler to stop the supervisor when an exception is raised.
See `Coordinator.stop_on_exception()`.
Returns:
A context handler.
"""
return self._coord.stop_on_exception()
def wait_for_stop(self):
"""Block waiting for the coordinator to stop."""
self._coord.wait_for_stop()
def summary_computed(self, sess, summary, global_step=None):
"""Indicate that a summary was computed.
Args:
sess: A `Session` object.
summary: A Summary proto, or a string holding a serialized summary proto.
global_step: Int. global step this summary is associated with. If `None`,
it will try to fetch the current step.
Raises:
TypeError: if 'summary' is not a Summary proto or a string.
RuntimeError: if the Supervisor was created without a `logdir`.
"""
if not self._logdir:
raise RuntimeError("summary_computed() requires a logdir")
if global_step is None and self.global_step is not None:
global_step = training_util.global_step(sess, self.global_step)
if self._summary_writer:
self._summary_writer.add_summary(summary, global_step)
def _default_global_step_tensor(self):
try:
gs = ops.get_default_graph().get_tensor_by_name("global_step:0")
if gs.dtype.base_dtype in [dtypes.int32, dtypes.int64]:
return gs
else:
logging.warning("Found 'global_step' is not an int type: %s", gs.dtype)
return None
except KeyError:
return None
def _verify_setup(self):
"""Check that all is good.
Raises:
ValueError: If something is not good.
"""
# Not running as chief means that replicas are used.
# In that case all Variables must have their device set.
if not self._is_chief:
for op in self._graph.get_operations():
if op.type == "Variable" and not op.device:
raise ValueError("When using replicas, all Variables must have "
"their device set: %s" % op)
class SVSummaryThread(coordinator.LooperThread):
"""A thread to save summaries on a timer."""
def __init__(self, sv, sess):
"""Create a SVSummaryThread.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVSummaryThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
if self._sv.global_step is not None:
summary_strs, global_step = self._sess.run([self._sv.summary_op,
self._sv.global_step])
else:
summary_strs = self._sess.run(self._sv.summary_op)
global_step = None
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary_strs, global_step)
class SVStepCounterThread(coordinator.LooperThread):
"""Threads to count steps and measure their duration."""
def __init__(self, sv, sess):
"""Create a `SVStepCounterThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVStepCounterThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
self._last_time = 0.0
self._last_step = 0
self._summary_tag = "%s/sec" % self._sv.global_step.op.name
def start_loop(self):
self._last_time = time.time()
self._last_step = training_util.global_step(
self._sess, self._sv.global_step)
def run_loop(self):
# Count the steps.
current_step = training_util.global_step(self._sess, self._sv.global_step)
added_steps = current_step - self._last_step
self._last_step = current_step
# Measure the elapsed time.
current_time = time.time()
elapsed_time = current_time - self._last_time
self._last_time = current_time
# Reports the number of steps done per second
steps_per_sec = added_steps / elapsed_time
summary = Summary(value=[Summary.Value(tag=self._summary_tag,
simple_value=steps_per_sec)])
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary, current_step)
logging.log_first_n(logging.INFO, "%s: %g", 10,
self._summary_tag, steps_per_sec)
class SVTimerCheckpointThread(coordinator.LooperThread):
"""A thread to checkpoint on a timer."""
def __init__(self, sv, sess):
"""Create a `SVTimerCheckpointThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVTimerCheckpointThread, self).__init__(sv.coord, sv.save_model_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
self._sv.saver.save(self._sess, self._sv.save_path,
global_step=self._sv.global_step)
if self._sv.summary_writer and self._sv.global_step is not None:
current_step = training_util.global_step(self._sess, self._sv.global_step)
self._sv.summary_writer.add_session_log(
SessionLog(status=SessionLog.CHECKPOINT,
checkpoint_path=self._sv.save_path),
current_step)
# TODO(sherrym): All non-PEP8 compliant names will be deprecated shortly.
setattr(Supervisor, "PrepareSession", Supervisor.prepare_or_wait_for_session)
setattr(Supervisor, "StartQueueRunners", Supervisor.start_queue_runners)
setattr(Supervisor, "StartStandardServices", Supervisor.start_standard_services)
setattr(Supervisor, "Stop", Supervisor.stop)
setattr(Supervisor, "RequestStop", Supervisor.request_stop)
setattr(Supervisor, "Loop", Supervisor.loop)
setattr(Supervisor, "ShouldStop", Supervisor.should_stop)
setattr(Supervisor, "StopOnException", Supervisor.stop_on_exception)
setattr(Supervisor, "WaitForStop", Supervisor.wait_for_stop)
setattr(Supervisor, "SummaryComputed", Supervisor.summary_computed)
|
main.py
|
import background
import threading
import show
def main():
background_ = threading.Thread(target=background.main)
show_ = threading.Thread(target=show.main)
background_.start()
show_.start()
if __name__ == '__main__':
main()
|
build.py
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build file for production version of Oppia. Minifies JS and CSS."""
# pylint: disable=invalid-name
import collections
import fnmatch
import hashlib
import json
import optparse
import os
import re
import shutil
import subprocess
import threading
ASSETS_DEV_DIR = os.path.join('assets', '')
ASSETS_OUT_DIR = os.path.join('build', 'assets', '')
COMPILED_JS_DIR = os.path.join('local_compiled_js', '')
TSC_OUTPUT_LOG_FILEPATH = 'tsc_output_log.txt'
TSCONFIG_FILEPATH = 'tsconfig.json'
THIRD_PARTY_STATIC_DIR = os.path.join('third_party', 'static')
THIRD_PARTY_GENERATED_DEV_DIR = os.path.join('third_party', 'generated', '')
THIRD_PARTY_GENERATED_OUT_DIR = os.path.join(
'build', 'third_party', 'generated', '')
THIRD_PARTY_JS_RELATIVE_FILEPATH = os.path.join('js', 'third_party.js')
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH = os.path.join(
'js', 'third_party.min.js')
THIRD_PARTY_CSS_RELATIVE_FILEPATH = os.path.join('css', 'third_party.css')
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH = os.path.join(
'css', 'third_party.min.css')
FONTS_RELATIVE_DIRECTORY_PATH = os.path.join('fonts', '')
# /webfonts is needed for font-awesome-5: due to changes in its directory
# structure, FA-5 no longer uses /fonts. /fonts is still needed by Bootstrap
# and will be removed later if no other library uses it.
WEBFONTS_RELATIVE_DIRECTORY_PATH = os.path.join('webfonts', '')
EXTENSIONS_DIRNAMES_TO_DIRPATHS = {
'dev_dir': os.path.join('extensions', ''),
'compiled_js_dir': os.path.join('local_compiled_js', 'extensions', ''),
'staging_dir': os.path.join('backend_prod_files', 'extensions', ''),
'out_dir': os.path.join('build', 'extensions', '')
}
TEMPLATES_DEV_DIR = os.path.join('templates', 'dev', 'head', '')
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS = {
'dev_dir': os.path.join('core', 'templates', 'dev', 'head', ''),
'compiled_js_dir': os.path.join(
'local_compiled_js', 'core', 'templates', 'dev', 'head', ''),
'staging_dir': os.path.join('backend_prod_files', 'templates', 'head', ''),
'out_dir': os.path.join('build', 'templates', 'head', '')
}
HASHES_JS_FILENAME = 'hashes.js'
HASHES_JS_FILEPATH = os.path.join(ASSETS_DEV_DIR, HASHES_JS_FILENAME)
MANIFEST_FILE_PATH = os.path.join('manifest.json')
REMOVE_WS = re.compile(r'\s{2,}').sub
YUICOMPRESSOR_DIR = os.path.join(
'..', 'oppia_tools', 'yuicompressor-2.4.8', 'yuicompressor-2.4.8.jar')
PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
NODE_FILE = os.path.join(
PARENT_DIR, 'oppia_tools', 'node-10.15.3', 'bin', 'node')
UGLIFY_FILE = os.path.join('node_modules', 'uglify-js', 'bin', 'uglifyjs')
WEBPACK_FILE = os.path.join('node_modules', 'webpack', 'bin', 'webpack.js')
WEBPACK_PROD_CONFIG = 'webpack.prod.config.ts'
# Files with these extensions shouldn't be moved to build directory.
FILE_EXTENSIONS_TO_IGNORE = ('.py', '.pyc', '.stylelintrc', '.ts')
# Files with these name patterns shouldn't be moved to build directory, and will
# not be served in production. (This includes protractor.js files in
# /extensions.)
JS_FILENAME_SUFFIXES_TO_IGNORE = ('Spec.js', 'protractor.js')
JS_FILENAME_SUFFIXES_NOT_TO_MINIFY = ('.bundle.js',)
GENERAL_FILENAMES_TO_IGNORE = ('.pyc', '.stylelintrc')
# These files are present in both extensions and local_compiled_js/extensions.
# They are required in local_compiled_js since they contain code used in
# other ts files and excluding them from compilation will create compile
# errors due to missing variables. So, the files should be built only from
# one location instead of both the locations.
JS_FILEPATHS_NOT_TO_BUILD = (
'extensions/interactions/LogicProof/static/js/generatedDefaultData.js',
'extensions/interactions/LogicProof/static/js/generatedParser.js',
'core/templates/dev/head/expressions/ExpressionParserService.js')
# These filepaths shouldn't be renamed (i.e. the filepath shouldn't contain
# hash).
# This is because these files don't need cache invalidation, are referenced
# from third party files or should not be moved to the build directory.
# Statically served pages from app.yaml should be here too, since they don't
# need cache invalidation.
FILEPATHS_NOT_TO_RENAME = (
'*.py',
'third_party/generated/fonts/*',
'third_party/generated/js/third_party.min.js.map',
'third_party/generated/webfonts/*',
'*.bundle.js',
'*.bundle.js.map',
'*/dist/get-started-page.mainpage.html',
'*/dist/splash-page.mainpage.html',
'*/dist/splash_at0.html',
'*/dist/splash_at1.html',
'*/dist/teach-page.mainpage.html')
# Hashes for files with these paths should be provided to the frontend in
# JS hashes object.
FILEPATHS_PROVIDED_TO_FRONTEND = (
'images/*', 'videos/*', 'i18n/*', '*_directive.html', '*.directive.html',
'*.template.html', '*.png', '*.json')
HASH_BLOCK_SIZE = 2**20
APP_DEV_YAML_FILEPATH = 'app_dev.yaml'
APP_YAML_FILEPATH = 'app.yaml'
def generate_app_yaml():
"""Generate app.yaml from app_dev.yaml."""
dev_file_prefix = 'core/templates/dev/head'
prod_file_prefix = 'build/templates/head'
content = '# THIS FILE IS AUTOGENERATED, DO NOT MODIFY\n'
with open(APP_DEV_YAML_FILEPATH, 'r') as yaml_file:
content += yaml_file.read()
for file_path in FILEPATHS_NOT_TO_RENAME:
if '/dist/' in file_path:
content = content.replace(
dev_file_prefix + file_path[1:],
prod_file_prefix + file_path[1:])
content = content.replace('version: default', '')
if os.path.isfile(APP_YAML_FILEPATH):
os.remove(APP_YAML_FILEPATH)
with open(APP_YAML_FILEPATH, 'w+') as prod_yaml_file:
prod_yaml_file.write(content)
def require_compiled_js_dir_to_be_valid():
"""Checks if COMPILED_JS_DIR matches the output directory used in
TSCONFIG_FILEPATH.
Raises:
Exception: The COMPILED_JS_DIR does not match the outDir in
TSCONFIG_FILEPATH.
"""
out_dir = ''
with open(TSCONFIG_FILEPATH) as f:
config_data = json.load(f)
out_dir = os.path.join(config_data['compilerOptions']['outDir'], '')
if out_dir != COMPILED_JS_DIR:
raise Exception(
'COMPILED_JS_DIR: %s does not match the output directory '
'in %s: %s' % (COMPILED_JS_DIR, TSCONFIG_FILEPATH, out_dir))
def _minify(source_path, target_path):
"""Runs the given file through a minifier and outputs it to target_path.
Args:
source_path: str. Absolute path to file to be minified.
target_path: str. Absolute path to location where to copy
the minified file.
"""
# The -Xmxn argument is an attempt to limit the max memory used when the
# minification process is running on CircleCI. Note that, from local
# experiments, 18m seems to work, but 12m is too small and results in an
# out-of-memory error.
# https://circleci.com/blog/how-to-handle-java-oom-errors/
cmd = 'java -Xmx24m -jar %s -o %s %s' % (
YUICOMPRESSOR_DIR, target_path, source_path)
subprocess.check_call(cmd, shell=True)
def write_to_file_stream(file_stream, content):
"""Write to a file object using provided content.
Args:
file_stream: file. A stream handling object to do write operation on.
content: str. String content to write to file object.
"""
file_stream.write(content)
def _join_files(source_paths, target_file_stream):
"""Writes multiple files into one file.
Args:
source_paths: list(str). Paths to files to join together.
target_file_stream: file. A stream object of target file.
"""
for source_path in source_paths:
with open(source_path, 'r') as source_file:
write_to_file_stream(target_file_stream, source_file.read())
def _minify_and_create_sourcemap(source_path, target_file_path):
"""Minifies and generates source map for a JS file. This function is only
meant to be used with third_party.min.js.
Args:
source_path: str. Path to JS file to minify.
target_file_path: str. Path to location of the minified file.
"""
print 'Minifying and creating sourcemap for %s' % source_path
source_map_properties = 'includeSources,url=\'third_party.min.js.map\''
cmd = '%s %s %s -c -m --source-map %s -o %s ' % (
NODE_FILE, UGLIFY_FILE, source_path,
source_map_properties, target_file_path)
subprocess.check_call(cmd, shell=True)
def _generate_copy_tasks_for_fonts(source_paths, target_path):
"""Queue up a copy task for each font file.
Args:
source_paths: list(str). Paths to fonts.
target_path: str. Path where the fonts should be copied.
Returns:
deque(Thread). A deque that contains all copy tasks queued
to be processed.
"""
copy_tasks = collections.deque()
for font_path in source_paths:
copy_task = threading.Thread(
target=shutil.copy,
args=(font_path, target_path,))
copy_tasks.append(copy_task)
return copy_tasks
def _insert_hash(filepath, file_hash):
"""Inserts hash into filepath before the file extension.
Args:
filepath: str. Path where the hash should be inserted.
file_hash: str. Hash to be inserted into the path.
Returns:
str. Filepath with hash inserted.
"""
filepath, file_extension = os.path.splitext(filepath)
return '%s.%s%s' % (filepath, file_hash, file_extension)
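# Illustrative example (not part of the original file):
#     _insert_hash('css/oppia.css', '3f0c4') -> 'css/oppia.3f0c4.css'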
def ensure_directory_exists(filepath):
"""Ensures if directory tree exists, if not creates the directories.
Args:
filepath: str. Path to file located in directory that we want to
ensure exists.
"""
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
def safe_delete_directory_tree(directory_path):
"""Recursively delete a directory tree. If directory tree does not exist,
create the directories first then delete the directory tree.
Args:
directory_path: str. Directory path to be deleted.
"""
ensure_directory_exists(directory_path)
shutil.rmtree(directory_path)
def _ensure_files_exist(filepaths):
"""Ensures that files exist at the given filepaths.
Args:
filepaths: list(str). Paths to files that we want to ensure exist.
Raises:
OSError: One or more of the files does not exist.
"""
for filepath in filepaths:
if not os.path.isfile(filepath):
raise OSError('File %s does not exist.' % filepath)
def safe_copy_file(source_filepath, target_filepath):
"""Copy a file (no metadata) after ensuring the file exists at the given
source filepath.
NOTE: shutil.copyfile does not accept directory paths as arguments.
Args:
source_filepath: str. Path to source file that we want to copy from.
target_filepath: str. Path to target file that we want to copy to.
"""
_ensure_files_exist([source_filepath])
shutil.copyfile(source_filepath, target_filepath)
def safe_delete_file(filepath):
"""Delete a file after ensuring the provided file actually exists.
Args:
filepath: str. Filepath to be deleted.
"""
_ensure_files_exist([filepath])
os.remove(filepath)
def get_file_count(directory_path):
"""Count total number of file in the given directory, ignoring any files
with extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be
built.
Args:
directory_path: str. Directory to be walked.
Returns:
int. Total number of files minus ignored files.
"""
total_file_count = 0
for root, _, filenames in os.walk(directory_path):
for filename in filenames:
# Ignore files with certain extensions.
filepath = os.path.join(root, filename)
if should_file_be_built(filepath) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
total_file_count += 1
return total_file_count
def _compare_file_count(
first_dir_list, second_dir_list):
"""Ensure that the total count of files in all directories in the first
list matches the count of files in all the directories in the second list.
Args:
first_dir_list: list(str). List of directories to compare.
second_dir_list: list(str). List of directories to compare.
Raises:
ValueError: The source directory list does not have the same file
count as the target directory list.
"""
file_counts = [0, 0]
for first_dir_path in first_dir_list:
file_counts[0] += get_file_count(first_dir_path)
for second_dir_path in second_dir_list:
file_counts[1] += get_file_count(second_dir_path)
if file_counts[0] != file_counts[1]:
print 'Comparing %s vs %s' % (first_dir_list, second_dir_list)
raise ValueError(
'%s files in first dir list != %s files in second dir list' % (
file_counts[0], file_counts[1]))
def process_html(source_file_stream, target_file_stream, file_hashes):
"""Remove whitespaces and add hashes to filepaths in the HTML file stream
object.
Args:
source_file_stream: file. The stream object of the HTML file to be
read from.
target_file_stream: file. The stream object to write the minified HTML
file to.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
"""
content = source_file_stream.read()
for filepath, file_hash in file_hashes.iteritems():
# We add the hash to all file paths except HTML paths, because HTML
# paths are used by the backend, which works with paths that do not
# contain the hash part.
if not filepath.endswith('.html'):
filepath_with_hash = _insert_hash(filepath, file_hash)
content = content.replace(
'%s%s' % (TEMPLATES_DEV_DIR, filepath),
'%s%s' % (
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir'],
filepath_with_hash))
content = content.replace(
'%s%s' % (ASSETS_DEV_DIR, filepath),
'%s%s' % (ASSETS_OUT_DIR, filepath_with_hash))
content = content.replace(
'%s%s' % (EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir'], filepath),
'%s%s' % (
EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'],
filepath_with_hash))
content = content.replace(
'%s%s' % (THIRD_PARTY_GENERATED_DEV_DIR, filepath),
'%s%s' % (THIRD_PARTY_GENERATED_OUT_DIR, filepath_with_hash))
content = REMOVE_WS(' ', content)
write_to_file_stream(target_file_stream, content)
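# Illustrative example (not part of the original file): with file_hashes
# containing {'images/logo.png': 'abc123'}, an occurrence of
# 'assets/images/logo.png' in the source HTML is rewritten to
# 'build/assets/images/logo.abc123.png' in the output stream.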
def get_dependency_directory(dependency):
"""Get dependency directory from dependency dictionary.
Args:
dependency: dict(str, str). Dictionary representing single dependency
from manifest.json.
Returns:
str. Dependency directory.
"""
if 'targetDir' in dependency:
dependency_dir = dependency['targetDir']
else:
dependency_dir = dependency['targetDirPrefix'] + dependency['version']
return os.path.join(THIRD_PARTY_STATIC_DIR, dependency_dir)
def get_css_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency css filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to css files that need to be copied.
"""
css_files = dependency_bundle.get('css', [])
return [os.path.join(dependency_dir, css_file) for css_file in css_files]
def get_js_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency js filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to js files that need to be copied.
"""
js_files = dependency_bundle.get('js', [])
return [os.path.join(dependency_dir, js_file) for js_file in js_files]
def get_font_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency font filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to font files that need to be copied.
"""
if 'fontsPath' not in dependency_bundle:
# Skip dependency bundles in manifest.json that do not have
# fontsPath property.
return []
fonts_path = dependency_bundle['fontsPath']
# Obtain the directory path to /fonts inside the dependency folder.
# E.g. third_party/static/bootstrap-3.3.4/fonts/.
font_dir = os.path.join(dependency_dir, fonts_path)
font_filepaths = []
# Walk the directory and add all font files to list.
for root, _, filenames in os.walk(font_dir):
for filename in filenames:
font_filepaths.append(os.path.join(root, filename))
return font_filepaths
def get_dependencies_filepaths():
"""Extracts dependencies filepaths from manifest.json file into
a dictionary.
Returns:
dict(str, list(str)). A dict mapping file types to lists of filepaths.
The dict has three keys: 'js', 'css' and 'fonts'. Each of the
corresponding values is a full list of dependency file paths of the
given type.
"""
filepaths = {
'js': [],
'css': [],
'fonts': []
}
with open(MANIFEST_FILE_PATH, 'r') as json_file:
manifest = json.loads(
json_file.read(), object_pairs_hook=collections.OrderedDict)
frontend_dependencies = manifest['dependencies']['frontend']
for dependency in frontend_dependencies.values():
if 'bundle' in dependency:
dependency_dir = get_dependency_directory(dependency)
filepaths['css'].extend(
get_css_filepaths(dependency['bundle'], dependency_dir))
filepaths['js'].extend(
get_js_filepaths(dependency['bundle'], dependency_dir))
filepaths['fonts'].extend(
get_font_filepaths(dependency['bundle'], dependency_dir))
_ensure_files_exist(filepaths['js'])
_ensure_files_exist(filepaths['css'])
_ensure_files_exist(filepaths['fonts'])
return filepaths
def minify_third_party_libs(third_party_directory_path):
"""Minify third_party.js and third_party.css and remove un-minified
files.
"""
THIRD_PARTY_JS_FILEPATH = os.path.join(
third_party_directory_path, THIRD_PARTY_JS_RELATIVE_FILEPATH)
THIRD_PARTY_CSS_FILEPATH = os.path.join(
third_party_directory_path, THIRD_PARTY_CSS_RELATIVE_FILEPATH)
MINIFIED_THIRD_PARTY_JS_FILEPATH = os.path.join(
third_party_directory_path, MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH)
MINIFIED_THIRD_PARTY_CSS_FILEPATH = os.path.join(
third_party_directory_path, MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH)
_minify_and_create_sourcemap(
THIRD_PARTY_JS_FILEPATH, MINIFIED_THIRD_PARTY_JS_FILEPATH)
_minify(THIRD_PARTY_CSS_FILEPATH, MINIFIED_THIRD_PARTY_CSS_FILEPATH)
# Clean up un-minified third_party.js and third_party.css.
safe_delete_file(THIRD_PARTY_JS_FILEPATH)
safe_delete_file(THIRD_PARTY_CSS_FILEPATH)
def build_third_party_libs(third_party_directory_path):
"""Joins all third party css files into single css file and js files into
single js file. Copies both files and all fonts into third party folder.
"""
print 'Building third party libs at %s' % third_party_directory_path
THIRD_PARTY_JS_FILEPATH = os.path.join(
third_party_directory_path, THIRD_PARTY_JS_RELATIVE_FILEPATH)
THIRD_PARTY_CSS_FILEPATH = os.path.join(
third_party_directory_path, THIRD_PARTY_CSS_RELATIVE_FILEPATH)
FONTS_DIR = os.path.join(
third_party_directory_path, FONTS_RELATIVE_DIRECTORY_PATH)
WEBFONTS_DIR = os.path.join(
third_party_directory_path, WEBFONTS_RELATIVE_DIRECTORY_PATH)
dependency_filepaths = get_dependencies_filepaths()
ensure_directory_exists(THIRD_PARTY_JS_FILEPATH)
with open(THIRD_PARTY_JS_FILEPATH, 'w+') as third_party_js_file:
_join_files(dependency_filepaths['js'], third_party_js_file)
ensure_directory_exists(THIRD_PARTY_CSS_FILEPATH)
with open(THIRD_PARTY_CSS_FILEPATH, 'w+') as third_party_css_file:
_join_files(dependency_filepaths['css'], third_party_css_file)
ensure_directory_exists(FONTS_DIR)
_execute_tasks(
_generate_copy_tasks_for_fonts(
dependency_filepaths['fonts'], FONTS_DIR))
ensure_directory_exists(WEBFONTS_DIR)
_execute_tasks(
_generate_copy_tasks_for_fonts(
dependency_filepaths['fonts'], WEBFONTS_DIR))
def build_using_webpack():
"""Execute webpack build process. This takes all TypeScript files we have in
/templates/dev/head and generates JS bundles according to the require() imports
and also compiles HTML pages into the /templates/dev/head/dist folder.
The settings for this are specified in webpack.prod.config.ts.
"""
print 'Building webpack'
cmd = '%s --config %s' % (
WEBPACK_FILE, WEBPACK_PROD_CONFIG)
subprocess.check_call(cmd, shell=True)
def hash_should_be_inserted(filepath):
"""Returns if the file should be renamed to include hash in
the path.
Args:
filepath: str. Path relative to directory we are currently building.
Returns:
bool. True if filepath should contain hash else False.
"""
return not any(fnmatch.fnmatch(filepath, pattern) for pattern
in FILEPATHS_NOT_TO_RENAME)
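# Illustrative examples (not part of the original file):
#     hash_should_be_inserted('images/logo.png')                       -> True
#     hash_should_be_inserted('third_party/generated/fonts/font.woff') -> False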
def should_file_be_built(filepath):
"""Determines if the file should be built.
- JS files: Returns False if filepath matches with pattern in
JS_FILENAME_SUFFIXES_TO_IGNORE or is in JS_FILEPATHS_NOT_TO_BUILD,
else returns True.
- Python files: Returns False if filepath ends with _test.py, else
returns True
- TS files: Returns False.
- Other files: Returns False if filepath matches with pattern in
GENERAL_FILENAMES_TO_IGNORE, else returns True.
Args:
filepath: str. Path relative to the directory we are currently building.
Returns:
bool. True if filepath should be built, else False.
"""
if filepath.endswith('.js'):
return filepath not in JS_FILEPATHS_NOT_TO_BUILD and not any(
filepath.endswith(p) for p in JS_FILENAME_SUFFIXES_TO_IGNORE)
elif filepath.endswith('_test.py'):
return False
elif filepath.endswith('.ts'):
return False
else:
return not any(
filepath.endswith(p) for p in GENERAL_FILENAMES_TO_IGNORE)
def generate_copy_tasks_to_copy_from_source_to_target(
source, target, file_hashes):
"""Generate copy task for each file in source directory, excluding files
with extensions in FILE_EXTENSIONS_TO_IGNORE. Insert hash from hash dict
into the destination filename.
Args:
source: str. Path relative to /oppia directory of directory
containing files and directories to be copied.
target: str. Path relative to /oppia directory of directory where
to copy the files and directories.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
deque(Thread). A deque that contains all copy tasks queued
to be processed.
"""
print 'Processing %s' % os.path.join(os.getcwd(), source)
print 'Copying into %s' % os.path.join(os.getcwd(), target)
copy_tasks = collections.deque()
for root, dirnames, filenames in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirnames:
print 'Copying %s' % os.path.join(root, directory)
for filename in filenames:
source_path = os.path.join(root, filename)
# Python files should not be copied to final build directory.
if not any(
source_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
target_path = source_path
relative_path = os.path.relpath(source_path, source)
if hash_should_be_inserted(source + relative_path):
relative_path = (
_insert_hash(relative_path, file_hashes[relative_path]))
target_path = os.path.join(os.getcwd(), target, relative_path)
ensure_directory_exists(target_path)
copy_task = threading.Thread(
target=safe_copy_file,
args=(source_path, target_path,))
copy_tasks.append(copy_task)
return copy_tasks
def is_file_hash_provided_to_frontend(filepath):
"""Returns if the hash for the filepath should be provided to the frontend.
Args:
filepath: str. Relative path to the file.
Returns:
bool. True if file hash should be provided to the frontend else False.
"""
return any(fnmatch.fnmatch(filepath, pattern) for pattern
in FILEPATHS_PROVIDED_TO_FRONTEND)
def generate_md5_hash(filepath):
"""Returns md5 hash of file.
Args:
filepath: str. Absolute path to the file.
Returns:
str. Hexadecimal hash of specified file.
"""
m = hashlib.md5()
with open(filepath, 'rb') as f:
while True:
buf = f.read(HASH_BLOCK_SIZE)
if not buf:
break
m.update(buf)
return m.hexdigest()
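# Illustrative sketch (not part of the original build script): generate_md5_hash()
# reads the file in HASH_BLOCK_SIZE chunks so that large assets never have to be
# held in memory all at once. The standalone helper below shows the same
# block-wise pattern; the block size argument is an arbitrary example value.
def _example_md5_of_file(path, block_size=8192):
    """Return the hex MD5 digest of path, reading it in fixed-size blocks."""
    digest = hashlib.md5()
    with open(path, 'rb') as input_file:
        for block in iter(lambda: input_file.read(block_size), b''):
            digest.update(block)
    return digest.hexdigest()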
def get_filepaths_by_extensions(source_dir, file_extensions):
"""Return list of filepaths in a directory with certain extensions,
excluding filepaths that should not be built.
Args:
source_dir: str. Root directory to be walked.
file_extensions: tuple(str). Tuple of file extensions.
Returns:
list(str). List of filepaths with specified extensions.
"""
filepaths = []
for root, _, filenames in os.walk(source_dir):
for filename in filenames:
filepath = os.path.join(root, filename)
relative_filepath = os.path.relpath(filepath, source_dir)
if should_file_be_built(filepath) and any(
filename.endswith(p) for p in file_extensions):
filepaths.append(relative_filepath)
return filepaths
def get_file_hashes(directory_path):
"""Returns hashes of all files in directory tree, excluding files with
extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be built.
Args:
directory_path: str. Root directory of the tree.
Returns:
dict(str, str). Dictionary with keys specifying file paths and values
specifying file hashes.
"""
file_hashes = dict()
print('Computing hashes for files in %s'
% os.path.join(os.getcwd(), directory_path))
for root, _, filenames in os.walk(
os.path.join(os.getcwd(), directory_path)):
for filename in filenames:
filepath = os.path.join(root, filename)
if should_file_be_built(filepath) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
complete_filepath = os.path.join(root, filename)
relative_filepath = os.path.relpath(
complete_filepath, directory_path)
file_hashes[relative_filepath] = generate_md5_hash(
complete_filepath)
return file_hashes
def filter_hashes(file_hashes):
"""Filters hashes that should be provided to the frontend
and prefixes "/" in front of the keys.
Args:
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
dict(str, str). Filtered dictionary of only filepaths that should be
provided to the frontend.
"""
filtered_hashes = dict()
for filepath, file_hash in file_hashes.iteritems():
if is_file_hash_provided_to_frontend(filepath):
filtered_hashes['/' + filepath] = file_hash
return filtered_hashes
def get_hashes_json_file_contents(file_hashes):
"""Return JS code that loads hashes needed for frontend into variable.
Args:
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
str. JS code loading hashes as JSON into variable.
"""
# Only some of the hashes are needed in the frontend.
filtered_hashes = filter_hashes(file_hashes)
hashes_json = json.dumps(filtered_hashes)
return 'var hashes = JSON.parse(\'%s\');' % (hashes_json)
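# Illustrative sketch (not part of the original build script): for a filtered
# hash dict such as {'/hashes.js': 'abc123'}, get_hashes_json_file_contents()
# produces a single JS statement of roughly this shape:
#     var hashes = JSON.parse('{"/hashes.js": "abc123"}');
# The hypothetical helper below reproduces that shape for a sample dict.
def _example_hashes_js_line():
    sample_hashes = {'/hashes.js': 'abc123'}
    return 'var hashes = JSON.parse(\'%s\');' % json.dumps(sample_hashes)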
def minify_func(source_path, target_path, file_hashes, filename):
"""Call the appropriate functions to handle different types of file
formats:
- HTML files: Remove whitespaces, interpolates paths in HTML to include
hashes in source directory and save edited file at target directory.
- CSS or JS files: Minify and save at target directory.
- Other files: Copy the file from source directory to target directory.
"""
skip_minify = any(
filename.endswith(p) for p in JS_FILENAME_SUFFIXES_NOT_TO_MINIFY)
if filename.endswith('.html'):
print 'Building %s' % source_path
with open(source_path, 'r+') as source_html_file:
with open(target_path, 'w+') as minified_html_file:
process_html(source_html_file, minified_html_file, file_hashes)
elif ((filename.endswith('.css') or filename.endswith('.js')) and
not skip_minify):
print 'Minifying %s' % source_path
_minify(source_path, target_path)
else:
print 'Copying %s' % source_path
safe_copy_file(source_path, target_path)
def _execute_tasks(tasks, batch_size=24):
"""Starts all tasks and checks the results.
Runs no more than 'batch_size' tasks at a time.
"""
remaining_tasks = collections.deque(tasks)
currently_running_tasks = []
while remaining_tasks or currently_running_tasks:
if currently_running_tasks:
for task in collections.deque(currently_running_tasks):
if not task.is_alive():
currently_running_tasks.remove(task)
while remaining_tasks and len(currently_running_tasks) < batch_size:
task = remaining_tasks.popleft()
currently_running_tasks.append(task)
try:
task.start()
except RuntimeError as threadAlreadyStarted:
raise OSError(threadAlreadyStarted.message)
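# Illustrative sketch (not part of the original build flow): _execute_tasks()
# starts at most batch_size threads at a time, polling is_alive() to reap
# finished ones before starting more. The example below pushes a few dummy
# threads through the same batching helper; the no-op task is hypothetical.
def _example_run_tasks_in_batches(num_tasks=5, batch_size=2):
    import time

    def _noop_task(task_index):
        time.sleep(0.01)

    dummy_tasks = collections.deque(
        threading.Thread(target=_noop_task, args=(i,))
        for i in range(num_tasks))
    _execute_tasks(dummy_tasks, batch_size=batch_size)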
def generate_build_tasks_to_build_all_files_in_directory(
source, target, file_hashes):
"""This function queues up tasks to build all files in a directory,
excluding files that should not be built.
Args:
source: str. Path relative to /oppia of directory containing source
files and directories to be built.
target: str. Path relative to /oppia of directory where the built files
and directories will be saved to.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
print 'Processing %s' % os.path.join(os.getcwd(), source)
print 'Generating into %s' % os.path.join(os.getcwd(), target)
build_tasks = collections.deque()
for root, dirnames, filenames in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirnames:
print 'Building directory %s' % os.path.join(root, directory)
for filename in filenames:
source_path = os.path.join(root, filename)
target_path = source_path.replace(source, target)
ensure_directory_exists(target_path)
if should_file_be_built(source_path):
task = threading.Thread(
target=minify_func,
args=(source_path, target_path, file_hashes, filename,))
build_tasks.append(task)
return build_tasks
def generate_build_tasks_to_build_files_from_filepaths(
source_path, target_path, filepaths, file_hashes):
"""This function queues up build tasks to build files from a list of
filepaths, excluding files that should not be built.
Args:
source_path: str. Path relative to /oppia directory of directory
containing files and directories to be copied.
target_path: str. Path relative to /oppia directory of directory where
to copy the files and directories.
filepaths: list(str). List of filepaths to be built.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
build_tasks = collections.deque()
for filepath in filepaths:
source_file_path = os.path.join(source_path, filepath)
target_file_path = os.path.join(target_path, filepath)
ensure_directory_exists(target_file_path)
if should_file_be_built(source_file_path):
task = threading.Thread(
target=minify_func,
args=(
source_file_path, target_file_path, file_hashes, filepath,))
build_tasks.append(task)
return build_tasks
def generate_delete_tasks_to_remove_deleted_files(
source_dir_hashes, staging_directory):
"""This function walks the staging directory and queues up deletion tasks to
remove files that are not in the hash dict i.e. remaining files in staging
directory that have since been deleted from source directory. Files with
extensions in FILE_EXTENSIONS_TO_IGNORE will be excluded.
Args:
source_dir_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
staging_directory: str. Path relative to /oppia directory of directory
containing files and directories to be walked.
Returns:
deque(Thread). A deque that contains all delete tasks
queued to be processed.
"""
    print 'Scanning directory %s to remove deleted files' % staging_directory
delete_tasks = collections.deque()
for root, _, filenames in os.walk(
os.path.join(os.getcwd(), staging_directory)):
for filename in filenames:
target_path = os.path.join(root, filename)
# Ignore files with certain extensions.
if not any(
target_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
relative_path = os.path.relpath(target_path, staging_directory)
# Remove file found in staging directory but not in source
# directory, i.e. file not listed in hash dict.
if relative_path not in source_dir_hashes:
print ('Unable to find %s in file hashes, deleting file'
% target_path)
task = threading.Thread(
target=safe_delete_file, args=(target_path,))
delete_tasks.append(task)
return delete_tasks
def get_recently_changed_filenames(source_dir_hashes, out_dir):
"""Compare hashes of source files and built files. Return a list of
    filenames that were recently changed. Skips files that are not supposed to
    be built or are already built.
Args:
source_dir_hashes: dict(str, str). Dictionary of hashes of files
to be built.
out_dir: str. Path relative to /oppia where built files are located.
Returns:
list(str). List of filenames expected to be re-hashed.
"""
# Hashes are created based on files' contents and are inserted between
# the filenames and their extensions,
# e.g base.240933e7564bd72a4dde42ee23260c5f.html
# If a file gets edited, a different MD5 hash is generated.
recently_changed_filenames = []
# Currently, Python files and HTML files are always re-built.
FILE_EXTENSIONS_NOT_TO_TRACK = ('.html', '.py',)
for filename, md5_hash in source_dir_hashes.iteritems():
# Skip files that are already built or should not be built.
if should_file_be_built(filename) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_NOT_TO_TRACK):
final_filepath = _insert_hash(
os.path.join(out_dir, filename), md5_hash)
if not os.path.isfile(final_filepath):
# Filename with provided hash cannot be found, this file has
# been recently changed or created since last build.
recently_changed_filenames.append(filename)
if recently_changed_filenames:
print ('The following files will be rebuilt due to recent changes: %s' %
recently_changed_filenames)
return recently_changed_filenames
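# Illustrative sketch (assumption: _insert_hash, defined earlier in this script,
# splices the MD5 hash between the filename and its extension, as the docstring
# examples like base.240933e7564bd72a4dde42ee23260c5f.html suggest). The
# hypothetical helper below reproduces that naming convention.
def _example_insert_hash(filepath, file_hash):
    stem, dot, extension = filepath.rpartition('.')
    if not dot:
        return '%s.%s' % (filepath, file_hash)
    return '%s.%s.%s' % (stem, file_hash, extension)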
def generate_build_tasks_to_build_directory(dirnames_dict, file_hashes):
"""This function queues up build tasks to build all files in source
directory if there is no existing staging directory. Otherwise, selectively
queue up build tasks to build recently changed files.
Args:
        dirnames_dict: dict(str, str). This dict should contain four keys,
with corresponding values as follows:
- 'dev_dir': the directory that contains source files to be built.
- 'compiled_js_dir': the directory that contains compiled js files
to be built.
- 'staging_dir': the directory that contains minified files waiting
for final copy process.
- 'out_dir': the final directory that contains built files with hash
inserted into filenames.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
source_dir = dirnames_dict['dev_dir']
compiled_js_dir = dirnames_dict['compiled_js_dir']
staging_dir = dirnames_dict['staging_dir']
out_dir = dirnames_dict['out_dir']
build_tasks = collections.deque()
if not os.path.isdir(staging_dir):
# If there is no staging dir, perform build process on all files.
print 'Creating new %s folder' % staging_dir
ensure_directory_exists(staging_dir)
build_tasks += generate_build_tasks_to_build_all_files_in_directory(
source_dir, staging_dir, file_hashes)
build_tasks += generate_build_tasks_to_build_all_files_in_directory(
compiled_js_dir, staging_dir, file_hashes)
else:
# If staging dir exists, rebuild all HTML and Python files.
file_extensions_to_always_rebuild = ('.html', '.py',)
print (
'Staging dir exists, re-building all %s files'
% str(file_extensions_to_always_rebuild))
filenames_to_always_rebuild = get_filepaths_by_extensions(
source_dir, file_extensions_to_always_rebuild)
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
source_dir, staging_dir, filenames_to_always_rebuild, file_hashes)
dev_dir_hashes = get_file_hashes(source_dir)
compiled_js_dir_hashes = get_file_hashes(compiled_js_dir)
source_hashes = {}
source_hashes.update(dev_dir_hashes)
source_hashes.update(compiled_js_dir_hashes)
# Clean up files in staging directory that cannot be found in file
# hashes dictionary.
_execute_tasks(generate_delete_tasks_to_remove_deleted_files(
source_hashes, staging_dir))
print 'Getting files that have changed between %s and %s' % (
source_dir, out_dir)
recently_changed_filenames = get_recently_changed_filenames(
dev_dir_hashes, out_dir)
if recently_changed_filenames:
print 'Re-building recently changed files at %s' % source_dir
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
source_dir, staging_dir, recently_changed_filenames,
file_hashes)
else:
print 'No changes detected. Using previously built files.'
print 'Getting files that have changed between %s and %s' % (
compiled_js_dir, out_dir)
recently_changed_filenames = get_recently_changed_filenames(
compiled_js_dir_hashes, out_dir)
if recently_changed_filenames:
print 'Re-building recently changed files at %s' % source_dir
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
compiled_js_dir, staging_dir, recently_changed_filenames,
file_hashes)
else:
print 'No changes detected. Using previously built files.'
return build_tasks
def _verify_filepath_hash(relative_filepath, file_hashes):
"""Ensure that hashes in filepaths match with the hash entries in hash
dict.
Args:
relative_filepath: str. Filepath that is relative from /build.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Raises:
ValueError: The hash dict is empty.
ValueError: Filepath has less than 2 partitions after splitting by '.'
delimiter.
ValueError: The filename does not contain hash.
KeyError: The filename's hash cannot be found in the hash dict.
"""
# Final filepath example:
# head/pages/base.240933e7564bd72a4dde42ee23260c5f.html.
if not file_hashes:
raise ValueError('Hash dict is empty')
filename_partitions = relative_filepath.split('.')
if len(filename_partitions) < 2:
raise ValueError('Filepath has less than 2 partitions after splitting')
hash_string_from_filename = filename_partitions[-2]
# Ensure hash string obtained from filename follows MD5 hash format.
if not re.search(r'([a-fA-F\d]{32})', relative_filepath):
raise ValueError(
'%s is expected to contain MD5 hash' % relative_filepath)
if hash_string_from_filename not in file_hashes.values():
raise KeyError(
'Hash from file named %s does not match hash dict values' %
relative_filepath)
def _verify_hashes(output_dirnames, file_hashes):
"""Verify a few metrics after build process finishes:
    1) The hashes in filenames belong to the hash dict.
2) hashes.js, third_party.min.css and third_party.min.js are built and
hashes are inserted.
Args:
output_dirnames: list(str). List of directory paths that contain
built files.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
"""
# Make sure that hashed file name matches with current hash dict.
for built_dir in output_dirnames:
for root, _, filenames in os.walk(built_dir):
for filename in filenames:
parent_dir = os.path.basename(root)
converted_filepath = os.path.join(
THIRD_PARTY_GENERATED_DEV_DIR, parent_dir, filename)
if hash_should_be_inserted(converted_filepath):
# Obtain the same filepath format as the hash dict's key.
relative_filepath = os.path.relpath(
os.path.join(root, filename), built_dir)
_verify_filepath_hash(relative_filepath, file_hashes)
hash_final_filename = _insert_hash(
HASHES_JS_FILENAME, file_hashes[HASHES_JS_FILENAME])
third_party_js_final_filename = _insert_hash(
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH,
file_hashes[MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH])
third_party_css_final_filename = _insert_hash(
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH,
file_hashes[MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH])
_ensure_files_exist([
os.path.join(ASSETS_OUT_DIR, hash_final_filename),
os.path.join(
THIRD_PARTY_GENERATED_OUT_DIR, third_party_js_final_filename),
os.path.join(
THIRD_PARTY_GENERATED_OUT_DIR, third_party_css_final_filename)])
def generate_build_directory():
"""Generates hashes for files. Minifies files and interpolates paths
in HTMLs to include hashes. Renames the files to include hashes and copies
them into build directory.
"""
print 'Building Oppia in production mode...'
# The keys for hashes are filepaths relative to the subfolders of the future
# /build folder. This is so that the replacing inside the HTML files works
# correctly.
hashes = dict()
build_tasks = collections.deque()
copy_tasks = collections.deque()
# Create hashes for all directories and files.
HASH_DIRS = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir'],
EXTENSIONS_DIRNAMES_TO_DIRPATHS['compiled_js_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['dev_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['compiled_js_dir'],
THIRD_PARTY_GENERATED_DEV_DIR]
for HASH_DIR in HASH_DIRS:
hashes.update(get_file_hashes(HASH_DIR))
# Save hashes as JSON and write the JSON into JS file
# to make the hashes available to the frontend.
ensure_directory_exists(HASHES_JS_FILEPATH)
with open(HASHES_JS_FILEPATH, 'w+') as hashes_js_file:
write_to_file_stream(
hashes_js_file, get_hashes_json_file_contents(hashes))
# Update hash dict with newly created hashes.js.
hashes.update({HASHES_JS_FILENAME: generate_md5_hash(HASHES_JS_FILEPATH)})
# Make sure /assets/hashes.js is available to the frontend.
_ensure_files_exist([HASHES_JS_FILEPATH])
# Build files in /extensions and copy them into staging directory.
build_tasks += generate_build_tasks_to_build_directory(
EXTENSIONS_DIRNAMES_TO_DIRPATHS, hashes)
# Minify all template files and copy them into staging directory.
build_tasks += generate_build_tasks_to_build_directory(
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS, hashes)
_execute_tasks(build_tasks)
# Copy all files from staging directory to production directory.
COPY_INPUT_DIRS = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['staging_dir'],
THIRD_PARTY_GENERATED_DEV_DIR]
COPY_OUTPUT_DIRS = [
ASSETS_OUT_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir'],
THIRD_PARTY_GENERATED_OUT_DIR]
assert len(COPY_INPUT_DIRS) == len(COPY_OUTPUT_DIRS)
for i in xrange(len(COPY_INPUT_DIRS)):
safe_delete_directory_tree(COPY_OUTPUT_DIRS[i])
copy_tasks += generate_copy_tasks_to_copy_from_source_to_target(
COPY_INPUT_DIRS[i], COPY_OUTPUT_DIRS[i], hashes)
_execute_tasks(copy_tasks)
_verify_hashes(COPY_OUTPUT_DIRS, hashes)
SOURCE_DIRS_FOR_ASSETS = [ASSETS_DEV_DIR, THIRD_PARTY_GENERATED_DEV_DIR]
OUTPUT_DIRS_FOR_ASSETS = [ASSETS_OUT_DIR, THIRD_PARTY_GENERATED_OUT_DIR]
_compare_file_count(SOURCE_DIRS_FOR_ASSETS, OUTPUT_DIRS_FOR_ASSETS)
SOURCE_DIRS_FOR_THIRD_PARTY = [THIRD_PARTY_GENERATED_DEV_DIR]
OUTPUT_DIRS_FOR_THIRD_PARTY = [THIRD_PARTY_GENERATED_OUT_DIR]
_compare_file_count(
SOURCE_DIRS_FOR_THIRD_PARTY, OUTPUT_DIRS_FOR_THIRD_PARTY)
SOURCE_DIRS_FOR_EXTENSIONS = [
EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir'],
EXTENSIONS_DIRNAMES_TO_DIRPATHS['compiled_js_dir']]
OUTPUT_DIRS_FOR_EXTENSIONS = [EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(SOURCE_DIRS_FOR_EXTENSIONS, OUTPUT_DIRS_FOR_EXTENSIONS)
SOURCE_DIRS_FOR_TEMPLATES = [
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['dev_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['compiled_js_dir']]
OUTPUT_DIRS_FOR_TEMPLATES = [
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(SOURCE_DIRS_FOR_TEMPLATES, OUTPUT_DIRS_FOR_TEMPLATES)
# Clean up un-hashed hashes.js.
safe_delete_file(HASHES_JS_FILEPATH)
print 'Build completed.'
def compile_typescript_files(project_dir):
"""Compiles typescript files to produce javascript files in
local_compiled_js folder.
Args:
project_dir: str. The project directory which contains the ts files
to be compiled.
"""
require_compiled_js_dir_to_be_valid()
safe_delete_directory_tree(COMPILED_JS_DIR)
print 'Compiling ts files...'
cmd = ['./node_modules/typescript/bin/tsc', '--project', project_dir]
subprocess.check_call(cmd)
def compile_typescript_files_continuously(project_dir):
"""Compiles typescript files continuously i.e enable a watcher which
monitors any changes in js files and recompiles them to ts files.
Args:
project_dir: str. The project directory which contains the ts files
to be compiled.
"""
kill_cmd = (
'kill `ps aux | grep "node_modules/typescript/bin/tsc --project . '
'--watch" | awk \'{print $2}\'`'
)
subprocess.call(kill_cmd, shell=True, stdout=subprocess.PIPE)
require_compiled_js_dir_to_be_valid()
safe_delete_directory_tree(COMPILED_JS_DIR)
print 'Compiling ts files in watch mode...'
cmd = [
'./node_modules/typescript/bin/tsc', '--project', project_dir,
'--watch']
with open('tsc_output_log.txt', 'w') as out:
subprocess.Popen(cmd, stdout=out)
while True:
with open(TSC_OUTPUT_LOG_FILEPATH, 'r') as f:
lines = f.readlines()
if len(lines):
# We are checking only the last line here since whenever
# typescript is done with initial compilation with or
# without errors, the last line will always read
# 'Found x errors. Watching for file changes'.
last_output = lines[len(lines) - 1]
if 'Watching for file changes' in last_output:
return
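# Illustrative sketch (not part of the original build script): the watch-mode
# helper above re-reads the tsc output log until the compiler prints its
# 'Watching for file changes' sentinel. The generic polling pattern, with a
# hypothetical log path and an explicit sleep between reads, looks like this:
def _example_wait_for_sentinel(log_path, sentinel, poll_seconds=1):
    import time
    while True:
        with open(log_path, 'r') as log_file:
            lines = log_file.readlines()
        if lines and sentinel in lines[-1]:
            return
        time.sleep(poll_seconds)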
def build():
"""The main method of this script.
Creates a third-party directory where all the JS and CSS dependencies are
built and stored. Depending on the options passed to the script, might also
minify third-party libraries and/or generate a build directory.
"""
parser = optparse.OptionParser()
parser.add_option(
'--prod_env', action='store_true', default=False, dest='prod_mode')
parser.add_option(
'--minify_third_party_libs_only', action='store_true', default=False,
dest='minify_third_party_libs_only')
parser.add_option(
'--enable_watcher', action='store_true', default=False)
options = parser.parse_args()[0]
# Regenerate /third_party/generated from scratch.
safe_delete_directory_tree(THIRD_PARTY_GENERATED_DEV_DIR)
build_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
if not options.enable_watcher:
compile_typescript_files('.')
else:
compile_typescript_files_continuously('.')
# If minify_third_party_libs_only is set to True, skips the rest of the
# build process once third party libs are minified.
if options.minify_third_party_libs_only and not options.prod_mode:
raise Exception(
'minify_third_party_libs_only should not be set in non-prod mode.')
if options.prod_mode:
build_using_webpack()
minify_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
generate_app_yaml()
if not options.minify_third_party_libs_only:
generate_build_directory()
# The 'no coverage' pragma is used as this line is un-testable. This is because
# it will only be called when build.py is used as a script.
if __name__ == '__main__': # pragma: no cover
build()
|
main.py
|
"""Main loop for bridge subsystem."""
from shared import config
from shared.controller import Controller
from bridge.bridge import Bridge
from flask import Flask, request, jsonify
from flask_htpasswd import HtPasswdAuth
from flask_cors import CORS
import threading
from shared.streamtologger import StreamToLogger
import logging
import sys
logging.basicConfig(
filename=config.LOG_DIR + "bridge.log",
# encoding='utf-8',
filemode='a', format='%(asctime)s %(levelname)s:%(message)s',
level=config.LOG_LEVEL)
logger = logging.getLogger("bridge")
werklog = logging.getLogger('werkzeug')
werklog.setLevel(logging.ERROR)
# redirect stdout and stderr to log file - do this before production
sys.stdout = StreamToLogger(logger,logging.INFO)
sys.stderr = StreamToLogger(logger,logging.ERROR)
def init_controller_obj():
# let's get this party started
controller_obj = Bridge()
return controller_obj
def program_loop(controller_obj):
try:
controller_obj.start()
except KeyboardInterrupt:
logging.info(f"{whoami} interrupted.")
controller_obj.stop()
except:
logging.exception('Got exception on main handler')
raise
whoami = "bridge"
controller_obj = init_controller_obj()
# threaded program_loop(controller_obj)
#
thread_obj = threading.Thread(target=program_loop, args=(controller_obj,), daemon=True)
thread_obj.start()
# Flask controller
#
# Create the server object
app = Flask(__name__)
#
# Configure basic auth with htpasswd file
# app.config['FLASK_HTPASSWD_PATH'] = config.HTPASSWD_FILE
# app.config['FLASK_SECRET'] = 'SECRETSECRETSECRET'
# app.config['FLASK_AUTH_ALL'] = True
# htpasswd = HtPasswdAuth(app)
#
# Serve CORS header
domain_list = []
for host in config.CONTROLLERS.values():
domain_list.append("http://" + host["server"] + ':' + str(host["port"]))
domain_list.append("http://" + host["altserv"] + ':' + str(host["port"]))
CORS(app,
# supports_credentials=True,
origins=domain_list)
@app.route("/cmd",methods = ['POST', 'GET'])
def cmd():
if request.method == 'GET':
order_obj = request.args.to_dict(flat=True)
else:
order_obj = request.get_json(force=True)
response = jsonify(controller_obj.act_on_order(order_obj))
# response.headers.add('Access-Control-Allow-Origin', '*')
return response
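# Illustrative note (not part of the original bridge code): the /cmd endpoint
# accepts the same order either as query parameters or as a JSON body, e.g.
#   GET  /cmd?cmd=status                  -> order_obj == {'cmd': 'status'}
#   POST /cmd with body {"cmd": "status"} -> order_obj == {'cmd': 'status'}
# The 'cmd'/'status' keys are hypothetical; the accepted orders are defined by
# Bridge.act_on_order().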
app.run(host="0.0.0.0", port=config.CONTROLLERS[whoami]["port"],
debug=config.DEBUG, use_reloader=False)
|
kanilogServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from kanilog.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kanilog'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kanilog.kanilogImpl import kanilog # noqa @IgnorePep8
impl_kanilog = kanilog(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
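# Illustrative usage sketch (not part of the generated server code): the custom
# encoder lets json.dumps() handle sets, frozensets and objects exposing a
# toJSONable() method, all of which the default encoder rejects.
def _example_encode_set():
    # Without cls=JSONObjectEncoder this call would raise a TypeError, because
    # the standard encoder cannot serialize sets.
    return json.dumps({'ids': set([1, 2, 3])}, cls=JSONObjectEncoder)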
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
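# Illustrative sketch (not part of the generated server code): behind a proxy
# chain, X-Forwarded-For carries a comma-separated list such as
# "client, proxy1, proxy2". getIPAddress() above trusts the first entry unless
# the service config sets dont_trust_x_ip_headers to 'true'.
def _example_client_ip_from_forwarded_for(header_value):
    return header_value.split(',')[0].strip()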
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kanilog'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kanilog.run_kanilog,
name='kanilog.run_kanilog',
types=[dict])
self.method_authentication['kanilog.run_kanilog'] = 'required' # noqa
self.rpc_service.add(impl_kanilog.status,
name='kanilog.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kanilog ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
test_logging.py
|
# Copyright 2001-2017 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2017 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok
from test import support
import textwrap
import time
import unittest
import warnings
import weakref
try:
import threading
# The following imports are needed only for tests which
# require threading
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
except ImportError:
threading = None
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
#Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
#Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
def test_issue27935(self):
fatal = logging.getLevelName('FATAL')
self.assertEqual(fatal, logging.FATAL)
def test_regression_29220(self):
"""See issue #29220 for more information."""
logging.addLevelName(logging.INFO, '')
self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
self.assertEqual(logging.getLevelName(logging.INFO), '')
self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
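# Illustrative note (not part of the test suite): registering the names above
# makes logging.getLevelName() round-trip the custom levels, e.g.
#   logging.addLevelName(VERBOSE, 'Verbose')
#   logging.getLevelName(VERBOSE)   -> 'Verbose'
# CustomLevelsAndFiltersTest.setUp() below performs exactly this registration
# for every entry in my_logging_levels.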
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only is 'Garrulous' still missing, but 'Sociable' and
# 'Taciturn' are now filtered out as well
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
def test_path_objects(self):
"""
Test that Path objects are accepted as filename arguments to handlers.
See Issue #27493.
"""
fd, fn = tempfile.mkstemp()
os.close(fd)
os.unlink(fn)
pfn = pathlib.Path(fn)
cases = (
(logging.FileHandler, (pfn, 'w')),
(logging.handlers.RotatingFileHandler, (pfn, 'a')),
(logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
)
if sys.platform in ('linux', 'darwin'):
cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
for cls, args in cases:
h = cls(*args)
self.assertTrue(os.path.exists(fn))
h.close()
os.unlink(fn)
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.reap_threads
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
if threading:
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
try:
asyncore.loop(poll_interval, map=self._map)
except OSError:
# On FreeBSD 8, closing the server repeatedly
# raises this error. We swallow it if the
# server has been closed.
if self.connected or self.accepting:
raise
def stop(self, timeout=None):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.close()
self._thread.join(timeout)
alive = self._thread.is_alive()
self._thread = None
if alive:
self.fail("join() timed out")
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
t.setDaemon(True)
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self, timeout=None):
"""
Tell the server thread to stop, and wait for it to do so.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.shutdown()
if self._thread is not None:
self._thread.join(timeout)
alive = self._thread.is_alive()
self._thread = None
if alive:
self.fail("join() timed out")
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
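# Route every do_GET/do_POST/... lookup to process_request, so a single
# callback handles all HTTP methods.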
def __getattr__(self, name, default=None):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
:param bind_and_activate: If True (the default), binds the server and starts it
listening. If False, you need to call
:meth:`server_bind` and :meth:`server_activate` at
some later time before calling :meth:`start`, so that
the server will set up the socket and listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
:param bind_and_activate: If True (the default), binds the server and
starts it listening. If False, you need to
call :meth:`server_bind` and
:meth:`server_activate` at some later time
before calling :meth:`start`, so that the server will
set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPHandlerTest(BaseTest):
TIMEOUT = 8.0
@support.reap_threads
def test_basic(self):
sockmap = {}
server = TestSMTPServer((support.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (support.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT) # 14314: don't wait forever
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
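# With a capacity of 10, each pass below buffers nine DEBUG records and the
# tenth triggers a flush: messages 4-13 on the first pass, 14-23 on the second.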
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
def test_flush_on_close(self):
"""
Test that the flush-on-close configuration works as expected.
"""
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
self.mem_logger.removeHandler(self.mem_hdlr)
# Default behaviour is to flush on close. Check that it happens.
self.mem_hdlr.close()
lines = [
('DEBUG', '1'),
('INFO', '2'),
]
self.assert_log_lines(lines)
# Now configure for flushing not to be done on close.
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr,
False)
self.mem_logger.addHandler(self.mem_hdlr)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.info(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.removeHandler(self.mem_hdlr)
self.mem_hdlr.close()
# assert that no new lines have been added
self.assert_log_lines(lines) # no change
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
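# The inline config text is passed through textwrap.dedent() and wrapped in a
# StringIO so that fileConfig() can parse it like an ordinary .ini file.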
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A config file with a subtle error (a misspelt stream name) should fail to load.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A config file with an obvious error (a reference to a missing formatter) should fail to load.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
if threading:
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
if self.server:
self.server.stop(2.0)
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
conn = request.connection
while True:
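# SocketHandler frames each record as a 4-byte big-endian length prefix
# followed by a pickled dict of the LogRecord's attributes.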
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
if self.server_exception:
self.skipTest(self.server_exception)
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop(2.0)
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
support.unlink(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
if threading:
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
if self.server:
self.server.stop(2.0)
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
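# DatagramHandler sends the same length-prefixed pickle as SocketHandler, but
# in a single UDP packet, so the 4-byte prefix is simply skipped here.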
slen = struct.pack('>L', 0) # length of prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
support.unlink(self.address)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
if threading:
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sl_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls((server.server_address[0], server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the server."""
try:
if self.server:
self.server.stop(2.0)
if self.sl_hdlr:
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
if self.server_exception:
self.skipTest(self.server_exception)
# The log message sent to the SysLogHandler is properly received.
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
@unittest.skipUnless(threading, 'Threading required for this test.')
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if threading and hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
support.unlink(self.address)
@unittest.skipUnless(support.IPV6_ENABLED,
'IPv6 support required for this test.')
@unittest.skipUnless(threading, 'Threading required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with IPv6 host."""
server_class = TestUDPServer
address = ('::1', 0)
def setUp(self):
self.server_class.address_family = socket.AF_INET6
super(IPv6SysLogHandlerTest, self).setUp()
def tearDown(self):
self.server_class.address_family = socket.AF_INET
super(IPv6SysLogHandlerTest, self).tearDown()
@unittest.skipUnless(threading, 'Threading required for this test.')
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
except:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
@support.reap_threads
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
sslctx = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
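# keycert.pem bundles a private key with a self-signed certificate, so it
# serves both as the server's certificate chain and as the client's trust root.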
sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop(2.0)
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assertTruesurvival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assertTruesurvival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# Historically (Python 2.x), a plain file object was treated as having no
# encoding; here an explicit utf-8 encoding is used.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
#Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
#Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
#Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
#See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
# confirm our assumption: no handlers are set on the py.warnings logger
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#config 7 does not define compiler.parser but defines compiler.lexer
#so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
#As config1 but with a filter added
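# A filter configured with just a 'name' key becomes logging.Filter(name),
# which passes only records logged by that logger or its descendants.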
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
#As config1 but using cfg:// references
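# cfg:// references are resolved against this same dictionary, so 'formatters'
# and 'handlers' below simply point back at the 'true_formatters' and
# 'handler_configs' entries.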
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A config with a subtle error (a misspelt stream name) should be rejected.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A config with a misspelt level name on a handler should be rejected.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A config with a misspelt level name on a logger should be rejected.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A config referencing a missing formatter should be rejected.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
#Same as test_config7_ok, but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
#Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
#Nothing will be output since the handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
#Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
@unittest.skipUnless(threading, 'listen() needs threading to work')
def setup_via_listener(self, text, verify=None):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
t.join(2.0)
if t.is_alive():
self.fail("join() timed out")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.reap_threads
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.reap_threads
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.reap_threads
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.apply_config(self.out_of_order)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_baseconfig(self):
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
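# DerivedLogRecord is a trivial LogRecord subclass; LogRecordFactoryTest installs it with
# logging.setLogRecordFactory() and uses a checking filter to verify that every emitted record is of that type.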
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = support.TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = support.TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
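# QueueListenerTest below is only defined when logging.handlers.QueueListener is available;
# it drives the listener with both queue.Queue and multiprocessing.Queue instances.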
if hasattr(logging.handlers, 'QueueListener'):
import multiprocessing
from unittest.mock import patch
class QueueListenerTest(BaseTest):
"""
Tests based on patch submitted for issue #27930. Ensure that
QueueListener handles all log messages.
"""
repeat = 20
@staticmethod
def setup_and_log(log_queue, ident):
"""
Creates a logger with a QueueHandler that logs to a queue read by a
QueueListener. Starts the listener, logs five messages, and stops
the listener.
"""
logger = logging.getLogger('test_logger_with_id_%s' % ident)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(handler)
listener = logging.handlers.QueueListener(log_queue)
listener.start()
logger.info('one')
logger.info('two')
logger.info('three')
logger.info('four')
logger.info('five')
listener.stop()
logger.removeHandler(handler)
handler.close()
@patch.object(logging.handlers.QueueListener, 'handle')
@support.reap_threads
def test_handle_called_with_queue_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = queue.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@support.requires_multiprocessing_queue
@patch.object(logging.handlers.QueueListener, 'handle')
@support.reap_threads
def test_handle_called_with_mp_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = multiprocessing.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
log_queue.close()
log_queue.join_thread()
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@staticmethod
def get_all_from_queue(log_queue):
try:
while True:
yield log_queue.get_nowait()
except queue.Empty:
return []
@support.requires_multiprocessing_queue
@support.reap_threads
def test_no_messages_in_queue_after_stop(self):
"""
Five messages are logged then the QueueListener is stopped. This
test then gets everything off the queue. Failure of this test
indicates that messages were not registered on the queue until
_after_ the QueueListener stopped.
"""
for i in range(self.repeat):
queue = multiprocessing.Queue()
self.setup_and_log(queue, '%s_%s' %(self.id(), i))
# time.sleep(1)
items = list(self.get_all_from_queue(queue))
queue.close()
queue.join_thread()
expected = [[], [logging.handlers.QueueListener._sentinel]]
self.assertIn(items, expected,
'Found unexpected messages in queue: %s' % (
[m.msg if isinstance(m, logging.LogRecord)
else m for m in items]))
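# Minimal fixed-offset UTC tzinfo; FormatterTest.test_time uses it to build a timezone-aware reference datetime.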
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime')
self.assertFalse(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='{')
self.assertFalse(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${asctime', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='$')
self.assertFalse(f.usesTime())
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
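# FakeHandler records, in call order, which handler methods (acquire/flush/close/release) are invoked;
# ShutdownTest uses it to verify the behaviour of logging.shutdown().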
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
# test the default value introduced in 3.7
# (Issue #28524)
logging.disable()
self.assertEqual(logging.root.manager.disable, logging.CRITICAL)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
@support.requires_type_collecting
def test_logging_at_shutdown(self):
# Issue #20037
code = """if 1:
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()"""
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
if threading:
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.level = self.original_logging_level
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='foo')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, 'foo')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
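# LoggerAdapterTest wraps the root logger (with a RecordingHandler attached) in a LoggerAdapter and checks
# that exception(), critical(), isEnabledFor() and hasHandlers() are delegated correctly.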
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
logger = logging.getLogger(name)
s = pickle.dumps(logger, proto)
unpickled = pickle.loads(s)
self.assertIs(unpickled, logger)
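# BaseFileTest creates a temporary log file in setUp and, via assertLogFile(), registers any rotated
# files it sees so tearDown can delete them.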
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
# If maxBytes is zero, rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
@support.requires_zlib
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
#print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
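# The loop below generates one test_compute_rollover_<when> method per rollover interval
# ('S', 'M', 'H', 'D', 'MIDNIGHT', 'W0') and attaches each to TimedRotatingFileHandlerTest via setattr().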
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
blacklist = {'logThreads', 'logMultiprocessing',
'logProcesses', 'currentframe',
'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
'Filterer', 'PlaceHolder', 'Manager', 'RootLogger',
'root', 'threading'}
support.check__all__(self, logging, blacklist=blacklist)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
tests = [
BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
RotatingFileHandlerTest, LastResortTest, LogRecordTest,
ExceptionTest, SysLogHandlerTest, IPv6SysLogHandlerTest, HTTPHandlerTest,
NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
MiscTestCase
]
if hasattr(logging.handlers, 'QueueListener'):
tests.append(QueueListenerTest)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
option_picker.py
import traceback
from Tkinter import *
from multiprocessing import Queue
from Queue import Empty  # the stdlib Queue module defines the Empty exception raised by multiprocessing queues
from tkColorChooser import askcolor
import json
from string import maketrans, lower
import re
import ttk
import pygame.sysfont
from options import Options
import logging
import urllib2
import webbrowser
import platform
import threading
from error_stuff import log_error
class OptionsMenu(object):
"""
These are the standard save and load options functions.
"""
def __init__(self):
self.options = Options()
# Our 'safe' list of fonts that should work in pygame
self.fonts = ['Andalus', 'Angsana New', 'AngsanaUPC', 'Arial', 'Arial Black', 'Browallia New', 'BrowalliaUPC',
'Comic Sans MS', 'Cordia New', 'CordiaUPC', 'Courier New', 'DFKai-SB', 'David', 'DilleniaUPC',
'Estrangelo Edessa', 'FrankRuehl', 'Franklin Gothic Medium', 'Gautami', 'Georgia', 'Impact',
'IrisUPC', 'JasmineUPC', 'KodchiangUPC', 'Latha', 'LilyUPC', 'Lucida Console', 'MV Boli',
'Mangal', 'Microsoft Sans Serif', 'Miriam', 'Miriam Fixed', 'Narkisim', 'Raavi', 'Rod', 'Shruti',
'SimHei', 'Simplified Arabic', 'Simplified Arabic Fixed', 'Sylfaen', 'Tahoma', 'Times New Roman',
'Traditional Arabic', 'Trebuchet MS', 'Tunga', 'Verdana']
self.game_versions = ['Rebirth', 'Afterbirth', 'Afterbirth+', 'Antibirth']
self.network_queue = Queue()
# Check which of these fonts are actually installed on the system and drop any missing ones from the list
try:
valid_pygame_fonts = [lower(x.replace(" ", "")) for x in self.fonts]
system_fonts = pygame.sysfont.get_fonts()
to_delete = []
for index, font in enumerate(valid_pygame_fonts):
if font not in system_fonts:
to_delete += [index]
for index in to_delete[::-1]:
del self.fonts[index]
except Exception:
log_error("There may have been an error detecting system fonts.\n" + traceback.format_exc())
pretty_name_map = {"read_from_server": "Watch Someone Else",
"write_to_server": "Let Others Watch Me",
"twitch_name": "Their Twitch Name",
"bold_font": "Bold",
"blck_cndl_mode": "BLCK CNDL mode",
"custom_title_enabled": "Change Window Title",
"log_file_check_seconds": "Check log file every"}
label_after_text = {"message_duration":"second(s)",
"framerate_limit":"fps",
"log_file_check_seconds": "second(s)"}
connection_labels = {"starting":"Connecting to server for player list...",
"done": "Connecting to server for player list... Done",
"fail": "Connecting to server for player list... Failed"}
def pretty_name(self, s):
# Change from a var name to something you'd show the users
if self.pretty_name_map.has_key(s):
return self.pretty_name_map.get(s)
return " ".join(s.split("_")).title()
def color_callback(self, source):
# Open a color picker, then update the option and the button's background/foreground to the chosen color
nums, hex_color = askcolor(color=getattr(self.options, source), title="Color Chooser")
if hex_color:
opposite = self.opposite_color(hex_color)
setattr(self.options, source, hex_color.upper())
self.buttons[source].configure(bg=hex_color, fg=opposite)
def checkbox_callback(self):
# Just for the "show decription" checkbox -- to disable the message duration entry
if not self.checks.get("show_description").get():
self.entries["message_duration"].configure(state=DISABLED)
else:
self.entries["message_duration"].configure(state=NORMAL)
# Disable custom message if we don't have to show it
if not self.checks.get("show_status_message").get():
self.entries["status_message"].configure(state=DISABLED)
else:
self.entries["status_message"].configure(state=NORMAL)
# Just for the "Custom Title Enabled" checkbox -- to disable the "Custom Title" entry
if not self.checks.get("custom_title_enabled").get():
self.entries["custom_title"].configure(state=DISABLED)
else:
self.entries["custom_title"].configure(state=NORMAL)
# Writing to server occurs when state changes, so enable read delay if we are reading
if self.checks.get("read_from_server").get():
self.entries["read_delay"].grid()
self.entries["twitch_name"].grid()
self.labels["read_delay"].grid()
self.labels["twitch_name"].grid()
else:
self.entries["read_delay"].grid_remove()
self.entries["twitch_name"].grid_remove()
self.labels["read_delay"].grid_remove()
self.labels["twitch_name"].grid_remove()
self.labels["server_connect_label"].config(text="")
if self.checks.get("change_server").get():
self.entries["trackerserver_url"].grid()
self.labels["trackerserver_url"].grid()
else:
self.entries["trackerserver_url"].grid_remove()
self.labels["trackerserver_url"].grid_remove()
# Disable authkey if we don't write to server
if self.checks.get("write_to_server").get():
self.entries["trackerserver_authkey"].grid()
self.labels["trackerserver_authkey"].grid()
self.buttons["authkey_button"].grid()
else:
self.entries["trackerserver_authkey"].grid_remove()
self.labels["trackerserver_authkey"].grid_remove()
self.buttons["authkey_button"].grid_remove()
def read_callback(self):
if self.checks.get("read_from_server").get():
self.checks.get("write_to_server").set(0)
self.labels["server_connect_label"].config(text=self.connection_labels["starting"])
t = threading.Thread(target=self.get_server_userlist_and_enqueue)
t.start()
self.checkbox_callback()
def write_callback(self):
if self.checks.get("write_to_server").get():
self.checks.get("read_from_server").set(0)
self.checkbox_callback()
def save_callback(self):
# Callback for the "save" option -- rejiggers options and saves to options.json, then quits
for key, value in self.entries.iteritems():
if key in self.integer_keys:
# Cast this as a float first to avoid errors if the user puts a value like 1.0 in an integer option
setattr(self.options, key, int(float(value.get())))
elif key in self.float_keys:
val = float(value.get())
setattr(self.options, key, val)
elif hasattr(value, "get"):
setattr(self.options, key, value.get())
for key, value in self.checks.iteritems():
setattr(self.options, key, True if value.get() else False)
self.root.destroy()
def seconds_to_text(self, seconds):
if seconds < 60:
return str(seconds) + " second" + ("s" if seconds > 1 else "")
minutes = seconds / 60
if minutes < 60:
return str(minutes) + " minute" + ("s" if minutes > 1 else "")
hours = minutes / 60
if hours < 24:
return str(hours) + " hour" + ("s" if hours > 1 else "")
days = hours / 24
return str(days) + " day" + ("s" if days > 1 else "")
def get_server_userlist_and_enqueue(self):
try:
url = self.entries['trackerserver_url'].get() + "/tracker/api/userlist/"
json_state = urllib2.urlopen(url).read()
users = json.loads(json_state)
success = True
except Exception:
log_error("Problem getting userlist from tracker server\n" + traceback.format_exc())
users = []
success = False
network_result = {"users": users, "success": success}
self.network_queue.put(network_result)
def get_server_twitch_client_id(self):
try:
url = self.entries['trackerserver_url'].get() + "/tracker/api/twitchclientid/"
return urllib2.urlopen(url).read()
except Exception:
log_error("Couldn't get twitch client id from tracker server\n" + traceback.format_exc())
return None
def process_network_results(self):
while self.network_queue.qsize():
try:
network_result = self.network_queue.get(0)
users_combobox_list = []
for user in network_result["users"]:
formatted_time_ago = self.seconds_to_text(user["seconds"])
list_entry = user["name"] + " (updated " + formatted_time_ago + " ago)"
users_combobox_list.append(list_entry)
self.entries['twitch_name']['values'] = users_combobox_list
label = "done" if network_result["success"] else "fail"
self.labels["server_connect_label"].config(text=self.connection_labels[label])
except Empty:
pass
self.root.after(100, self.process_network_results)
def trim_name(self, event):
name = self.entries['twitch_name'].get()
name = name.partition(" (")[0]
self.entries['twitch_name'].set(name)
# From: http://code.activestate.com/recipes/527747-invert-css-hex-colors/
def opposite_color(self, color):
# Get the opposite color of a hex color, just to make text on buttons readable
color = color.lower()
table = maketrans('0123456789abcdef', 'fedcba9876543210')
return str(color).translate(table).upper()
# From: http://stackoverflow.com/questions/4140437/interactively-validating-entry-widget-content-in-tkinter
def ValidateNumeric(self, d, i, P, s, S, v, V, W):
# This validation is a bit janky: a regex check on P (the value the entry would have after the modification)
return P == "" or re.search("^\d+(\.\d*)?$", P) is not None
def run(self):
# Create root
self.root = Tk()
self.root.attributes("-topmost", True)
self.root.wm_title("Item Tracker Options")
self.root.resizable(False, False)
self.root.iconbitmap(default='options.ico')
# Generate numeric options by looping over option types
self.integer_keys = ["message_duration", "framerate_limit", "read_delay"]
self.float_keys = ["size_multiplier", "log_file_check_seconds"]
self.entries = {}
self.labels = {}
self.checks = {}
self.buttons = {}
# Draw the "Text Options" box
text_options_frame = LabelFrame(self.root, text="Text Options", padx=20, pady=20)
text_options_frame.grid(row=0, column=0, padx=5, pady=5)
validate_numeric_field = (self.root.register(self.ValidateNumeric), '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
next_row = 0
for index, opt in enumerate(["message_duration"]):
Label(text_options_frame, text=self.pretty_name(opt)).grid(row=next_row)
self.entries[opt] = Entry(text_options_frame, validate="key", validatecommand=validate_numeric_field)
self.entries[opt].grid(row=next_row, column=1)
self.entries[opt].insert(0, getattr(self.options, opt))
if opt in self.label_after_text:
Label(text_options_frame, text=self.label_after_text[opt]).grid(row=next_row, column=2)
next_row += 1
for index, opt in enumerate(["show_font"]):
Label(text_options_frame, text=self.pretty_name(opt)).grid(row=next_row)
initialfont = StringVar()
initialfont.set(getattr(self.options, opt))
self.entries[opt] = ttk.Combobox(text_options_frame, values=sorted(self.fonts), textvariable=initialfont, state='readonly')
self.entries[opt].grid(row=next_row, column=1)
for index, opt in enumerate(["bold_font"]):
self.checks[opt] = IntVar()
c = Checkbutton(text_options_frame, text=self.pretty_name(opt), variable=self.checks[opt])
c.grid(row=next_row, column=2)
next_row += 1
if getattr(self.options, opt):
c.select()
for index, opt in enumerate(["status_message"]):
Label(text_options_frame, text=self.pretty_name(opt)).grid(row=next_row)
self.entries[opt] = Entry(text_options_frame)
self.entries[opt].grid(row=next_row, column=1)
self.entries[opt].insert(0, getattr(self.options, opt))
next_row += 1
text_checkboxes = ["show_description", "show_status_message", "word_wrap"]
for index, opt in enumerate(text_checkboxes):
self.checks[opt] = IntVar()
c = Checkbutton(text_options_frame, text=self.pretty_name(opt), variable=self.checks[opt])
c.grid(row=len(text_checkboxes) + 1 + index / 2, column=index % 2) # 2 checkboxes per row
if getattr(self.options, opt):
c.select()
# Disable letting the user set the message duration if the show description option is disabled
if opt == "show_description" or opt == "show_status_message":
c.configure(command=self.checkbox_callback)
# Draw the other options box
display_options_frame = LabelFrame(self.root, text="", padx=20, pady=20)
display_options_frame.grid(row=1, column=0, padx=5, pady=5)
next_row = 0
for index, opt in enumerate(["game_version"]):
Label(display_options_frame, text=self.pretty_name(opt)).grid(row=next_row)
initialversion = StringVar()
initialversion.set(getattr(self.options, opt))
self.entries[opt] = ttk.Combobox(display_options_frame, values=self.game_versions, textvariable=initialversion, state='readonly')
self.entries[opt].grid(row=next_row, column=1)
next_row += 1
for index, opt in enumerate(["framerate_limit", "log_file_check_seconds", "size_multiplier"]):
Label(display_options_frame, text=self.pretty_name(opt)).grid(row=next_row)
self.entries[opt] = Entry(display_options_frame, validate="key", validatecommand=validate_numeric_field)
self.entries[opt].grid(row=next_row, column=1)
self.entries[opt].insert(0, getattr(self.options, opt))
if opt in self.label_after_text:
Label(display_options_frame, text=self.label_after_text[opt]).grid(row=next_row, column=2)
next_row += 1
# Generate text options by looping over option types
for index, opt in enumerate(["item_details_link"]):
Label(display_options_frame, text=self.pretty_name(opt)).grid(row=next_row)
self.entries[opt] = Entry(display_options_frame)
self.entries[opt].grid(row=next_row, column=1)
self.entries[opt].insert(0, getattr(self.options, opt))
next_row += 1
# Generate buttons by looping over option types
for index, opt in enumerate(["background_color", "text_color"]):
self.buttons[opt] = Button(
display_options_frame,
text=self.pretty_name(opt),
bg=getattr(self.options, opt),
fg=self.opposite_color(getattr(self.options, opt)),
command=lambda opt=opt: self.color_callback(opt)
)
self.buttons[opt].grid(row=len(self.entries), column=index)
# Generate checkboxes, with special exception for show_description for message duration
for index, opt in enumerate(
["enable_mouseover", "show_floors", "show_rerolled_items", "show_health_ups",
"show_space_items", "show_blind_icon", "make_items_glow", "blck_cndl_mode",
"check_for_updates", "custom_title_enabled"]):
self.checks[opt] = IntVar()
c = Checkbutton(display_options_frame, text=self.pretty_name(opt), variable=self.checks[opt])
c.grid(row=len(self.entries) + 1 + index / 2, column=index % 2) # 2 checkboxes per row
if getattr(self.options, opt):
c.select()
if opt == "custom_title_enabled":
c.configure(command=self.checkbox_callback)
next_row += len(self.entries) / 2 + 1
# Generate label for custom title
Label(display_options_frame, text=self.pretty_name("custom_title")).grid(row=next_row)
self.entries["custom_title"] = Entry(display_options_frame)
self.entries["custom_title"].grid(row=next_row, column=1)
self.entries["custom_title"].insert(0, getattr(self.options, "custom_title"))
next_row += 1
# Draw the "Tournament Settings" box
tournament_settings_frame = LabelFrame(self.root, text="Tournament Settings", padx=20, pady=20)
tournament_settings_frame.grid(row=0, column=1, rowspan=2, sticky=N)
next_row = 0
for index, opt in enumerate(["change_server"]):
self.checks[opt] = IntVar()
c = Checkbutton(tournament_settings_frame, text=self.pretty_name(opt), variable=self.checks[opt], indicatoron=False)
c.grid(row=next_row, column=0, pady=2)
c.configure(command=self.checkbox_callback)
if getattr(self.options, opt, False):
c.select()
next_row += 1
# Generate text options by looping over option types
for index, opt in enumerate(["trackerserver_url"]):
self.labels[opt] = Label(tournament_settings_frame, text=self.pretty_name(opt))
self.labels[opt].grid(row=next_row, pady=2)
self.entries[opt] = Entry(tournament_settings_frame)
self.entries[opt].grid(row=next_row, column=1, pady=2)
self.entries[opt].insert(0, getattr(self.options, opt, ""))
next_row += 1
paddings = {"read_from_server": 5, "write_to_server": 120}
callbacks = {"read_from_server":self.read_callback, "write_to_server":self.write_callback}
for index, opt in enumerate(["read_from_server", "write_to_server"]):
self.checks[opt] = IntVar()
c = Checkbutton(tournament_settings_frame, text=self.pretty_name(opt), variable=self.checks[opt], indicatoron=False)
c.grid(row=next_row, column=index, pady=2, padx=paddings[opt])
c.configure(command=callbacks[opt])
if getattr(self.options, opt, False):
c.select()
next_row += 1
for index, opt in enumerate(["server_connect_label"]):
self.labels[opt] = Label(self.root, text="", width=len(self.connection_labels["fail"]))
self.labels[opt].grid(row=next_row, pady=2, columnspan=2, in_=tournament_settings_frame)
next_row += 1
for index, opt in enumerate(["twitch_name"]):
self.labels[opt] = Label(tournament_settings_frame, text=self.pretty_name(opt))
self.labels[opt].grid(row=next_row, pady=2)
self.entries[opt] = ttk.Combobox(tournament_settings_frame, width=40)
self.entries[opt].set(getattr(self.options, opt, ""))
self.entries[opt].bind("<<ComboboxSelected>>", self.trim_name)
self.entries[opt].grid(row=next_row, column=1)
next_row += 1
# Generate text options by looping over option types
for index, opt in enumerate(["read_delay", "trackerserver_authkey"]):
self.labels[opt] = Label(tournament_settings_frame, text=self.pretty_name(opt))
self.labels[opt].grid(row=next_row, pady=2)
self.entries[opt] = Entry(tournament_settings_frame)
self.entries[opt].grid(row=next_row, column=1, pady=2)
self.entries[opt].insert(0, getattr(self.options, opt, ""))
next_row += 1
def authkey_fn():
self.entries["trackerserver_authkey"].delete(0, last=END)
twitch_client_id = self.get_server_twitch_client_id()
if twitch_client_id is not None:
webbrowser.open("https://api.twitch.tv/kraken/oauth2/authorize?response_type=token&client_id=" + twitch_client_id + "&redirect_uri=" +
self.entries['trackerserver_url'].get() + "/tracker/setup&scope=", autoraise=True)
else:
# TODO: show an error
pass
self.buttons["authkey_button"] = Button(
tournament_settings_frame,
text="Get an authkey",
command=authkey_fn
)
self.buttons["authkey_button"].grid(row=next_row, column=1, pady=5)
# Check for coherency in options with priority to read
self.read_callback()
# Disable some textboxes if needed
self.checkbox_callback()
buttonframe = LabelFrame(self.root, bd=0, padx=5, pady=5)
buttonframe.grid(row=2, column=1)
# Save and cancel buttons
save = Button(
buttonframe,
text="Save",
command=self.save_callback
)
save.grid(row=0, column=0, padx=5)
cancel = Button(
buttonframe,
text="Cancel",
command=self.root.destroy
)
cancel.grid(row=0, column=1, padx=5)
# We're going to jump through a lot of hoops so we can position the options window on top of the tracker...
# ... WITHOUT going off the edge of the screen
# First we start out placing ourselves at the tracker's position
x_pos = getattr(self.options, "x_position")
y_pos = getattr(self.options, "y_position")
# Now we make ourselves invisible and fullscreen (this is a hack to measure the size and position of the monitor)
# We can't use the "screenwidth" and "screenheight" functions because they only give info on the primary display!
self.root.geometry('+%d+%d' % (x_pos, y_pos))
self.root.attributes("-alpha", 00)
if platform.system() == "Windows":
self.root.state("zoomed")
self.root.update()
else:
self.root.attributes("-fullscreen", True)
# For some reason using 'update' here affects the actual window height we want to get later
self.root.update_idletasks()
# Our current width and height are now our display's width and height
screen_width = self.root.winfo_width()
screen_height = self.root.winfo_height()
# Get the upper left corner of the monitor
origin_x = self.root.winfo_x()
origin_y = self.root.winfo_y()
# Now we get out of invisible fullscreen mode
self.root.attributes("-alpha", 0xFF)
if platform.system() == "Windows":
self.root.state("normal")
else:
self.root.attributes("-fullscreen", False)
self.root.update()
# Here's the actual size of the window we're drawing
window_width = self.root.winfo_width()
window_height = self.root.winfo_height()
# Now we can make sure we don't go off the sides
max_x = origin_x + screen_width - window_width - 50
max_y = origin_y + screen_height - window_height - 50
x_pos = min(x_pos, max_x)
y_pos = min(y_pos, max_y)
# Clamp origin after clamping the other side, so that if our window is too big we lose the bottom/right instead of top/left
x_pos = max(x_pos, origin_x)
y_pos = max(y_pos, origin_y)
self.root.geometry('+%d+%d' % (x_pos, y_pos))
self.root.update()
self.root.focus_force()
# We're polling this queue for network results 10 times per second. This avoids blocking the main thread when we talk to the server
self.root.after(100, self.process_network_results)
# Start the main loop
mainloop()
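# A minimal, standalone sketch (not called anywhere) of the monitor-measurement trick used in run():
# make the window invisible and maximized/fullscreen, read its geometry to learn the size and origin
# of the monitor it actually sits on, then restore it. It only assumes a Tk root created by the caller
# and the value of platform.system(); everything else is standard Tkinter.
def _measure_current_monitor(root, platform_system):
    root.attributes("-alpha", 0)              # invisible while measuring
    if platform_system == "Windows":
        root.state("zoomed")                  # maximize onto the monitor the window is on
    else:
        root.attributes("-fullscreen", True)
    root.update_idletasks()                   # let the geometry settle without a full update()
    width, height = root.winfo_width(), root.winfo_height()
    origin_x, origin_y = root.winfo_x(), root.winfo_y()
    root.attributes("-alpha", 1.0)            # restore visibility and the normal window state
    if platform_system == "Windows":
        root.state("normal")
    else:
        root.attributes("-fullscreen", False)
    return origin_x, origin_y, width, height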
|
futu_broker_hk.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Futu, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rqalpha.interface import AbstractBroker
from rqalpha.const import DEFAULT_ACCOUNT_TYPE
from rqalpha.events import EVENT, Event
from rqalpha.model.order import *
from rqalpha.model.base_position import Positions
from rqalpha.model.portfolio import Portfolio
from rqalpha.model.trade import *
from rqalpha.utils.i18n import gettext as _
import six
from .futu_utils import *
from time import sleep
from threading import Thread
from futuquant import OpenHKTradeContext
class FUTUBrokerHK(AbstractBroker):
"""
FUTUBrokerHK connects rqalpha to Futu HK simulated and real trading.
Design notes:
1. The account's starting cash must be set in the rqalpha config (config.base.stock_starting_cash).
It is not synchronized with the Futu account: this keeps long-running return metrics stable and limits how much of the real Futu account balance a strategy script can tie up.
2. Positions are synchronized from the Futu account once at initialization; afterwards state is maintained entirely inside rqalpha, so the positions recorded by the strategy may differ from the user's actual Futu account.
3. After an order is placed or cancelled, the script periodically checks its status in the Futu environment and publishes the corresponding events, so there may be some delay.
"""
def __init__(self, env, mod_config):
self._env = env
self._mod_config = mod_config
self._portfolio = None
self._open_order = []
self._env.event_bus.add_listener(EVENT.PRE_BEFORE_TRADING, self._pre_before_trading)
self._env.event_bus.add_listener(EVENT.PRE_AFTER_TRADING, self._pre_after_trading)
# Create the Futu API trade context and set its parameters
self._trade_context = OpenHKTradeContext(self._mod_config.api_svr.ip, self._mod_config.api_svr.port)
self._trade_envtype = 1  # Futu trading envtype: 0 = real trading, 1 = paper (simulated) trading
if IsRuntype_RealTrade():
self._trade_envtype = 0
thread_order_check = Thread(target=self._thread_order_check)
thread_order_check.setDaemon(True)
thread_order_check.start()
def get_portfolio(self):
"""
Get the portfolio. Called when the system initializes to obtain the portfolio, including account information, net value, units, etc.
:return: Portfolio
"""
if self._portfolio is not None:
return self._portfolio
self._portfolio = self._init_portfolio()
if not self._portfolio._accounts:
raise RuntimeError("accout config error")
return self._portfolio
def submit_order(self, order):
"""
Submit an order. In the current version, RQAlpha creates the :class:`~Order` object and submits it to the Broker through this interface.
TBD: should the Broker create the Order and return it instead?
"""
print("FUTUBrokerHK.submit_order:{}".format(order))
if order.type == ORDER_TYPE.MARKET:
raise RuntimeError("submit_order not support ORDER_TYPE.MARKET")
account = self._get_account(order.order_book_id)
self._env.event_bus.publish_event(Event(EVENT.ORDER_PENDING_NEW, account=account, order=order))
order.active()
# Send the order request through the Futu API
futu_order_side = 0 if order.side == SIDE.BUY else 1
futu_order_type = 0  # HK enhanced limit order
ret_code, ret_data = self._trade_context.place_order(order.price, order.quantity, order.order_book_id,
futu_order_side, futu_order_type, self._trade_envtype)
# Publish the corresponding event
if ret_code != 0:
order.mark_rejected("futu api req err:{} ".format(ret_code))
self._env.event_bus.publish_event(Event(EVENT.ORDER_CREATION_REJECT, account=account, order=order))
else:
futu_order_id = ret_data.loc[0, 'orderid']
self._open_order.append((futu_order_id, order))
self._env.event_bus.publish_event(Event(EVENT.ORDER_CREATION_PASS, account=account, order=order))
sleep(0.1)
self._check_open_orders(futu_order_id)
def cancel_order(self, order):
"""
Cancel an order.
:param order: the order to cancel
:type order: :class:`~Order`
"""
account = self._get_account(order.order_book_id)
futu_order_id = self._get_futu_order_id(order)
if futu_order_id is None:
return
# Check the order status once immediately
self._check_open_orders(futu_order_id)
if order.is_final():
return
self._env.event_bus.publish_event(Event(EVENT.ORDER_PENDING_CANCEL, account=account, order=order))
ret_code, ret_data = self._trade_context.set_order_status(0, futu_order_id, self._trade_envtype)  # 0 = cancel
if ret_code != 0:
self._env.event_bus.publish_event(Event(EVENT.ORDER_CANCELLATION_REJECT, account=account, order=order))
else:
sleep(0.1)
self._check_open_orders(futu_order_id)  # after submitting the request, immediately check the status once more
def get_open_orders(self, order_book_id=None):
"""
[Required]
Get the currently open (unfilled) orders.
:return: list[:class:`~Order`]
"""
if order_book_id is None:
return [order for __, order in self._open_order]
else:
return [order for __, order in self._open_order if order.order_book_id == order_book_id]
def _pre_before_trading(self, event):
print("broker before_trading")
def _pre_after_trading(self, event):
# Clear any unfinished orders at market close
for __, order in self._open_order:
order.mark_rejected(_(u"Order Rejected: {order_book_id} can not match. Market close.").format(
order_book_id=order.order_book_id
))
account = self._env.get_account(order.order_book_id)
self._env.event_bus.publish_event(Event(EVENT.ORDER_UNSOLICITED_UPDATE, account=account, order=order))
self._open_order = []
print("broker after_trading")
def _check_open_orders(self, futu_order_id=None):
if len(self._open_order) == 0:
return
ret_code, pd_data = self._trade_context.order_list_query('', self._trade_envtype)
if ret_code != 0:
return
ft_orders = []
if futu_order_id is not None:
ft_orders.append(futu_order_id)
else:
for (fid, __) in self._open_order:
ft_orders.append(fid)
for fid in ft_orders:
pd_find = pd_data[pd_data.orderid == fid]
if len(pd_find) != 1:
continue
order = self._get_order_by_futu_id(fid)
if order is None:
continue
account = self._get_account(order.order_book_id)
ct_amount = 0  # futures only: futures distinguish closing today's positions from older positions
price = order.avg_price  # average price across multiple partial fills
trade = Trade.__from_create__(
order_id=order.order_id,
price=price,
amount=0,
side=order.side,
position_effect=order.position_effect,
order_book_id=order.order_book_id,
frozen_price=order.frozen_price,
close_today_amount=ct_amount,
commission=0.,
tax=0., trade_id=None
)
trade._commission = 0
trade._tax = 0
row = pd_find.iloc[0]
ft_status = int(row['status'])
if ft_status == 2 or ft_status == 3:  # partially filled | fully filled
qty_deal_last = order.quantity - order.unfilled_quantity
qty_deal_new = int(row['dealt_qty'])
if qty_deal_last == qty_deal_new:  # filled quantity unchanged since the last check
continue
trade._amount = qty_deal_new - qty_deal_last
order.fill(trade)
self._env.event_bus.publish_event(Event(EVENT.TRADE, account=account, trade=trade, order=order))
if ft_status == 3:
self._remove_open_order_by_futu_id(fid)
elif ft_status == 5:  # order placement failed
self._env.event_bus.publish_event(Event(EVENT.ORDER_CREATION_REJECT, account=account, order=order))
self._remove_open_order_by_futu_id(fid)
elif ft_status == 6:  # 6 = cancelled
order.mark_cancelled(_(u"{order_id} order has been cancelled by user.").format(order_id=order.order_id))
self._env.event_bus.publish_event(Event(EVENT.ORDER_CANCELLATION_PASS, account=account, order=order))
self._remove_open_order_by_futu_id(fid)
elif ft_status == 4 or ft_status == 7:  # 4 = expired, 7 = deleted
reason = _(u"Order Cancelled: code = {order_book_id} ft_status = {ft_status} ").format(
order_book_id=order.order_book_id, ft_status=ft_status)
order.mark_rejected(reason)
self._env.event_bus.publish_event(Event(EVENT.ORDER_CREATION_REJECT, account=account, order=order))
self._remove_open_order_by_futu_id(fid)
else:
pass  # 8 = waiting for market open, 21 = sent locally, 22 = sent locally but the server reported order failure (no order created), 23 = sent locally, timed out waiting for the server response
def _get_futu_positions(self, env):
StockPosition = env.get_position_model(DEFAULT_ACCOUNT_TYPE.STOCK.name)
positions = Positions(StockPosition)
ret, pd_data = self._trade_context.position_list_query(self._trade_envtype)
if ret != 0:
return None
for i in range(len(pd_data)):
row = pd_data.iloc[i]
code_str = str(row['code'])
pos_state = {}
pos_state['order_book_id'] = code_str
pos_state['quantity'] = int(row['qty'])
pos_state['avg_price'] = float(row['cost_price'])
pos_state['non_closable'] = 0
pos_state['frozen'] = int(row['qty']) - int(row['can_sell_qty'])
pos_state['transaction_cost'] = 0
item = positions.get_or_create(code_str)
item.set_state(pos_state)
return positions
def _init_portfolio(self):
accounts = {}
config = self._env.config
start_date = config.base.start_date
total_cash = 0
for account_type, stock_starting_cash in six.iteritems(config.base.accounts):
if account_type == DEFAULT_ACCOUNT_TYPE.STOCK.name:
# stock_starting_cash = config.base.accounts
if stock_starting_cash == 0:
raise RuntimeError(_(u"stock starting cash can not be 0, using `--stock-starting-cash 1000`"))
all_positons = self._get_futu_positions(self._env)
if all_positons is None:
raise RuntimeError("_init_portfolio fail")
StockAccount = self._env.get_account_model(DEFAULT_ACCOUNT_TYPE.STOCK.name)
accounts[DEFAULT_ACCOUNT_TYPE.STOCK.name] = StockAccount(stock_starting_cash, all_positons)
total_cash += stock_starting_cash
else:
raise NotImplementedError
return Portfolio(start_date, 1, total_cash, accounts)
def _get_account(self, order_book_id):
# account = self._env.get_account(order_book_id)
# for debug
account = self._env.portfolio.accounts[DEFAULT_ACCOUNT_TYPE.STOCK.name]
return account
def _get_futu_order_id(self, order):
for fid, order_item in self._open_order:
if order_item is order:
return fid
return None
def _get_order_by_futu_id(self, futu_order_id):
for fid, order_item in self._open_order:
if futu_order_id == fid:
return order_item
return None
def _remove_open_order_by_futu_id(self, futu_order_id):
order = self._get_order_by_futu_id(futu_order_id)
if order is not None:
self._open_order.remove((futu_order_id, order))
def _thread_order_check(self):
while True:
if len(self._open_order) == 0:
print("broker:_thread_order_check None")
sleep(5)
else:
self._check_open_orders()
sleep(1)
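# For reference, a hedged summary of the Futu order status codes handled in _check_open_orders,
# taken from the inline comments above; this is illustrative only, not an authoritative list of the
# Futu API's status values.
_FUTU_ORDER_STATUS_NOTES = {
    2: "partially filled",
    3: "fully filled",
    4: "expired",
    5: "order placement failed",
    6: "cancelled",
    7: "deleted",
    8: "waiting for market open",
    21: "sent locally",
    22: "sent locally, server reported order failure (no order created)",
    23: "sent locally, timed out waiting for server response",
}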
|
planificador_merge_threading.py
|
import Sensor_merge_threading
import random
import time
import numpy
import sys
import threading
def alarmas(lista, lista_tiempos_promedios, num_process):
tiempo_inicio = time.time()
while (time.time()-tiempo_inicio < 5):
for i in range (0,num_process):
if(lista[i] > 0):
tiempo = time.time() - lista[i]
lista[i]= -1
lista_tiempos_promedios.append(tiempo)
if __name__=="__main__":
num_process = 1
tamano_de_matriz = 10
if len(sys.argv) > 2 and sys.argv[2].isdigit():
temp1 = int(sys.argv[2])
if temp1 > 10 and temp1 < 100000:
tamano_de_matriz = temp1  # number of values per sensor
if len(sys.argv) > 1 and sys.argv[1].isdigit():
temp2 = int(sys.argv[1])
if temp2 > 1 and temp2 < 100:
num_process = temp2  # number of sensors
tiempo_limite = 5  # how long the alarm thread runs
lista = [-1 for i in range(num_process)]  # initialize every slot with -1
lista_tiempos_promedios = []
process_list = []
for p in range(num_process):
proc = Sensor_merge_threading.Sensor(lista, p, tamano_de_matriz)
process_list.append(proc)
for x in range(num_process):
process_list[x].start()
alarmas_thread = threading.Thread(target=alarmas, args=(lista, lista_tiempos_promedios, num_process))
alarmas_thread.start()
#alarmas_thread.join()
time.sleep(tiempo_limite)
#for x in range(num_process):
# process_list[x].terminate()
#alarmas.terminate()
media = numpy.mean(lista_tiempos_promedios)  # average of the measured times
print(media)
for x in range(num_process):
process_list[x].join()
alarmas_thread.join()
|
dataengine_configure.py
|
#!/usr/bin/python3
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import datalab.actions_lib
import datalab.fab
import datalab.meta_lib
import json
import logging
import multiprocessing
import os
import sys
import traceback
import subprocess
from Crypto.PublicKey import RSA
from fabric import *
def configure_slave(slave_number, data_engine):
slave_name = data_engine['slave_node_name'] + '{}'.format(slave_number + 1)
slave_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'], slave_name)
try:
logging.info('[CREATING DATALAB SSH USER ON SLAVE NODE]')
print('[CREATING DATALAB SSH USER ON SLAVE NODE]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format \
(slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", initial_user,
data_engine['datalab_ssh_user'], sudo_group)
try:
subprocess.run("~/scripts/{}.py {}".format('create_ssh_user', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to create ssh user on slave.", str(err))
sys.exit(1)
try:
print('[INSTALLING USERs KEY ON SLAVE]')
logging.info('[INSTALLING USERs KEY ON SLAVE]')
additional_config = {"user_keyname": data_engine['project_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
slave_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem",
json.dumps(additional_config), data_engine['datalab_ssh_user'])
try:
subprocess.run("~/scripts/{}.py {}".format('install_user_key', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to install user ssh key on slave.", str(err))
sys.exit(1)
try:
logging.info('[CLEANING INSTANCE FOR SLAVE NODE]')
print('[CLEANING INSTANCE FOR SLAVE NODE]')
params = '--hostname {} --keyfile {} --os_user {} --application {}' \
.format(slave_hostname, keyfile_name, data_engine['datalab_ssh_user'], os.environ['application'])
try:
subprocess.run("~/scripts/{}.py {}".format('common_clean_instance', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to clean slave instance..", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE PROXY ON SLAVE NODE]')
print('[CONFIGURE PROXY ON SLAVE NODE]')
additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
.format(slave_hostname, slave_name, keyfile_name, json.dumps(additional_config),
data_engine['datalab_ssh_user'])
try:
subprocess.run("~/scripts/{}.py {}".format('common_configure_proxy', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to configure proxy on slave.", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING PREREQUISITES ON SLAVE NODE]')
print('[INSTALLING PREREQUISITES ON SLAVE NODE]')
params = "--hostname {} --keyfile {} --user {} --region {} --edge_private_ip {}". \
format(slave_hostname, keyfile_name, data_engine['datalab_ssh_user'], data_engine['region'],
edge_instance_private_hostname)
try:
subprocess.run("~/scripts/{}.py {}".format('install_prerequisites', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to install prerequisites on slave.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE SLAVE NODE {}]'.format(slave_number + 1))
print('[CONFIGURE SLAVE NODE {}]'.format(slave_number + 1))
params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} " \
"--scala_version {} --r_mirror {} --master_ip {} --node_type {}". \
format(slave_hostname, keyfile_name, data_engine['region'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], data_engine['datalab_ssh_user'],
os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], master_node_hostname,
'slave')
try:
subprocess.run("~/scripts/{}.py {}".format('configure_dataengine', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to configure slave node.", str(err))
sys.exit(1)
def clear_resources():
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
AzureActions.remove_instance(data_engine['resource_group_name'], slave_name)
AzureActions.remove_instance(data_engine['resource_group_name'], data_engine['master_node_name'])
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.INFO,
filename=local_log_filepath)
try:
AzureMeta = datalab.meta_lib.AzureMeta()
AzureActions = datalab.actions_lib.AzureActions()
print('Generating infrastructure names and tags')
data_engine = dict()
if 'exploratory_name' in os.environ:
data_engine['exploratory_name'] = os.environ['exploratory_name']
else:
data_engine['exploratory_name'] = ''
if 'computational_name' in os.environ:
data_engine['computational_name'] = os.environ['computational_name']
else:
data_engine['computational_name'] = ''
data_engine['service_base_name'] = os.environ['conf_service_base_name']
data_engine['resource_group_name'] = os.environ['azure_resource_group_name']
data_engine['region'] = os.environ['azure_region']
data_engine['key_name'] = os.environ['conf_key_name']
data_engine['vpc_name'] = os.environ['azure_vpc_name']
data_engine['user_name'] = os.environ['edge_user_name']
data_engine['project_name'] = os.environ['project_name']
data_engine['project_tag'] = data_engine['project_name']
data_engine['endpoint_name'] = os.environ['endpoint_name']
data_engine['endpoint_tag'] = data_engine['endpoint_name']
data_engine['private_subnet_name'] = '{}-{}-{}-subnet'.format(data_engine['service_base_name'],
data_engine['project_name'],
data_engine['endpoint_name'])
data_engine['private_subnet_cidr'] = AzureMeta.get_subnet(data_engine['resource_group_name'],
data_engine['vpc_name'],
data_engine['private_subnet_name']).address_prefix
data_engine['master_security_group_name'] = '{}-{}-{}-de-master-sg'.format(
data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
data_engine['slave_security_group_name'] = '{}-{}-{}-de-slave-sg'.format(
data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
data_engine['cluster_name'] = '{}-{}-{}-de-{}'.format(data_engine['service_base_name'],
data_engine['project_name'],
data_engine['endpoint_name'],
data_engine['computational_name'])
data_engine['master_node_name'] = '{}-m'.format(data_engine['cluster_name'])
data_engine['slave_node_name'] = '{}-s'.format(data_engine['cluster_name'])
data_engine['master_network_interface_name'] = '{}-nif'.format(data_engine['master_node_name'])
data_engine['master_size'] = os.environ['azure_dataengine_master_size']
data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
data_engine['slave_size'] = os.environ['azure_dataengine_slave_size']
data_engine['datalab_ssh_user'] = os.environ['conf_os_user']
data_engine['notebook_name'] = os.environ['notebook_instance_name']
master_node_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
data_engine['master_node_name'])
edge_instance_name = '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
data_engine['project_name'],
data_engine['endpoint_name'])
edge_instance_private_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
edge_instance_name)
data_engine['edge_instance_dns_name'] = 'host-{}.{}.cloudapp.azure.com'.format(edge_instance_name,
data_engine['region'])
if os.environ['conf_network_type'] == 'private':
edge_instance_hostname = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
edge_instance_name)
else:
edge_instance_hostname = data_engine['edge_instance_dns_name']
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
key = RSA.importKey(open(keyfile_name, 'rb').read())
data_engine['public_ssh_key'] = key.publickey().exportKey("OpenSSH").decode('UTF-8')
if os.environ['conf_os_family'] == 'debian':
initial_user = 'ubuntu'
sudo_group = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
initial_user = 'ec2-user'
sudo_group = 'wheel'
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to generate variables dictionary.", str(err))
sys.exit(1)
try:
logging.info('[CREATING DATALAB SSH USER ON MASTER NODE]')
print('[CREATING DATALAB SSH USER ON MASTER NODE]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format \
(master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", initial_user,
data_engine['datalab_ssh_user'], sudo_group)
try:
subprocess.run("~/scripts/{}.py {}".format('create_ssh_user', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to create ssh user on master.", str(err))
sys.exit(1)
try:
print('[INSTALLING USERs KEY ON MASTER]')
logging.info('[INSTALLING USERs KEY ON MASTER]')
additional_config = {"user_keyname": data_engine['project_name'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
master_node_hostname, os.environ['conf_key_dir'] + data_engine['key_name'] + ".pem", json.dumps(
additional_config), data_engine['datalab_ssh_user'])
try:
subprocess.run("~/scripts/{}.py {}".format('install_user_key', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to install ssh user key on master.", str(err))
sys.exit(1)
try:
logging.info('[CLEANING INSTANCE FOR MASTER NODE]')
print('[CLEANING INSTANCE FOR MASTER NODE]')
params = '--hostname {} --keyfile {} --os_user {} --application {}' \
.format(master_node_hostname, keyfile_name, data_engine['datalab_ssh_user'], os.environ['application'])
try:
subprocess.run("~/scripts/{}.py {}".format('common_clean_instance', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to clean master instance.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE PROXY ON MASTER NODE]')
print('[CONFIGURE PROXY ON MASTER NODE]')
additional_config = {"proxy_host": edge_instance_private_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
.format(master_node_hostname, data_engine['master_node_name'], keyfile_name, json.dumps(additional_config),
data_engine['datalab_ssh_user'])
try:
subprocess.run("~/scripts/{}.py {}".format('common_configure_proxy', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to configure proxy on master.", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING PREREQUISITES ON MASTER NODE]')
print('[INSTALLING PREREQUISITES ON MASTER NODE]')
params = "--hostname {} --keyfile {} --user {} --region {} --edge_private_ip {}". \
format(master_node_hostname, keyfile_name, data_engine['datalab_ssh_user'], data_engine['region'],
edge_instance_private_hostname)
try:
subprocess.run("~/scripts/{}.py {}".format('install_prerequisites', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to install prerequisites on master.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE MASTER NODE]')
print('[CONFIGURE MASTER NODE]')
params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} " \
"--scala_version {} --r_mirror {} --master_ip {} --node_type {}".\
format(master_node_hostname, keyfile_name, data_engine['region'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], data_engine['datalab_ssh_user'],
os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], master_node_hostname,
'master')
try:
subprocess.run("~/scripts/{}.py {}".format('configure_dataengine', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
datalab.fab.append_result("Failed to configure master node", str(err))
clear_resources()
sys.exit(1)
try:
jobs = []
for slave in range(data_engine['instance_count'] - 1):
p = multiprocessing.Process(target=configure_slave, args=(slave, data_engine))
jobs.append(p)
p.start()
for job in jobs:
job.join()
for job in jobs:
if job.exitcode != 0:
raise Exception
except Exception as err:
datalab.fab.append_result("Failed to configure slave nodes", str(err))
clear_resources()
sys.exit(1)
try:
print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
notebook_instance_ip = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
data_engine['notebook_name'])
additional_info = {
"computational_name": data_engine['computational_name'],
"master_node_hostname": master_node_hostname,
"notebook_instance_ip": notebook_instance_ip,
"instance_count": data_engine['instance_count'],
"master_node_name": data_engine['master_node_name'],
"slave_node_name": data_engine['slave_node_name'],
"tensor": False
}
params = "--edge_hostname {} " \
"--keyfile {} " \
"--os_user {} " \
"--type {} " \
"--exploratory_name {} " \
"--additional_info '{}'"\
.format(edge_instance_private_hostname,
keyfile_name,
data_engine['datalab_ssh_user'],
'spark',
data_engine['exploratory_name'],
json.dumps(additional_info))
try:
subprocess.run("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params), shell=True, check=True)
except:
datalab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
datalab.fab.append_result("Failed to configure reverse proxy", str(err))
clear_resources()
sys.exit(1)
try:
ip_address = AzureMeta.get_private_ip_address(data_engine['resource_group_name'],
data_engine['master_node_name'])
spark_master_url = "http://" + ip_address + ":8080"
spark_master_access_url = "https://" + edge_instance_hostname + "/{}/".format(
data_engine['exploratory_name'] + '_' + data_engine['computational_name'])
logging.info('[SUMMARY]')
print('[SUMMARY]')
print("Service base name: {}".format(data_engine['service_base_name']))
print("Region: {}".format(data_engine['region']))
print("Cluster name: {}".format(data_engine['cluster_name']))
print("Master node shape: {}".format(data_engine['master_size']))
print("Slave node shape: {}".format(data_engine['slave_size']))
print("Instance count: {}".format(str(data_engine['instance_count'])))
with open("/root/result.json", 'w') as result:
res = {"hostname": data_engine['cluster_name'],
"instance_id": data_engine['master_node_name'],
"key_name": data_engine['key_name'],
"Action": "Create new Data Engine",
"computational_url": [
{"description": "Apache Spark Master",
"url": spark_master_access_url},
# {"description": "Apache Spark Master (via tunnel)",
# "url": spark_master_url}
]
}
print(json.dumps(res))
result.write(json.dumps(res))
except Exception as err:
datalab.fab.append_result("Error with writing results", str(err))
clear_resources()
sys.exit(1)
|
simulation.py
|
from itertools import count
from collections import namedtuple
from queue import Empty
from time import sleep
import multiprocessing as mp
import numpy as np
import cloudpickle # For pickling lambda functions and more
from huskarl.memory import Transition
from huskarl.core import HkException
# Packet used to transmit experience from environment subprocesses to main process
# The first packet of every episode will have reward set to None
# The last packet of every episode will have state set to None
RewardState = namedtuple('RewardState', ['reward', 'state'])
class Simulation:
"""Simulates an agent interacting with one of multiple environments."""
def __init__(self, create_env, agent, mapping=None):
self.create_env = create_env
self.agent = agent
self.mapping = mapping
def train(self, max_steps=100_000, instances=1, visualize=False, plot=None, max_subprocesses=0):
"""Trains the agent on the specified number of environment instances."""
self.agent.training = True
if max_subprocesses == 0:
# Use single process implementation
self._sp_train(max_steps, instances, visualize, plot)
elif max_subprocesses is None or max_subprocesses > 0:
# Use multiprocess implementation
self._mp_train(max_steps, instances, visualize, plot, max_subprocesses)
else:
raise HkException(f"Invalid max_subprocesses setting: {max_subprocesses}")
def _sp_train(self, max_steps, instances, visualize, plot):
"""Trains using a single process."""
# Keep track of rewards per episode per instance
episode_reward_sequences = [[] for i in range(instances)]
episode_step_sequences = [[] for i in range(instances)]
episode_rewards = [0] * instances
# Create and initialize environment instances
envs = [self.create_env() for i in range(instances)]
states = [env.reset() for env in envs]
for step in range(max_steps):
for i in range(instances):
if visualize: envs[i].render()
action = self.agent.act(states[i], i)
next_state, reward, done, _ = envs[i].step(action)
self.agent.push(Transition(states[i], action, reward, None if done else next_state), i)
episode_rewards[i] += reward
if done:
episode_reward_sequences[i].append(episode_rewards[i])
episode_step_sequences[i].append(step)
episode_rewards[i] = 0
if plot: plot(episode_reward_sequences, episode_step_sequences)
states[i] = envs[i].reset()
else:
states[i] = next_state
# Perform one step of the optimization
self.agent.train(step)
if plot: plot(episode_reward_sequences, episode_step_sequences, done=True)
def _mp_train(self, max_steps, instances, visualize, plot, max_subprocesses):
"""Trains using multiple processes.
Useful to parallelize the computation of heavy environments.
"""
# Unless specified set the maximum number of processes to be the number of cores in the machine
if max_subprocesses is None:
max_subprocesses = mp.cpu_count()
nprocesses = min(instances, max_subprocesses)
# Split instances into processes as homogeneously as possible
instances_per_process = [instances//nprocesses] * nprocesses
leftover = instances % nprocesses
if leftover > 0:
for i in range(leftover):
instances_per_process[i] += 1
# Create a unique id (index) for each instance, grouped by process
instance_ids = [list(range(i, instances, nprocesses))[:ipp] for i, ipp in enumerate(instances_per_process)]
# Create processes and pipes (one pipe for each environment instance)
pipes = []
processes = []
for i in range(nprocesses):
child_pipes = []
for j in range(instances_per_process[i]):
parent, child = mp.Pipe()
pipes.append(parent)
child_pipes.append(child)
pargs = (cloudpickle.dumps(self.create_env), instance_ids[i], max_steps, child_pipes, visualize)
processes.append(mp.Process(target=_train, args=pargs))
# Start all processes
print(f"Starting {nprocesses} process(es) for {instances} environment instance(s)... {instance_ids}")
for p in processes: p.start()
# Keep track of rewards per episode per instance
episode_reward_sequences = [[] for i in range(instances)]
episode_step_sequences = [[] for i in range(instances)]
episode_rewards = [0] * instances
# Temporarily record RewardState instances received from each subprocess
# Each Transition instance requires two RewardState instances to be created
rss = [None] * instances
# Keep track of last actions sent to subprocesses
last_actions = [None] * instances
for step in range(max_steps):
# Keep track from which environments we have already constructed a full Transition instance
# and sent it to agent. This is to synchronize steps.
step_done = [False] * instances
while sum(step_done) < instances: # Steps across environments are synchronized
# Within each step, Transitions are received and processed on a first-come first-served basis
awaiting_pipes = [p for iid, p in enumerate(pipes) if step_done[iid] == 0]
ready_pipes = mp.connection.wait(awaiting_pipes, timeout=None)
pipe_indexes = [pipes.index(rp) for rp in ready_pipes]
# Do a round-robin over processes to best divide computation
pipe_indexes.sort()
for iid in pipe_indexes:
rs = pipes[iid].recv() # Receive a RewardState
# If we already had a RewardState for this environment then we are able to create and push a Transition
if rss[iid] is not None:
exp = Transition(rss[iid].state, last_actions[iid], rs.reward, rs.state)
self.agent.push(exp, iid)
step_done[iid] = True
rss[iid] = rs
# Check if episode is done
if rs.state is None:
# Episode is done - store rewards and update plot
rss[iid] = None
episode_reward_sequences[iid].append(episode_rewards[iid])
episode_step_sequences[iid].append(step)
episode_rewards[iid] = 0
if plot: plot(episode_reward_sequences, episode_step_sequences)
else:
# Episode is NOT done - act according to state and send action to the subprocess
action = self.agent.act(rs.state, iid)
last_actions[iid] = action
try:
pipes[iid].send(action)
# Disregard BrokenPipeError on last step
except BrokenPipeError as bpe:
if step < (max_steps - 1): raise bpe
if rs.reward: episode_rewards[iid] += rs.reward
# Train the agent at the end of every synchronized step
self.agent.train(step)
if plot: plot(episode_reward_sequences, episode_step_sequences, done=True)
def test(self, max_steps, visualize=True):
"""Test the agent on the environment."""
self.agent.training = False
# Create and initialize environment
env = self.create_env()
state = env.reset()
for step in range(max_steps):
if visualize: env.render()
action = self.agent.act(state)
next_state, reward, done, _ = env.step(action)
state = env.reset() if done else next_state
def _train(create_env, instance_ids, max_steps, pipes, visualize):
"""This function is to be executed in a subprocess"""
pipes = {iid: p for iid, p in zip(instance_ids, pipes)}
actions = {iid: None for iid in instance_ids} # Reused dictionary of actions
# Initialize environments and send initial state to agent in parent process
create_env = cloudpickle.loads(create_env)
envs = {iid: create_env() for iid in instance_ids}
for iid in instance_ids:
state = envs[iid].reset()
pipes[iid].send(RewardState(None, state))
# Run for the specified number of steps
for step in range(max_steps):
for iid in instance_ids:
# Get action from agent in main process via pipe
actions[iid] = pipes[iid].recv()
if visualize: envs[iid].render()
# Step environment and send experience to agent in main process via pipe
next_state, reward, done, _ = envs[iid].step(actions[iid])
pipes[iid].send(RewardState(reward, None if done else next_state))
# If episode is over reset the environment and transmit initial state to agent
if done:
state = envs[iid].reset()
pipes[iid].send(RewardState(None, state))
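# A minimal, self-contained sketch (never executed on import) of the interface Simulation expects
# from its environment and agent. _DummyEnv and _DummyAgent are illustrative stand-ins, not part of
# huskarl; a real run would use a gym environment and a huskarl agent instead.
def _example_simulation_usage():
    import random

    class _DummyEnv:
        """Tiny episodic environment: ten steps per episode, random reward."""
        def __init__(self):
            self.t = 0
        def reset(self):
            self.t = 0
            return 0.0
        def step(self, action):
            self.t += 1
            return float(self.t), random.random(), self.t >= 10, {}
        def render(self):
            pass

    class _DummyAgent:
        """Implements the minimal agent protocol used by Simulation."""
        training = True
        def act(self, state, instance=0):
            return 0                      # always pick action 0
        def push(self, transition, instance=0):
            pass                          # a real agent would store the Transition here
        def train(self, step):
            pass                          # a real agent would update its model here

    sim = Simulation(lambda: _DummyEnv(), _DummyAgent())
    sim.train(max_steps=50, instances=2, max_subprocesses=0)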
|
reconnect_test.py
|
from threading import Thread
from time import sleep
from hazelcast import ClientConfig
from hazelcast.exception import HazelcastError
from hazelcast.lifecycle import LIFECYCLE_STATE_DISCONNECTED, LIFECYCLE_STATE_CONNECTED
from hazelcast.util import AtomicInteger
from tests.base import HazelcastTestCase
from tests.util import configure_logging, event_collector
class ReconnectTest(HazelcastTestCase):
rc = None
def setUp(self):
configure_logging()
self.rc = self.create_rc()
self.cluster = self.create_cluster(self.rc)
def tearDown(self):
self.shutdown_all_clients()
self.rc.exit()
def test_start_client_with_no_member(self):
config = ClientConfig()
config.network_config.addresses.append("127.0.0.1:5701")
config.network_config.connection_attempt_limit = 2
config.network_config.connection_attempt_period = 0.1
with self.assertRaises(HazelcastError):
self.create_client(config)
def test_start_client_before_member(self):
Thread(target=self.cluster.start_member).start()
config = ClientConfig()
config.network_config.connection_attempt_limit = 10
self.create_client(config)
def test_restart_member(self):
member = self.cluster.start_member()
config = ClientConfig()
config.network_config.connection_attempt_limit = 10
client = self.create_client(config)
state = [None]
def listener(s):
state[0] = s
client.lifecycle.add_listener(listener)
member.shutdown()
self.assertTrueEventually(lambda: self.assertEqual(state[0], LIFECYCLE_STATE_DISCONNECTED))
self.cluster.start_member()
self.assertTrueEventually(lambda: self.assertEqual(state[0], LIFECYCLE_STATE_CONNECTED))
def test_listener_re_register(self):
member = self.cluster.start_member()
config = ClientConfig()
config.network_config.connection_attempt_limit = 10
client = self.create_client(config)
map = client.get_map("map")
collector = event_collector()
reg_id = map.add_entry_listener(added_func=collector)
self.logger.info("Registered listener with id %s", reg_id)
member.shutdown()
sleep(3)
self.cluster.start_member()
count = AtomicInteger()
def assert_events():
if client.lifecycle.is_live:
map.put("key-%d" % count.get_and_increment(), "value").result()
self.assertGreater(len(collector.events), 0)
else:
self.fail("Client disconnected...")
self.assertTrueEventually(assert_events)
def test_member_list_after_reconnect(self):
old_member = self.cluster.start_member()
config = ClientConfig()
config.network_config.connection_attempt_limit = 10
client = self.create_client(config)
old_member.shutdown()
new_member = self.cluster.start_member()
def assert_member_list():
self.assertEqual(1, len(client.cluster.members))
self.assertEqual(new_member.uuid, client.cluster.members[0].uuid)
self.assertTrueEventually(assert_member_list)
|
datarecorderfactory.py
|
"""
DataRecorder factory
"""
import time
import utils.remote as rmt
import optotrak.optotrakfactory as otf
def connect(datarecorderinstancetype=None,
optotrakinstancetype=None):
if datarecorderinstancetype is None:
datarecorderinstancetype = create_default_instance_type()
if optotrakinstancetype is None:
optotrakinstancetype = otf.create_default_instance_type()
if datarecorderinstancetype.mode == rmt.InstanceType.InProcess:
import datarecorder
optotrak = otf.connect(optotrakinstancetype)
return datarecorder.OptotrakDataRecorder(optotrak)
elif datarecorderinstancetype.mode == rmt.InstanceType.Pyro4Proxy:
return rmt.connect(datarecorderinstancetype.URI())
elif datarecorderinstancetype.mode == rmt.InstanceType.ChildProcPyro4Proxy:
from multiprocessing import Process
import optotrak.datarecorderserver as drs
proc = Process(target=drs.start, args=(datarecorderinstancetype, optotrakinstancetype))
proc.start()
time.sleep(0.5)
return rmt.connect(datarecorderinstancetype.URI())
else:
raise NotImplementedError()
|
ftpserver.py
|
#!/usr/bin/python3
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.handlers import ThrottledDTPHandler
from pyftpdlib.servers import FTPServer
# import additions
import sys
import os
import errno
import socket
import threading
import subprocess
import time
import requests
import json
import mimetypes
from tinydb import TinyDB, where, Query
from urllib.parse import urlparse
from copy import deepcopy
from datetime import datetime
# these are my global variables
# userbase = auth.Userbase()
python = sys.executable
PORT = 2121
server = None
gen_snapshot = False
exchange_connect_status = False
CAPERROR = False
exchange_url = ''
total_share_size = 0
server_running_status = False
app_is_running = True
ls = os.listdir
pwd = os.getcwd()
# anonymous user class
# class AnonymousUser:
# """Each instance of this class represents an anonymous user
# * name : anonymous (as both kinds of users are in same database)
# * homedir | * permission
# * msg_login | * msg_quit
# *
# * save_details() : save current details
# """
# def __init__(self, dic):
# k = list(dic.keys())
# if 'homedir' in k and \
# 'permission' in k:
# self.record = deepcopy(dic)
# if not 'msg_quit' in k:
# self.record['msg_quit'] = ''
# if not 'msg_login' in k:
# self.record['msg_login'] = ''
# self.record['name'] = 'anonymous'
# self.name = self.record['name']
# self.homedir = self.record['homedir']
# self.permission = self.record['permission']
# self.msg_login = self.record['msg_login']
# self.msg_quit = self.record['msg_quit']
# def save_details(self):
# dbase = TinyDB('user_database.json')
# if not (dbase.count(where('name') == self.record['name'])) == 0:
# dbase.remove(where('name') == self.record['name'])
# dbase.insert(self.record)
# dbase.close()
class FTPSettings:
"""Class to handle FTP Settings
There are following attributes that are saved in settings file
* server_name | name of the server
* server_banner | message displayed on connecting first time (FTPHandler)
* port | port (default 2121)
* max_cons | maximum connections to the server (FTPServer)
* max_cons_per_ip | maximum connections per ip address (FTPServer)
* max_upload_speed | maximum upload speed on server (take care of hard drive i/o and network speed) (ThrottledDTPHandler)
* max_download_speed | maximum download speed (auto_sized_buffers are True by default) (ThrottledDTPHandler)
* permit_outside_lan | FTPHandler (permit_foreign_addresses) [ Not handling due to lack of knowledge ]
* homedir | Anonymous home directory (added for this minimal version)
"""
def __init__(self):
"""read data from settings file"""
dbase = TinyDB('settings.json')
if len(dbase.all()) == 0:
self.server_name = 'whoami'
self.server_banner = "Welcome..."
self.port = 2121
self.max_cons = 10
self.max_cons_per_ip = 2
self.max_upload_speed = 2097152 # approximately 2 Mbps in bytes
self.max_download_speed = 10 # to restrict uploads from the public on this server,
# when write permission is allowed
# self.permit_outside_lan = False
self.exchange_url = ""
self.homedir = ""
else:
try:
rec = dbase.all()[0]
self.server_name = rec['server_name']
self.server_banner = rec['server_banner']
self.port = rec['port']
self.max_cons = rec['max_cons']
self.max_cons_per_ip = rec['max_cons_per_ip']
self.max_upload_speed = rec['max_upload_speed']
self.max_download_speed = rec['max_download_speed']
self.exchange_url = rec['exchange_url']
self.homedir = rec['homedir']
except KeyError:
self.restore_default_settings()
# permit outside lan has not been included
dbase.close()
def reload_settings(self):
self.__init__()
def save_settings(self):
"""save settings to settings file"""
dbase = TinyDB('settings.json')
dbase.purge()
rec={}
rec['server_name'] = self.server_name
rec['server_banner'] = self.server_banner
rec['port'] = self.port
rec['max_cons'] = self.max_cons
rec['max_cons_per_ip'] = self.max_cons_per_ip
rec['max_upload_speed'] = self.max_upload_speed
rec['max_download_speed'] = self.max_download_speed
# f['permit_outside_lan'] = self.permit_outside_lan
rec['exchange_url'] = self.exchange_url
rec['homedir'] = self.homedir
dbase.insert(rec)
dbase.close()
mylog("Settings modified")
def restore_default_settings(self):
dbase = TinyDB('settings.json')
dbase.purge()
dbase.close()
self.__init__()
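# A minimal sketch (not called anywhere) of how FTPSettings is typically used: load the persisted
# values, tweak a couple of fields, and write them back to settings.json. The values assigned below
# are illustrative, not recommended defaults.
def _example_settings_roundtrip():
    conf = FTPSettings()                      # reads settings.json, or falls back to defaults
    conf.port = 2121                          # illustrative values
    conf.homedir = os.path.expanduser("~")
    conf.save_settings()                      # persists every field back to settings.json
    conf.reload_settings()                    # re-read the file so the object matches what was saved
    return conf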
# here the global key functions
def mylog(ar):
f = open('log.txt', 'a')
f.write(str(datetime.now()) + " " + ar + "\n")
f.close()
def load_settings():
return FTPSettings()
def start_server():
server.serve_forever()
def stop_server():
server.close_all()
def is_port_available(port):
port = int(port)
try:
# connecting on localhost, previously it was 0.0.0.0, to satisfy Windows
conn = socket.create_connection(('localhost', port), 2)
conn.close()
# something is already listening on this port, so it is not available
return False
except OverflowError:
mylog("Socket out of range")
return False
except (ConnectionError, ConnectionRefusedError):
# Connection refused error to handle windows systems:(
# nothing answered, so the port is free
return True
except Exception as e:
mylog('error while port check: ' + str(e))
return False
def get_ip_address():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8",80))
ip = s.getsockname()[0]
s.close()
return ip
except Exception as e:
try:
ip = socket.gethostbyname(socket.getfqdn())
return ip
except Exception as e:
mylog("cannot determine ip address" + str(e))
return ""
return ""
class generate_system_snapshot(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def do_the_job(self):
global exchange_connect_status, gen_snapshot, total_share_size, app_is_running
self.dbdict = {}
self.dbdict["filedata"] = {}
self.dbtable = self.dbdict["filedata"]
# self.dic = dict()
self.totalsize = 0
self.filecount = 0
def path_to_dict(path, l):
if ( not gen_snapshot ) or (not app_is_running ):
# not generating snapshot
return
try:
if os.path.isdir(path):
for x in ls(path):
path_to_dict(os.path.join(path, x), l)
else:
self.filecount += 1
size = os.path.getsize(path)
filename = os.path.basename(path)
self.dbtable[str(self.filecount)] = { "filename":filename, "size":size, "fullpath":path[l:-len(filename)], "mimetype":mimetypes.guess_type(filename)[0] }
# self.dic[os.path.basename(path)] = { "size" : os.path.getsize(path), "fullpath" : path[l:] }
self.totalsize += size
except Exception as e:
raise e
if not gen_snapshot:
return
shared_dir = load_settings().homedir
p = os.path.abspath(shared_dir)
path_to_dict(p, len(p))
self.dbdict["metadata"] = {}
self.metadata = self.dbdict["metadata"]
self.metadata['1'] = { "totalfiles":self.filecount, "totalsize":self.totalsize }
total_share_size = self.totalsize
# earlier, the tinydb insert function was used to insert records into the database in json format,
# which was extremely slow.
# now the database is written out manually, in the same on-disk format tinydb uses
# (an illustrative sketch of this layout appears after this class).
f = open('snapshot.json', 'w')
f.write(json.dumps(self.dbdict, indent=2))
f.close()
mylog("Snapshot generated")
def upload_file(self):
global exchange_url, CAPERROR
mylog("Starting upload")
try:
dest_dir = load_settings().homedir
dest_path = os.path.join(dest_dir, 'snapshot.json')
dest_file = open(dest_path, 'wb')
source_file = open('snapshot.json', 'rb')
dest_file.write(source_file.read())
source_file.close()
dest_file.close()
# now notify you dad to take the parcel
mylog('Asking dad to take the parcel')
f = open('session_id', 'r')
sessionid = f.read().strip()
f.close()
uri=exchange_url+'/cgi-bin/actions.py'
headers = {'user-agent':'21Lane'}
r = requests.post(url=uri, data={'action':'snapshot'}, cookies={'session_id':sessionid}, headers=headers, timeout=5, proxies={'socks':None, 'http':None})
if r.status_code==200:
if r.text.strip() == 'ok':
mylog('Snapshot file uploaded successfully.')
os.remove(dest_path)
elif r.text.strip() == 'CAPERROR':
mylog("exchange raised cap error")
CAPERROR = True
else:
mylog("Some error occured while uploading snapshot.")
except (requests.exceptions.ConnectionError, ConnectionAbortedError, requests.exceptions.Timeout) as e:
mylog("Network error while periodical uploads.")
raise e
except Exception as e:
# first close any open file to avoid permissions error in windows, and other similar errors
try:
if not f.closed:
f.close()
if not dest_file.closed:
dest_file.close()
if not source_file.closed:
source_file.close()
except NameError:
pass
if 'session_id' in ls(pwd):
os.remove('session_id')
mylog(str(e) + ' ' + 'is the error')
raise e
def getThreadName(self):
return self.thread_name
def run(self):
self.thread_name = self.getName()
global gen_snapshot, app_is_running
cur_time = time.time()
wait_time = 60*60 # one hour gap
next_time = cur_time
upload_time = time.time()
while True and app_is_running:
if not gen_snapshot:
mylog("Ending snapshot thread")
break
if cur_time >= next_time:
mylog('Generating snapshot')
self.do_the_job()
next_time += wait_time
if exchange_connect_status == True:
self.upload_file()
else:
print("not uploading file")
# breathe, don't choke while you run
time.sleep(1)
cur_time += 1
mylog("Snapshot creator Thread quits")
class myserver(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
self.thread_name = self.getName()
global server, PORT, server_running_status, exchange_url
conf = load_settings()
exchange_url = conf.exchange_url
try:
authorizer = DummyAuthorizer()
authorizer.add_anonymous(conf.homedir, msg_login="Welcome to 21Lane sharing.", msg_quit="Thanks for using.")
except Exception as e:
mylog("My server caught an exception")
sys.exit(1)
ThrottledDTPHandler.write_limit = conf.max_upload_speed
ThrottledDTPHandler.read_limit = conf.max_download_speed
FTPHandler.dtp_handler = ThrottledDTPHandler
FTPHandler.banner = conf.server_banner
FTPServer.max_cons = conf.max_cons
FTPServer.max_cons_per_ip = conf.max_cons_per_ip
FTPHandler.authorizer = authorizer
# FTPHandler.permit_foreign_addresses = conf.permit_outside_lan
if is_port_available(conf.port):
server = FTPServer(('0.0.0.0', conf.port), FTPHandler)
else:
return
server_running_status = True
mylog('server status ' + str(server_running_status))
server.serve_forever()
def getport(self):
return str(PORT)
def getThreadName(self):
return self.thread_name
# handling with GUI
from PyQt5.QtWidgets import (QWidget, QAction, qApp, QPushButton, QApplication,
QMainWindow, QTextEdit, QMessageBox, QInputDialog, QLineEdit, QLabel, QVBoxLayout,
QHBoxLayout, QGridLayout, QFrame, QSlider, QSpinBox, QFileDialog, QSplitter)
from PyQt5.QtGui import QIcon, QFont
from PyQt5.Qt import QDesktopServices, QUrl
from PyQt5.QtCore import Qt, QCoreApplication, pyqtSignal, QObject
class CapErrorNotifier(QObject):
s = pyqtSignal() # signal
class MainUI(QWidget):
def __init__(self):
super().__init__()
self.srv = None
self.exchange_process = None
self.capThread = None
self.initUI()
def initUI(self):
mylog ("Starting ui")
self.itHurtsLabel = QLabel(self)
self.itHurtsLabel.setText("Don't randomly hit your mouse. It hurts!'")
self.itHurtsLabel.setFont(QFont('SansSerif', 10))
self.itHurtsLabel.setStyleSheet("padding: 5px;")
self.itHurtsLabel.setWordWrap(False)
self.mainbtn = QPushButton("Start sharing", self)
self.mainbtn.setStyleSheet("background-color: #22a7f0; color: white; border: none; padding: 5px;")
self.mainbtn.setCheckable(True)
self.mainbtn.clicked[bool].connect(self.check_server)
self.exchangebtn = QPushButton("View other users", self)
self.exchangebtn.setStyleSheet("background-color: #bdc3c7; color: white; border: none; padding: 5px 15px;")
self.exchangebtn.setCheckable(True)
self.exchangebtn.setEnabled(False)
# port check tool
portCheck = QAction(QIcon('icons/ic_search_black_48dp_1x.png'), 'Port &Check', self)
portCheck.setShortcut('Ctrl+F')
portCheck.setToolTip("Port Scan : Ctrl+F")
portCheck.setStatusTip("Check whether a port is available")
portCheck.triggered.connect(self.checkPortUI)
# portCheck.triggered.connect(portCheckUI())
# connect to 21Exchange
self.exchange = QAction(QIcon('icons/disconnect.png'), 'Connect to &Exchange...', self)
self.exchange.setShortcut('Ctrl+E')
self.exchange.setToolTip("Connect to exchange : Ctrl+E")
self.exchange.setStatusTip("Connect to 21Exchange servers on local network.")
self.exchange.triggered.connect(self.exchange_connect)
# disconnect from 21exchange
        self.disconnect = QAction(QIcon('icons/ic_wb_cloudy_black_48dp_2x.png'), 'Disconnect from &Exchange...', self)
        self.disconnect.setShortcut('Ctrl+E')
        self.disconnect.setToolTip("Disconnect from exchange : Ctrl+E")
        self.disconnect.setStatusTip("Disconnect from 21Exchange servers on local network.")
self.disconnect.triggered.connect(self.exchange_disconnect)
# help
self.helpAction = QAction(QIcon('icons/ic_help_outline_black_24dp_2x.png'), '&Help', self)
self.helpAction.setToolTip("Help")
self.helpAction.setShortcut("F1")
self.helpAction.setStatusTip("Help")
self.helpAction.triggered.connect(self.show_help)
# git action
self.gitAction = QAction(QIcon('icons/GitHub-Mark-64px.png'), 'View on &Github', self)
self.gitAction.setToolTip("See code")
self.gitAction.setStatusTip("Github repo")
self.gitAction.triggered.connect(self.show_git)
# self.toolbar = self.addToolBar("Quick Access")
# self.toolbar.setToolTip("Controls toolbar")
# # self.toolbar.addAction(exitAction)
# self.toolbar.addAction(portCheck)
# self.toolbar.addAction(self.gitAction)
# self.toolbar.addAction(self.helpAction)
# self.toolbar.addAction(self.exchange)
# self.snapshot_thread = None
# self.srv = None
# Configuration options
self.nameLabel = QLabel(self); self.nameLabel.setText("Public Name");
self.portLabel = QLabel(self); self.portLabel.setText("Port")
self.maxconLabel = QLabel(self); self.maxconLabel.setText("Max. connections (total) allowed")
self.maxconperipLabel = QLabel(self); self.maxconperipLabel.setText("Max. connections per IP allowed")
self.speedLabel = QLabel(self); self.speedLabel.setText("Bandwidth limit")
self.exchangeLabel = QLabel(self); self.exchangeLabel.setText("Exchange URL")
self.speedDisplay = QLabel(self)
self.nameInput = QLineEdit(self); self.nameInput.setPlaceholderText("Max. 16 characters"); self.nameInput.setMaxLength(16)
self.portInput = QSpinBox(self); self.portInput.setRange(0, 65535); self.portInput.setValue(2121)
self.maxconInput = QSpinBox(self)
self.maxconperipInput = QSpinBox(self)
self.speedInput = QSlider(Qt.Horizontal, self); self.speedInput.setFocusPolicy(Qt.NoFocus)
self.exchangeInput = QLineEdit(self); self.exchangeInput.setPlaceholderText("Get it from the exchange website.")
self.speedInput.valueChanged[int].connect(self.downSpeedChanged)
self.nameInput.setToolTip("Your name on the network")
self.portInput.setToolTip("Between 0 and 65535 (integer only)")
self.maxconInput.setToolTip("Total users which can connect to your system")
self.maxconperipInput.setToolTip("Total connections one user can make to your system")
self.speedInput.setToolTip("This is the max.speed at which \nyou allow uploads from your system \n(For users with write permission) \nHigher values can freeze your system.")
self.maxconInput.setMinimum(3); self.maxconInput.setMaximum(100)
self.maxconperipInput.setMinimum(3); self.maxconperipInput.setMaximum(10)
self.speedInput.setMinimum(1536);
self.speedInput.setMaximum(5632);
self.homedirSelect = QPushButton('Select shared folder', self)
self.homedirInput = QLineEdit(self);
self.homedirSelect.setToolTip("Click this button to choose folder to share")
self.homedirSelect.clicked.connect(self.showDirChooser)
# setting up the layout
# self.settingsFrame = QFrame()
# self.buttonsFrame = QFrame()
# self.settingsFrame.setFrameShape(QFrame.Box); self.settingsFrame.setFrameShadow(QFrame.Plain)
# self.buttonsFrame.setFrameShape(QFrame.StyledPanel); self.buttonsFrame.setFrameShadow(QFrame.Plain)
# self.settingsLayout = QGridLayout()
# self.settingsFrame.setLayout(self.settingsLayout)
# self.buttonsLayout = QHBoxLayout()
# self.buttonsFrame.setLayout(self.buttonsLayout)
self.grid = QGridLayout()
self.setLayout(self.grid)
self.statusTip = QLabel(self);
self.statusTip.setText("Welcome")
self.statusTip.setStyleSheet("border: 1px solid black; padding-top: 10px;")
self.grid.addWidget(self.nameLabel, 0, 0, 1, 2); self.grid.addWidget(self.nameInput, 0, 2, 1, 2)
self.grid.addWidget(self.portLabel, 0, 5); self.grid.addWidget(self.portInput, 0, 6)
self.grid.addWidget(self.homedirSelect, 3, 0, 1, 2); self.grid.addWidget(self.homedirInput, 3, 2, 1, 5)
self.grid.addWidget(self.maxconLabel, 1, 0, 1, 4); self.grid.addWidget(self.maxconInput, 1, 5, 1, 1)
self.grid.addWidget(self.maxconperipLabel, 2, 0, 1, 4); self.grid.addWidget(self.maxconperipInput, 2, 5, 1, 1)
self.grid.addWidget(self.speedLabel, 4, 0, 1, 2); self.grid.addWidget(self.speedInput, 4, 2, 1, 4); self.grid.addWidget(self.speedDisplay, 4, 6)
self.grid.addWidget(self.exchangeLabel, 5, 0, 1, 2); self.grid.addWidget(self.exchangeInput, 5, 2, 1, 5)
self.grid.addWidget(self.itHurtsLabel, 6, 1, 1, 5)
self.grid.addWidget(self.mainbtn, 7, 1, 1, 2)
self.grid.addWidget(self.exchangebtn, 7, 4, 1, 2)
self.grid.addWidget(self.statusTip, 8, 0, 1, 7)
self.sett = load_settings()
self.populateForm()
# self.setFixedSize(450, 300)
self.setFixedSize(self.minimumSizeHint())
self.setWindowTitle("21Lane")
# self.statusBar().showMessage("Welcome")
# start cap monitoring thread
self.mainbtn.setEnabled(True)
self.capThread = threading.Thread(target=self.capMonitor)
self.capThread.start()
self.cerrnotifier = CapErrorNotifier()
self.cerrnotifier.s.connect(self.showCapError)
self.show()
def showCapError(self):
if self.exchangebtn.isEnabled():
self.exchangebtn.setEnabled(False)
self.exchangebtn.setStyleSheet("background-color: #bdc3c7; color: white; border: none; padding: 5px 15px;")
self.exchangebtn.disconnect()
QMessageBox.information(self, "Err...", "You must satisfy the minimum cap limit as per your exchange", QMessageBox.Ok, QMessageBox.Ok)
def setStatusTip(self, txt):
self.statusTip.setText(txt)
def showDirChooser(self):
dirname = QFileDialog.getExistingDirectory(self, "Select Directory")
if dirname:
self.homedirInput.setText(dirname)
def getSpeedText(self, value):
if value < 1024:
return str(value)+" KBPS"
elif value < 5625:
return str(round(value/1024, 2))+" MBPS"
else:
self.speedInput.setValue(5620)
return "No Limit"
def downSpeedChanged(self, value):
self.speedDisplay.setText(self.getSpeedText(value))
if value > 5625:
if self.speedDisplay.text() == 'No Limit':
return
self.speedInput.setValue(5220)
self.speedDisplay.setToolTip("May slow down your system.")
QMessageBox.warning(self, 'Message', "No Limits on Download speed.\nThis may slow down your system if many people connect to it.", QMessageBox.Ok, QMessageBox.Ok)
else:
self.speedDisplay.setToolTip("")
def populateForm(self):
self.nameInput.setText(self.sett.server_name)
self.portInput.setValue(self.sett.port)
self.maxconInput.setValue(self.sett.max_cons)
self.maxconperipInput.setValue(self.sett.max_cons_per_ip)
        self.speedInput.setValue(int(self.sett.max_upload_speed / 1024)) # display in kilobytes
self.exchangeInput.setText(self.sett.exchange_url)
self.homedirInput.setText(self.sett.homedir)
def saveData(self):
# form validator
if ( (len(self.nameInput.text())==0) or \
(len(self.portInput.text())==0) or \
(len(self.homedirInput.text())==0) ):
QMessageBox.information(self, "Missed it", "Please fill all the settings before starting sharing", QMessageBox.Ok, QMessageBox.Ok)
return False
if (not os.path.exists(self.homedirInput.text())):
QMessageBox.information(self, "Caught you!", "You are trying to share a path which does not exist.\nCaught you!", QMessageBox.Ok, QMessageBox.Ok)
return False
self.sett.server_name = self.nameInput.text()
self.sett.port = self.portInput.value()
self.sett.max_cons = self.maxconInput.value()
self.sett.max_cons_per_ip = self.maxconperipInput.value()
self.sett.exchange_url = self.exchangeInput.text()
self.sett.homedir = self.homedirInput.text()
if self.speedInput.value() > 5220:
self.sett.max_upload_speed = 0
else:
self.sett.max_upload_speed = self.speedInput.value() * 1024
self.sett.max_download_speed = 1
self.sett.save_settings()
return True
def quitapp(self):
global server, gen_snapshot, app_is_running
mylog("quit event caught", gen_snapshot)
if server:
server.close_all()
del self.srv
mylog(self.snapshot_thread)
if self.snapshot_thread:
gen_snapshot = False
del self.snapshot_thread
sys.exit()
def check_server(self, pressed):
global server, gen_snapshot, server_running_status, PORT
PORT = self.sett.port
self.mainbtn.setEnabled(False)
if not server and not server_running_status:
if (self.saveData() == False):
self.mainbtn.setEnabled(True)
return
if not is_port_available(PORT):
mylog("\nPort : " + str(PORT) + " is not available\n")
QMessageBox.critical(self, "Port error", "Port " + str(PORT) + " is not available.\nPlease change the port in settings.\n", QMessageBox.Ok, QMessageBox.Ok)
self.mainbtn.setEnabled(True)
return
self.setStatusTip("Starting, please wait...")
# if not server_running_status:
# QMessageBox.critical(self, "Error", "Error while starting sharing.", QMessageBox.Ok, QMessageBox.Ok)
# self.statusBar().showMessage("Error occured.")
# return
self.srv = myserver()
self.srv.start()
msg = "Sharing on " + get_ip_address() + ":" + str(self.srv.getport())
while not server_running_status:
time.sleep(0.5)
self.mainbtn.setText("Stop Sharing")
self.mainbtn.setStyleSheet("background-color: #f62459; color: white; border: none; padding: 5px;")
self.setStatusTip(msg)
gen_snapshot = True
self.exchange_connect()
self.snapshot_thread = generate_system_snapshot()
self.snapshot_thread.start()
elif server and server_running_status:
mylog("stopping server")
self.setStatusTip("Stopping, please wait...")
server.close_all()
server_running_status = False
# wait for the thread to exit
# if it doesn't within given time, close it forcibly
count = 4
mylog("Waiting for server thread to end")
            while self.srv.is_alive() and count > 0:
time.sleep(0.5)
count -= 1
if count == 0:
mylog("Shit happens! Shutting down server forcibly.")
del self.srv, server
self.srv = None
server = None
# end snapshot generation thread
if gen_snapshot:
gen_snapshot = False
# wait for the thread to exit
                while self.snapshot_thread.is_alive():
mylog("Waiting for snapshot thread to end.")
time.sleep(1)
self.snapshot_thread = None
self.setStatusTip("Stopped")
server_running_status = False
self.exchange_disconnect()
self.mainbtn.setText("Start Sharing")
self.mainbtn.setStyleSheet("background-color: #40e0d0; color: black; border: none; padding: 5px;")\
else:
print('doing nothing')
return
self.mainbtn.setEnabled(True)
def closeEvent(self, event):
global app_is_running
app_is_running = False
try:
if self.srv is not None:
self.setStatusTip("Cleaning up")
server.close_all()
del self.srv
global gen_snapshot
if self.snapshot_thread:
gen_snapshot = False
del self.snapshot_thread
if self.exchange_process:
self.exchange_process.poll()
                if self.exchange_process.returncode is None:
self.exchange_process.kill()
del self.exchange_process
self.exchange_process = None
mylog('Exchange UI closed.')
mylog("Cleaned up")
except:
pass
finally:
reply = QMessageBox.question(self, 'Close', "Are you sure to exit ?", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.Yes:
event.accept()
raise KeyboardInterrupt
else:
event.ignore()
def capMonitor(self):
global CAPERROR
# self.capsignal = pyqtSignal()
# self.capsignal.connect(self.showCapError)
mylog("Cap monitor starts")
        while app_is_running:
if CAPERROR:
# QMessageBox.information(self, "Cap err..", "You must satisfy the minimum cap as per your exchange.", QMessageBox.Ok, QMessageBox.Ok)
self.cerrnotifier.s.emit()
CAPERROR = False
# don't choke while you run
time.sleep(1)
mylog("Cap monitor thread quits.")
def checkPortUI(self):
text, ok = QInputDialog.getText(self, "Input Dialog", "Enter any port")
try:
port = int(text)
if port < 0 or port > 65535:
raise ValueError
if ok:
if is_port_available(int(text)):
QMessageBox.information(self, 'Message', "Port is available", QMessageBox.Ok, QMessageBox.Ok)
else:
QMessageBox.critical(self, 'Message', "Port is unavailable", QMessageBox.Ok, QMessageBox.Ok)
except ValueError:
QMessageBox.warning(self, 'Error', "Port number should be a number between 0 and 65535", QMessageBox.Ok, QMessageBox.Ok)
def show_help(self):
url = QUrl("https://21lane.github.io/howto.html")
QDesktopServices.openUrl(url)
def show_git(self):
url = QUrl("https://github.com/21lane/21Lane")
QDesktopServices.openUrl(url)
def open_exchange(self):
global exchange_url
uri = exchange_url
self.exchange_process = subprocess.Popen([python, "exchange_client.py", uri])
def exchange_disconnect(self, signalFrom=None):
global exchange_url, exchange_connect_status
if not exchange_connect_status:
return
if not signalFrom:
reply = QMessageBox.question(self, '21Exchange', "You are connected. Do you want to log out from the server?", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
else:
reply = QMessageBox.information(self, '21Exchange', "You will now be disconnected from the exchange.", QMessageBox.Ok, QMessageBox.Ok)
if (reply == QMessageBox.Yes) or (reply == QMessageBox.Ok):
if 'session_id' in ls(pwd):
f = open('session_id', 'r')
sessionid = f.read().strip()
f.close()
else:
sessionid = ''
post_data = { 'action':'disconnect' }
uri = exchange_url+'/cgi-bin/actions.py'
try:
headers = {'user-agent':'21Lane'}
r = requests.post(url=uri, data=post_data, cookies={'session_id':sessionid}, headers=headers, proxies={'socks':None, 'http':None}, timeout=5)
if r.status_code == 200 and r.text.strip() == 'ok':
exchange_connect_status = False
QMessageBox.information(self, '21Exchange', "You have been logged out.")
if 'session_id' in ls(pwd):
os.remove('session_id')
mylog("session_id file removed")
if self.exchangebtn.isEnabled():
self.exchangebtn.setEnabled(False)
self.exchangebtn.setStyleSheet("background-color: #bdc3c7; color: white; border: none; padding: 5px 15px;")
self.exchangebtn.disconnect()
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError, ConnectionAbortedError, requests.exceptions.Timeout) as e:
QMessageBox.critical(self, 'Network error', 'Cannot connect to exchange. Sharing is up!', QMessageBox.Ok, QMessageBox.Ok)
# raise e
except Exception as e:
# first close any open file to avoid permissions error in windows, and other similar errors
try:
if not f.closed:
f.close()
if not dest_file.closed:
dest_file.close()
if not source_file.closed:
                        source_file.close()
except NameError:
pass
if 'session_id' in ls(pwd):
os.remove('session_id')
QMessageBox.critical(self, 'Error', "Some error occured!", QMessageBox.Ok, QMessageBox.Ok)
mylog(str(e) + ' ' + 'is the error')
raise e
def exchange_connect(self):
global server, exchange_url, PORT, exchange_connect_status
if len(self.sett.exchange_url) == 0:
return
if not server:
QMessageBox.warning(self, 'Sorry', "You must have sharing enabled to connect to an exchange.", QMessageBox.Ok, QMessageBox.Ok)
return
try:
exchange_url = self.sett.exchange_url
url = exchange_url+"/cgi-bin/actions.py"
server_name = self.sett.server_name
post_data = { 'action':'connect', 'server_name':server_name, 'port':PORT, 'IP':get_ip_address() }
if 'session_id' in ls(pwd):
f = open('session_id', 'r')
ckstr = f.read()
f.close()
ck = ckstr.strip()
else:
ck = None
if not ck is None:
cookie_dic = {'session_id':ck}
else:
cookie_dic = None
headers = {'user-agent':'21Lane'}
r = requests.post(url, data=post_data, cookies=cookie_dic, headers=headers, proxies={'socks':None, 'http':None}, timeout=5)
sessionid = None
if r.status_code == 200:
f = open('session_id', 'w')
f.write(r.text.strip())
f.close()
if r.status_code == 404:
QMessageBox.warning(self, "Invalid URL", "Oops... You entered an invalid URL / host.", QMessageBox.Ok, QMessageBox.Ok)
return
exchange_connect_status = True
if not self.exchangebtn.isEnabled():
self.exchangebtn.setEnabled(True)
self.exchangebtn.setStyleSheet("background-color: #0a2c9b; color: white; border: none; padding: 5px 15px;")
self.exchangebtn.clicked.connect(self.open_exchange)
# self.exchangebtn.setEnabled(True)
# self.exchangebtn.setStyleSheet("background-color: blue; color: white; border: none; padding: 5px;")
# self.exchangebtn.clicked[bool].connect(self.open_exchange)
# now upload the snapshot file, if any like a good boy
# this didn't work
# if ('snapshot.json' in ls(pwd) and exchange_url):
# f = open('snapshot.json', 'rb')
# print("uploading snapshot file")
# r = requests.post(url=exchange_url, files={'filecontent':f.read()}, stream=True)
# f.close()
# print("snapshot file uploaded")
# check whether the file is ready to be uploaded and
# send a message to exchange_url, indicating the file is ready to be uploaded
# if 'snapshot.json' in ls(pwd) and exchange_url:
# r = requests.post(url='http://localhost:8000/cgi-bin/get_snapshot_file.py')
# print(r.textn)
# now trying to place the snapshot file in anonymous user's directory
# to be uploaded to the exchange.
# oh boy, you worked graciously, i'll keep you
# fuck all the above methods..
# let them be in comments for future references
# dest_dir = self.sett.homedir
# dest_path = os.path.join(dest_dir, 'snapshot.json')
# dest_file = open(dest_path, 'wb')
# source_file = open('snapshot.json', 'rb')
# dest_file.write(source_file.read())
# source_file.close()
# dest_file.close()
# # now notify you dad to take the parcel
# mylog('Asking dad to take the parcel')
# r = requests.post(url=exchange_url, data={'action':'snapshot'}, cookies={'session_id':sessionid}, timeout=5, proxies={'socks':None, 'http':None})
# # print(r.text, 'is the response for snapshot')
# if r.status_code==200 and r.text.strip()=='ok':
# mylog('Snapshot file uploaded successfully.')
# os.remove(dest_path)
# else:
# mylog("Some error occured while uploading snapshot.")
# uploading of snapshot is to be handled solely by snapshot thread
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError, ConnectionAbortedError, requests.exceptions.Timeout) as e:
QMessageBox.critical(self, 'Error', 'Network error!\nCannot connect to exchange.', QMessageBox.Ok, QMessageBox.Ok)
# raise e
except Exception as e:
# first close any open file to avoid permissions error in windows, and other similar errors
try:
if not f.closed:
f.close()
if not dest_file.closed:
dest_file.close()
if not source_file.closed:
                    source_file.close()
except NameError:
pass
if 'session_id' in ls(pwd):
os.remove('session_id')
QMessageBox.critical(self, 'Error', "Some error occured!", QMessageBox.Ok, QMessageBox.Ok)
mylog(str(e) + ' ' + 'is the error')
# raise e
if __name__ == "__main__":
import platform
if "windows" in platform.platform().lower():
import ctypes
myappid=u'himanshub16.21Lane-min.1.2'
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
app = QApplication([])
app.setWindowIcon(QIcon('icons/favicon.ico'))
ex = MainUI()
sys.exit(app.exec_())
|
main.py
|
import db
from twitch import (
get_OAuth,
is_streamer_live,
name_changed,
get_stream_title,
get_streamer_id,
)
from tt import *
from utils import *
import time
import schedule
import threading
from dotenv import load_dotenv
load_dotenv()
def main():
    # Flag that tracks whether the streamer data was modified
modified = False
    # List of allowed categories
categories = [
"Art",
"Science & Technology",
"Makers & Crafting",
"Talk Shows & Podcasts",
]
    # Set the tokens and request header
access_token, header = get_OAuth()
    # DataFrame with the streamers' data
streamers = read_streamers()
    # If it is not empty, fetch the IDs
if not streamers.empty:
        # Check whether the streamer is already registered in the DB
results = db.return_streamer_names().fetchall()
        # Store the names of the streamers already registered
names = []
for r in results:
names.append(*r)
        # Keep only the streamers that are not in the DB yet
streamers = delete_exist_streamers(streamers, names)
        # Return the dataframe with the id of each new streamer
streamers = get_streamer_id(streamers, header)
        # Insert each streamer into the DB
db.insert_streamers(streamers)
if names:
            # DataFrame with the streamers' data
streamers = read_streamers()
            # Fetch all the streamer info stored in the DB
results = db.return_streamer_info().fetchall()
            # Fill the dataframe with the ids
for streamer in results:
name = streamer[0]
idt = streamer[1]
index = streamers[streamers["Nome"] == str(name)].index
streamers.loc[index, "Id"] = str(idt)
            # Before anything else, check whether any streamer
            # has changed their channel name
            # print(streamers)
streamers, modified = name_changed(streamers, header)
if modified:
                # Save the changes to the .csv
update_csv(streamers)
                # Read it again
streamers = read_streamers()
results = db.return_streamer_info().fetchall()
            # Check whether the streamer is live or not
for streamer in results:
idt = streamer[1]
is_live, category = is_streamer_live(str(idt), header)
                # Besides checking if the streamer is live, also check
                # whether the stream is in an allowed category
if is_live and category in categories:
title = get_stream_title(idt, header)
                    # Remove commands from the title
title = remove_cmds_from_title(title)
                    # Check whether the streamer was already live before:
                    # if so, do not tweet again;
                    # if not, post the tweet
is_live = streamer[4]
if not is_live:
twitch = streamer[2]
twitter = streamer[3]
is_print = streamer[5]
streamer_type = streamer[6]
hashtags = streamer[7]
                        # Post the tweet
db.insert_on_stream(idt, True)
tweet(
twitch,
twitter,
title,
is_print,
streamer_type,
category,
hashtags,
)
else:
db.insert_on_stream(idt, False)
else:
print("O DataFrame está vazio!")
def threaded_job(job):
    # Run main in a separate thread
    thread = threading.Thread(target=job)
thread.start()
    # Wait for the thread to finish
thread.join()
if __name__ == "__main__":
schedule.every(15).seconds.do(threaded_job, main)
while True:
schedule.run_pending()
# Performance measure
time.sleep(10)
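        # Note: schedule.run_pending() only fires jobs whose interval has elapsed,
        # so the sleep above simply throttles how often the scheduler is polled
        # (based on how the schedule library behaves; the 10-second value is the author's choice).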
|
brenbot.py
|
#!/usr/bin/env python3
import sys
sys.path.append('/usr/local/lib/python3.5/site-packages')
import logging
import logging.handlers
import os
import time
import random
import threading
import subprocess
from slackclient import SlackClient
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
# Get the bot's token
SLACK_BOT_TOKEN = ""
with open(os.path.join(__location__, 'secret.txt')) as fp:
SLACK_BOT_TOKEN = fp.read().strip()
BOT_ID = ""
AT_BOT = ""
# Start Slack client
slack_client = SlackClient(SLACK_BOT_TOKEN)
# define delay for reading from the socket
READ_WEBSOCKET_DELAY = 1
# define the array used for reacting to users
REACT_TO_USERS = []
REACTIONS = []
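# The two lists above are populated in main(): REACTIONS from the custom emoji
# names matching the filter in emoji_filter.txt, and REACT_TO_USERS from the user
# names listed in react_to.txt. Other runtime settings also live in plain-text
# files next to the script (secret.txt, my_name.txt, MotD*.txt).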
# Create the running var
IS_RUNNING = True
# Deafults for logging
LOG_FILENAME = os.path.join(__location__, "logs/Brenbot.log")
LOG_LEVEL = logging.INFO # Could be e.g. "DEBUG" or "WARNING"
# Configure logging to log to a file, making a new file at midnight and keeping the last 3 days' data
# Give the logger a unique name (good practice)
logger = logging.getLogger(__name__)
# Set the log level to LOG_LEVEL
logger.setLevel(LOG_LEVEL)
# Make a handler that writes to a file, making a new file at midnight and keeping 3 backups
handler = logging.handlers.TimedRotatingFileHandler(LOG_FILENAME, when="midnight", backupCount=3)
# Format each log message like this
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
# Attach the formatter to the handler
handler.setFormatter(formatter)
# Attach the handler to the logger
logger.addHandler(handler)
# Make a class we can use to capture stdout and stderr in the log
class MyLogger(object):
def __init__(self, logger, level):
"""Needs a logger and a logger level."""
self.logger = logger
self.level = level
def write(self, message):
# Only log if there is a message (not just a new line)
if message.rstrip() != "":
self.logger.log(self.level, message.rstrip())
# Replace stdout with logging to file at INFO level
sys.stdout = MyLogger(logger, logging.INFO)
# Replace stderr with logging to file at ERROR level
sys.stderr = MyLogger(logger, logging.ERROR)
def main():
global REACTIONS, REACT_TO_USERS, IS_RUNNING, BOT_ID, AT_BOT, __location__
try:
if slack_client.rtm_connect():
REACTIONS = get_emojis("emoji_filter.txt")
REACT_TO_USERS = get_users_id("react_to.txt")
with open(os.path.join(__location__, "my_name.txt")) as fp:
BOT_ID = get_user_id(fp.readline().strip())
AT_BOT = "<@" + BOT_ID + ">"
print("Brenbot connected and running!")
reactions = threading.Thread(group=None, target=reactions_loop, name="Reactions")
motd = threading.Thread(group=None, target=motd_loop, name="MotD")
reactions.start()
motd.start()
reactions.join()
motd.join()
else:
print("Connection failed. Invalid Slack token?")
except Exception as exception:
IS_RUNNING = False
print(str(exception))
def reactions_loop():
global IS_RUNNING
try:
while IS_RUNNING and threading.main_thread().is_alive():
parse_slack_output(slack_client.rtm_read())
time.sleep(READ_WEBSOCKET_DELAY)
except:
IS_RUNNING = False
def motd_loop():
global IS_RUNNING
try:
while IS_RUNNING and threading.main_thread().is_alive():
# current_time = time.localtime()
# Once a day at noon, post a MotD
# if current_time[3] == 12 and current_time[4] == 0 and current_time[5] == 0:
post_motd()
time.sleep(86400)
except:
IS_RUNNING = False
def post_motd():
global __location__
with open(os.path.join(__location__, "MotD.txt")) as fp:
lines = fp.read().strip().split("\n")
main_message = lines[random.randrange(0, len(lines))]
with open(os.path.join(__location__, "MotD_random.txt")) as fp:
lines = fp.read().strip().split("\n")
random_message = lines[random.randrange(0, len(lines))]
with open(os.path.join(__location__, "MotD_channels.txt")) as fp:
channels = fp.read().strip().split("\n")
for channel in channels:
slack_client.api_call("chat.postMessage", channel=channel,
text=main_message + random_message, as_user=True)
def get_user_id(name):
api_call = slack_client.api_call("users.list")
if api_call.get('ok'):
# retrieve all users so we can find our bot
users = api_call.get('members')
for user in users:
if 'name' in user and user.get('name') == name:
return user.get('id')
return None
else:
raise Exception("API call 'users.list' was unsuccessful")
def get_users_id(file_name):
global __location__
with open(os.path.join(__location__, file_name)) as fp:
names = fp.read().strip().split("\n")
api_call = slack_client.api_call("users.list")
if api_call.get('ok'):
# retrieve all users so we can find our bot
users = api_call.get('members')
results = []
for user in users:
if 'name' in user and user.get('name') in names:
results = results + [user.get('id')]
return results
else:
raise Exception("API call 'users.list' was unsuccessful")
def react_to_user(channel, ts):
"""
Reacts to messages by a user by using reactions (emoji)
"""
slack_client.api_call('reactions.add', name=REACTIONS[random.randrange(0, len(REACTIONS))],
timestamp=ts, channel=channel)
def react_to_message(channel, user, text):
"""
Reacts to a command (message) from a user by posting a response.
"""
# If fortune is in the text, we will assume the user wants a brenbot fortune
if 'fortune' in text and 'wild' in text:
fortune = subprocess.check_output(['fortune']).decode('utf-8')
fortune = "<@" + user + ">: " + fortune
slack_client.api_call('chat.postMessage', channel=channel, text=fortune, as_user=True)
elif 'fortune' in text and 'cow' in text:
        fortune = subprocess.check_output('fortune | cowsay -W 35', shell=True).decode('utf-8')
fortune = "<@" + user + ">:\n```" + fortune + "```"
slack_client.api_call('chat.postMessage', channel=channel, text=fortune, as_user=True)
elif 'fortune' in text:
fortune = subprocess.check_output(['fortune']).decode('utf-8')
fortune = "<@" + user + ">: " + fortune
slack_client.api_call('chat.postMessage', channel=channel, text=fortune, as_user=True)
elif user in REACT_TO_USERS and 'say:' in text:
# TODO make this command cleaner execution-wise
say_to_channel = ''.join(text.split('say:')[1:]).strip()
channel_to_send = '#' + say_to_channel.split(' ')[0].strip()
say_to_channel = ' '.join(say_to_channel.split(' ')[1:]).strip()
slack_client.api_call('chat.postMessage', channel=channel_to_send, text=say_to_channel, as_user=True)
def parse_slack_output(slack_rtm_output):
"""
The Slack Real Time Messaging API is an events fire-hose.
this parsing function returns None unless a message is
directed at the Bot, based on its ID.
"""
output_list = slack_rtm_output
if output_list and len(output_list) > 0:
for output in output_list:
if output and 'user' in output and output['user'] in REACT_TO_USERS \
and 'type' in output and output['type'] == 'message' \
and 'channel' in output \
and 'ts' in output:
react_to_user(output['channel'], output['ts'])
if output and 'type' in output and output['type'] == 'message' \
and 'text' in output and AT_BOT in output['text'] \
and 'channel' in output \
and 'user' in output:
react_to_message(output['channel'], output['user'], output['text'])
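# A sketch of the kind of event parse_slack_output expects from rtm_read()
# (field values are illustrative, not captured from a real workspace):
#   [{"type": "message", "channel": "C024BE91L", "user": "U2147483697",
#     "text": "<@U0BOTID> fortune", "ts": "1355517523.000005"}]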
def get_emojis(file_name):
global __location__
with open(os.path.join(__location__, file_name)) as fp:
key_filter = fp.read().strip()
api_call = slack_client.api_call("emoji.list")
emojis = api_call.get("emoji")
return [key for key in emojis.keys() if key_filter in key]
if __name__ == "__main__":
sys.exit(main())
|
car_client.py
|
"""
Client module for the parking management system
Author: Recall
Date: 2018-10-19
Modules: socket, multiprocessing, sys, os, time, signal
Email:
"""
from socket import *
from setting import *
from messageAff import user_message
from multiprocessing import Process
import sys,os,time,signal
class carClient(object):
def __init__(self):
self.sockfd = socket(AF_INET,SOCK_STREAM)
self.sockfd.connect(ADDR)
self.mes = user_message()
signal.signal(signal.SIGINT,self.dis_signal)
def dis_signal(self,sig,frame):
if sig == signal.SIGINT:
self.sockfd.send(b'quit')
sys.exit("强制退出")
elif sig == signal.SIGQUIT:
self.sockfd.send(b'quit')
def init_user(self,mec_list,is_regist=True):
if is_regist:
use = mec_list
else:
            data = 'select_user %s' % mec_list  # request the user's info from the database
self.sockfd.send(data.encode())
            # fetch the info from the database
use = self.sockfd.recv(2048).decode().split(" ")
if use[0] == 'error':
print("信息初始化错误")
return False
return use
def user(self,account):
'''
        Display the user's information.
        Shows the user's details and allows editing them.
'''
        print('Username\t:', self.userName)
        print('Phone\t\t:', self.userPhon)
        print('Email\t\t:', self.userEmail)
        print('Car name\t:', self.carName)
        print('Car type\t:', self.carClass)
        print('Color\t\t:', self.carColor)
        print('Plate number\t:', self.userPlat)
        if self.isVIP != "False":
            print('VIP\t\t:', 'Yes')
        else:
            print('VIP\t\t:', 'No')
# 这个choise模拟编辑按钮和退出按钮
# choise = input("编辑(输入(write))or 退出(输入(quit))")
# if choise == 'write':
# user_list = self.mes.change_user_msg(\
# self.userName,self.userPhon,self.userEmail,\
# self.userPlat,self.carName,self.carClass,self.carColor)
# # 这个choise模拟保存按钮和退出按钮
# choise = input('保存(输入(save))or退出(输入(quit))')
# if user_list[0] and choise == 'save':
# data_list = ["change",account]
# data_list = data_list + user_list[1:]
# data = ' '.join(data_list)
# self.sockfd.send(data.encode())
# data = self.sockfd.recv(1024).decode()
# if data == 'ok':
# print("信息修改成功")
# else:
# print('信息修改失败')
# elif user_list[0]:
# print("信息错误")
def select_history(self,account, userPlat):
'''
        Query the user's parking history.
        Parameters: account, plate number
'''
data = "select_history %s %s" % (account,userPlat)
self.sockfd.send(data.encode())
history = self.sockfd.recv(4096).decode()
if history == 'no_more':
            print('You have no more related history records')
            return 'You have no more related history records'
else:
return history
# data_list = history.split(' ')
# for i in data_list:
# mes_list = i.split('##')
# print('========I停车==========')
# print("尊敬的%s:" % self.userName)
# print("你的爱车%s于 %s 使用I停车"%(self.carName,mes_list[0]))
# print("于 %s 离开" % mes_list[1])
# print('=======================')
# print()
# print()
# choise = input("查看更多(输入:more)")
# # 这个choise模拟查看更多历史记录a
# if choise == 'more':
# # self.sockfd.send(b'more')
# self.select_history(account)
# else:
# self.sockfd.send(b'quit')
def add(self):
'''
        To be implemented later
'''
pass
def choise_car(self):
'''
        To be implemented later
'''
pass
def change_password(self,account,email,is_forget=False):
'''
        Change the user's password.
        A verification code is required and is sent to the user's email address.
'''
if is_forget:
data = 'select_email %s'% account
self.sockfd.send(data.encode())
aff = self.sockfd.recv(1024).decode()
if aff == 'error':
# print("未找到你的有邮箱,请确认账号是否有误")
return "未找到你的有邮箱,请确认账号是否有误"
else:
                client_email = aff  # simulates receiving the email address from the server
print(aff)
else:
client_email = self.userEmail
        if client_email == email:
            auth_code = self.mes.my_email(client_email)  # returns the verification code or False
return auth_code
else:
return "您输入的邮箱和注册邮箱不一致,请检查后再输入"
# if auth_code:
# input_auth_code = input("请输入邮箱验证码")
# if input_auth_code == auth_code:
# new_password = input("请输入新的密码")
# aff_new = input("请再次输入密码")
# if new_password != aff_new:
# print('前后密码不一致')
# # 判断密码是否符合要求
# if not self.mes.user_passwd_con(new_password):
# print("密码必须为6-10数字和字母")
# #这个choise模拟确认密码和取消按钮
# choise = input("确认修改(输入(aff))or取消(输入(quit))")
# if choise == 'aff' and new_password == aff_new and \
# self.mes.user_passwd_con(new_password):
# new_password = self.mes.encrypt(new_password)
# data = "change_password %s %s" % (account,new_password)
# self.sockfd.send(data.encode())
# aff = self.sockfd.recv(1024).decode()
# if aff:
# print("修改成功")
# return new_password
# else:
# self.change_password(self,account,is_forget=False)
# else:
# print("没有收到验证码?点击再次获取")
# return "没有收到验证码?点击再次获取"
# 这个choise模拟确认再次获取验证码
# choise = input("点击(输入(intp))")
# if choise == "intp":
# self.change_password(account)
# return False
def interface(self,account,pid):
"""
        Handle the client's operations after login.
        Features: view personal info, view parking records, change username, change password, receive server messages.
"""
# while True:
# print("=====请选择相应的功能=====")
# print(1,"个人信息")
# print(2,'停车记录')
# # print(3,'修改用户名') #个人信息已经处理
# print(4,'修改密码')
# print(5,'退出')
# print('=========================')
# choise = input('请选择相应功能')
# if choise == '1':
# self.init_user(mec_list=account,is_regist=False)
# self.user(account)
# elif choise == '2':
# self.select_history(account)
# elif choise == '3':
# pass
# elif choise == '4':
# self.change_password(account)
# elif choise == '5':
# # self.sockfd.send(b"quit")
# os.kill(pid,signal.SIGQUIT)
# break
# else:
# print("请正确输入命令")
pass
def get_history_msg(self,account,plat):
'''
        Initialize history messages.
        Receives the messages the server sent while the user was offline.
'''
self.sockfd.close()
self.sockfd = socket(AF_INET,SOCK_STREAM)
self.sockfd.connect(ADDR)
        # first send the server a flag indicating that this socket receives this client's messages
def data_recv(account,plat):
data = "get_history_msg %s %s" % (account,plat)
self.sockfd.send(data.encode())
data = self.sockfd.recv(4096).decode()
if data != 'no_data':
data_list = data.split("#=#")
for i in data_list:
msg_list = i.split("##")
play_msg = msg_list[2].split(" ")
if msg_list[1] == "park":
print("+++++++++++++I停车++++++++++")
print('尊敬的:%s' % self.userName)
print("你的车:%s" % self.carName)
print("于%s使用I停车"% play_msg[-1])
print("位置为:%s" % play_msg[0])
print("取车码:%s" % play_msg[1])
print("++++++++++++++++++++++++++++")
print()
print()
elif msg_list[1] == 'get_car':
print("************I停车***************")
print('尊敬的:%s' % self.userName)
print("你的车:%s" % self.carName)
print("于%s离开车库"% play_msg[-1])
print("停车时长:" + play_msg[0])
print("费用:" + play_msg[1])
print("余额:" + play_msg[2])
print("*******************************")
print()
print()
data_recv(account,plat)
while True:
            # sleep for fifteen minutes
time.sleep(15*60)
data_recv(account,plat)
def msg_handing(self,account,plat):
'''
        Handle message receiving.
        Parameters: account, plate number
        Sends a message request periodically, checks the message type and formats it.
        Needs a new process and a new socket.
'''
p = Process(target=self.get_history_msg,args=(account,plat))
p.daemon = True
p.start()
self.interface(account,p.pid)
def login(self,account='',password='',is_regist=False,mes_list=[]):
'''
        Handle user login.
        Reads the account and password and sends them to the server.
'''
account = account
password = password
if not account:
# account = input("请输入账号")
return "请输入账号"
        # check whether the phone number is valid
if not self.mes.user_phon_con(account):
            # print('Invalid phone number')
            return "Invalid phone number"
if not password:
# password = input("请输入")
return "请输入密码"
password = self.mes.encrypt(password)
        # this choise simulates clicking the login button
# choise = input('登录(输入login)')
# if choise == 'login' and self.mes.user_phon_con(account):
message = 'login %s %s' % (account,password)
self.sockfd.send(message.encode())
data = self.sockfd.recv(1024).decode()
if data == 'ok':
print("登录成功")
return "登录成功"
#这里补充用户初始化信息
# if is_regist:
# mes_list = [mes_list[0],mes_list[1],mes_list[2],mes_list[4],mes_list[5],mes_list[6],mes_list[7]]
# self.init_user(mes_list,is_regist=is_regist)
# else:
# self.init_user(account,is_regist=is_regist)
# self.msg_handing(account,self.userPlat)
else:
print("登录失败,请正确输入用户信息")
return "登录失败,请正确输入用户名或密码"
# 这个choise模拟登录失败的弹框提示确认按钮
# choise = input("确认(输入aff)")
# if choise == 'aff':
# return
def forget(self, account, email):
'''
        Handle forgotten passwords.
        Not fully implemented yet.
'''
        # Since the initial screen is the login screen, the user should only reach
        # password recovery after failing to log in, so the account should be taken
        # from what was already entered on the login screen. The input below only
        # simulates that; the GUI version should pass the account in directly.
        # account = input("Please enter the login account")
        # account = account
        # same UI as change password
aff = self.change_password(account, email, is_forget=True)
if aff:
# self.login(account=account,password=aff,is_regist=False)
return aff
else:
return "修改失败"
# print("修改失败")
def regist(self, L):
'''
        Handle user registration.
        Does a basic validation of the user info and sends it to the server.
'''
mes_list = self.mes.gain_message(L)
if mes_list[0]:
mes_list[4] = self.mes.encrypt(mes_list[4])
data_list = ["regist"]
data_list = data_list + mes_list[1:]
data = ' '.join(data_list)
            # this choise simulates clicking the register button
# choise = input("注册(输入:regist)")
# if choise == "regist":
self.sockfd.send(data.encode())
aff = self.sockfd.recv(1024).decode()
if aff == 'ok':
                print('Registration successful')
                return 'Registration successful'
# self.login(data_list[2],data_list[4],is_regist=True,mes_list=data_list[1:])
else:
print(aff)
return aff
def send_email(self, my_email):
self.mes.my_email(my_email)
def main(self):
while True:
print("=====请选择相应的功能=====")
print("1 登录")
print("2 忘记密码")
print("3 注册")
print("4 退出")
print('=========================')
choise = '1'
choise = input("请输入选项")
            # This chooses between login and register; the default is the login screen.
            # There is no GUI yet, so it defaults to '1' (login); the forgot-password and
            # register buttons can jump elsewhere by assigning a different choise value.
if choise == '1':
self.login()
elif choise == '2':
self.forget()
elif choise == '3':
self.regist()
elif choise == '4':
self.sockfd.send(b"quit")
sys.exit("程序退出")
else:
                # remove this else clause once the GUI is loaded
                print("Invalid command")
|
testworkflow.py
|
"""
Workflow API module tests
"""
import json
import os
import tempfile
import unittest
from http.server import HTTPServer, BaseHTTPRequestHandler
from threading import Thread
from unittest.mock import patch
from fastapi.testclient import TestClient
from txtai.api import app, start
# Configuration for workflows
WORKFLOWS = """
# Embeddings index
writable: true
embeddings:
path: sentence-transformers/nli-mpnet-base-v2
# Labels
labels:
path: prajjwal1/bert-medium-mnli
nop:
# Text segmentation
segmentation:
sentences: true
# Workflow definitions
workflow:
labels:
tasks:
- action: labels
initialize: testnotfound
args: [[positive, negative]]
multiaction:
tasks:
- action:
- labels
- nop
initialize: testapi.testworkflow.TestInitFinal
finalize: testapi.testworkflow.TestInitFinal
merge: concat
args:
- [[positive, negative], false, True]
- null
segment:
tasks:
- action: segmentation
- action: index
get:
tasks:
- task: service
url: http://127.0.0.1:8001/testget
method: get
params:
text:
post:
tasks:
- task: service
url: http://127.0.0.1:8001/testpost
params:
xml:
tasks:
- task: service
url: http://127.0.0.1:8001/xml
method: get
batch: false
extract: row
params:
text:
"""
class RequestHandler(BaseHTTPRequestHandler):
"""
Test HTTP handler.
"""
def do_GET(self):
"""
GET request handler.
"""
self.send_response(200)
if self.path.startswith("/xml"):
response = "<row><text>test</text></row>".encode("utf-8")
mime = "application/xml"
else:
response = '[{"text": "test"}]'.encode("utf-8")
mime = "application/json"
self.send_header("content-type", mime)
self.send_header("content-length", len(response))
self.end_headers()
self.wfile.write(response)
self.wfile.flush()
def do_POST(self):
"""
POST request handler.
"""
length = int(self.headers["content-length"])
data = json.loads(self.rfile.read(length))
response = json.dumps([[y for y in x.split(".") if y] for x in data]).encode("utf-8")
self.send_response(200)
self.send_header("content-type", "application/json")
self.send_header("content-length", len(response))
self.end_headers()
self.wfile.write(response)
self.wfile.flush()
class TestWorkflow(unittest.TestCase):
"""
API tests for workflows.
"""
@staticmethod
@patch.dict(os.environ, {"CONFIG": os.path.join(tempfile.gettempdir(), "testapi.yml"), "API_CLASS": "txtai.api.API"})
def start():
"""
Starts a mock FastAPI client.
"""
config = os.path.join(tempfile.gettempdir(), "testapi.yml")
with open(config, "w", encoding="utf-8") as output:
output.write(WORKFLOWS)
client = TestClient(app)
start()
return client
@classmethod
def setUpClass(cls):
"""
Create API client on creation of class.
"""
cls.client = TestWorkflow.start()
cls.httpd = HTTPServer(("127.0.0.1", 8001), RequestHandler)
server = Thread(target=cls.httpd.serve_forever)
        server.daemon = True
server.start()
@classmethod
def tearDownClass(cls):
"""
Shutdown mock http server.
"""
cls.httpd.shutdown()
def testServiceGet(self):
"""
Test workflow with ServiceTask GET via API
"""
text = "This is a test sentence. And another sentence to split."
results = self.client.post("workflow", json={"name": "get", "elements": [text]}).json()
self.assertEqual(len(results), 1)
self.assertEqual(len(results[0]), 1)
def testServicePost(self):
"""
Test workflow with ServiceTask POST via API
"""
text = "This is a test sentence. And another sentence to split."
results = self.client.post("workflow", json={"name": "post", "elements": [text]}).json()
self.assertEqual(len(results), 1)
self.assertEqual(len(results[0]), 2)
def testServiceXml(self):
"""
Test workflow with ServiceTask GET via API and XML response
"""
text = "This is a test sentence. And another sentence to split."
results = self.client.post("workflow", json={"name": "xml", "elements": [text]}).json()
self.assertEqual(len(results), 1)
self.assertEqual(len(results[0]), 1)
def testWorkflowLabels(self):
"""
Test workflow with labels via API
"""
text = "This is the best"
results = self.client.post("workflow", json={"name": "labels", "elements": [text]}).json()
self.assertEqual(results[0][0], 0)
results = self.client.post("workflow", json={"name": "multiaction", "elements": [text]}).json()
self.assertEqual(results[0], "['positive']. This is the best")
def testWorkflowSegment(self):
"""
Test workflow with segmentation via API
"""
text = "This is a test sentence. And another sentence to split."
results = self.client.post("workflow", json={"name": "segment", "elements": [text]}).json()
self.assertEqual(len(results), 2)
results = self.client.post("workflow", json={"name": "segment", "elements": [[0, text]]}).json()
self.assertEqual(len(results), 2)
class TestInitFinal:
"""
Class to test task initialize and finalize calls.
"""
def __call__(self):
pass
|
main.py
|
import argparse
from threading import Thread
import time
from socket import *
from os.path import *
import os
import struct
import hashlib
import math
def _argparse():
parser = argparse.ArgumentParser(description="This is description!")
parser.add_argument('--ip', action='store', required=True, dest='ip', help='ip')
return parser.parse_args()
file_dir = 'share'
# --- ip identification ---
local_ip = '' # ip address of this host
init = _argparse()
peer_ip = init.ip # other peer' ip addresses
# --- ports identification ---
port = 21000 # TCP receive port (used for receiving file)
# --- The following codes are mainly divided into 3 parts: [Thread], [Module] and [Function] ---
# --- the function marked as [Thread] will be a real thread in the running time ---
# --- the function marked as [Module] can perform some important functions such as send file and detect online ---
# --- the function marked as [Function] just performs some essential function ---
# [Thread: send file by TCP]
def send_file(receiver_ip, receiver_port):
while True:
while True:
try:
sender_socket = socket(AF_INET, SOCK_STREAM)
sender_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sender_socket.connect((receiver_ip, receiver_port))
break
except Exception as e:
pass
while True:
try:
total_file_list = scan_file('share')
for file_name in total_file_list:
file_size = os.path.getsize(file_name)
sender_socket.send(create_file_info(file_name))
while True:
file_flag_b = sender_socket.recv(4)
file_flag = struct.unpack('!I', file_flag_b)[0]
if file_flag == 0:
break
else: # file_flag = 1, 2, 3
sendFile(file_name, file_size, sender_socket)
except:
break
# [Function: traverse the file]
def scan_file(file_dir):
flag = os.path.exists(file_dir)
if not flag:
os.mkdir(file_dir)
file_list = []
file_folder_list = os.listdir(file_dir)
for file_folder_name in file_folder_list:
suffixName = file_folder_name[-8:]
if suffixName != '.download':
if isfile(join(file_dir, file_folder_name)):
file_list.append(join(file_dir, file_folder_name))
else:
file_list.extend(scan_file(join(file_dir, file_folder_name)))
return file_list
# [Function: get file information]
def create_file_info(file_name):
file_size = os.path.getsize(file_name)
file_mtime = os.path.getmtime(file_name)
file_md5 = create_file_md5(file_name)
file_info = struct.pack('!QQd', len(file_name.encode()), file_size,
file_mtime) + file_name.encode() + file_md5.encode()
return file_info
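# Sketch of the header layout produced by create_file_info (byte offsets follow the
# struct format '!QQd' used above; this is a reading aid, not extra protocol):
#   bytes 0-7   : file name length (unsigned long long, big-endian)
#   bytes 8-15  : file size        (unsigned long long, big-endian)
#   bytes 16-23 : file mtime       (double, big-endian)
#   bytes 24-   : file name, followed by the 32-char hex md5 of the first 4 MB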
# [Function: get file md5]
def create_file_md5(file_name):
file = open(file=file_name, mode='rb')
file.seek(0)
content = file.read(1024 * 1024 * 4)
content_md5 = hashlib.md5(content).hexdigest()
file.close()
return content_md5
# [Module: send file by TCP]
def sendFile(file_name, file_size, sender_socket):
file_name_length = len(file_name.encode())
sender_socket.send(struct.pack('!I', file_name_length) + file_name.encode())
for i in range(50):
sender_socket.send(get_file_block(file_name, file_size, i)) # transfer the file by blocks
sender_socket.close()
# [Function: get each file block for send]
def get_file_block(file_name, file_size, block_index):
block_size = math.ceil(file_size / 10)
f = open(file_name, 'rb')
f.seek(block_index * block_size)
file_block = f.read(block_size)
f.close()
return file_block
# [Thread: receive the peer's files]
def receive_file(local_ip, port):
receiver_socket = socket(AF_INET, SOCK_STREAM)
receiver_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
receiver_socket.bind((local_ip, port))
receiver_socket.listen(128)
while True:
connection_socket, sender_addr = receiver_socket.accept()
while True:
try:
file_info = connection_socket.recv(1500)
file_size, file_mtime, file_name, file_md5 = unpack_file_info(file_info)
file_flag = create_file_flag(file_name, file_mtime, file_md5)
if file_flag == 0:
connection_socket.send(struct.pack('!I', file_flag))
elif file_flag == 1:
write_file(file_name, file_size, file_flag, connection_socket)
elif file_flag == 2:
# print('breakpoint resume')
os.remove(file_name + '.download')
write_file(file_name, file_size, file_flag, connection_socket)
elif file_flag == 3:
os.remove(file_name)
write_file(file_name, file_size, file_flag, connection_socket)
except:
break
# [Function: decode the binary information sent from sender]
def unpack_file_info(file_info):
file_name_length, file_size, file_mtime = struct.unpack('!QQd', file_info[:24])
file_name = file_info[24:24 + file_name_length].decode()
file_md5_b = file_info[24 + file_name_length:]
file_md5 = file_md5_b.decode()
return file_size, file_mtime, file_name, file_md5
# [Module: create the flag to indicate the file]
def create_file_flag(file_name, file_mtime, file_md5):
"""
0: The file is the same as the peer's
1: The file is added in peer's side, not in this side
2: The file is shared, but not completely received
3: Sender's file is updated
"""
if not os.path.exists(file_name):
if not os.path.exists(file_name + '.download'):
file_flag = 1
else:
file_flag = 2
else:
host_file_md5 = create_file_md5(file_name)
if file_md5 == host_file_md5:
file_flag = 0
else:
host_file_mtime = os.path.getmtime(file_name)
if host_file_mtime < file_mtime:
file_flag = 3
else:
file_flag = 0
return file_flag
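# Summary of how receive_file reacts to each flag above (no extra logic, just a
# reading aid for the handshake):
#   0 -> acknowledge and skip the file
#   1 -> download it as <name>.download, then rename it
#   2 -> drop the stale .download file and download again
#   3 -> replace the local copy with the sender's newer version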
# [Module: download the files]
def write_file(file_name, file_size, file_flag, connection_socket):
print(file_name)
path, rest_file_name = os.path.split(file_name)
if not path == '':
flag = os.path.exists(path)
if not flag:
os.makedirs(path)
file_flag_b = struct.pack('!I', file_flag)
connection_socket.send(file_flag_b)
file_length = struct.unpack('!I', connection_socket.recv(4))[0]
file_name = connection_socket.recv(file_length).decode()
f = open(file=file_name + '.download', mode='wb')
while True:
text = connection_socket.recv(1024 * 64)
f.write(text)
if text == b'': # the file is transferred completely
break
f.close()
file_flag = 0
file_flag_b = struct.pack('!I', file_flag)
connection_socket.send(file_flag_b)
os.rename(file_name + '.download', file_name)
def main():
r = Thread(target=receive_file, args=(local_ip, port))
r.start()
s = Thread(target=send_file, args=(peer_ip, port))
s.start()
if __name__ == '__main__':
main()
|
selective_search_multiprocess.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import sys
from multiprocessing import Process
import os
import mmcv
import cv2
import selectivesearch
import matplotlib.pyplot as plt
import pprint
import matplotlib.patches as mpatches
import skimage.io
import selective_search
def work(begin_line,end_line):
cnt = 0
inner_cnt = 0
proposals_list = []
with open('../data/VOCdevkit/train.txt', 'r') as f:
        for cnt, line in enumerate(f.readlines()):  # enumerate so the begin/end slicing actually advances
            if cnt < begin_line:
                continue
            if cnt > end_line:
                break
inner_cnt += 1
if inner_cnt % 10 == 0:
print('process %d gone %d' % (begin_line, inner_cnt))
img_name = line.strip() + '.jpg'
print(img_name)
# img = cv2.imread('../data/VOCdevkit/JPEGImages/'+img_name)
# img_lbl, boxes = selectivesearch.selective_search(
# img,scale=500,sigma=0.9, min_size=20)
image = skimage.io.imread('../data/VOCdevkit/JPEGImages/' + img_name)
boxes = selective_search.selective_search(image, mode='fast', random_sort=False)
proposals = []
# print(len(boxes))
boxes_filter = selective_search.box_filter(boxes, min_size=20, topN=1000)
for box in boxes_filter:
proposal = list(box)
proposal.append(1)
proposals.append(proposal)
proposals_list.append(proposals)
mmcv.dump(proposals_list, '../ss_dump_dir/' + str(begin_line) + '.pkl')
if __name__ == '__main__':
print(os.cpu_count())
print(__doc__)
cnt = 0
process_list = []
pro_num = 40
total_num = 10728
proposals_list = [[] for i in range(pro_num+1)]
le = total_num // pro_num
for n in range(pro_num):
p = Process(target=work, args=(le*n,le*(n+1)))
process_list.append(p)
p.start()
p = Process(target=work, args=(le*pro_num,total_num))
p.start()
process_list.append(p)
for p in process_list:
p.join()
print('one process is over +++++')
# cv.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 1, cv.LINE_AA)
# cv.imshow("edges", edges)
# cv.imshow("edgeboxes", im)
# cv.waitKey(0)
# cv.destroyAllWindows()
|
context_test.py
|
"""Tests for context.py."""
import logging
import random
import socket
import threading
import time
from .google_imports import apiproxy_errors
from .google_imports import datastore
from .google_imports import datastore_errors
from .google_imports import datastore_pbs
from .google_imports import datastore_rpc
from .google_imports import memcache
from .google_imports import taskqueue
from .google_test_imports import unittest
from .google_test_imports import real_unittest
from . import context
from . import eventloop
from . import model
from . import query
from . import tasklets
from . import test_utils
# Return values for memcache_{set,add,replace,cas}.
STORED = True
NOT_STORED = False
class MyAutoBatcher(context.AutoBatcher):
_log = []
@classmethod
def reset_log(cls):
cls._log = []
def __init__(self, todo_tasklet, limit):
def wrap(todo, options):
self.__class__._log.append((todo_tasklet.__name__, todo))
return todo_tasklet(todo, options)
super(MyAutoBatcher, self).__init__(wrap, limit)
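  # Each flush of the wrapped batcher records a (tasklet_name, todo) pair in _log,
  # so the tests below can assert how many batches ran and how many items each held.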
class ContextTestMixin(object):
the_module = context
def testContext_AutoBatcher_Get(self):
@tasklets.tasklet
def foo():
key1 = model.Key(flat=['Foo', 1])
key2 = model.Key(flat=['Foo', 2])
key3 = model.Key(flat=['Foo', 3])
fut1 = self.ctx.get(key1)
fut2 = self.ctx.get(key2)
fut3 = self.ctx.get(key3)
ent1 = yield fut1
ent2 = yield fut2
ent3 = yield fut3
raise tasklets.Return([ent1, ent2, ent3])
ents = foo().get_result()
self.assertEqual(ents, [None, None, None])
log = MyAutoBatcher._log
self.assertEqual(len(log), 4)
name, todo = log[0]
self.assertEqual(name, '_memcache_get_tasklet')
self.assertEqual(len(todo), 3)
name, todo = log[1]
self.assertEqual(name, '_memcache_set_tasklet')
self.assertEqual(len(todo), 3)
name, todo = log[2]
self.assertEqual(name, '_memcache_get_tasklet')
self.assertEqual(len(todo), 3)
name, todo = log[3]
self.assertEqual(name, '_get_tasklet')
self.assertEqual(len(todo), 3)
@tasklets.tasklet
def create_entities(self):
key0 = model.Key(flat=['Foo', None])
ent1 = model.Model(key=key0)
ent2 = model.Model(key=key0)
ent3 = model.Model(key=key0)
fut1 = self.ctx.put(ent1)
fut2 = self.ctx.put(ent2)
fut3 = self.ctx.put(ent3)
key1 = yield fut1
key2 = yield fut2
key3 = yield fut3
raise tasklets.Return([key1, key2, key3])
def make_bad_transaction(*arg, **kwargs):
raise NotImplementedError
def testContext_AutoBatcher_Put(self):
keys = self.create_entities().get_result()
self.assertEqual(len(keys), 3)
self.assertTrue(None not in keys)
log = MyAutoBatcher._log
self.assertEqual(len(log), 2)
name, todo = log[0]
self.assertEqual(name, '_put_tasklet')
self.assertEqual(len(todo), 3)
name, todo = log[1]
self.assertEqual(name, '_memcache_del_tasklet')
self.assertEqual(len(todo), 3)
def testContext_AutoBatcher_Delete(self):
@tasklets.tasklet
def foo():
key1 = model.Key(flat=['Foo', 1])
key2 = model.Key(flat=['Foo', 2])
key3 = model.Key(flat=['Foo', 3])
fut1 = self.ctx.delete(key1)
fut2 = self.ctx.delete(key2)
fut3 = self.ctx.delete(key3)
yield fut1
yield fut2
yield fut3
foo().check_success()
self.assertEqual(len(MyAutoBatcher._log), 2)
name, todo = MyAutoBatcher._log[0]
self.assertEqual(name, '_memcache_set_tasklet')
self.assertEqual(len(todo), 3)
name, todo = MyAutoBatcher._log[1]
self.assertEqual(name, '_delete_tasklet')
self.assertEqual(len(todo), 3)
def testContext_AutoBatcher_Limit(self):
# Check that the default limit is taken from the connection.
self.assertEqual(self.ctx._get_batcher._limit,
datastore_rpc.Connection.MAX_GET_KEYS)
# Create a Connection with config options that will be overridden
# by later config options
conn_config = context.ContextOptions(max_put_entities=3,
max_memcache_items=7)
conn = model.make_connection(config=conn_config,
default_model=model.Expando)
real_config = context.ContextOptions(max_put_entities=25,
max_memcache_items=100)
self.ctx = context.Context(
conn=conn,
auto_batcher_class=MyAutoBatcher,
config=real_config)
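# Added note: with max_put_entities=25 from real_config, the 49 puts below
# should be flushed as two _put_tasklet batches (25 + 24); the matching
# memcache invalidations then appear as the two _memcache_del_tasklet batches
# asserted at the end of this test.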
@tasklets.tasklet
def foo():
es = [model.Model(key=model.Key('Foo', None)) for _ in range(49)]
fs = [self.ctx.put(e) for e in es]
self.ctx.flush()
ks = yield fs
self.assertEqual(len(ks), 49)
self.assertTrue(all(isinstance(k, model.Key) for k in ks))
foo().get_result()
self.assertEqual(len(MyAutoBatcher._log), 4)
for name, todo in MyAutoBatcher._log[2:]:
self.assertEqual(name, '_memcache_del_tasklet')
self.assertTrue(len(todo) in (24, 25))
for name, todo in MyAutoBatcher._log[:2]:
self.assertEqual(name, '_put_tasklet')
self.assertTrue(len(todo) in (24, 25))
def testContext_AutoBatcher_Errors(self):
# Test that errors are properly distributed over all Futures.
self.ExpectWarnings()
class Blobby(model.Model):
blob = model.BlobProperty()
ent1 = Blobby()
ent2 = Blobby(blob='x'*2000000)
fut1 = self.ctx.put(ent1)
fut2 = self.ctx.put(ent2) # Error
err1 = fut1.get_exception()
err2 = fut2.get_exception()
self.assertTrue(isinstance(err1, apiproxy_errors.RequestTooLargeError))
self.assertTrue(err1 is err2)
# Try memcache as well (different tasklet, different error).
fut1 = self.ctx.memcache_set('key1', 'x')
fut2 = self.ctx.memcache_set('key2', 'x'*1000001)
err1 = fut1.get_exception()
err2 = fut2.get_exception()
self.assertTrue(isinstance(err1, ValueError))
self.assertTrue(err1 is err2)
def testContext_MultiRpc(self):
# This test really tests the proper handling of MultiRpc by
# queue_rpc() in eventloop.py. It's easier to test from here, and
# gives more assurance that it works.
config = datastore_rpc.Configuration(max_get_keys=3, max_put_entities=3)
self.ctx._conn = model.make_connection(config, default_model=model.Expando)
@tasklets.tasklet
def foo():
ents = [model.Expando() for _ in range(10)]
futs = [self.ctx.put(ent) for ent in ents]
keys = yield futs
futs = [self.ctx.get(key) for key in keys]
ents2 = yield futs
self.assertEqual(ents2, ents)
raise tasklets.Return(keys)
keys = foo().get_result()
self.assertEqual(len(keys), 10)
def testContext_Cache(self):
@tasklets.tasklet
def foo():
key1 = model.Key(flat=('Foo', 1))
ent1 = model.Expando(key=key1, foo=42, bar='hello')
key = yield self.ctx.put(ent1)
self.assertTrue(key1 in self.ctx._cache) # Whitebox.
a = yield self.ctx.get(key1)
b = yield self.ctx.get(key1)
self.assertTrue(a is b)
yield self.ctx.delete(key1)
self.assertTrue(self.ctx._cache[key] is None) # Whitebox.
a = yield self.ctx.get(key1)
self.assertTrue(a is None)
self.ctx.clear_cache()
self.assertEqual(self.ctx._cache, {}) # Whitebox.
foo().check_success()
def testContext_CacheMemcache(self):
# Test that when get() finds the value in memcache, it updates
# _cache.
class Foo(model.Model):
pass
ctx = self.ctx
ctx.set_cache_policy(False)
ctx.set_memcache_policy(False)
ent = Foo()
key = ent.put()
mkey = ctx._memcache_prefix + key.urlsafe()
self.assertFalse(key in ctx._cache)
self.assertEqual(None, memcache.get(mkey))
ctx.set_memcache_policy(True)
key.get()
self.assertFalse(key in ctx._cache)
self.assertNotEqual(None, memcache.get(mkey))
eventloop.run()
ctx.set_cache_policy(True)
key.get() # Satisfied from memcache
self.assertTrue(key in ctx._cache)
def testContext_CacheMisses(self):
# Test that get() caches misses if use_datastore is true but not
# if false. This involves whitebox checks using ctx._cache.
# See issue 106. http://goo.gl/DLiij
ctx = self.ctx
key = model.Key('Foo', 42)
self.assertFalse(key in ctx._cache)
ctx.get(key, use_datastore=False).wait()
self.assertFalse(key in ctx._cache)
ctx.get(key, use_memcache=False).wait()
self.assertTrue(key in ctx._cache)
self.assertEqual(ctx._cache[key], None)
ctx.clear_cache()
ctx.get(key).wait()
self.assertTrue(key in ctx._cache)
self.assertEqual(ctx._cache[key], None)
def testContext_CachePolicy(self):
def should_cache(unused_key):
return False
@tasklets.tasklet
def foo():
key1 = model.Key(flat=('Foo', 1))
ent1 = model.Expando(key=key1, foo=42, bar='hello')
key = yield self.ctx.put(ent1)
self.assertTrue(key1 not in self.ctx._cache) # Whitebox.
a = yield self.ctx.get(key1)
b = yield self.ctx.get(key1)
self.assertTrue(a is not b)
yield self.ctx.delete(key1)
self.assertTrue(key not in self.ctx._cache) # Whitebox.
a = yield self.ctx.get(key1)
self.assertTrue(a is None)
self.ctx.set_cache_policy(should_cache)
self.ctx.set_memcache_policy(False)
foo().check_success()
def testContext_CachePolicyDisabledLater(self):
# If the cache is disabled after an entity is stored in the cache,
# further get() attempts *must not* return the result stored in cache.
self.ctx.set_cache_policy(lambda unused_key: True)
key1 = model.Key(flat=('Foo', 1))
ent1 = model.Expando(key=key1)
self.ctx.put(ent1).get_result()
# get() uses cache
self.assertTrue(key1 in self.ctx._cache) # Whitebox.
self.assertEqual(self.ctx.get(key1).get_result(), ent1)
# get() uses cache
self.ctx._cache[key1] = None # Whitebox.
self.assertEqual(self.ctx.get(key1).get_result(), None)
# get() doesn't use cache
self.ctx.set_cache_policy(lambda unused_key: False)
self.assertEqual(self.ctx.get(key1).get_result(), ent1)
def testContext_NamespaceBonanza(self):
# Test that memcache ops issued for datastore caching use the
# correct namespace.
def assertNone(expr):
self.assertTrue(expr is None, repr(expr))
def assertNotNone(expr):
self.assertTrue(expr is not None, repr(expr))
def assertLocked(expr):
self.assertTrue(expr is context._LOCKED, repr(expr))
def assertProtobuf(expr, ent):
self.assertEqual(expr,
ent._to_pb(set_key=False).SerializePartialToString())
class Foo(model.Model):
pass
k1 = model.Key(Foo, 1, namespace='a')
k2 = model.Key(Foo, 2, namespace='b')
mk1 = self.ctx._memcache_prefix + k1.urlsafe()
mk2 = self.ctx._memcache_prefix + k2.urlsafe()
e1 = Foo(key=k1)
e2 = Foo(key=k2)
self.ctx.set_cache_policy(False)
self.ctx.set_memcache_policy(True)
self.ctx.set_datastore_policy(False) # This will vary in subtests
# Test put with datastore policy off
k1 = self.ctx.put(e1).get_result()
k2 = self.ctx.put(e2).get_result()
# Nothing should be in the empty namespace
assertNone(memcache.get(mk1, namespace=''))
assertNone(memcache.get(mk2, namespace=''))
# Only k1 is found in namespace 'a'
assertProtobuf(memcache.get(mk1, namespace='a'), e1)
assertNone(memcache.get(mk2, namespace='a'))
# Only k2 is found in namespace 'b'
assertNone(memcache.get(mk1, namespace='b'))
assertProtobuf(memcache.get(mk2, namespace='b'), e2)
memcache.flush_all()
self.ctx.set_datastore_policy(True)
# Test put with datastore policy on
k1_fut = self.ctx.put(e1)
while not self.ctx._put_batcher._running:
eventloop.run0()
# Nothing should be in the empty namespace
assertNone(memcache.get(mk1, namespace=''))
assertNone(memcache.get(mk2, namespace=''))
# Only k1 is found in namespace 'a', as _LOCKED
assertLocked(memcache.get(mk1, namespace='a'))
assertNone(memcache.get(mk2, namespace='a'))
self.assertEqual(k1_fut.get_result(), k1)
# Have to test one at a time, otherwise _LOCKED value may not be set
k2_fut = self.ctx.put(e2)
while not self.ctx._put_batcher._running:
eventloop.run0()
# Only k2 is found in namespace 'b', as _LOCKED
assertNone(memcache.get(mk1, namespace='b'))
assertLocked(memcache.get(mk2, namespace='b'))
# Keys should be identical
self.assertEqual(k2_fut.get_result(), k2)
memcache.flush_all()
# Test get with cold cache
e1 = self.ctx.get(k1).get_result()
e2 = self.ctx.get(k2).get_result()
eventloop.run() # Wait for memcache RPCs to run
# Neither is found in the empty namespace
assertNone(memcache.get(mk1, namespace=''))
assertNone(memcache.get(mk2, namespace=''))
# Only k1 is found in namespace 'a'
assertProtobuf(memcache.get(mk1, namespace='a'), e1)
assertNone(memcache.get(mk2, namespace='a'))
# Only k2 is found in namespace 'b'
assertNone(memcache.get(mk1, namespace='b'))
assertProtobuf(memcache.get(mk2, namespace='b'), e2)
self.ctx.set_datastore_policy(False)
# Test get with warm cache
self.ctx.get(k1).get_result()
self.ctx.get(k2).get_result()
eventloop.run() # Wait for memcache RPCs to run
# Neither is found in the empty namespace
assertNone(memcache.get(mk1, namespace=''))
assertNone(memcache.get(mk2, namespace=''))
# Only k1 is found in namespace 'a'
assertNotNone(memcache.get(mk1, namespace='a'))
assertNone(memcache.get(mk2, namespace='a'))
# Only k2 is found in namespace 'b'
assertNone(memcache.get(mk1, namespace='b'))
assertNotNone(memcache.get(mk2, namespace='b'))
self.ctx.set_datastore_policy(True)
# Test delete
self.ctx.delete(k1).check_success()
self.ctx.delete(k2).check_success()
# Nothing should be in the empty namespace
assertNone(memcache.get(mk1, namespace=''))
assertNone(memcache.get(mk2, namespace=''))
# Only k1 is found in namespace 'a', as _LOCKED
assertLocked(memcache.get(mk1, namespace='a'))
assertNone(memcache.get(mk2, namespace='a'))
# Only k2 is found in namespace 'b', as _LOCKED
assertNone(memcache.get(mk1, namespace='b'))
assertLocked(memcache.get(mk2, namespace='b'))
memcache.flush_all()
# Test _clear_memcache (it deletes the keys)
self.ctx._clear_memcache([k1, k2]).check_success()
# Nothing should be in the empty namespace
assertNone(memcache.get(mk1, namespace=''))
assertNone(memcache.get(mk2, namespace=''))
# Nothing should be in namespace 'a'
assertNone(memcache.get(mk1, namespace='a'))
assertNone(memcache.get(mk2, namespace='a'))
# Nothing should be in namespace 'b'
assertNone(memcache.get(mk1, namespace='b'))
assertNone(memcache.get(mk2, namespace='b'))
def testContext_Memcache(self):
@tasklets.tasklet
def foo():
key1 = model.Key(flat=('Foo', 1))
key2 = model.Key(flat=('Foo', 2))
ent1 = model.Expando(key=key1, foo=42, bar='hello')
ent2 = model.Expando(key=key2, foo=1, bar='world')
self.ctx.set_memcache_policy(False) # Disable writing _LOCKED
k1, k2 = yield self.ctx.put(ent1), self.ctx.put(ent2)
self.ctx.set_memcache_policy(True)
self.assertEqual(k1, key1)
self.assertEqual(k2, key2)
# Write to memcache.
yield (self.ctx.get(k1, use_cache=False),
self.ctx.get(k2, use_cache=False))
eventloop.run() # Let other tasklet complete.
keys = [k1.urlsafe(), k2.urlsafe()]
results = memcache.get_multi(keys, key_prefix=self.ctx._memcache_prefix)
self.assertEqual(
results,
{key1.urlsafe(): ent1._to_pb(set_key=False).SerializePartialToString(),
key2.urlsafe(): ent2._to_pb(set_key=False).SerializePartialToString(),
})
foo().check_success()
def testContext_MemcacheMissingKind(self):
ctx = context.Context(
conn=model.make_connection(default_model=None),
auto_batcher_class=MyAutoBatcher)
ctx.set_memcache_policy(False)
ctx.set_cache_policy(False)
class Foo(model.Model):
foo = model.IntegerProperty()
bar = model.StringProperty()
key1 = model.Key(flat=('Foo', 1))
ent1 = Foo(key=key1, foo=42, bar='hello')
ctx.put(ent1).get_result()
ctx.set_memcache_policy(True)
ctx.get(key1).get_result() # Pull entity into memcache
model.Model._reset_kind_map()
self.assertRaises(model.KindError, ctx.get(key1).get_result)
ctx = context.Context(
conn=model.make_connection(default_model=Foo),
auto_batcher_class=MyAutoBatcher)
ctx.set_memcache_policy(True)
ctx.set_cache_policy(False)
ent1_res = ctx.get(key1).get_result()
self.assertEqual(ent1, ent1_res)
def testContext_MemcachePolicy(self):
badkeys = []
def tracking_add_multi_async(*args, **kwds):
try:
res = save_add_multi_async(*args, **kwds)
if badkeys and not res:
res = badkeys
track.append((args, kwds, res, None))
return res
except Exception, err:
track.append((args, kwds, None, err))
raise
@tasklets.tasklet
def foo():
k1, k2 = yield self.ctx.put(ent1), self.ctx.put(ent2)
self.assertEqual(k1, key1)
self.assertEqual(k2, key2)
# Write to memcache.
yield (self.ctx.get(k1, use_cache=False),
self.ctx.get(k2, use_cache=False))
eventloop.run() # Let other tasklet complete.
key1 = model.Key('Foo', 1)
key2 = model.Key('Foo', 2)
ent1 = model.Expando(key=key1, foo=42, bar='hello')
ent2 = model.Expando(key=key2, foo=1, bar='world')
save_add_multi_async = self.ctx._memcache.add_multi_async
try:
self.ctx._memcache.add_multi_async = tracking_add_multi_async
memcache.flush_all()
track = []
foo().check_success()
self.assertEqual(len(track), 1)
self.assertEqual(track[0][0],
({key1.urlsafe(): ent1._to_pb(),
key2.urlsafe(): ent2._to_pb()},))
self.assertEqual(track[0][1], {'key_prefix': self.ctx._memcache_prefix,
'time': 0})
memcache.flush_all()
track = []
self.ctx.set_memcache_policy(lambda unused_key: False)
foo().check_success()
self.assertEqual(len(track), 0)
memcache.flush_all()
track = []
self.ctx.set_memcache_policy(lambda key: key == key1)
foo().check_success()
self.assertEqual(len(track), 1)
self.assertEqual(track[0][0],
({key1.urlsafe(): ent1._to_pb()},))
self.assertEqual(track[0][1], {'key_prefix': self.ctx._memcache_prefix,
'time': 0})
memcache.flush_all()
track = []
self.ctx.set_memcache_policy(lambda unused_key: True)
self.ctx.set_memcache_timeout_policy(lambda key: key.id())
foo().check_success()
self.assertEqual(len(track), 2)
self.assertEqual(track[0][0],
({key1.urlsafe(): ent1._to_pb()},))
self.assertEqual(track[0][1], {'key_prefix': self.ctx._memcache_prefix,
'time': 1})
self.assertEqual(track[1][0],
({key2.urlsafe(): ent2._to_pb()},))
self.assertEqual(track[1][1], {'key_prefix': self.ctx._memcache_prefix,
'time': 2})
memcache.flush_all()
track = []
badkeys = [key2.urlsafe()]
self.ctx.set_memcache_timeout_policy(lambda unused_key: 0)
foo().check_success()
self.assertEqual(len(track), 1)
self.assertEqual(track[0][2], badkeys)
memcache.flush_all()
finally:
self.ctx._memcache.add_multi_async = save_add_multi_async
def testContext_CacheQuery(self):
@tasklets.tasklet
def foo():
key1 = model.Key(flat=('Foo', 1))
key2 = model.Key(flat=('Foo', 2))
ent1 = model.Expando(key=key1, foo=42, bar='hello')
ent2 = model.Expando(key=key2, foo=1, bar='world')
key1a, key2a = yield self.ctx.put(ent1), self.ctx.put(ent2)
self.assertTrue(key1 in self.ctx._cache) # Whitebox.
self.assertTrue(key2 in self.ctx._cache) # Whitebox.
self.assertEqual(key1, key1a)
self.assertEqual(key2, key2a)
@tasklets.tasklet
def callback(ent):
return ent
qry = query.Query(kind='Foo')
results = yield self.ctx.map_query(qry, callback)
self.assertEqual(results, [ent1, ent2])
self.assertTrue(results[0] is self.ctx._cache[ent1.key])
self.assertTrue(results[1] is self.ctx._cache[ent2.key])
foo().check_success()
def testContext_AllocateIds(self):
@tasklets.tasklet
def foo():
key = model.Key(flat=('Foo', 1))
lo_hi = yield self.ctx.allocate_ids(key, size=10)
self.assertEqual(lo_hi, (1, 10))
lo_hi = yield self.ctx.allocate_ids(key, max=20)
self.assertEqual(lo_hi, (11, 20))
foo().check_success()
def testContext_MapQuery(self):
@tasklets.tasklet
def callback(ent):
return ent.key.flat()[-1]
@tasklets.tasklet
def foo():
yield self.create_entities()
qry = query.Query(kind='Foo')
res = yield self.ctx.map_query(qry, callback)
raise tasklets.Return(res)
res = foo().get_result()
self.assertEqual(set(res), set([1, 2, 3]))
def testContext_MapQuery_NoCallback(self):
@tasklets.tasklet
def foo():
yield self.create_entities()
qry = query.Query(kind='Foo')
res = yield self.ctx.map_query(qry, None)
raise tasklets.Return(res)
res = foo().get_result()
self.assertEqual(len(res), 3)
for i, ent in enumerate(res):
self.assertTrue(isinstance(ent, model.Model))
self.assertEqual(ent.key.flat(), ('Foo', i + 1))
def testContext_MapQuery_NonTaskletCallback(self):
def callback(ent):
return ent.key.flat()[-1]
@tasklets.tasklet
def foo():
yield self.create_entities()
qry = query.Query(kind='Foo')
res = yield self.ctx.map_query(qry, callback)
raise tasklets.Return(res)
res = foo().get_result()
self.assertEqual(res, [1, 2, 3])
def testContext_MapQuery_CustomFuture(self):
mfut = tasklets.QueueFuture()
@tasklets.tasklet
def callback(ent):
return ent.key.flat()[-1]
@tasklets.tasklet
def foo():
yield self.create_entities()
qry = query.Query(kind='Foo')
res = yield self.ctx.map_query(qry, callback, merge_future=mfut)
self.assertEqual(res, None)
vals = set()
for _ in range(3):
val = yield mfut.getq()
vals.add(val)
fail = mfut.getq()
self.assertRaises(EOFError, fail.get_result)
raise tasklets.Return(vals)
res = foo().get_result()
self.assertEqual(res, set([1, 2, 3]))
def testContext_MapQuery_KeysOnly(self):
qo = query.QueryOptions(keys_only=True)
@tasklets.tasklet
def callback(key):
return key.pairs()[-1]
@tasklets.tasklet
def foo():
yield self.create_entities()
qry = query.Query(kind='Foo')
res = yield self.ctx.map_query(qry, callback, options=qo)
raise tasklets.Return(res)
res = foo().get_result()
self.assertEqual(set(res), set([('Foo', 1), ('Foo', 2), ('Foo', 3)]))
def testContext_MapQuery_Cursors(self):
qo = query.QueryOptions(produce_cursors=True)
@tasklets.tasklet
def callback(ent):
return ent.key.pairs()[-1]
@tasklets.tasklet
def foo():
yield self.create_entities()
qry = query.Query(kind='Foo')
res = yield self.ctx.map_query(qry, callback, options=qo)
raise tasklets.Return(res)
res = foo().get_result()
self.assertEqual(set(res), set([('Foo', 1), ('Foo', 2), ('Foo', 3)]))
def testContext_IterQuery(self):
@tasklets.tasklet
def foo():
yield self.create_entities()
qry = query.Query(kind='Foo')
it = self.ctx.iter_query(qry)
res = []
while True:
try:
ent = yield it.getq()
except EOFError:
break
res.append(ent)
raise tasklets.Return(res)
res = foo().get_result()
self.assertEqual(len(res), 3)
for i, ent in enumerate(res):
self.assertTrue(isinstance(ent, model.Model))
self.assertEqual(ent.key.flat(), ('Foo', i + 1))
def testContext_TransactionFailed(self):
# XXX Why is this called 'Failed'? There's no failure here.
@tasklets.tasklet
def foo():
key = model.Key(flat=('Foo', 1))
ent = model.Expando(key=key, bar=1)
yield self.ctx.put(ent)
@tasklets.tasklet
def callback():
ctx = tasklets.get_context()
self.assertTrue(key not in ctx._cache) # Whitebox.
e = yield key.get_async()
self.assertTrue(key in ctx._cache) # Whitebox.
e.bar = 2
yield e.put_async()
yield self.ctx.transaction(callback)
self.assertEqual(self.ctx._cache[key].bar, 2)
foo().check_success()
def testContext_TransactionException(self):
self.ExpectWarnings()
key = model.Key('Foo', 1)
@tasklets.tasklet
def foo():
ent = model.Expando(key=key, bar=1)
@tasklets.tasklet
def callback():
yield ent.put_async()
raise Exception('foo')
yield self.ctx.transaction(callback)
self.assertRaises(Exception, foo().check_success)
self.assertEqual(key.get(), None)
def testContext_TransactionRollback(self):
self.ExpectWarnings()
key = model.Key('Foo', 1)
@tasklets.tasklet
def foo():
ent = model.Expando(key=key, bar=1)
@tasklets.tasklet
def callback():
yield ent.put_async()
raise model.Rollback()
yield self.ctx.transaction(callback)
foo().check_success()
self.assertEqual(key.get(), None)
def testContext_TransactionRollbackException(self):
self.ExpectWarnings()
key = model.Key('Foo', 1)
class CustomException(Exception):
pass
@tasklets.tasklet
def foo():
ent = model.Expando(key=key, bar=1)
@tasklets.tasklet
def callback():
# Cause rollback to return an exception
ctx = tasklets.get_context()
ctx._conn._end_transaction = self.make_bad_transaction
yield ent.put_async()
raise CustomException()
yield self.ctx.transaction(callback)
try:
foo().check_success()
self.fail()
except CustomException:
pass # good
self.assertEqual(key.get(), None)
def testContext_TransactionAddTask(self):
self.ExpectWarnings()
key = model.Key('Foo', 1)
@tasklets.tasklet
def foo():
ent = model.Expando(key=key, bar=1)
@tasklets.tasklet
def callback():
ctx = tasklets.get_context()
yield ctx.put(ent)
taskqueue.add(url='/', transactional=True)
yield self.ctx.transaction(callback)
foo().check_success()
def testContext_TransactionMemcache(self):
class Foo(model.Model):
name = model.StringProperty()
foo1 = Foo(name='foo1')
foo2 = Foo(name='foo2')
key1 = foo1.put()
key2 = foo2.put()
skey1 = self.ctx._memcache_prefix + key1.urlsafe()
skey2 = self.ctx._memcache_prefix + key2.urlsafe()
# Be sure nothing is in memcache.
self.assertEqual(memcache.get(skey1), None)
self.assertEqual(memcache.get(skey2), None)
# Be sure nothing is in the context cache.
self.ctx.clear_cache()
# Run some code in a transaction.
def txn():
ctx = tasklets.get_context()
self.assertTrue(ctx is not self.ctx)
f1 = key1.get()
f2 = key1.get()
f1.name += 'a'
f1.put()
# Don't put f2.
# Verify the state of memcache.
self.assertEqual(memcache.get(skey1), context._LOCKED)
self.assertEqual(memcache.get(skey2), None)
self.ctx.transaction(txn).wait()
# Verify memcache is cleared.
self.assertEqual(memcache.get(skey1), None)
self.assertEqual(memcache.get(skey2), None)
# Clear the context cache.
self.ctx.clear_cache()
# Non-transactional get() updates memcache.
f1 = key1.get()
f2 = key2.get()
eventloop.run() # Wait for memcache.set() RPCs
self.assertNotEqual(memcache.get(skey1), None)
self.assertNotEqual(memcache.get(skey2), None)
def testContext_TransactionCallBackTasklet(self):
class Foo(model.Model):
n = model.IntegerProperty()
@tasklets.tasklet
def inner_callback():
self.assertTrue(tasklets.get_context().in_transaction())
x = yield Foo.get_or_insert_async('x', n=0)
x.n += 1
yield x.put_async()
raise tasklets.Return(x)
# 1. Regular case.
x = self.ctx.transaction(inner_callback).get_result()
self.assertEqual(x, Foo(n=1, id='x'))
x.key.delete()
# 2. Case for propagation=MANDATORY.
def outer_callback():
ctx = tasklets.get_context()
self.assertTrue(ctx.in_transaction())
f = ctx.transaction(
inner_callback, propagation=context.TransactionOptions.MANDATORY)
x = f.get_result()
self.assertEqual(x, Foo(n=1, id='x'))
return x
x = self.ctx.transaction(outer_callback).get_result()
x.key.delete()
# 3. Case for propagation=ALLOWED.
def outer_callback():
ctx = tasklets.get_context()
self.assertTrue(ctx.in_transaction())
f = ctx.transaction(
inner_callback, propagation=context.TransactionOptions.ALLOWED)
x = f.get_result()
self.assertEqual(x, Foo(n=1, id='x'))
return x
x = self.ctx.transaction(outer_callback).get_result()
x.key.delete()
def testTransaction_OnCommit(self):
self.ExpectWarnings()
class Counter(model.Model):
count = model.IntegerProperty(default=0)
@model.transactional
def trans1(fail=False, bad=None):
tasklets.get_context().call_on_commit(lambda: log.append('A'))
c = key.get()
c.count += 1
c.put()
if bad is not None:
tasklets.get_context().call_on_commit(bad)
tasklets.get_context().call_on_commit(lambda: log.append('B'))
if fail:
raise model.Rollback
# Successful transaction.
key = Counter().put()
log = []
trans1()
self.assertEqual(key.get().count, 1)
self.assertEqual(log, ['A', 'B'])
# Failing transaction.
key = Counter().put()
log = []
trans1(fail=True)
self.assertEqual(key.get().count, 0)
self.assertEqual(log, [])
# Raising callable in transaction.
key = Counter().put()
log = []
self.assertRaises(ZeroDivisionError, trans1, bad=lambda: 1/0)
self.assertEqual(key.get().count, 1)
self.assertEqual(log, ['A'])
# Bad callable in transaction.
key = Counter().put()
log = []
self.assertRaises(TypeError, trans1, bad=42)
self.assertEqual(key.get().count, 1)
self.assertEqual(log, ['A'])
# No transaction.
log = []
tasklets.get_context().call_on_commit(lambda: log.append('C'))
self.assertEqual(log, ['C'])
# Raising callable outside transaction.
log = []
self.assertRaises(ZeroDivisionError,
tasklets.get_context().call_on_commit, lambda: 1/0)
# Bad callable outside transaction.
log = []
self.assertRaises(TypeError, tasklets.get_context().call_on_commit, 42)
def testDefaultContextTransaction(self):
@tasklets.synctasklet
def outer():
ctx1 = tasklets.get_context()
@tasklets.tasklet
def inner():
ctx2 = tasklets.get_context()
self.assertTrue(ctx1 is not ctx2)
self.assertTrue(isinstance(ctx2._conn,
datastore_rpc.TransactionalConnection))
return 42
a = yield tasklets.get_context().transaction(inner)
ctx1a = tasklets.get_context()
self.assertTrue(ctx1 is ctx1a)
raise tasklets.Return(a)
b = outer()
self.assertEqual(b, 42)
def testExplicitTransactionClearsDefaultContext(self):
old_ctx = tasklets.get_context()
@tasklets.synctasklet
def outer():
ctx1 = tasklets.get_context()
@tasklets.tasklet
def inner():
ctx = tasklets.get_context()
self.assertTrue(ctx is not ctx1)
key = model.Key('Account', 1)
ent = yield key.get_async()
self.assertTrue(tasklets.get_context() is ctx)
self.assertTrue(ent is None)
raise tasklets.Return(42)
fut = ctx1.transaction(inner)
self.assertEqual(tasklets.get_context(), ctx1)
val = yield fut
self.assertEqual(tasklets.get_context(), ctx1)
raise tasklets.Return(val)
val = outer()
self.assertEqual(val, 42)
self.assertTrue(tasklets.get_context() is old_ctx)
def testKindError(self):
self.ExpectWarnings()
ctx = context.Context()
# If the cache is enabled, attempts to retrieve the object we just put will
# be satisfied from the cache, so the adapter we're testing will never get
# called.
ctx.set_cache_policy(lambda unused_key: False)
@tasklets.tasklet
def foo():
# Foo class is declared in query_test, so let's use an unusual class name.
key1 = model.Key(flat=('ThisModelClassDoesntExist', 1))
ent1 = model.Expando(key=key1, foo=42, bar='hello')
yield ctx.put(ent1)
yield ctx.get(key1)
self.assertRaises(model.KindError, foo().check_success)
def testMemcachePolicy(self):
# Bug reported by Jack Hebert.
class P(model.Model): pass
class Q(model.Model): pass
def policy(key): return key.kind() != 'P'
self.ctx.set_cache_policy(policy)
self.ctx.set_memcache_policy(policy)
k1 = model.Key(P, 1)
k2 = model.Key(Q, 1)
f1 = self.ctx.get(k1)
f2 = self.ctx.get(k2)
self.assertTrue(f1.get_result() is None)
self.assertTrue(f2.get_result() is None)
def testMemcacheDeleteThenGet(self):
# Test that memcache is written synchronously when datastore policy is off.
self.ctx.set_cache_policy(False)
self.ctx.set_datastore_policy(False)
self.ctx.set_memcache_policy(True)
class EmptyModel(model.Model):
pass
key = model.Key(EmptyModel, 1)
# Delete the key (just to be sure).
del_fut = self.ctx.delete(key)
del_fut.get_result()
# Create and store a new model instance using the key we just deleted.
# Because datastore policy is off, this attempts to write it to memcache.
EmptyModel(key=key).put()
# Verify that it is now in memcache.
get_fut = self.ctx.get(key)
ent = get_fut.get_result()
self.assertTrue(ent is not None,
'Memcache delete blocked the subsequent memcache set: %r' % ent)
def testMemcacheAPI(self):
self.ExpectErrors()
@tasklets.tasklet
def foo():
ctx = tasklets.get_context()
k1 = 'k1'
k2 = u'k2'
vv = yield ctx.memcache_get(k1), ctx.memcache_get(k2)
self.assertEqual(vv, [None, None])
v1 = '24'
v2 = 42
vv = yield ctx.memcache_set(k1, v1), ctx.memcache_set(k2, v2)
self.assertEqual(vv, [STORED, STORED])
vv = yield ctx.memcache_get(k1), ctx.memcache_get(k2)
self.assertEqual(vv, [v1, v2])
vv = yield ctx.memcache_incr(k1), ctx.memcache_decr(k2)
self.assertEqual(vv, [25, 41])
vv = yield ctx.memcache_get(k1), ctx.memcache_get(k2)
self.assertEqual(vv, ['25', 41])
vv = yield ctx.memcache_incr(k1, -1), ctx.memcache_decr(k2, -1)
self.assertEqual(vv, [24, 42])
vv = yield ctx.memcache_get(k1), ctx.memcache_get(k2)
self.assertEqual(vv, [v1, v2])
vv = yield ctx.memcache_add(k1, 'a'), ctx.memcache_add(k2, 'b')
self.assertEqual(vv, [NOT_STORED, NOT_STORED])
vv = yield ctx.memcache_replace(k1, 'a'), ctx.memcache_replace(k2, 'b')
self.assertEqual(vv, [STORED, STORED])
vv = yield ctx.memcache_delete(k1), ctx.memcache_delete(k2)
self.assertEqual(vv, [memcache.DELETE_SUCCESSFUL,
memcache.DELETE_SUCCESSFUL])
vv = yield ctx.memcache_delete(k1), ctx.memcache_delete(k2)
self.assertEqual(vv, [memcache.DELETE_ITEM_MISSING,
memcache.DELETE_ITEM_MISSING])
vv = yield ctx.memcache_incr(k1), ctx.memcache_decr(k2)
self.assertEqual(vv, [None, None])
vv = yield ctx.memcache_replace(k1, 'a'), ctx.memcache_replace(k2, 'b')
self.assertEqual(vv, [NOT_STORED, NOT_STORED])
vv = yield ctx.memcache_add(k1, 'a'), ctx.memcache_add(k2, 'b')
self.assertEqual(vv, [STORED, STORED])
logging.warn('Following two errors are expected:')
vv = yield ctx.memcache_incr(k1), ctx.memcache_decr(k2)
self.assertEqual(vv, [None, None])
foo().get_result()
def testMemcacheCAS(self):
@tasklets.tasklet
def foo():
c1 = context.Context()
c2 = context.Context()
k1 = u'k1'
k2 = 'k2'
yield c1.memcache_set(k1, 'a'), c1.memcache_set(k2, 'b')
vv = yield c2.memcache_get(k1), c2.memcache_get(k2)
self.assertEqual(vv, ['a', 'b'])
vv = yield c1.memcache_gets(k1), c1.memcache_get(k2, for_cas=True)
self.assertEqual(vv, ['a', 'b'])
ffff = [c1.memcache_cas(k1, 'x'), c1.memcache_cas(k2, 'y'),
c2.memcache_cas(k1, 'p'), c2.memcache_cas(k2, 'q')]
vvvv = yield ffff
self.assertEqual(vvvv, [STORED, STORED, NOT_STORED, NOT_STORED])
foo().get_result()
def testMemcacheErrors(self):
# See issue 94. http://goo.gl/E7OBH
# Install an error handler.
save_create_rpc = memcache.create_rpc
def fake_check_success(*args):
raise apiproxy_errors.Error('fake error')
def fake_create_rpc(*args, **kwds):
rpc = save_create_rpc(*args, **kwds)
rpc.check_success = fake_check_success
return rpc
try:
memcache.create_rpc = fake_create_rpc
val = self.ctx.memcache_get('key2').get_result()
self.assertEqual(val, None)
val = self.ctx.memcache_incr('key2').get_result()
self.assertEqual(val, None)
ok = self.ctx.memcache_set('key2', 'value2').get_result()
self.assertFalse(ok)
ok = self.ctx.memcache_delete('key2').get_result()
self.assertEqual(ok, memcache.DELETE_NETWORK_FAILURE)
finally:
memcache.create_rpc = save_create_rpc
def testMemcacheNamespaces(self):
@tasklets.tasklet
def foo():
k1 = 'k1'
k2 = 'k2'
ns = u'ns'
# Write two values in the namespace
s1, s2 = yield (self.ctx.memcache_set(k1, 42, namespace=ns),
self.ctx.memcache_add(k2, 100, namespace=ns))
self.assertEqual(s1, STORED)
self.assertEqual(s2, STORED)
# Check that they aren't in the default namespace
v1n, v2n = yield (self.ctx.memcache_get(k1),
self.ctx.memcache_get(k2))
self.assertEqual(v1n, None)
self.assertEqual(v2n, None)
# Read them back using get and gets
v1, v2 = yield (self.ctx.memcache_get(k1, namespace=ns),
self.ctx.memcache_gets(k2, namespace=ns))
self.assertEqual(v1, 42)
self.assertEqual(v2, 100)
# Write v1+1 back using replace, v2+1 using cas
s1, s2 = yield (self.ctx.memcache_replace(k1, v1 + 1, namespace=ns),
self.ctx.memcache_cas(k2, v2 + 1, namespace=ns))
self.assertEqual(s1, STORED)
self.assertEqual(s2, STORED)
# Apply incr/decr to both
v1, v2 = yield (self.ctx.memcache_incr(k1, delta=10, namespace=ns),
self.ctx.memcache_decr(k2, delta=10, namespace=ns))
self.assertEqual(v1, 53) # 42 + 1 + 10
self.assertEqual(v2, 91) # 100 + 1 - 100
# Delete both
s1, s2 = yield (self.ctx.memcache_delete(k1, namespace=ns),
self.ctx.memcache_delete(k2, namespace=ns))
self.assertEqual(s1, memcache.DELETE_SUCCESSFUL)
self.assertEqual(s2, memcache.DELETE_SUCCESSFUL)
foo().check_success()
def testMemcacheLocking(self):
# See issue 66. http://goo.gl/ANBns
self.ctx.set_cache_policy(False)
# Prepare: write some entity using Context.put().
class EmptyModel(model.Model):
pass
key = model.Key(EmptyModel, 1)
mkey = self.ctx._memcache_prefix + key.urlsafe()
ent = EmptyModel(key=key)
put_fut = self.ctx.put(ent)
eventloop.run0()
self.assertTrue(self.ctx._memcache_set_batcher._queues)
eventloop.run0()
self.assertTrue(self.ctx._memcache_set_batcher._running)
while self.ctx._memcache_set_batcher._running:
eventloop.run0()
# Verify that memcache now contains the special _LOCKED value.
val = memcache.get(mkey)
self.assertEqual(val, context._LOCKED)
put_fut.check_success()
# Verify that the memcache _LOCKED value has been removed.
val = memcache.get(mkey)
self.assertEqual(val, None)
def testMemcacheDefaultNamespaceBatching(self):
self.ctx.set_datastore_policy(False)
key = model.Key('Foo', 1)
keyfut = key.get_async()
mfut = self.ctx.memcache_get('bar')
keyfut.check_success()
mfut.check_success()
log = MyAutoBatcher._log
self.assertEqual(len(log), 1, log)
def testAsyncInTransaction(self):
# See issue 81. http://goo.gl/F097l
class Bar(model.Model):
name = model.StringProperty()
bar = Bar(id='bar', name='bar')
bar.put()
@tasklets.tasklet
def trans():
bar = Bar.get_by_id('bar')
bar.name = 'updated-bar'
bar.put_async()  # PROBLEM IS HERE; with a yield this works properly
model.transaction_async(trans).get_result()
bar = bar.key.get()
self.assertEqual(bar.name, 'updated-bar')
def testMemcacheProtobufEncoding(self):
# Test that when memcache is used implicitly, it stores encoded
# protobufs, not pickled ones.
class Employee(model.Model):
_use_cache = False
e = Employee()
k = e.put(use_memcache=False)
k.get(use_memcache=True)
eventloop.run()
ks = self.ctx._memcache_prefix + k.urlsafe()
v = memcache.get(ks)
self.assertTrue(isinstance(v, str))
def testCorruptMemcache(self):
# Check that corrupt memcache entries silently fail.
self.ExpectWarnings()
self.ctx.set_cache_policy(False)
# Create a simple entity/key
class EmptyModel(model.Model):
pass
ent = EmptyModel()
key = ent.put()
# Prime memcache
key.get()
eventloop.run()
# Sanity check that memcache is primed
mkey = self.ctx._memcache_prefix + key.urlsafe()
self.assertEqual(memcache.get(mkey),
ent._to_pb(set_key=False).SerializePartialToString())
# Inject a corrupt memcache value
memcache.set(mkey, 'booby trap')
# Check that ndb ignores the corrupt memcache value
self.assertEqual(ent, key.get())
def testMemcacheRpcDeadline(self):
# This just verifies that create_rpc() is called with the correct
# deadline; that should be sufficient.
orig_create_rpc = memcache.create_rpc
def mock_create_rpc(deadline='invalid'):
# Fail if create_rpc() was called from _make_async_call().
self.assertNotEqual(deadline, 'invalid')
observed_deadlines.append(deadline)
return orig_create_rpc(deadline=deadline)
try:
memcache.create_rpc = mock_create_rpc
observed_deadlines = []
self.ctx.memcache_get('a').get_result()
self.assertEqual(observed_deadlines, [None])
observed_deadlines = []
self.ctx.memcache_get('a', deadline=1).get_result()
self.assertEqual(observed_deadlines, [1])
observed_deadlines = []
self.ctx.memcache_gets('a', deadline=2).get_result()
self.assertEqual(observed_deadlines, [2])
observed_deadlines = []
self.ctx.memcache_set('a', 'b', deadline=3).get_result()
self.assertEqual(observed_deadlines, [3])
observed_deadlines = []
self.ctx.memcache_add('a', 'b', deadline=4).get_result()
self.assertEqual(observed_deadlines, [4])
observed_deadlines = []
self.ctx.memcache_replace('a', 'b', deadline=5).get_result()
self.assertEqual(observed_deadlines, [5])
observed_deadlines = []
self.ctx.memcache_cas('a', 'b', deadline=6).get_result()
self.assertEqual(observed_deadlines, [6])
observed_deadlines = []
self.ctx.memcache_delete('a', deadline=7).get_result()
self.assertEqual(observed_deadlines, [7])
observed_deadlines = []
self.ctx.memcache_incr('a', deadline=8).get_result()
self.assertEqual(observed_deadlines, [8])
observed_deadlines = []
self.ctx.memcache_decr('a', deadline=9).get_result()
self.assertEqual(observed_deadlines, [9])
finally:
memcache.create_rpc = orig_create_rpc
def testMemcacheRpcDeadlineExceeded(self):
# A test where the deadline fails.
orig_create_rpc = memcache.create_rpc
def raise_deadline_error(*args):
observed_raises.append('raise')
raise apiproxy_errors.DeadlineExceededError('fake deadline')
def mock_create_rpc(deadline='invalid'):
# Fail if create_rpc() was called from _make_async_call().
self.assertNotEqual(deadline, 'invalid')
observed_deadlines.append(deadline)
rpc = orig_create_rpc(deadline=deadline)
# Monkey-patch the RPC instance.
rpc.check_success = raise_deadline_error
return rpc
try:
memcache.create_rpc = mock_create_rpc
observed_deadlines = []
observed_raises = []
key = model.Key('Kind', 'id')
ent = key.get(memcache_deadline=1)
self.assertEqual(ent, None)
# Three memcache calls should have been made (get, set, gets).
self.assertEqual(observed_deadlines, [1]*3)
self.assertEqual(observed_raises, ['raise']*3)
finally:
memcache.create_rpc = orig_create_rpc
def start_test_server(self):
host = '127.0.0.1'
s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
for i in range(10):
port = random.randrange(32768, 60000)
try:
s.bind((host, port))
break
except socket.error:
continue
else:
self.fail('Could not find an unused port in 10 tries')
s.listen(1)
def run():
c, addr = s.accept()
s.close()
c.recv(1000) # Throw away request.
c.send('HTTP/1.0 200 Ok\r\n\r\n') # Emptiest response.
c.close()
t = threading.Thread(target=run)
t.setDaemon(True)
t.start()
return host, port
def testUrlFetch(self):
self.testbed.init_urlfetch_stub()
host, port = self.start_test_server()
fut = self.ctx.urlfetch('http://%s:%d' % (host, port))
result = fut.get_result()
self.assertEqual(result.status_code, 200)
self.assertTrue(isinstance(result.content, str))
def testTooBigForMemcache(self):
class Blobby(model.Model):
_use_memcache = True
_use_cache = False
blob = model.BlobProperty()
small = Blobby(blob='x')
huge = Blobby(blob='x'*1000000) # Fits in datastore, not in memcache
originals = [small, huge]
keys = model.put_multi(originals)
copies = model.get_multi(keys)
self.assertEqual(copies, originals) # Just to be sure
memcache_copies = model.get_multi(keys, use_datastore=False)
# Check that the small value did make it to memcache.
self.assertEqual(memcache_copies, [small, None])
# Test different path through the code when using use_datastore=False.
self.ExpectWarnings()
Blobby._use_datastore = False
small.key = model.Key(Blobby, "small")
huge.key = model.Key(Blobby, "huge")
# Create two Futures; this forces the AutoBatcher to combine the two.
fsmall = small.put_async()
fhuge = huge.put_async()
self.assertEqual(small.key, fsmall.get_result())
self.assertRaises(ValueError, fhuge.get_result)
self.assertEqual(small, small.key.get())
self.assertEqual(None, huge.key.get())
def testDatastoreConnectionIsRestored(self):
# See issue 209. http://goo.gl/7TEyM
class TestData(model.Model):
pass
@tasklets.tasklet
def txn():
conn1 = datastore._GetConnection()
self.assertTrue(
isinstance(conn1, datastore_rpc.TransactionalConnection), conn1)
yield TestData().put_async()
conn2 = datastore._GetConnection()
self.assertEqual(conn1, conn2)
@tasklets.synctasklet
def many_txns():
# Exactly how many transactions are needed to make this fail
# appears to be random. With 100 it always seems to fail
# (unless the bug is fixed).
conn_a = datastore._GetConnection()
ts = [model.transaction_async(txn) for i in range(100)]
conn_b = datastore._GetConnection()
self.assertEqual(conn_a, conn_b)
yield ts
conn_c = datastore._GetConnection()
self.assertEqual(conn_b, conn_c)
conn_before = datastore._GetConnection()
many_txns()
conn_after = datastore._GetConnection()
self.assertEqual(conn_before, conn_after)
def testMemcacheAndContextCache(self):
self.ctx.set_datastore_policy(True)
self.ctx.set_cache_policy(False)
self.ctx.set_memcache_policy(True)
class EmptyModel(model.Model):
pass
key = EmptyModel().put()
self.ctx.get(key).get_result() # pull entity into memcache
self.ctx.set_cache_policy(True)
f1, f2 = self.ctx.get(key), self.ctx.get(key)
e1, e2 = f1.get_result(), f2.get_result()
self.assertTrue(e1 is e2)
class ContextV3Tests(ContextTestMixin, test_utils.NDBTest):
"""Context tests that use a Datastore V3 connection."""
def setUp(self):
super(ContextV3Tests, self).setUp()
MyAutoBatcher.reset_log()
self.ctx = context.Context(
conn=model.make_connection(default_model=model.Expando),
auto_batcher_class=MyAutoBatcher)
tasklets.set_context(self.ctx)
def make_bad_transaction(*arg, **kwargs):
return datastore_rpc.datastore_pb.Transaction()
def testContext_TransactionAddTask(self):
self.ExpectWarnings()
key = model.Key('Foo', 1)
@tasklets.tasklet
def foo():
ent = model.Expando(key=key, bar=1)
@tasklets.tasklet
def callback():
ctx = tasklets.get_context()
yield ctx.put(ent)
taskqueue.add(url='/', transactional=True)
yield self.ctx.transaction(callback)
foo().check_success()
@real_unittest.skipUnless(datastore_pbs._CLOUD_DATASTORE_ENABLED,
"V1 must be supported to run V1 tests.")
class ContextV1Tests(ContextTestMixin, test_utils.NDBCloudDatastoreV1Test):
"""Context tests that use a Cloud Datastore V1 connection."""
def setUp(self):
super(ContextV1Tests, self).setUp()
self.HRTest()
MyAutoBatcher.reset_log()
self.ctx = context.Context(
conn=model.make_connection(default_model=model.Expando,
_api_version=datastore_rpc._CLOUD_DATASTORE_V1),
auto_batcher_class=MyAutoBatcher)
tasklets.set_context(self.ctx)
def make_bad_transaction(*arg, **kwargs):
return ''
def testContext_TransactionAddTask(self):
self.ExpectWarnings()
key = model.Key('Foo', 1)
@tasklets.tasklet
def foo():
ent = model.Expando(key=key, bar=1)
@tasklets.tasklet
def callback():
ctx = tasklets.get_context()
yield ctx.put(ent)
taskqueue.add(url='/', transactional=True)
yield self.ctx.transaction(callback)
self.assertRaises(ValueError, foo().check_success)
class ContextFutureCachingTests(test_utils.NDBTest):
# See issue 62. http://goo.gl/5zLkK
def setUp(self):
super(ContextFutureCachingTests, self).setUp()
MyAutoBatcher.reset_log()
config = context.ContextOptions(max_get_keys=1, max_memcache_items=1)
self.ctx = context.Context(
conn=model.make_connection(default_model=model.Expando),
auto_batcher_class=MyAutoBatcher, config=config)
self.ctx.set_cache_policy(False)
tasklets.set_context(self.ctx)
def testGetFutureCachingOn(self):
self.ctx.set_memcache_policy(False)
class EmptyModel(model.Model):
pass
key = EmptyModel().put()
MyAutoBatcher.reset_log() # TODO Find out why put calls get_tasklet
self.ctx.set_cache_policy(True)
f1, f2 = self.ctx.get(key), self.ctx.get(key)
self.assertFalse(f1 is f2, 'Context get futures are being cached, '
'instead of tasklets.')
e1, e2 = f1.get_result(), f2.get_result()
self.assertTrue(e1 is e2, 'Results of concurrent gets are not the same '
'with future caching on.')
self.assertEqual(len(self.ctx._get_batcher._log), 1)
self.assertFalse(f1 is self.ctx.get(key), 'Future cache persisted.')
def testGetFutureCachingOff(self):
self.ctx.set_memcache_policy(False)
class EmptyModel(model.Model):
pass
key = EmptyModel().put()
MyAutoBatcher.reset_log() # TODO Find out why put calls get_tasklet
f1, f2 = self.ctx.get(key), self.ctx.get(key)
self.assertFalse(f1 is f2, 'Context get futures are being cached '
'with future caching off.')
e1, e2 = f1.get_result(), f2.get_result()
self.assertTrue(e1 is not e2, 'Results of concurrent gets are the same '
'with future caching off.')
self.assertEqual(len(self.ctx._get_batcher._log), 2)
def testMemcacheGetFutureCaching(self):
key = 'foo'
f1 = self.ctx.memcache_get(key, use_cache=True)
f2 = self.ctx.memcache_get(key, use_cache=True)
self.assertTrue(f1 is f2,
'Context memcache get futures are not cached.')
f3 = self.ctx.memcache_get(key)
self.assertFalse(f1 is f3,
'Context memcache get futures are cached by default.')
f1.check_success()
f4 = self.ctx.memcache_get(key, use_cache=True)
self.assertFalse(f1 is f4,
'Context memcache get future cached after result known.')
def testMemcacheSetFutureCaching(self):
key = 'foo'
value = 'bar'
f1 = self.ctx.memcache_set(key, value, use_cache=True)
f2 = self.ctx.memcache_set(key, value, use_cache=True)
self.assertTrue(f1 is f2,
'Context memcache set futures are not cached.')
f3 = self.ctx.memcache_set(key, value)
self.assertFalse(f1 is f3,
'Context memcache set futures are cached by default.')
f1.check_success()
f4 = self.ctx.memcache_set(key, value, use_cache=True)
self.assertFalse(f1 is f4,
'Context memcache set future cached after result known.')
if __name__ == '__main__':
unittest.main()
|
manager.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import socket
import os
import six
import copy
import logging
import signal
import random
import threading
import traceback
import subprocess
from paddle.distributed.fleet import cloud_utils
from paddle.distributed.fleet import launch_utils
logger = logging.getLogger("ELASTIC")
logger.setLevel(logging.INFO)
formatter = logging.Formatter(
fmt='%(name)s %(levelname)s %(asctime)s %(message)s')
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(ch)
ELASTIC_EXIT_CODE = 101
ELASTIC_AUTO_PARALLEL_EXIT_CODE = 102
# wait for timeout, unit: seconds
ELASTIC_TIMEOUT = 2 * 60
# keepalived ttl, unit: seconds
ELASTIC_TTL = 60
# 1: Fault tolerance, 2: Elastic
class ElasticLevel:
FAULT_TOLERANCE = 1
ELASTIC = 2
class ElasticStatus:
COMPLETED = "completed"
ERROR = "error"
HOLD = "hold"
RESTART = "restart"
EXIT = "exit"
class LauncherInterface(object):
def __init__(self, args):
self.args = args
self.procs = []
def _terminate_procs(self):
# try to terminate processes by group; this happens in multiprocess scenarios in the user process
if os.name != 'nt':
for p in self.procs:
if p.proc.poll() is None:
os.killpg(os.getpgid(p.proc.pid), signal.SIGTERM)
if p.log_fn:
p.log_fn.close()
logger.info("terminate process group gid:{}".format(
p.proc.pid))
time.sleep(1)
for p in self.procs:
if p.proc.poll() is None:
p.proc.terminate()
if p.log_fn:
p.log_fn.close()
logger.info("terminate process id:{}".format(p.proc.pid))
for step in range(0, 50):
alive = False
for p in self.procs:
if p.proc.poll() is None:  # not terminated yet
os.kill(p.proc.pid, signal.SIGKILL)
alive = True
if not alive:
logger.info("terminated all the procs")
return True
time.sleep(1)
return False
def _check_procs(self):
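# Added note: poll every child process. Returns None while all of them are
# still running cleanly, 0 once every process has exited with code 0, and a
# non-zero exit code if any process failed (ELASTIC_AUTO_PARALLEL_EXIT_CODE
# is returned immediately so the caller can re-launch for auto parallel).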
alive = False
result = None
for p in self.procs:
ret = p.proc.poll()
if ret is None:
alive = True
elif ret != 0:
if ret == ELASTIC_AUTO_PARALLEL_EXIT_CODE:
logger.info("return form elastic auto parallel re-launch")
return ret
logger.error("ABORT!!! ABORT!!! ABORT!!!")
logger.error(
"ERROR rank {} error with exit code {}, check log for detail.".
format(p.rank, ret))
result = ret
if not alive and result is None:
return 0
else:
return result
def launch(self):
raise NotImplementedError
def stop(self):
raise NotImplementedError
def watch(self):
raise NotImplementedError
class ElasticManager(object):
def __init__(self, args, etcd_client):
self.args = args
server = args.elastic_server or os.getenv('PADDLE_ELASTIC_SERVER')
name = args.job_id or os.getenv('PADDLE_ELASTIC_JOB_ID')
self.min_np, self.max_np = self._parse_np(args.np)
host = args.host or os.getenv('POD_IP')
scale = args.scale or int(os.getenv('PADDLE_ELASTIC_SCALE', 0))
force = args.force or os.getenv('PADDLE_ELASTIC_FORCE')
self.host = host if host else self._get_host()
(self.device_mode,
self.devices_per_proc) = launch_utils.get_device_proc_info(args)
self.elastic_timeout = int(
os.getenv('PADDLE_ELASTIC_TIMEOUT', ELASTIC_TIMEOUT))
elastic_ttl = int(os.getenv('PADDLE_ELASTIC_TTL', ELASTIC_TTL))
self.start_port = None
if cloud_utils.use_paddlecloud():
self.trainers = os.getenv('PADDLE_TRAINERS', '')
self.np = len(self.trainers.split(","))
self.start_port = int(os.getenv("PADDLE_PORT", "6170"))
self.dist_endpoints = os.getenv('DISTRIBUTED_TRAINER_ENDPOINTS', '')
trainer_endpoints = os.getenv('PADDLE_TRAINER_ENDPOINTS', '')
self.trainer_endpoints_list = trainer_endpoints.split(",")
else:
self.trainers = args.ips or os.getenv('PADDLE_TRAINERS', '')
node_ips = self.trainers.split(",")
self.np = len(node_ips)
self.start_port = int(os.getenv("FLAGS_START_PORT", "6170"))
self.dist_endpoints = self._host_to_endpoints(
node_ips, self.devices_per_proc, self.start_port)
self.trainer_endpoints_list = [
"%s:%d" % (ip, self.start_port) for ip in node_ips
]
self.curr_host = "%s:%d" % (self.host, self.start_port)
logger.info(f'start job with np={self.np}')
logger.info(
f"trainers={self.trainers}, trainer_endpoints_list={self.trainer_endpoints_list}"
)
# auto correct the value of elastic_level
# 1: Fault tolerant, 2: Elastic
self.elastic_level = int(
os.getenv('PADDLE_ELASTIC_FAULT_TOLERANC_LEVEL',
ElasticLevel.FAULT_TOLERANCE))
if self.min_np == self.max_np or \
(self.min_np > 0 and self.max_np == 0):
self.elastic_level = ElasticLevel.FAULT_TOLERANCE
logger.info(f'start job with ElasticLevel.FAULT_TOLERANCE')
if self.min_np > 0 and self.max_np > self.min_np:
self.elastic_level = ElasticLevel.ELASTIC
logger.info(f'start job with ElasticLevel.ELASTIC')
# compatible with kubernetes service discovery
if not server and os.getenv(
'PADDLE_ELASTIC_ETCD_SERVICE_HOST') and os.getenv(
'PADDLE_ELASTIC_ETCD_SERVICE_PORT'):
server = '{}:{}'.format(
os.getenv('PADDLE_ELASTIC_ETCD_SERVICE_HOST'),
os.getenv('PADDLE_ELASTIC_ETCD_SERVICE_PORT'))
logger.debug('init with server {} host {}'.format(server, host))
self.hosts = []
self.stopped = False
self.sigint = 0
self.need_sync = False
self.elastic_startup_time = None
if not server or ':' not in server or not name or not self.np:
logger.info(
'Elastic is not enabled with server {} name {} and np {}'.
format(server, name, self.np))
self.enable = False
return
else:
self.enable = True
self.etcd = etcd_client
# etcd data
self.prefix = "/paddle/" + name
self.node_prefix = self.prefix + '/nodes'
self.np_path = self.prefix + '/np'
self.endpoints_path = self.prefix + '/endpoints'
node_tag = ''.join(
random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(6))
self.host_path = '{}/{}{}'.format(self.node_prefix, node_tag,
time.time())
'''
0 group mode, be aware of healthy status of other workers
1 decouple mode, check own status only
'''
self.etcd.put(self.prefix, b'0')
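# Resulting etcd layout (illustrative, assuming the job name is "job0"):
#   /paddle/job0                  -> b'0' while running, b'1' once completed
#   /paddle/job0/np               -> path reserved for np (not written in this file)
#   /paddle/job0/nodes/<tag><ts>  -> "ip:port" of one live worker, kept alive by a lease
#   /paddle/job0/endpoints        -> "<dist_endpoints>|<trainers>"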
# register callback
def host_call_back(event):
self.hosts = [
six.ensure_str(i[0])
for i in self.etcd.get_prefix(self.node_prefix)
]
self.hosts = list(set(self.hosts)) if self.hosts else self.hosts
logger.info(
f"host_call_back curr_host={self.curr_host}, hosts:{self.hosts}")
self.need_sync = True
self.elastic_startup_time = None
host_watch = self.etcd.add_watch_prefix_callback(self.node_prefix,
host_call_back)
host_lease = self.etcd.lease(elastic_ttl)
# register etcd lease heartbeat
def lease_heartbeat():
while True:
try:
host_lease.refresh()
hosts = [
six.ensure_str(i[0])
for i in self.etcd.get_prefix(self.node_prefix)
]
hosts = list(set(hosts)) if hosts else hosts
logger.info(
f"[lease_heartbeat] curr_host={self.curr_host}, hosts={hosts}"
)
if self.curr_host not in hosts:
logger.info(
f"[lease_heartbeat] register host={self.curr_host}")
self.etcd.put(self.host_path,
six.b(self.curr_host),
lease=host_lease)
except Exception as e:
logger.error("[lease_heartbeat] internal error:{} {}".
format(e, traceback.format_exc()))
break
time.sleep(elastic_ttl / 3)
keepalived_thread = threading.Thread(
name='lease_heartbeat', target=lease_heartbeat, daemon=True)
keepalived_thread.start()
self.etcd.put(self.host_path, six.b(self.curr_host), lease=host_lease)
# endpoints handle DISTRIBUTED_TRAINER_ENDPOINTS and PADDLE_TRAINERS
self.etcd.put(self.endpoints_path,
six.b('{}|{}'.format(self.dist_endpoints, self.trainers)))
def endpoints_call_back(event):
if not self.dist_endpoints:
return
edps = six.ensure_str(self.etcd.get(self.endpoints_path)[0] or '')
self.dist_endpoints, self.trainers = edps.split('|')
logger.info("set DISTRIBUTED_TRAINER_ENDPOINTS {} ".format(
self.dist_endpoints))
logger.info("set PADDLE_TRAINERS {} ".format(self.trainers))
endpoints_watch = self.etcd.add_watch_callback(self.endpoints_path,
endpoints_call_back)
self.watches = [host_watch, endpoints_watch]
self.launcher = None
def _host_to_endpoints(self,
ip_port_list: list,
devices_per_proc: list,
start_port: int=6170) -> str:
endpoint_list = []
for ip_port in ip_port_list:
endpoints = ip_port.split(":")
if len(endpoints) == 2:
ip = endpoints[0]
port = int(endpoints[1])
else:
ip = ip_port
port = start_port
ports = [x for x in range(port, port + len(devices_per_proc))]
endpoint_list.extend(["%s:%d" % (ip, port) for port in ports])
dist_endpoints = ','.join(endpoint_list)
return dist_endpoints
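# Illustrative example (assumed inputs, not taken from a real job): with
# devices_per_proc=[0, 1] and start_port=6170,
#   ["10.0.0.1", "10.0.0.2:7000"]
# expands to "10.0.0.1:6170,10.0.0.1:6171,10.0.0.2:7000,10.0.0.2:7001",
# i.e. one endpoint per local device, with consecutive ports per host.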
def exit(self, completed=False):
logger.info('manager exit, completed {}'.format(completed))
if self.launcher:
self.launcher.stop()
if not self.enable:
return
if completed:
self.etcd.put(self.prefix, b'1')
for watch in self.watches:
self.etcd.cancel_watch(watch)
self.etcd.delete(self.host_path)
hosts = [i for i in self.etcd.get_prefix(self.node_prefix)]
if len(hosts) == 0:
self.etcd.delete_prefix(self.prefix)
def pre_hook(self):
if not self.args.elastic_pre_hook:
logger.info("skip pre_hook")
return
logger.info("execute pre_hook...")
current_env = copy.copy(os.environ.copy())
out, err = subprocess.Popen(
self.args.elastic_pre_hook,
env=current_env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True).communicate()
if err:
logger.warn("pre_hook exec failed")
else:
logger.info(f"pre_hook exec result: {out.decode('utf-8').strip()}")
def _parse_np(self, np: str):
"""
np format is "MIN" or "MIN:MAX"
"""
np_str = np or os.getenv('PADDLE_ELASTIC_NP', "0")
np_dict = np_str.split(":")
min_np = max_np = 0
if len(np_dict) == 1:
# Fault tolerant
min_np = int(np_dict[0])
min_np = 1 if min_np <= 0 else min_np
max_np = 1
elif len(np_dict) == 2:
# Elastic
min_np = int(np_dict[0])
max_np = int(np_dict[1])
min_np = 1 if min_np <= 0 else min_np
max_np = min_np if min_np > max_np else max_np
else:
raise ValueError(
f'the np={np} needs to be in "MIN" or "MIN:MAX" format')
return min_np, max_np
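# Worked examples (illustrative only):
#   _parse_np("4")   -> (4, 1)  single value: fault-tolerant mode, max_np fixed to 1
#   _parse_np("2:4") -> (2, 4)  range: elastic mode with a 2..4 node window
#   _parse_np("0:3") -> (1, 3)  min_np is clamped to at least 1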
def _get_host(self):
try:
return socket.gethostbyname(socket.getfqdn(socket.gethostname()))
except:
return '127.0.0.1'
def _completed(self):
if not self.enable:
return True
return int(self.etcd.get(self.prefix)[0]) == 1
def _match(self, host_list: list=None):
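# Added summary: decide whether the currently registered host set can run.
# In FAULT_TOLERANCE mode this only matches when exactly self.np hosts are
# present. In ELASTIC mode it matches immediately at self.np or self.max_np
# hosts, and otherwise accepts any count in [min_np, max_np) only after
# self.elastic_timeout seconds have passed since the first partial match.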
if host_list:
self.hosts = host_list
else:
self.hosts = [
six.ensure_str(i[0])
for i in self.etcd.get_prefix(self.node_prefix)
]
self.hosts = list(set(self.hosts)) if self.hosts else self.hosts
if self.elastic_level == ElasticLevel.FAULT_TOLERANCE:
if len(self.hosts) == self.np:
return True
else:
return False
if self.elastic_level == ElasticLevel.ELASTIC:
hosts_num = len(self.hosts)
if hosts_num == self.np:
return True
if not self.elastic_startup_time:
self.elastic_startup_time = time.time()
if hosts_num == self.max_np:
self.elastic_startup_time = None
return True
elif hosts_num >= self.min_np and hosts_num < self.max_np:
interval_time = time.time() - self.elastic_startup_time
if interval_time <= self.elastic_timeout:
logger.info(
f"wait for timeout, you can set value by PADDLE_ELASTIC_TIMEOUT, \
hosts_num={hosts_num}, min_np={self.min_np}, \
interval_time={interval_time}, elastic_timeout={self.elastic_timeout}"
)
return False
return True
else:
self.elastic_startup_time = None
return False
return False
def _update_endpoint(self, endpoints, hosts):
self.etcd.put(self.endpoints_path,
six.b('{}|{}'.format(endpoints, hosts)))
def _update_fault_tolrance(self):
rank = int(os.getenv('PADDLE_TRAINER_ID', -1))
logger.debug(
f"self.curr_host={self.curr_host}, self.dist_endpoints={self.dist_endpoints}"
)
if self.curr_host in self.dist_endpoints:
os.environ['DISTRIBUTED_TRAINER_ENDPOINTS'] = self.dist_endpoints
os.environ['PADDLE_TRAINERS'] = self.trainers
logger.info("update env DISTRIBUTED_TRAINER_ENDPOINTS {} ".format(
self.dist_endpoints))
logger.info("update env PADDLE_TRAINERS {} ".format(self.trainers))
return
# fault tolerance
idx = self.hosts.index(self.curr_host)
# swap if self.host not in the right position
if rank >= 0:
self.hosts[idx] = self.hosts[rank]
self.hosts[rank] = self.curr_host
else:
os.environ['PADDLE_TRAINER_ID'] = '{}'.format(idx)
hosts = ','.join([host_port.split(":")[0] for host_port in self.hosts])
self.args.ips = hosts
os.environ['PADDLE_TRAINERS'] = hosts
def _update_elastic_scale_out(self):
host_endpoints = copy.deepcopy(self.trainer_endpoints_list)
logger.info(
f"elastic scale out, from {len(self.hosts)} to {self.np}, hosts={self.hosts}, host_endpoints={host_endpoints}"
)
for curr_host_port in self.hosts:
if curr_host_port not in host_endpoints:
host_endpoints.append(curr_host_port)
os.environ['PADDLE_TRAINER_ID'] = '{}'.format(
host_endpoints.index(self.curr_host))
hosts = ','.join(
[host_port.split(":")[0] for host_port in host_endpoints])
self.args.ips = hosts
os.environ['PADDLE_TRAINERS'] = hosts
self.np = len(host_endpoints)
os.environ['PADDLE_TRAINER_ENDPOINTS'] = ','.join(host_endpoints)
os.environ['DISTRIBUTED_TRAINER_ENDPOINTS'] = self.dist_endpoints
self.trainer_endpoints_list = host_endpoints
def _update_elastic_scale_in(self):
host_endpoints = copy.deepcopy(self.trainer_endpoints_list)
logger.info(
f"elastic scale in, from {self.np} to {len(self.hosts)}, hosts={self.hosts}, host_endpoints={host_endpoints}"
)
        # When a node is scaled in from the head of the rank list, minimize rank movement.
        # e.g.:
        #   the original trainers are: 10.10.10.0,10.10.10.1,10.10.10.2,10.10.10.3
        #   10.10.10.0 is removed
        #   the new trainers are: 10.10.10.3,10.10.10.1,10.10.10.2
        #   Here the ranks of 10.10.10.1 and 10.10.10.2 stay unchanged, while 10.10.10.3 takes rank 0.
endpoints_dict = dict()
unsorted_endpoints = []
for id, host_port in enumerate(self.hosts):
idx = host_endpoints.index(host_port)
if idx <= len(self.hosts) - 1 and not endpoints_dict.get(idx):
endpoints_dict[idx] = host_port
else:
unsorted_endpoints.append(host_port)
idle_index = 0
sorted_endpoints = []
for idx in range(len(self.hosts)):
if not endpoints_dict.get(idx) and len(unsorted_endpoints) > 0:
endpoints_dict[idx] = unsorted_endpoints[idle_index]
idle_index += 1
sorted_endpoints.append(endpoints_dict.get(idx))
logger.info(f"elastic scale in, sorted_endpoints={sorted_endpoints}")
self.trainer_endpoints_list = sorted_endpoints
ip_list = [ip_port.split(":")[0] for ip_port in sorted_endpoints]
hosts = ','.join(ip_list)
new_endpoints = self._host_to_endpoints(sorted_endpoints,
self.devices_per_proc)
self.args.ips = hosts
os.environ['PADDLE_TRAINER_ID'] = '{}'.format(
sorted_endpoints.index(self.curr_host))
os.environ['PADDLE_TRAINERS'] = hosts
self.np = len(sorted_endpoints)
os.environ['PADDLE_TRAINER_ENDPOINTS'] = ','.join(sorted_endpoints)
os.environ['DISTRIBUTED_TRAINER_ENDPOINTS'] = new_endpoints
self._update_endpoint(new_endpoints, hosts)
def _update_hosts(self):
assert len(self.hosts) != 0, 'hosts empty'
if self.elastic_level == ElasticLevel.FAULT_TOLERANCE:
            self._update_fault_tolerance()
else:
# elastic
if len(self.hosts) == self.np:
logger.info(f"elastic startup, hosts={self.hosts}")
                self._update_fault_tolerance()
elif len(self.hosts) > self.np:
# scale out
self._update_elastic_scale_out()
else:
# scale in
self._update_elastic_scale_in()
def wait(self):
if not self.enable:
return
idx = 1
while not self.stopped:
if self._match():
logger.info('ready with hosts {}'.format(self.hosts))
self._update_hosts()
return
logger.info('not ready for np {} with hosts {}'.format(self.np,
self.hosts))
idx += 1
time.sleep(2)
return
def run(self, launcher):
if self.stopped:
return
self.launcher = launcher(self.args)
self.launcher.launch()
def watch(self):
if self.need_sync:
self.need_sync = False
while not self.stopped:
ret = self.launcher.watch()
logger.debug(f"launcher.watch():{ret}")
if ret is not None: # self terminated
logger.info('job exit with code {}'.format(ret))
if ret == ELASTIC_AUTO_PARALLEL_EXIT_CODE:
logger.info('job re-launch for auto parallel')
self.launcher.stop()
return ElasticStatus.HOLD
# process is completed if ret >= 0 or error else
completed = True if ret == 0 else False
self.exit(completed=completed)
if completed:
return ElasticStatus.COMPLETED
if self.elastic_level == ElasticLevel.FAULT_TOLERANCE:
return ElasticStatus.RESTART
else:
return ElasticStatus.ERROR
if not self._completed() and (not self._match() or self.need_sync):
self.launcher.stop()
return ElasticStatus.HOLD
time.sleep(2)
if self.launcher:
self.launcher.stop()
return ElasticStatus.EXIT
def signal_handler(self, sigint, frame):
if self.enable:
self.exit()
self.sigint = sigint
self.stopped = True
|
client.py
|
import ssl
import json
import time
import socket
import logging
import random
import Queue
import threading
RECV_SIZE = 2 ** 16
CLIENT_VERSION = "0.0"
PROTO_VERSION = "1.0"
DEFAULT_HOST = "ecdsa.net"
DEFAULT_PORT = 50001
TIMEOUT = 5
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("stratum-client")
def encode_msg(msg):
return json.dumps(msg).encode() + b"\n"
def socket_reader(sock, queue):
f = sock.makefile()
while True:
try:
line = f.readline()
except socket.timeout:
continue
except (socket.error, ssl.SSLError) as e:
log.error("error reading from socket: %s", e)
break
if not line:
break
# log.debug(">>> %s", line.strip())
msg = json.loads(line.strip())
queue.put(msg)
sock.close()
def socket_writer(sock, queue):
while True:
msg = queue.get()
if not msg:
break
try:
payload = encode_msg(msg)
# log.debug("<<< %s", payload.strip())
sock.send(payload)
except (socket.error, ssl.SSLError) as e:
log.error("error writing from socket: %s", e)
break
sock.close()
class Connection(object):
def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT, ssl=False):
self.host = host
self.port = port
self.ssl = ssl
self.call_count = 0
self.socket = None
self.server_version = None
self.reader = None
self.writer = None
self.incoming = Queue.Queue()
self.outgoing = Queue.Queue()
self.connect()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def create_socket(self):
sock = socket.create_connection((self.host, self.port), timeout=TIMEOUT)
return ssl.wrap_socket(sock) if self.ssl else sock
def connect(self):
log.debug("connecting to %s:%s...", self.host, self.port)
self.socket = self.create_socket()
log.info("connected to %s:%s", self.host, self.port)
self.reader = threading.Thread(target=socket_reader, args=(self.socket, self.incoming))
self.reader.setDaemon(True)
self.reader.start()
self.writer = threading.Thread(target=socket_writer, args=(self.socket, self.outgoing))
self.writer.setDaemon(True)
self.writer.start()
self.version()
def version(self):
self.server_version = self.call("server.version")["result"]
def close(self):
self.socket.close()
log.info("disconnected from %s:%s", self.host, self.port)
self.socket = None
def send(self, method, params):
msg = {"id": self.call_count, "method": method, "params": params}
self.call_count += 1
self.outgoing.put(msg)
def recv(self):
return self.incoming.get()
def call(self, method, *params):
t1 = time.time()
self.send(method, params)
resp = self.recv()
t2 = time.time()
delta = (t2 - t1) * 1000
log.debug("%s(%s) took %sms", method, params, delta)
return resp
class Peer(object):
ADDRESS_TYPE_CLEAR = "c"
ADDRESS_TYPE_ONION = "o"
ADDRESS_TYPE_ANY = "a"
PORT_TYPE_TCP = "t"
PORT_TYPE_SSL = "s"
PORT_TYPE_HTTP = "h"
PORT_TYPE_HTTPS = "g"
PORT_TYPES = (PORT_TYPE_TCP, PORT_TYPE_SSL, PORT_TYPE_HTTP, PORT_TYPE_HTTPS)
def __init__(self, addresses, params):
self.addresses = addresses
self.params = params
        self.version = params[0]
self.prune = None
self.ports = []
self.parse(params)
def parse(self, params):
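        # Each param is a feature string: "p<N>" gives the pruning limit,
        # while a port-type letter ("t", "s", "h", "g") optionally followed
        # by digits advertises a TCP/SSL/HTTP/HTTPS port (a bare letter
        # means the default port for that type).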
for param in params:
if param[0] == "p":
self.prune = int(param[1:])
elif param[0] in self.PORT_TYPES:
peer_type = param[0]
if param[1:]:
port = int(param[1:])
elif peer_type == self.PORT_TYPE_TCP:
port = DEFAULT_PORT
elif peer_type == self.PORT_TYPE_SSL:
port = 50002
elif peer_type == self.PORT_TYPE_HTTP:
port = 8081
elif peer_type == self.PORT_TYPE_HTTPS:
port = 8082
if port:
self.ports.append((peer_type, port))
def __repr__(self):
return "Peer(addresses={}, params={})".format(self.addresses, self.params)
@classmethod
def discover(cls):
with Connection() as conn:
result = conn.call("server.peers.subscribe")
peers = result["result"]
return [Peer(peer[0:-1], peer[-1]) for peer in peers]
@property
def all_addresses(self):
return [address for address in self.addresses]
@property
def clearnet_addresses(self):
return [address for address in self.addresses if not is_onion(address)]
@property
def onion_addresses(self):
return [address for address in self.addresses if is_onion(address)]
def get_ports_by_type(self, port_type):
return [port for pt, port in self.ports if port_type == pt]
tcp_ports = property(lambda self: self.get_ports_by_type(self.PORT_TYPE_TCP))
ssl_ports = property(lambda self: self.get_ports_by_type(self.PORT_TYPE_SSL))
http_ports = property(lambda self: self.get_ports_by_type(self.PORT_TYPE_HTTP))
https_ports = property(lambda self: self.get_ports_by_type(self.PORT_TYPE_HTTPS))
def is_onion(address):
return address.endswith(".onion")
class ConnectionHandler(object):
def __init__(self, connection_pool):
self.connection_pool = connection_pool
self.connection = None
def __enter__(self):
self.connection = self.connection_pool.take()
return self.connection
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection_pool.release(self.connection)
def connect_to_peer(peers, allow_tcp=True, address_type=Peer.ADDRESS_TYPE_CLEAR):
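    # Try up to 100 randomly chosen peers, preferring SSL ports and falling
    # back to plain TCP when allow_tcp is set; return the first Connection
    # that can be established, or None if every attempt fails.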
for _ in range(100):
peer = random.choice(peers)
if address_type == Peer.ADDRESS_TYPE_CLEAR:
addresses = peer.clearnet_addresses
elif address_type == Peer.ADDRESS_TYPE_ONION:
addresses = peer.onion_addresses
else:
addresses = peer.all_addresses
if not addresses:
continue
ports = peer.ssl_ports
has_ssl = bool(ports)
if not has_ssl and allow_tcp:
ports = peer.tcp_ports
if not ports:
continue
address = addresses[0]
port = ports[0]
try:
return Connection(host=address, port=port, ssl=has_ssl)
except socket.error as e:
log.error("could not connect to %s: %s", address, e)
class ConnectionPool(object):
def __init__(self, max_size):
self.connections = Queue.Queue()
self.peers = Peer.discover()
self.max_size = max_size
self.count = 0
def get(self):
return ConnectionHandler(self)
def close(self):
while not self.connections.empty():
connection = self.connections.get_nowait()
connection.close()
def release(self, connection):
self.connections.put(connection)
def new_connection(self):
conn = connect_to_peer(self.peers)
if conn:
self.count += 1
return conn
def take(self):
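        # Only block waiting for a released connection once the pool has
        # reached max_size; otherwise reuse an idle connection if one is
        # queued, or open a new one on demand.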
block = self.count >= self.max_size
try:
connection = self.connections.get(block=block)
except Queue.Empty:
connection = self.new_connection()
return connection
if __name__ == "__main__":
pool = ConnectionPool(1)
for _ in range(3):
with pool.get() as conn:
rv = conn.call("server.version")
print conn.host, conn.server_version
print rv["result"]
time.sleep(1)
pool.close()
|
managers.py
|
#
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
#
# Imports
#
import os
import sys
import weakref
import threading
import array
import queue
from traceback import format_exc
from pickle import PicklingError
from multiprocessing import Process, current_process, active_children, Pool, util, connection
from multiprocessing.process import AuthenticationString
from multiprocessing.forking import exit, Popen, assert_spawning, ForkingPickler
from multiprocessing.util import Finalize, info
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tostring())
ForkingPickler.register(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list: # only needed in Py3.0
def rebuild_as_list(obj):
return list, (list(obj),)
for view_type in view_types:
ForkingPickler.register(view_type, rebuild_as_list)
import copyreg
copyreg.pickle(view_type, rebuild_as_list)
#
# Type for identifying shared objects
#
class Token(object):
'''
    Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return 'Token(typeid=%r, address=%r, id=%r)' % \
(self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
Send a message to manager using connection `c` and return response
'''
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind == '#TRACEBACK':
assert type(result) is str
return RemoteError(result)
elif kind == '#UNSERIALIZABLE':
assert type(result) is str
return RemoteError('Unserializable message: %s\n' % result)
else:
return ValueError('Unrecognized message type')
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if hasattr(func, '__call__'):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
def __init__(self, registry, address, authkey, serializer):
assert isinstance(authkey, bytes)
self.registry = registry
self.authkey = AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=5)
self.address = self.listener.address
self.id_to_obj = {'0': (None, ())}
self.id_to_refcount = {}
self.mutex = threading.RLock()
self.stop = 0
def serve_forever(self):
'''
Run the server forever
'''
current_process()._manager_server = self
try:
try:
while 1:
try:
c = self.listener.accept()
except (OSError, IOError):
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.daemon = True
t.start()
except (KeyboardInterrupt, SystemExit):
pass
finally:
self.stop = 999
self.listener.close()
def handle_request(self, c):
'''
Handle a new connection
'''
funcname = result = request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception as e:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', e)
c.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.current_thread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop:
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
obj, exposed, gettypeid = id_to_obj[ident]
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' %
(methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception as e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
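                # The requested method is not exposed (or lookup failed before
                # dispatch): fall back to the special handlers for __str__,
                # __repr__ and #GETVALUE when possible.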
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception as e:
send(('#UNSERIALIZABLE', repr(msg)))
except Exception as e:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__':fallback_str,
'__repr__':fallback_repr,
'#GETVALUE':fallback_getvalue
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
self.mutex.acquire()
try:
result = []
keys = list(self.id_to_obj.keys())
keys.sort()
for ident in keys:
if ident != '0':
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
finally:
self.mutex.release()
def number_of_objects(self, c):
'''
Number of shared objects
'''
return len(self.id_to_obj) - 1 # don't count ident='0'
def shutdown(self, c):
'''
Shutdown this process
'''
try:
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
if sys.stdout != sys.__stdout__:
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
util._run_finalizers(0)
for p in active_children():
util.debug('terminating a child process of manager')
p.terminate()
for p in active_children():
util.debug('terminating a child process of manager')
p.join()
util._run_finalizers()
util.info('manager exiting with exitcode 0')
except:
import traceback
traceback.print_exc()
finally:
exit(0)
def create(self, c, typeid, *args, **kwds):
'''
Create a new shared object and return its id
'''
self.mutex.acquire()
try:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
assert len(args) == 1 and not kwds
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
assert type(method_to_typeid) is dict
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
# increment the reference count immediately, to avoid
# this object being garbage collected before a Proxy
# object for it can be created. The caller of create()
# is responsible for doing a decref once the Proxy object
# has been created.
self.incref(c, ident)
return ident, tuple(exposed)
finally:
self.mutex.release()
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.current_thread().name = name
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
self.mutex.acquire()
try:
self.id_to_refcount[ident] += 1
finally:
self.mutex.release()
def decref(self, c, ident):
self.mutex.acquire()
try:
assert self.id_to_refcount[ident] >= 1
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_obj[ident], self.id_to_refcount[ident]
util.debug('disposing of obj with id %r', ident)
finally:
self.mutex.release()
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = {
'pickle' : (connection.Listener, connection.Client),
'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self, address=None, authkey=None, serializer='pickle'):
if authkey is None:
authkey = current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
self._authkey = AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
def __reduce__(self):
return type(self).from_address, \
(self._address, self._authkey, self._serializer)
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
assert self._state.value == State.INITIAL
return Server(self._registry, self._address,
self._authkey, self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self, initializer=None, initargs=()):
'''
Spawn a server process for this manager object
'''
assert self._state.value == State.INITIAL
if initializer is not None and not hasattr(initializer, '__call__'):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer,
initializer=None, initargs=()):
'''
Create a server, report its address and run it
'''
if initializer is not None:
initializer(*initargs)
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
util.info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
self._process.join(timeout)
def _debug_info(self):
'''
        Return some info about the server's shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
util.info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=0.2)
if process.is_alive():
util.info('manager still alive')
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=0.1)
if process.is_alive():
util.info('manager still alive after terminate')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
address = property(lambda self: self._address)
@classmethod
def register(cls, typeid, callable=None, proxytype=None, exposed=None,
method_to_typeid=None, create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = method_to_typeid or \
getattr(proxytype, '_method_to_typeid_', None)
if method_to_typeid:
for key, value in list(method_to_typeid.items()):
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
if create_method:
def temp(self, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token, self._serializer, manager=self,
authkey=self._authkey, exposed=exp
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True):
BaseProxy._mutex.acquire()
try:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
finally:
BaseProxy._mutex.release()
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
if authkey is not None:
self._authkey = AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
        Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.current_thread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
except Exception as e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.current_thread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception as e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
kwds = {}
if Popen.thread_is_spawning():
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(AutoProxy, self._token, self._serializer, kwds))
else:
return (RebuildProxy,
(type(self), self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %s>' % \
(type(self).__name__, self._token.typeid, '0x%x' % id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
If possible the shared object is returned, or otherwise a proxy for it.
'''
server = getattr(current_process(), '_manager_server', None)
if server and server.address == token.address:
return server.id_to_obj[token.id][0]
else:
incref = (
kwds.pop('incref', True) and
not getattr(current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
    Return a proxy type whose methods are given by `exposed`
'''
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
exec('''def %s(self, *args, **kwds):
return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
ProxyType = type(name, (BaseProxy,), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
def AutoProxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token,))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref)
proxy._isauto = True
return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__', 'send', 'throw', 'close')
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True):
return self._callmethod('acquire', (blocking,))
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def notify(self):
return self._callmethod('notify')
def notify_all(self):
return self._callmethod('notify_all')
class EventProxy(BaseProxy):
_exposed_ = ('is_set', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('is_set')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value,))
value = property(get, set)
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__delslice__',
'__getitem__', '__getslice__', '__len__', '__mul__',
'__reversed__', '__rmul__', '__setitem__', '__setslice__',
'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'reverse', 'sort', '__imul__'
)) # XXX __getslice__ and __setslice__ unneeded in Py3.0
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value,))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
DictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
ArrayProxy = MakeProxyType('ArrayProxy', (
'__len__', '__getitem__', '__setitem__', '__getslice__', '__setslice__'
)) # XXX __getslice__ and __setslice__ unneeded in Py3.0
PoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
'map', 'map_async', 'terminate'
))
PoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
The `multiprocessing.Manager()` function creates started instances of
this class.
'''
SyncManager.register('Queue', queue.Queue)
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Pool', Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
|
ev_farmer.py
|
from pokeAPI import *
import time
from threading import Thread
from prompt_toolkit import HTML, print_formatted_text
from prompt_toolkit.styles import Style
import sys
import os
import random
driver = pokeAPI().register_window(name="PokeMMO")
wx, wy = driver.get_window_rect()[:2]
ROUND_COUNT = 0
print = print_formatted_text
style = Style.from_dict({
'msg': '#71f076 bold',
'sub-msg': '#616161 italic'
})
def display_metrics():
while(True):
global ROUND_COUNT
driver.clear_console()
#Current dungeon run number
str_buffer = str(ROUND_COUNT)
print(HTML(
u'<b>></b> <ansicyan><u>Current Dungeon Run</u></ansicyan>'+"<b> : </b>"+'<i><ansigrey>'+str_buffer+'</ansigrey></i>'
), style=style)
time.sleep(1)
def navigate_to_low_level_grass():
driver.hold_key('left',0.6)
driver.hold_key('up',1.3)
driver.hold_key('right',1.7)
driver.hold_key('up',0.50)
driver.hold_key('right',.5)
driver.hold_key('up',2.4)
def navigate_to_speed_ev():
print("Going to pokecenter")
driver.use_pokecenter(location="lacunosa")
print("Navigating to Rapidash Grass Patches")
driver.press_key('down')
#get on bike
driver.toggle_bike()
driver.hold_key('left',random.uniform(3, 3.3))
driver.toggle_bike()
driver.press_key('up')
driver.press_key('up')
driver.press_key('up')
def navigate_to_hp_ev():
print("Going to pokecenter")
driver.use_pokecenter(location="opelucid")
print("Navigating to Buffoulant Grass Patches")
#get on bike
driver.toggle_bike()
driver.hold_key('left',1.4)
driver.hold_key('up',2.15)
driver.hold_key('right',1.90)
driver.hold_key('up',6.5)
driver.hold_key('right',2.7)
driver.toggle_bike()
driver.hold_key('up',.15)
driver.toggle_bike()
driver.hold_key('right',1.8)
driver.toggle_bike()
driver.hold_key('up',.3)
def navigate_to_attack_ev():
print("Going to pokecenter")
driver.use_pokecenter(location="opelucid")
print("Navigating to Buffoulant Grass Patches")
#get on bike
driver.toggle_bike()
driver.hold_key('left',1.4)
driver.hold_key('up',2.15)
driver.hold_key('right',1.90)
driver.hold_key('up',5.5)
driver.hold_key('right',random.uniform(.1,.2))
driver.toggle_bike()
driver.hold_key('up',.2)
def navigate_to_spattack_ev():
print("Going to pokecenter")
driver.use_pokecenter(location="opelucid")
print("Navigating to Duosion Grass Patches")
driver.hold_key('down',.05)
#get on bike
driver.toggle_bike()
driver.hold_key('left',6.8)
time.sleep(1.5)
driver.hold_key('left',random.uniform(.25,.4))
driver.hold_key('down',.2)
driver.toggle_bike()
driver.press_key('down')
def navigate_to_spdef_ev():
print("Going to pokecenter")
driver.use_pokecenter(location="undella")
print("Navigating to Mantine Surf Patches")
driver.hold_key('left',.9)
driver.hold_key('down',1.8)
driver.surf()
def navigate_to_def_ev():
print("Going to pokecenter")
driver.use_pokecenter(location="undella")
print("Navigating to Mantine Surf Patches")
driver.hold_key('left',.9)
driver.hold_key('down',.8)
driver.toggle_bike()
driver.hold_key('right',2.5)
driver.toggle_bike()
driver.surf()
def navigate_to_luvdisc():
print("Going to pokecenter")
driver.use_pokecenter(location="undella")
print("Navigating to luvdisc Patch")
driver.hold_key('left',.9)
driver.hold_key('down',.8)
driver.toggle_bike()
driver.hold_key('right',random.uniform(.5,1.5))
driver.press_key('down')
def farm_heartscales(hotkey='4'):
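    # Fish for Luvdisc near Undella: flee from horde battles, use the first
    # attack in single battles, and stall indefinitely (after a Twilio alert
    # call) whenever a shiny is detected.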
navigate_to_luvdisc()
    # run ten fishing encounters before heading back to the pokecenter
currentPP = 10
print("Starting farm loop")
while(currentPP>0):
print("Looking for battle")
while((not driver.is_in_battle()) and (not driver.is_in_horde())):
print("In battle: "+str(driver.is_in_battle()))
driver.fish(hotkey=hotkey)
print("Battle Found")
in_battle = True
#wait for battle options to pop up
if (driver.is_in_horde()):
print("Scanning for Horde Shinies")
if(driver.is_shiny_horde() == False):
print("Shiny not found")
print("Using AOE attack")
driver.flee_from_battle()
print("Fleeing from horde battle")
time.sleep(random.uniform(12,15))
in_battle = False
else:
print("Shiny found!!!")
print("Stalling until human services game")
pokeTwilio.found_shiny_call()
while True:
time.sleep(random.uniform(60, 300))
driver.stall_battle()
else:
while(in_battle):
print("Scanning for Single-Battle Shinies")
if(driver.is_shiny_single()==False):
print("Shiny not found")
print("Using first attack")
driver.use_first_attack()
print("Attack animations & battle close")
time.sleep(random.uniform(12,15))
#if battle is over set loop flag to false
if(not driver.is_in_battle()):
print("Enemy pokemon was defeated")
in_battle = False
else:
print("Shiny found!!!")
print("Stalling until human services game")
pokeTwilio.found_shiny_call()
while True:
time.sleep(random.uniform(60, 300))
driver.stall_battle()
currentPP -= 1
def farm_evs(ev_type = "def"):
#Start bot underneath poke-center exit facing down
if(ev_type=="attack"):
navigate_to_attack_ev()
elif(ev_type=="spattack"):
navigate_to_spattack_ev()
elif(ev_type=="speed"):
navigate_to_speed_ev()
elif(ev_type=="spdef"):
navigate_to_spdef_ev()
elif(ev_type=="def"):
navigate_to_def_ev()
elif(ev_type=="hp"):
navigate_to_hp_ev()
#sweet scent can be used 6 times before needing to go to pokecenter
currentPP = 6
print("Starting farm loop")
while(currentPP>0):
print("Looking for battle")
while((not driver.is_in_battle()) and (not driver.is_in_horde())):
driver.sweet_scent()
print("Battle Found")
in_battle = True
#wait for battle options to pop up
if (driver.is_in_horde()):
print("Scanning for Horde Shinies")
if(driver.is_shiny_horde() == False):
print("Shiny not found")
print("Using AOE attack")
driver.use_first_attack(is_horde=True)
print("Attack animations & battle close")
time.sleep(12)
in_battle = False
else:
print("Shiny found!!!")
print("Stalling until human services game")
pokeTwilio.found_shiny_call()
while True:
time.sleep(random.uniform(60, 300))
driver.stall_battle()
else:
while(in_battle):
print("Scanning for Single-Battle Shinies")
if(driver.is_shiny_single()==False):
print("Shiny not found")
print("Using AOE attack")
driver.flee_from_battle()
print("Attack animations & battle close")
time.sleep(12)
#if battle is over set loop flag to false
if(not driver.is_in_battle()):
print("Enemy pokemon was defeated")
in_battle = False
else:
print("Shiny found!!!")
print("Stalling until human services game")
pokeTwilio.found_shiny_call()
while True:
time.sleep(random.uniform(60, 300))
driver.stall_battle()
print("Holding z through any prompts")
driver.hold_key('z',3)
currentPP -= 1
metric_thread = Thread(target=display_metrics,args=())
try:
#metric_thread.start()
while True:
#farm_evs("def")
farm_heartscales()
ROUND_COUNT += 1
except KeyboardInterrupt:
print ('Interrupted')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
|
RID_2.0.2.py
|
'''
This is a script for scraping pseudo-random images from Imgur.
It adds threading and helper functions intended for a GUI.
'''
import random
import string
import urllib
import os
import imghdr
import threading
length = 6
directory = 'test10'
name = 'threads'
def swap_dir(user_dir):
if os.path.exists(user_dir):
os.chdir(user_dir)
else:
os.makedirs(user_dir)
os.chdir(user_dir)
def generate_string(length):
random_string = []
for i in xrange(0, length):
char = random.choice(string.letters + string.digits)
random_string.append(char)
return ''.join(random_string)
def convert_string(string):
img_name = 'http://i.imgur.com/' + string + '.jpg'
return img_name
def download_save(web_addr, name_string):
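    # Download the candidate URL, sniff the real image type with imghdr,
    # rename the file with the detected extension, and delete anything that
    # is not an image or is smaller than 1 KiB.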
try:
web_object = urllib.urlopen(web_addr)
except Exception:
return
output_write = open(name_string, 'wb')
output_write.write(web_object.read())
output_write.close()
image_type = imghdr.what(name_string)
complete_name = name_string + '.' + str(image_type)
if image_type is None:
os.remove(name_string)
else:
os.rename(name_string, name_string + '.' + image_type)
try:
if os.path.getsize(complete_name) < 1 * 1024:
os.remove(complete_name)
except WindowsError:
pass
def function_thread(name, length):
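    # Worker body: generate one random image id, build the i.imgur.com URL,
    # and attempt the download.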
x = generate_string(length)
y = convert_string(x)
download_save(y, x)
def Main():
if threading.active_count() <= 200:
threading.Thread(target=function_thread, args=(name, 5)).start()
print threading.active_count()
if __name__ == '__main__':
swap_dir(directory)
while True:
Main()
|
dark_reaper.py
|
# Copyright 2016-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vgaronne@gmail.com>, 2016-2018
# - Martin Barisits <martin.barisits@cern.ch>, 2016
# - Thomas Beermann <thomas.beermann@cern.ch>, 2016
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
#
# PY3K COMPATIBLE
'''
Dark Reaper is a daemon to manage quarantined file deletion.
'''
import hashlib
import logging
import os
import random
import socket
import sys
import threading
import time
import traceback
from rucio.common.config import config_get
from rucio.common.exception import (SourceNotFound, DatabaseException, ServiceUnavailable,
RSEAccessDenied, ResourceTemporaryUnavailable)
from rucio.core import rse as rse_core
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.message import add_message
from rucio.core.quarantined_replica import (list_quarantined_replicas,
delete_quarantined_replicas,
list_rses)
from rucio.rse import rsemanager as rsemgr
logging.getLogger("requests").setLevel(logging.CRITICAL)
logging.basicConfig(stream=sys.stdout,
level=getattr(logging,
config_get('common', 'loglevel',
raise_exception=False,
default='DEBUG').upper()),
format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
GRACEFUL_STOP = threading.Event()
def reaper(rses=[], worker_number=1, total_workers=1, chunk_size=100, once=False, scheme=None):
"""
Main loop to select and delete files.
:param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
:param worker_number: The worker number.
:param total_workers: The total number of workers.
:param chunk_size: the size of chunk for deletion.
:param once: If True, only runs one iteration of the main loop.
:param scheme: Force the reaper to use a particular protocol, e.g., mock.
"""
logging.info('Starting Dark Reaper %s-%s: Will work on RSEs: %s', worker_number, total_workers, str(rses))
pid = os.getpid()
thread = threading.current_thread()
hostname = socket.gethostname()
executable = ' '.join(sys.argv)
    hash_executable = hashlib.sha256((sys.argv[0] + ''.join(rses)).encode()).hexdigest()
sanity_check(executable=None, hostname=hostname)
while not GRACEFUL_STOP.is_set():
try:
# heartbeat
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
logging.info('Dark Reaper({0[worker_number]}/{0[total_workers]}): Live gives {0[heartbeat]}'.format(locals()))
nothing_to_do = True
random.shuffle(rses)
for rse in rses:
replicas = list_quarantined_replicas(rse=rse,
limit=chunk_size, worker_number=worker_number,
total_workers=total_workers)
rse_info = rsemgr.get_rse_info(rse)
rse_protocol = rse_core.get_rse_protocols(rse)
prot = rsemgr.create_protocol(rse_info, 'delete', scheme=scheme)
deleted_replicas = []
try:
prot.connect()
for replica in replicas:
nothing_to_do = False
try:
                            pfn = str(list(rsemgr.lfns2pfns(rse_settings=rse_info,
                                                            lfns=[{'scope': replica['scope'], 'name': replica['name'], 'path': replica['path']}],
                                                            operation='delete', scheme=scheme).values())[0])
logging.info('Dark Reaper %s-%s: Deletion ATTEMPT of %s:%s as %s on %s', worker_number, total_workers, replica['scope'], replica['name'], pfn, rse)
start = time.time()
prot.delete(pfn)
duration = time.time() - start
logging.info('Dark Reaper %s-%s: Deletion SUCCESS of %s:%s as %s on %s in %s seconds', worker_number, total_workers, replica['scope'], replica['name'], pfn, rse, duration)
add_message('deletion-done', {'scope': replica['scope'],
'name': replica['name'],
'rse': rse,
'file-size': replica.get('bytes') or 0,
'bytes': replica.get('bytes') or 0,
'url': pfn,
'duration': duration})
deleted_replicas.append(replica)
except SourceNotFound:
err_msg = 'Dark Reaper %s-%s: Deletion NOTFOUND of %s:%s as %s on %s' % (worker_number, total_workers, replica['scope'], replica['name'], pfn, rse)
logging.warning(err_msg)
deleted_replicas.append(replica)
except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
err_msg = 'Dark Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s' % (worker_number, total_workers, replica['scope'], replica['name'], pfn, rse, str(error))
logging.warning(err_msg)
add_message('deletion-failed', {'scope': replica['scope'],
'name': replica['name'],
'rse': rse,
'file-size': replica['bytes'] or 0,
'bytes': replica['bytes'] or 0,
'url': pfn,
'reason': str(error)})
except:
logging.critical(traceback.format_exc())
finally:
prot.close()
delete_quarantined_replicas(rse=rse, replicas=deleted_replicas)
if once:
break
if once:
break
if nothing_to_do:
logging.info('Dark Reaper %s-%s: Nothing to do. I will sleep for 60s', worker_number, total_workers)
time.sleep(60)
except DatabaseException as error:
logging.warning('Reaper: %s', str(error))
except:
logging.critical(traceback.format_exc())
die(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
logging.info('Graceful stop requested')
logging.info('Graceful stop done')
return
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
GRACEFUL_STOP.set()
def run(total_workers=1, chunk_size=100, once=False, rses=[], scheme=None,
exclude_rses=None, include_rses=None, delay_seconds=0, all_rses=False):
"""
Starts up the reaper threads.
:param total_workers: The total number of workers.
:param chunk_size: the size of chunk for deletion.
    :param once: If True, only runs one iteration of the main loop.
:param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
:param scheme: Force the reaper to use a particular protocol/scheme, e.g., mock.
:param exclude_rses: RSE expression to exclude RSEs from the Reaper.
:param include_rses: RSE expression to include RSEs.
"""
logging.info('main: starting processes')
if all_rses:
rses = list_rses()
elif not rses:
rses = [rse['rse'] for rse in rse_core.list_rses()]
threads = []
for worker in range(total_workers):
kwargs = {'worker_number': worker,
'total_workers': total_workers,
'rses': rses,
'once': once,
'chunk_size': chunk_size,
'scheme': scheme}
threads.append(threading.Thread(target=reaper, kwargs=kwargs, name='Worker: %s, Total_Workers: %s' % (worker, total_workers)))
[t.start() for t in threads]
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
|
map_reduce.py
|
r"""
Parallel computations using RecursivelyEnumeratedSet and Map-Reduce
There is an efficient way to distribute computations on a set
`S` of objects defined by :func:`RecursivelyEnumeratedSet`
(see :mod:`sage.sets.recursively_enumerated_set` for more details)
over which one would like to perform the following kind of operations:
* Compute the cardinality of a (very large) set defined recursively
(through a call to :class:`RecursivelyEnumeratedSet
of forest type<sage.combinat.backtrack.SearchForest>`)
* More generally, compute any kind of generating series over this set
* Test a conjecture, e.g. find an element of `S` satisfying a specific
property, or check that none does or that they all do
* Count/list the elements of `S` that have a specific property
* Apply any map/reduce kind of operation over the elements of `S`
AUTHORS:
- Florent Hivert -- code, documentation (2012--2016)
- Jean Baptiste Priez -- prototype, debugging help on MacOSX (2011-June, 2016)
- Nathann Cohen -- some documentation (2012)
Contents
--------
- :ref:`basic-usage`
- :ref:`advanced-use`
- :ref:`profiling`
- :ref:`logging`
- :ref:`protocol-description`
- :ref:`examples`
How is this different from usual MapReduce?
--------------------------------------------
This implementation is specific to :class:`RecursivelyEnumeratedSet
of forest type<sage.combinat.backtrack.SearchForest>`, and uses its
properties to do its job. Not only mapping and reducing but also
**generating the elements** of `S` is done on different processors.
.. _basic-usage:
How can I use all that stuff?
-----------------------------
First, you need to set the environment variable `SAGE_NUM_THREADS` to the
desired number of parallel threads to be used:
sage: import os # not tested
sage: os.environ["SAGE_NUM_THREADS"] = '8' # not tested
Second, you need the information necessary to describe a
:class:`RecursivelyEnumeratedSet of forest
type<sage.combinat.backtrack.SearchForest>` representing your set `S` (see
:mod:`sage.sets.recursively_enumerated_set`). Then, you need to provide a
"map" function as well as a "reduce" function. Here are some examples:
* **Counting the number of elements.** In this situation, the map function
can be set to ``lambda x: 1``, and the reduce function just adds the
values together, i.e. ``lambda x, y: x + y``.
We count binary words of length `\leq 16`::
sage: seeds = [[]]
sage: succ = lambda l: [l + [0], l + [1]] if len(l) < 16 else []
sage: S = RecursivelyEnumeratedSet(seeds, succ,
....: structure='forest', enumeration='depth')
sage: map_function = lambda x: 1
sage: reduce_function = lambda x, y: x + y
sage: reduce_init = 0
sage: S.map_reduce(map_function, reduce_function, reduce_init)
131071
This matches the number of binary words of length `\leq 16`::
sage: factor(131071 + 1)
2^17
Note that the map and reduce functions here have the default values of the
:meth:`sage.combinat.backtrack.SearchForest.map_reduce` method
so that the number of elements can be obtained more simply with::
sage: S.map_reduce()
131071
Instead of using :func:`RecursivelyEnumeratedSet`, one can directly use
:class:`RESetMapReduce`, which gives finer
control over the parallel execution (see :ref:`advanced-use` below)::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce(
....: roots=[[]],
....: children=lambda l: [l + [0], l + [1]] if len(l) < 16 else [],
....: map_function=lambda x: 1,
....: reduce_function=lambda x, y: x + y,
....: reduce_init=0)
sage: S.run()
131071
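For testing, the same computation can also be run without any parallelism
through the :meth:`RESetMapReduce.run_serial` method defined further down in
this file; it should return the same value (shown untested here)::
    sage: S.run_serial()   # not tested
    131071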
* **Generating series.** For this, take a Map function that associates a
monomial to each element of `S`, while the Reduce function is still equal to
``lambda x, y: x + y``.
We compute the generating series for counting binary words of each
length `\leq 16`::
sage: S = RecursivelyEnumeratedSet(
....: [[]], lambda l: [l + [0], l + [1]] if len(l) < 16 else [],
....: structure='forest', enumeration='depth')
sage: x = polygen(ZZ)
sage: sp = S.map_reduce(
....: map_function=lambda z: x**len(z),
....: reduce_function=lambda x, y: x + y,
....: reduce_init=0)
sage: sp
65536*x^16 + 32768*x^15 + 16384*x^14 + 8192*x^13 + 4096*x^12
+ 2048*x^11 + 1024*x^10 + 512*x^9 + 256*x^8 + 128*x^7 + 64*x^6
+ 32*x^5 + 16*x^4 + 8*x^3 + 4*x^2 + 2*x + 1
This is of course `\sum_{i=0}^{16} (2x)^i`::
sage: sp == sum((2*x)^i for i in range(17))
True
Here is another example where we count permutations of size `\leq 8` (here
we use the default values)::
sage: S = RecursivelyEnumeratedSet(
....: [[]],
....: lambda l: ([l[:i] + [len(l)] + l[i:]
....: for i in range(len(l) + 1)] if len(l) < 8 else []),
....: structure='forest',
....: enumeration='depth')
sage: x = polygen(ZZ)
sage: sp = S.map_reduce(lambda z: x**len(z)); sp
40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
This is of course `\sum_{i=0}^{8} i! x^i`::
sage: sp == sum(factorial(i)*x^i for i in range(9))
True
* **Post Processing.** We now demonstrate the use of ``post_process``. We
  generate the permutations as previously, but we only perform the map/reduce
computation on those of even ``len``. Of course we get the even part of the
previous generating series::
sage: S = RecursivelyEnumeratedSet(
....: [[]],
....: lambda l: ([l[:i] + [len(l) + 1] + l[i:]
....: for i in range(len(l) + 1)] if len(l) < 8 else []),
....: post_process=lambda l: l if len(l) % 2 == 0 else None,
....: structure='forest',
....: enumeration='depth')
sage: sp = S.map_reduce(lambda z: x**len(z)); sp
40320*x^8 + 720*x^6 + 24*x^4 + 2*x^2 + 1
This is also useful for example to call a constructor on the generated
elements::
sage: S = RecursivelyEnumeratedSet(
....: [[]],
....: lambda l: ([l[:i] + [len(l) + 1] + l[i:]
....: for i in range(len(l) + 1)] if len(l) < 5 else []),
....: post_process=lambda l: Permutation(l) if len(l) == 5 else None,
....: structure='forest',
....: enumeration='depth')
sage: x = polygen(ZZ)
sage: sp = S.map_reduce(lambda z: x**z.number_of_inversions()); sp
x^10 + 4*x^9 + 9*x^8 + 15*x^7 + 20*x^6 + 22*x^5 + 20*x^4 + 15*x^3 + 9*x^2 + 4*x + 1
We get here a polynomial which is the `q`-factorial (in the variable `x`) of `5`,
that is, `\prod_{i=1}^{5} \frac{1-x^i}{1-x}`::
sage: x = polygen(ZZ)
sage: prod((1-x^i)//(1-x) for i in range(1, 6))
x^10 + 4*x^9 + 9*x^8 + 15*x^7 + 20*x^6 + 22*x^5 + 20*x^4 + 15*x^3 + 9*x^2 + 4*x + 1
Compare::
sage: from sage.combinat.q_analogues import q_factorial
sage: q_factorial(5)
q^10 + 4*q^9 + 9*q^8 + 15*q^7 + 20*q^6 + 22*q^5 + 20*q^4 + 15*q^3 + 9*q^2 + 4*q + 1
* **Listing the objects.** One can also compute the list of objects in a
:class:`RecursivelyEnumeratedSet of forest type<sage.combinat.backtrack.SearchForest>`
using :class:`RESetMapReduce`. As an example, we compute the set of numbers
between 1 and 63, generated by their binary expansion::
sage: S = RecursivelyEnumeratedSet(
....: [1],
....: lambda l: [(l<<1)|0, (l<<1)|1] if l < 1<<5 else [],
....: structure='forest',
....: enumeration='depth')
Here is the list computed without :class:`RESetMapReduce`::
sage: serial = list(S)
sage: serial
[1, 2, 4, 8, 16, 32, 33, 17, 34, 35, 9, 18, 36, 37, 19, 38, 39, 5, 10,
20, 40, 41, 21, 42, 43, 11, 22, 44, 45, 23, 46, 47, 3, 6, 12, 24, 48,
49, 25, 50, 51, 13, 26, 52, 53, 27, 54, 55, 7, 14, 28, 56, 57, 29, 58,
59, 15, 30, 60, 61, 31, 62, 63]
Here is how to perform the parallel computation. The order of the lists
depends on the synchronisation of the various computation processes and
therefore should be considered as random::
sage: parall = S.map_reduce(lambda x: [x], lambda x, y: x + y, [])
sage: parall # random
[1, 3, 7, 15, 31, 63, 62, 30, 61, 60, 14, 29, 59, 58, 28, 57, 56, 6, 13,
27, 55, 54, 26, 53, 52, 12, 25, 51, 50, 24, 49, 48, 2, 5, 11, 23, 47,
46, 22, 45, 44, 10, 21, 43, 42, 20, 41, 40, 4, 9, 19, 39, 38, 18, 37,
36, 8, 17, 35, 34, 16, 33, 32]
sage: sorted(serial) == sorted(parall)
True
.. _advanced-use:
Advanced use
------------
Fine control over the execution of a map/reduce computation is achieved
via parameters passed to the :meth:`RESetMapReduce.run` method.
The following three parameters can be used:
- ``max_proc`` -- (integer, default: ``None``) if given, the
maximum number of worker processors to use. The actual number
is also bounded by the value of the environment variable
``SAGE_NUM_THREADS`` (the number of cores by default).
- ``timeout`` -- a timeout on the computation (default: ``None``)
- ``reduce_locally`` -- whether the workers should reduce their work
  locally or send results to the master as soon as possible.
  See :class:`RESetMapReduceWorker` for details.
Here is an example of how to deal with a timeout::
sage: from sage.parallel.map_reduce import (RESetMPExample, AbortError)
sage: EX = RESetMPExample(maxl=100)
sage: try:
....: res = EX.run(timeout=0.01)
....: except AbortError:
....: print("Computation timeout")
....: else:
....: print("Computation normally finished")
....: res
Computation timeout
The following should not time out even on a very slow machine::
sage: EX = RESetMPExample(maxl=8)
sage: try:
....: res = EX.run(timeout=60)
....: except AbortError:
....: print("Computation Timeout")
....: else:
....: print("Computation normally finished")
....: res
Computation normally finished
40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
As for ``reduce_locally``, one should not see any difference, except for speed
during normal usage. Most of the time one should leave it set to ``True``,
unless one sets up a mechanism to consume the partial results as soon as they
arrive. See :class:`RESetParallelIterator` and in particular the ``__iter__``
method for an example of consumer use.
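For instance, :class:`RESetParallelIterator` (defined at the end of this file)
sets ``reduce_locally=False`` so that each finished branch is sent back to the
master immediately and can be consumed as it arrives; its doctests below give
the following usage, repeated here for convenience::
    sage: from sage.parallel.map_reduce import RESetParallelIterator
    sage: S = RESetParallelIterator([[]],
    ....:     lambda l: [l + [0], l + [1]] if len(l) < 15 else [])
    sage: sum(1 for _ in S)
    65535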
.. _profiling:
Profiling
---------
It is possible to profile a map/reduce computation. First we create a
:class:`RESetMapReduce` object::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce(
....: roots=[[]],
....: children=lambda l: [l + [0], l + [1]] if len(l) < 16 else [],
....: map_function=lambda x: 1,
....: reduce_function=lambda x, y: x + y,
....: reduce_init=0)
The profiling is activated by the ``profile`` parameter. The value provided
should be a prefix (including a possible directory) for the profile dump::
sage: prof = tmp_dir('RESetMR_profile') + 'profcomp'
sage: res = S.run(profile=prof) # random
[RESetMapReduceWorker-1:58] (20:00:41.444) Profiling in
/home/user/.sage/temp/.../32414/RESetMR_profilewRCRAx/profcomp1
...
[RESetMapReduceWorker-1:57] (20:00:41.444) Profiling in
/home/user/.sage/temp/.../32414/RESetMR_profilewRCRAx/profcomp0
...
sage: res
131071
In this example, the profiles have been dumped in files such as
``profcomp0``. One can then load and print them as follows. See
:class:`cProfile.Profile` for more details::
sage: import cProfile, pstats
sage: st = pstats.Stats(prof+'0')
sage: st.strip_dirs().sort_stats('cumulative').print_stats() # random
...
Ordered by: cumulative time
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.023 0.023 0.432 0.432 map_reduce.py:1211(run_myself)
11968 0.151 0.000 0.223 0.000 map_reduce.py:1292(walk_branch_locally)
...
<pstats.Stats instance at 0x7fedea40c6c8>
.. SEEALSO::
`The Python Profilers <https://docs.python.org/2/library/profile.html>`_
    for more detail on profiling in Python.
.. _logging:
Logging
-------
The computation progress is logged through a :class:`logging.Logger` in
``sage.parallel.map_reduce.logger`` together with :class:`logging.StreamHandler`
and a :class:`logging.Formatter`. They are currently configured to print
warning messages to the console.
.. SEEALSO::
`Logging facility for Python <https://docs.python.org/2/library/logging.html>`_
for more detail on logging and log system configuration.
.. note::
Calls to logger which involve printing the node are commented out in the
code, because the printing (to a string) of the node can be very time
consuming depending on the node and it happens before the decision whether
the logger should record the string or drop it.
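For example, to see the informational messages emitted during a run, one can
raise the level of this logger with a standard :mod:`logging` call (shown
untested so that it does not affect the doctest output)::
    sage: import logging                              # not tested
    sage: from sage.parallel import map_reduce        # not tested
    sage: map_reduce.logger.setLevel(logging.INFO)    # not tested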
.. _protocol-description:
How does it work?
------------------
The scheduling algorithm we use here is an adaptation of :wikipedia:`Work_stealing`:
In a work stealing scheduler, each processor in a computer system has a
    queue of work items (computational tasks, threads) to perform. [...] Each
    work item is initially put on the queue of the processor executing the
work item. When a processor runs out of work, it looks at the queues of
other processors and "steals" their work items. In effect, work stealing
distributes the scheduling work over idle processors, and as long as all
processors have work to do, no scheduling overhead occurs.
For communication we use Python's basic :mod:`multiprocessing` module. We
first describe the different actors and communication tools used by the
system. The work is done under the coordination of a **master** object (an
instance of :class:`RESetMapReduce`) by a bunch of **worker** objects
(instances of :class:`RESetMapReduceWorker`).
Each running map reduce instance works on a :class:`RecursivelyEnumeratedSet of
forest type<sage.combinat.backtrack.SearchForest>` called here `C` and is
coordinated by a :class:`RESetMapReduce` object called the **master**. The
master is in charge of launching the work, gathering the results and cleaning
up at the end of the computation. It doesn't perform any computation
associated with the generation of the elements of `C` nor with the computation
of the mapped function. It does, however, occasionally perform a reduce, but most
reducing is by default done by the workers. Also, thanks to the work-stealing
algorithm, the master is only involved in detecting the termination of the
computation; all the load balancing is done at the level of the workers.
Workers are instances of :class:`RESetMapReduceWorker`. They are responsible
for doing the actual computations: element generation, mapping and reducing.
They are also responsible for the load balancing thanks to work-stealing.
Here is a description of the attributes of the **master** relevant to the
map-reduce protocol:
- ``_results`` -- a :class:`~multiprocessing.queues.SimpleQueue` where
the master gathers the results sent by the workers.
- ``_active_tasks`` -- a :class:`~multiprocessing.Semaphore` recording
the number of active tasks. The work is complete when it reaches 0.
- ``_done`` -- a :class:`~multiprocessing.Lock` which ensures that
shutdown is done only once.
- ``_aborted`` -- a :func:`~multiprocessing.Value` storing a shared
:class:`ctypes.c_bool` which is ``True`` if the computation was aborted
before all workers ran out of work.
- ``_workers`` -- a list of :class:`RESetMapReduceWorker` objects.
Each worker is identified by its position in this list.
Each **worker** is a process (:class:`RESetMapReduceWorker` inherits from
:class:`~multiprocessing.Process`) which contains:
- ``worker._iproc`` -- the identifier of the worker that is its position in the
master's list of workers
- ``worker._todo`` -- a :class:`collections.deque` storing the nodes of the
  worker. It is used as a stack by the worker. Thieves steal from the bottom of
  this queue.
- ``worker._request`` -- a :class:`~multiprocessing.queues.SimpleQueue` storing
  the steal requests submitted to ``worker``.
- ``worker._read_task``, ``worker._write_task`` -- a
  :class:`~multiprocessing.queues.Pipe` used to transfer nodes during steals.
- ``worker._thief`` -- a :class:`~threading.Thread` which is in charge of
stealing from ``worker._todo``.
Here is a schematic of the architecture:
.. _figure-map_reduce_arch:
.. figure:: ../../media/map_reduce_arch.png
How thefts are performed
------------------------
During normal time, that is, when all workers are active, a worker ``W`` is
iterating through a loop inside
:meth:`RESetMapReduceWorker.walk_branch_locally`. Work nodes are taken from,
and new nodes are appended to, ``W._todo``. When a worker ``W``
runs out of work, that is, when ``worker._todo`` is empty, it tries to steal
some work (i.e., a node) from another worker. This is performed in the
:meth:`RESetMapReduceWorker.steal` method.
From the point of view of ``W``, here is what happens:
- ``W`` signals to the master that it is idle: ``master._signal_task_done``;
- ``W`` chooses a victim ``V`` at random;
- ``W`` sends a request to ``V``: it puts its identifier into ``V._request``;
- ``W`` tries to read a node from ``W._read_task``. Then three things may happen:
+ a proper node is read. Then the theft was a success and ``W`` starts
working locally on the received node.
+ ``None`` is received. This means that ``V`` was idle. Then ``W`` tries
another victim.
+ :exc:`AbortError` is received. This means either that the computation was
aborted or that it simply succeeded and that no more work is required by
``W``. Therefore an :exc:`AbortError` exception is raised leading ``W`` to
shutdown.
We now describe the protocol on the victim's side. Each worker process contains
a :class:`Thread`, which we call ``T`` for thief, which acts like some kind of
Trojan horse during theft. It is normally blocked waiting for a steal request.
From the point of view of ``V`` and ``T``, here is what happens:
- during normal time, ``T`` is blocked waiting on ``V._request``;
- upon steal request, ``T`` wakes up receiving the identification of ``W``;
- ``T`` signals to the master that a new task is starting by
``master._signal_task_start``;
- Two things may happen depending on whether the queue ``V._todo`` is empty or not.
Remark that due to the GIL, there is no parallel execution between the
victim ``V`` and its thief thread ``T``.
+ If ``V._todo`` is empty, then ``None`` is answered on
    ``W._write_task``. The end of the task is immediately signaled to the master
through ``master._signal_task_done``.
+ Otherwise, a node is removed from the bottom of ``V._todo``. The node is
sent to ``W`` on ``W._write_task``. The task will be ended by ``W``, that
is, when finished working on the subtree rooted at the node, ``W`` will
call ``master._signal_task_done``.
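The two-ended discipline on the ``_todo`` deque can be illustrated with a
small plain-Python sketch (this is only a schematic illustration of how the
deque is used, not part of the Sage code)::
    from collections import deque
    todo = deque([1, 2, 3, 4])    # the victim's private stack of work nodes
    owner_node = todo.pop()       # the owner works depth-first from the right end
    stolen_node = todo.popleft()  # a thief steals the oldest node from the left end
    assert (owner_node, stolen_node) == (4, 1)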
The end of the computation
--------------------------
To detect when a computation is finished, a synchronized integer is kept which
counts the number of active tasks. This is essentially a semaphore but
semaphores are broken on Darwin OSes so we ship two implementations depending
on the OS (see :class:`ActiveTaskCounter` and :class:`ActiveTaskCounterDarwin`
and the note below).
When a worker finishes working on a task, it calls
``master._signal_task_done``. This decreases the task counter
``master._active_tasks``. When it reaches 0, it means that there are no more
nodes: the work is completed. The worker executes ``master._shutdown``
which sends :exc:`AbortError` to all ``worker._request`` and
``worker._write_task`` queues. Each worker or thief thread receiving such
a message raises the corresponding exception, therefore stopping its work. A
lock called ``master._done`` ensures that shutdown is only done once.
Finally, it is also possible to interrupt the computation before its ends,
by calling ``master.abort()``. This is achieved by setting
``master._active_tasks`` to 0 and calling ``master._shutdown``.
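Schematically, the termination logic amounts to a shared counter that triggers
the shutdown when it reaches zero. Here is a toy single-process version using
``threading.Lock`` instead of the :mod:`multiprocessing` primitives used below
(illustrative only)::
    from threading import Lock
    class ToyTaskCounter(object):
        def __init__(self, n):
            self.value = n
            self.lock = Lock()
        def task_done(self):
            with self.lock:
                self.value -= 1
                return self.value
    counter = ToyTaskCounter(2)
    counter.task_done()           # one worker finished: counter is now 1
    if counter.task_done() <= 0:  # the last worker to finish...
        pass                      # ...would call master._shutdown() here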
.. warning:: The macOS Semaphore bug
    Darwin OSes do not correctly implement POSIX's semaphore semantics.
Indeed, on these systems, acquire may fail and return False not only when
the semaphore is equal to zero but also **because someone else is trying
to acquire** at the same time. This makes using Semaphores impossible
on macOS so that on these systems we use a synchronized integer instead.
.. _examples:
Are there examples of classes?
------------------------------
Yes! Here they are:
- :class:`RESetMPExample` -- a simple basic example
- :class:`RESetParallelIterator` -- a more advanced example using a non-standard
communication configuration.
Tests
-----
Generating series for the sum of strictly decreasing lists of integers
smaller than 15::
sage: y = polygen(ZZ, 'y')
sage: R = RESetMapReduce(
....: roots=[([], 0, 0)] + [([i], i, i) for i in range(1, 15)],
....: children=lambda list_sum_last:
....: [(list_sum_last[0] + [i], list_sum_last[1] + i, i)
....: for i in range(1, list_sum_last[2])],
....: map_function=lambda li_sum_dummy: y**li_sum_dummy[1])
sage: sg = R.run()
sage: sg == prod((1 + y**i) for i in range(1, 15))
True
Classes and methods
-------------------
"""
from __future__ import print_function, absolute_import
from threading import Thread
from sage.sets.recursively_enumerated_set import RecursivelyEnumeratedSet # _generic
from sage.misc.lazy_attribute import lazy_attribute
import collections
import copy
import sys
import random
import queue
import ctypes
import logging
logger = logging.getLogger(__name__)
logger.__doc__ = (
"""
A logger for :mod:`sage.parallel.map_reduce`
.. SEEALSO::
`Logging facility for Python <https://docs.python.org/2/library/logging.html>`_
for more detail on logging and log system configuration.
""")
logger.setLevel(logging.WARN)
# logger.setLevel(logging.INFO)
# logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'[%(processName)s-%(threadName)s] (%(asctime)s.%(msecs)03.f) %(message)s',
datefmt='%H:%M:%S')
ch.setFormatter(formatter)
logger.addHandler(ch)
# Set up a multiprocessing context to use for this module (using the
# 'fork' method, which is basically the same as on Python 2)
import multiprocessing as mp
mp = mp.get_context('fork')
def proc_number(max_proc=None):
r"""
Return the number of processes to use.
INPUT:
- ``max_proc`` -- an upper bound on the number of processes or
``None``.
EXAMPLES::
sage: from sage.parallel.map_reduce import proc_number
sage: proc_number() # random
8
sage: proc_number(max_proc=1)
1
sage: proc_number(max_proc=2) in (1, 2)
True
"""
from sage.parallel.ncpus import ncpus
n = ncpus()
if max_proc is None:
return n
else:
return min(max_proc, n)
class AbortError(Exception):
r"""
Exception for aborting parallel computations.
This is used both as exception or as abort message.
TESTS::
sage: from sage.parallel.map_reduce import AbortError
sage: raise AbortError
Traceback (most recent call last):
...
AbortError
"""
pass
class ActiveTaskCounterDarwin(object):
r"""
Handling the number of active tasks.
A class for handling the number of active tasks in a distributed
computation process. This is essentially a semaphore, but Darwin OSes
do not correctly implement POSIX's semaphore semantic. So we use
a shared integer with a lock.
"""
def __init__(self, task_number):
r"""
TESTS::
sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
sage: t = ATC(4)
sage: TestSuite(t).run(skip="_test_pickling", verbose=True)
running ._test_new() . . . pass
"""
self._active_tasks = mp.Value(ctypes.c_int, task_number)
self._lock = mp.Lock()
def __repr__(self):
"""
TESTS::
sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
sage: ATC(4)
ActiveTaskCounter(value=4)
"""
return "ActiveTaskCounter(value=%s)" % (self._active_tasks.value)
def task_start(self):
r"""
Increment the task counter by one.
OUTPUT:
Calling :meth:`task_start` on a zero or negative counter returns 0,
        otherwise it increments the counter and returns its new value.
EXAMPLES::
sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
sage: c = ATC(4); c
ActiveTaskCounter(value=4)
sage: c.task_start()
5
sage: c
ActiveTaskCounter(value=5)
Calling :meth:`task_start` on a zero counter does nothing::
sage: c = ATC(0)
sage: c.task_start()
0
sage: c
ActiveTaskCounter(value=0)
"""
logger.debug("_signal_task_start called")
with self._lock:
            # The following test is not necessary but it allows active thieves to
# stop before receiving the poison pill.
if self._active_tasks.value <= 0:
return 0
self._active_tasks.value += 1
return self._active_tasks.value
def task_done(self):
r"""
Decrement the task counter by one.
OUTPUT:
Calling :meth:`task_done` decrements the counter and returns
its new value.
EXAMPLES::
sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
sage: c = ATC(4); c
ActiveTaskCounter(value=4)
sage: c.task_done()
3
sage: c
ActiveTaskCounter(value=3)
sage: c = ATC(0)
sage: c.task_done()
-1
"""
logger.debug("_signal_task_done called")
with self._lock:
self._active_tasks.value -= 1
return self._active_tasks.value
def abort(self):
r"""
Set the task counter to zero.
EXAMPLES::
sage: from sage.parallel.map_reduce import ActiveTaskCounterDarwin as ATC
sage: c = ATC(4); c
ActiveTaskCounter(value=4)
sage: c.abort()
sage: c
ActiveTaskCounter(value=0)
"""
with self._lock:
self._active_tasks.value = 0
class ActiveTaskCounterPosix(object):
r"""
Handling the number of active tasks.
A class for handling the number of active tasks in a distributed
computation process. This is the standard implementation on POSIX
compliant OSes. We essentially wrap a semaphore.
.. note::
        A legitimate question is whether there is a need to keep the two
implementations. I ran the following experiment on my machine::
S = RecursivelyEnumeratedSet(
[[]],
lambda l: ([l[:i] + [len(l)] + l[i:]
for i in range(len(l) + 1)]
if len(l) < NNN else []),
structure='forest',
enumeration='depth')
%time sp = S.map_reduce(lambda z: x**len(z)); sp
        For NNN = 10, averaging a dozen runs, I got:
- Posix compliant implementation: 17.04 s
- Darwin implementation: 18.26 s
        So there is a non-negligible overhead. It will probably be worth it
if we try to cythonize the code. So I'm keeping both implementations.
"""
def __init__(self, task_number):
r"""
TESTS::
sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
sage: t = ATC(4)
sage: TestSuite(t).run(skip="_test_pickling", verbose=True)
running ._test_new() . . . pass
"""
self._active_tasks = mp.Semaphore(task_number)
def __repr__(self):
"""
TESTS::
sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
sage: ATC(4)
ActiveTaskCounter(value=4)
"""
return "ActiveTaskCounter(value=%s)" % (self._active_tasks.get_value())
def task_start(self):
r"""
Increment the task counter by one.
OUTPUT:
Calling :meth:`task_start` on a zero or negative counter returns 0,
otherwise it increments the counter and returns its new value.
EXAMPLES::
sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
sage: c = ATC(4); c
ActiveTaskCounter(value=4)
sage: c.task_start()
5
sage: c
ActiveTaskCounter(value=5)
Calling :meth:`task_start` on a zero counter does nothing::
sage: c = ATC(0)
sage: c.task_start()
0
sage: c
ActiveTaskCounter(value=0)
"""
logger.debug("_signal_task_start called")
        # The following test is not necessary but it allows active thieves to
# stop before receiving the poison pill.
if self._active_tasks._semlock._is_zero():
return 0
self._active_tasks.release()
return self._active_tasks.get_value()
task_start.__doc__ = ActiveTaskCounterDarwin.task_start.__doc__
def task_done(self):
r"""
Decrement the task counter by one.
OUTPUT:
Calling :meth:`task_done` decrements the counter and returns
its new value.
EXAMPLES::
sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
sage: c = ATC(4); c
ActiveTaskCounter(value=4)
sage: c.task_done()
3
sage: c
ActiveTaskCounter(value=3)
sage: c = ATC(0)
sage: c.task_done()
-1
"""
logger.debug("_signal_task_done called")
# We test if the semaphore counting the number of active tasks is
# becoming negative. This should not happen in normal
# computations. However, in case of abort, we artificially put the
        # semaphore to 0 to stop the computation, so this test is needed.
if not self._active_tasks.acquire(False):
return -1
return self._active_tasks.get_value()
def abort(self):
r"""
Set the task counter to zero.
EXAMPLES::
sage: from sage.parallel.map_reduce import ActiveTaskCounter as ATC
sage: c = ATC(4); c
ActiveTaskCounter(value=4)
sage: c.abort()
sage: c
ActiveTaskCounter(value=0)
"""
while self._active_tasks.acquire(False):
pass
ActiveTaskCounter = (ActiveTaskCounterDarwin if sys.platform == 'darwin'
else ActiveTaskCounterPosix)
# ActiveTaskCounter = ActiveTaskCounterDarwin # to debug Darwin implementation
class RESetMapReduce(object):
r"""
Map-Reduce on recursively enumerated sets.
INPUT:
Description of the set:
- either ``forest=f`` -- where ``f`` is a :class:`RecursivelyEnumeratedSet
of forest type<sage.combinat.backtrack.SearchForest>`
- or a triple ``roots, children, post_process`` as follows
      - ``roots=r`` -- the roots of the enumeration
- ``children=c`` -- a function iterating through children nodes,
given a parent node
- ``post_process=p`` -- a post-processing function
The option ``post_process`` allows for customizing the nodes that
are actually produced. Furthermore, if ``post_process(x)`` returns ``None``,
then ``x`` won't be output at all.
Description of the map/reduce operation:
    - ``map_function=f`` -- (default: ``None``)
    - ``reduce_function=red`` -- (default: ``None``)
    - ``reduce_init=init`` -- (default: ``None``)
.. SEEALSO::
:mod:`the Map/Reduce module <sage.parallel.map_reduce>` for
details and examples.
"""
def __init__(self,
roots=None,
children=None,
post_process=None,
map_function=None,
reduce_function=None,
reduce_init=None,
forest=None):
r"""
TESTS::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: R = RESetMapReduce([[]], lambda: [[]])
sage: R
<sage.parallel.map_reduce.RESetMapReduce object at 0x...>
To silence the coverage checker::
sage: TestSuite(R).run(skip=['_test_pickling'])
"""
if forest is not None:
if not all(x is None for x in (roots, children, post_process)):
raise ValueError("forest arg is incompatible with roots, children and post_process")
self._forest = forest
self._roots = forest._roots
self.children = forest.children
if hasattr(forest, 'post_process'):
self.post_process = forest.post_process
else:
if roots is not None: self._roots = roots
if children is not None: self.children = children
if post_process is not None: self.post_process = post_process
if map_function is not None: self.map_function = map_function
if reduce_function is not None: self.reduce_function = reduce_function
if reduce_init is not None: self._reduce_init = reduce_init
self._profile = None
@lazy_attribute
def _forest(self):
r"""
Return the forest underlying the map-reduce computation.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: EX = RESetMPExample()
sage: f = EX._forest; f
An enumerated set with a forest structure
sage: f.an_element()
[]
"""
return RecursivelyEnumeratedSet(
self.roots(),
self.children,
post_process=self.post_process,
structure='forest',
enumeration='depth')
def roots(self):
r"""
Return the roots of ``self``.
OUTPUT:
An iterable of nodes.
.. note:: This should be overloaded in applications.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce(42)
sage: S.roots()
42
"""
return self._roots
def map_function(self, o):
r"""
        Return the value of the map function of ``self`` applied to ``o``.
INPUT:
- ``o`` -- a node
OUTPUT:
By default ``1``.
.. note:: This should be overloaded in applications.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce()
sage: S.map_function(7)
1
sage: S = RESetMapReduce(map_function = lambda x: 3*x + 5)
sage: S.map_function(7)
26
"""
return 1
def reduce_function(self, a, b):
r"""
        Return the value of the reduce function of ``self`` applied to ``a`` and ``b``.
INPUT:
- ``a``, ``b`` -- two values to be reduced
OUTPUT:
By default the sum of ``a`` and ``b``.
.. note:: This should be overloaded in applications.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce()
sage: S.reduce_function(4, 3)
7
sage: S = RESetMapReduce(reduce_function=lambda x,y: x*y)
sage: S.reduce_function(4, 3)
12
"""
return a+b
def post_process(self, a):
r"""
Return the image of ``a`` under the post-processing function for ``self``.
INPUT:
- ``a`` -- a node
With the default post-processing function, which is the identity function,
this returns ``a`` itself.
.. note:: This should be overloaded in applications.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce()
sage: S.post_process(4)
4
sage: S = RESetMapReduce(post_process=lambda x: x*x)
sage: S.post_process(4)
16
"""
return a
_reduce_init = 0
def reduce_init(self):
r"""
Return the initial element for a reduction.
.. note:: This should be overloaded in applications.
TESTS::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce()
sage: S.reduce_init()
0
sage: S = RESetMapReduce(reduce_init = 2)
sage: S.reduce_init()
2
"""
return copy.copy(self._reduce_init)
def setup_workers(self, max_proc=None, reduce_locally=True):
r"""
        Set up the communication channels.
INPUT:
- ``max_proc`` -- (integer) an upper bound on the number of
worker processes.
        - ``reduce_locally`` -- whether the workers should reduce their work
          locally or send results to the master as soon as possible.
See :class:`RESetMapReduceWorker` for details.
TESTS::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce()
sage: S.setup_workers(2)
sage: S._results
<multiprocessing.queues.Queue object at 0x...>
sage: len(S._workers)
2
"""
self._nprocess = proc_number(max_proc)
self._results = mp.Queue()
self._active_tasks = ActiveTaskCounter(self._nprocess)
self._done = mp.Lock()
self._aborted = mp.Value(ctypes.c_bool, False)
sys.stdout.flush()
sys.stderr.flush()
self._workers = [RESetMapReduceWorker(self, i, reduce_locally)
for i in range(self._nprocess)]
def start_workers(self):
r"""
Launch the workers.
The workers should have been created using :meth:`setup_workers`.
TESTS::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: def children(x):
....: sleep(0.5)
....: return []
sage: S = RESetMapReduce(roots=[1], children=children)
sage: S.setup_workers(2)
sage: S.start_workers()
sage: all(w.is_alive() for w in S._workers)
True
sage: sleep(1)
sage: all(not w.is_alive() for w in S._workers)
True
Cleanup::
sage: S.finish()
"""
if self._nprocess == 0:
raise ValueError("No process connected")
logger.info("Starting work with %s processes", self._nprocess)
logger.debug("Distributing tasks")
for i, task in enumerate(self.roots()):
self._workers[i % len(self._workers)]._todo.append(task)
logger.debug("Starting processes")
sys.stdout.flush()
sys.stderr.flush()
for w in self._workers: w.start()
def get_results(self, timeout=None):
r"""
Get the results from the queue.
OUTPUT:
The reduction of the results of all the workers, that is, the result of
the map/reduce computation.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMapReduce
sage: S = RESetMapReduce()
sage: S.setup_workers(2)
sage: for v in [1, 2, None, 3, None]: S._results.put(v)
sage: S.get_results()
6
Cleanup::
sage: del S._results, S._active_tasks, S._done, S._workers
"""
res = self.reduce_init()
active_proc = self._nprocess
while active_proc > 0:
try:
logger.debug('Waiting on results; active_proc: %s, '
'timeout: %s, aborted: %s' %
(active_proc, timeout, self._aborted.value))
newres = self._results.get(timeout=timeout)
except queue.Empty:
logger.debug('Timed out waiting for results; aborting')
# If we timed out here then the abort timer should have
# already fired, but just in case it didn't (or is in
# progress) wait for it to finish
self._timer.join()
return
if newres is not None:
logger.debug("Got one result")
res = self.reduce_function(res, newres)
else:
active_proc -= 1
return res
def finish(self):
r"""
Destroy the workers and all the communication objects.
Communication statistics are gathered before destroying the workers.
TESTS::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: S = RESetMPExample(maxl=5)
sage: S.setup_workers(2) # indirect doctest
sage: S._workers[0]._todo.append([])
sage: for w in S._workers: w.start()
sage: _ = S.get_results()
sage: S._shutdown()
sage: S.print_communication_statistics()
Traceback (most recent call last):
...
AttributeError: 'RESetMPExample' object has no attribute '_stats'
sage: S.finish()
sage: S.print_communication_statistics()
#proc: ...
...
sage: _ = S.run() # cleanup
.. SEEALSO:: :meth:`print_communication_statistics`
"""
if not self._aborted.value:
logger.debug("Joining worker processes...")
for worker in self._workers:
logger.debug("Joining %s" % worker.name)
worker.join()
logger.debug("Joining done")
else:
logger.debug("Killing worker processes...")
for worker in self._workers:
logger.debug("Terminating %s" % worker.name)
worker.terminate()
logger.debug("Killing done")
del self._results, self._active_tasks, self._done
self._get_stats()
del self._workers
def abort(self):
r"""
Abort the current parallel computation.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator([[]],
....: lambda l: [l + [0], l + [1]] if len(l) < 17 else [])
sage: it = iter(S)
sage: next(it) # random
[]
sage: S.abort()
sage: hasattr(S, 'work_queue')
False
Cleanup::
sage: S.finish()
"""
logger.info("Abort called")
self._aborted.value = True
self._active_tasks.abort()
self._shutdown()
def _shutdown(self):
r"""
Shutdown the workers.
Sends a poison pill to all workers and their thief thread.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator( [[]],
....: lambda l: [l+[0], l+[1]] if len(l) < 20 else [])
sage: S.setup_workers(2)
sage: for w in S._workers: w.start()
sage: S._shutdown()
Cleanup::
sage: S.finish()
"""
if self._done.acquire(False):
logger.debug("***************** FINISHED ******************")
logger.debug("Sending poison pills")
for worker in self._workers:
worker._request.put(AbortError)
for worker in self._workers:
worker._write_task.send(AbortError)
def _signal_task_start(self):
r"""
Signal a starting task.
Used by the worker to signal that a new task is starting. As soon as
        there are no more active tasks, the work is done, in which case an
:exc:`AbortError` is raised.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator( [[]],
....: lambda l: [l+[0], l+[1]] if len(l) < 20 else [])
sage: S.setup_workers(2)
sage: S._active_tasks
ActiveTaskCounter(value=2)
sage: S._signal_task_start()
sage: S._active_tasks
ActiveTaskCounter(value=3)
Signaling one time too many raises an :exc:`AbortError`::
sage: S._signal_task_done()
sage: S._signal_task_done()
sage: S._signal_task_done()
Traceback (most recent call last):
...
AbortError
"""
if self._active_tasks.task_start() == 0:
raise AbortError
def _signal_task_done(self):
r"""
Signal a task is done.
Used by the worker to signal that a task is done. As soon as
        there are no more active tasks, the work is done, in which case an
:exc:`AbortError` is raised.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator(
....: [[]],
....: lambda l: [l + [0], l + [1]] if len(l) < 20 else [])
sage: S.setup_workers(2)
sage: S._active_tasks
ActiveTaskCounter(value=2)
sage: S._signal_task_done()
sage: S._active_tasks
ActiveTaskCounter(value=1)
sage: S._signal_task_done()
Traceback (most recent call last):
...
AbortError
Cleanup::
sage: del S._results, S._active_tasks, S._done, S._workers
"""
# We test if the semaphore counting the number of active tasks is
# becoming negative. This should not happen in normal
# computations. However, in case of abort, we artificially put the
        # semaphore to 0 to stop the computation, so this test is needed.
if self._active_tasks.task_done() <= 0:
logger.debug("raising AbortError")
self._shutdown()
raise AbortError
def random_worker(self):
r"""
Return a random worker.
OUTPUT:
A worker for ``self`` chosen at random.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: from threading import Thread
sage: EX = RESetMPExample(maxl=6)
sage: EX.setup_workers(2)
sage: EX.random_worker()
<RESetMapReduceWorker(RESetMapReduceWorker-..., initial)>
sage: EX.random_worker() in EX._workers
True
Cleanup::
sage: del EX._results, EX._active_tasks, EX._done, EX._workers
"""
victim = random.randint(0, len(self._workers)-1)
return self._workers[victim]
def run(self,
max_proc=None,
reduce_locally=True,
timeout=None,
profile=None):
r"""
Run the computations.
INPUT:
- ``max_proc`` -- (integer, default: ``None``) if given, the
maximum number of worker processors to use. The actual number
is also bounded by the value of the environment variable
``SAGE_NUM_THREADS`` (the number of cores by default).
- ``reduce_locally`` -- See :class:`RESetMapReduceWorker` (default: ``True``)
- ``timeout`` -- a timeout on the computation (default: ``None``)
- ``profile`` -- directory/filename prefix for profiling, or ``None``
for no profiling (default: ``None``)
OUTPUT:
The result of the map/reduce computation or an exception
        :exc:`AbortError` if the computation was interrupted or timed out.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: EX = RESetMPExample(maxl = 8)
sage: EX.run()
40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
        Here is an example of how to deal with a timeout::
sage: from sage.parallel.map_reduce import AbortError
sage: EX = RESetMPExample(maxl = 100)
sage: try:
....: res = EX.run(timeout=0.01)
....: except AbortError:
....: print("Computation timeout")
....: else:
....: print("Computation normally finished")
....: res
Computation timeout
        The following should not time out even on a very slow machine::
sage: from sage.parallel.map_reduce import AbortError
sage: EX = RESetMPExample(maxl = 8)
sage: try:
....: res = EX.run(timeout=60)
....: except AbortError:
....: print("Computation Timeout")
....: else:
....: print("Computation normally finished")
....: res
Computation normally finished
40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
"""
self._profile=profile
self.setup_workers(max_proc, reduce_locally)
self.start_workers()
if timeout is not None:
from threading import Timer
self._timer = Timer(timeout, self.abort)
self._timer.start()
self.result = self.get_results(timeout=timeout)
if timeout is not None:
self._timer.cancel()
logger.info("Returning")
self.finish()
if self._aborted.value:
raise AbortError
else:
return self.result
def _get_stats(self):
r"""
Gather the communication statistics at the end of a run.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: S = RESetMPExample(maxl=6)
sage: S.run() # indirect doctest
720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
"""
res = []
for i in range(self._nprocess):
res.append(tuple(self._workers[i]._stats))
self._stats = res
def print_communication_statistics(self, blocksize=16):
r"""
Print the communication statistics in a nice way.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: S = RESetMPExample(maxl=6)
sage: S.run()
720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
sage: S.print_communication_statistics() # random
#proc: 0 1 2 3 4 5 6 7
reqs sent: 5 2 3 11 21 19 1 0
reqs rcvs: 10 10 9 5 1 11 9 2
- thefs: 1 0 0 0 0 0 0 0
+ thefs: 0 0 1 0 0 0 0 0
"""
res = [""] # classic trick to have a local variable shared with the
# local function (see e.g:
# https://stackoverflow.com/questions/2609518/python-nested-function-scopes).
def pstat(name, start, end, ist):
res[0] += ("\n" + name + " ".join(
"%4i" % (self._stats[i][ist]) for i in range(start, end)))
for start in range(0, self._nprocess, blocksize):
end = min(start+blocksize, self._nprocess)
res[0] = ("#proc: " +
" ".join("%4i" % (i) for i in range(start, end)))
pstat("reqs sent: ", start, end, 0)
pstat("reqs rcvs: ", start, end, 1)
pstat("- thefs: ", start, end, 2)
pstat("+ thefs: ", start, end, 3)
print(res[0])
def run_serial(self):
r"""
Run the computation serially (mostly for tests).
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: EX = RESetMPExample(maxl = 4)
sage: EX.run_serial()
24*x^4 + 6*x^3 + 2*x^2 + x + 1
"""
import functools
return functools.reduce(self.reduce_function,
(self.map_function(x) for x in self._forest),
self.reduce_init())
class RESetMapReduceWorker(mp.Process):
"""
Worker for generate-map-reduce.
This shouldn't be called directly, but instead created by
:meth:`RESetMapReduce.setup_workers`.
INPUT:
- ``mapred`` -- the instance of :class:`RESetMapReduce` for which
this process is working.
- ``iproc`` -- the id of this worker.
    - ``reduce_locally`` -- when reducing the results. Two possible values
      are supported:
      * ``True`` -- the reducing work is done all locally; the result is
        only sent back at the end of the work. This ensures the lowest level of
        communication.
      * ``False`` -- results are sent back after each finished branch, when
        the process is asking for more work.
"""
def __init__(self, mapred, iproc, reduce_locally):
r"""
TESTS::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: EX = RESetMPExample()
sage: RESetMapReduceWorker(EX, 200, True)
<RESetMapReduceWorker(RESetMapReduceWorker-..., initial)>
"""
mp.Process.__init__(self)
self._iproc = iproc
self._todo = collections.deque()
self._request = mp.SimpleQueue() # Faster than Queue
        # Currently it is not possible to have two simultaneous reads or writes
        # on the following Pipe, so there is no need to have a queue.
self._read_task, self._write_task = mp.Pipe(duplex=False)
self._mapred = mapred
self._stats = mp.RawArray('i', 4)
self._reduce_locally = reduce_locally
def _thief(self):
r"""
        The function executed by the thief thread of this worker process.
"""
logger.debug("Thief started")
reqs = 0
thefts = 0
try:
for ireq in iter(self._request.get, AbortError):
reqs +=1
target = self._mapred._workers[ireq]
logger.debug("Got a Steal request from %s" % target.name)
self._mapred._signal_task_start()
try:
work = self._todo.popleft()
except IndexError:
target._write_task.send(None)
logger.debug("Failed Steal %s" % target.name)
self._mapred._signal_task_done()
else:
target._write_task.send(work)
logger.debug("Succesful Steal %s" % target.name)
thefts += 1
except AbortError:
logger.debug("Thief aborted")
else:
logger.debug("Thief received poison pill")
if self._mapred._aborted.value: # Computation was aborted
self._todo.clear()
else: # Check that there is no remaining work
assert len(self._todo) == 0, "Bad stop the result may be wrong"
self._stats[1] = reqs
self._stats[2] = thefts
logger.debug("Thief Exiting")
def steal(self):
r"""
Steal some node from another worker.
OUTPUT:
A node stolen from another worker chosen at random.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: from threading import Thread
sage: EX = RESetMPExample(maxl=6)
sage: EX.setup_workers(2)
sage: w0, w1 = EX._workers
sage: w0._todo.append(42)
sage: thief0 = Thread(target = w0._thief, name="Thief")
sage: thief0.start() # known bug (Trac #27537)
sage: w1.steal() # known bug (Trac #27537)
42
sage: w0._todo # known bug (Trac #27537)
deque([])
"""
self._mapred._signal_task_done()
node = None
while node is None:
victim = self._mapred.random_worker()
if victim is not self:
logger.debug("Trying to steal from %s" % victim.name)
victim._request.put(self._iproc)
self._stats[0] += 1
logger.debug("waiting for steal answer from %s" % victim.name)
node = self._read_task.recv()
# logger.debug("Request answer: %s" % (node,))
if node is AbortError:
raise AbortError
# logger.debug("Received a stolen node: %s" % (node,))
self._stats[3] += 1
return node
def run(self):
r"""
The main function executed by the worker.
Calls :meth:`run_myself` after possibly setting up parallel profiling.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: EX = RESetMPExample(maxl=6)
sage: EX.setup_workers(1)
sage: w = EX._workers[0]
sage: w._todo.append(EX.roots()[0])
sage: w.run()
sage: sleep(1)
sage: w._todo.append(None)
sage: EX.get_results()
720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
Cleanups::
sage: del EX._results, EX._active_tasks, EX._done, EX._workers
"""
profile = self._mapred._profile
if profile is not None:
import cProfile
PROFILER = cProfile.Profile()
PROFILER.runcall(self.run_myself)
output = profile + str(self._iproc)
logger.warn("Profiling in %s ..." % output)
PROFILER.dump_stats(output)
else:
self.run_myself()
def run_myself(self):
r"""
The main function executed by the worker.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: EX = RESetMPExample(maxl=6)
sage: EX.setup_workers(1)
sage: w = EX._workers[0]
sage: w._todo.append(EX.roots()[0])
sage: w.run_myself()
sage: sleep(1)
sage: w._todo.append(None)
sage: EX.get_results()
720*x^6 + 120*x^5 + 24*x^4 + 6*x^3 + 2*x^2 + x + 1
Cleanups::
sage: del EX._results, EX._active_tasks, EX._done, EX._workers
"""
logger.debug("Started")
mapred = self._mapred
reduce_init = mapred.reduce_init
results = mapred._results
self._stats[0] = 0
self._stats[3] = 0
logger.debug("Launching thief")
self._thief = Thread(target = self._thief, name="Thief")
self._thief.start()
self._res = reduce_init()
try:
while True:
try:
node = self._todo.pop()
except IndexError:
node = self.steal()
self.walk_branch_locally(node)
if not self._reduce_locally:
self.send_partial_result()
except AbortError:
logger.debug("Worker Done !")
results.put(self._res)
results.put(None)
self._thief.join()
del self._request
self._read_task.close()
self._write_task.close()
del self._read_task, self._write_task
del self._mapred
del self._stats
logger.debug("Exiting")
def send_partial_result(self):
r"""
Send results to the MapReduce process.
        Send the result stored in ``self._res`` to the master and reinitialize it to
``master.reduce_init``.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: EX = RESetMPExample(maxl=4)
sage: EX.setup_workers(1)
sage: w = EX._workers[0]
sage: w._res = 4
sage: w.send_partial_result()
sage: w._res
0
sage: EX._results.get()
4
"""
self._mapred._results.put(self._res)
self._res = self._mapred.reduce_init()
def walk_branch_locally(self, node):
r"""
Work locally.
        Perform the map/reduce computation on the subtree rooted at ``node``.
INPUT:
- ``node`` -- the root of the subtree explored.
OUTPUT:
        Nothing; the results are stored in ``self._res``.
This is where the actual work is performed.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample, RESetMapReduceWorker
sage: EX = RESetMPExample(maxl=4)
sage: w = RESetMapReduceWorker(EX, 0, True)
sage: def sync(): pass
sage: w.synchronize = sync
sage: w._res = 0
sage: w.walk_branch_locally([])
sage: w._res
x^4 + x^3 + x^2 + x + 1
sage: w.walk_branch_locally(w._todo.pop())
sage: w._res
2*x^4 + x^3 + x^2 + x + 1
sage: while True: w.walk_branch_locally(w._todo.pop())
Traceback (most recent call last):
...
IndexError: pop from an empty deque
sage: w._res
24*x^4 + 6*x^3 + 2*x^2 + x + 1
"""
mapred = self._mapred
children = mapred.children
post_process = mapred.post_process
fun = mapred.map_function
reduc = mapred.reduce_function
# logger.debug("Working on %s..." % (node,))
while True:
res = post_process(node)
if res is not None:
self._res = reduc(self._res, fun(res))
newnodes = iter(children(node))
try:
node = next(newnodes)
except StopIteration:
return
self._todo.extend(newnodes)
class RESetMPExample(RESetMapReduce):
r"""
An example of map reduce class.
INPUT:
    - ``maxl`` -- the maximum size of the permutations generated (default: `9`).
This computes the generating series of permutations counted by their size
up to size ``maxl``.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: EX = RESetMPExample()
sage: EX.run()
362880*x^9 + 40320*x^8 + 5040*x^7 + 720*x^6 + 120*x^5
+ 24*x^4 + 6*x^3 + 2*x^2 + x + 1
.. SEEALSO:: This is an example of :class:`RESetMapReduce`
"""
def __init__(self, maxl = 9):
r"""
TESTS::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: RESetMPExample()
<sage.parallel.map_reduce.RESetMPExample object at 0x...>
"""
RESetMapReduce.__init__(self)
from sage.rings.polynomial.polynomial_ring import polygen
from sage.rings.integer_ring import ZZ
self.x = polygen(ZZ, 'x')
self.maxl = maxl
def roots(self):
r"""
Return the empty permutation.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: RESetMPExample().roots()
[[]]
"""
return [[]]
def children(self, l):
r"""
Return the children of the permutation `l`.
INPUT:
- ``l`` -- a list containing a permutation
OUTPUT:
The lists with ``len(l)`` inserted at all possible positions into ``l``.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: RESetMPExample().children([1,0])
[[2, 1, 0], [1, 2, 0], [1, 0, 2]]
"""
return [ l[:i] + [len(l)] + l[i:]
for i in range(len(l)+1) ] if len(l) < self.maxl else []
def map_function(self, l):
r"""
The monomial associated to the permutation `l`.
INPUT:
- ``l`` -- a list containing a permutation
OUTPUT:
The monomial ``x^len(l)``.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetMPExample
sage: RESetMPExample().map_function([1,0])
x^2
"""
return self.x**len(l)
class RESetParallelIterator(RESetMapReduce):
r"""
A parallel iterator for recursively enumerated sets.
This demonstrates how to use :class:`RESetMapReduce` to get an iterator on
a recursively enumerated set for which the computations are done in
parallel.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator([[]],
....: lambda l: [l + [0], l + [1]] if len(l) < 15 else [])
sage: sum(1 for _ in S)
65535
"""
def map_function(self, z):
r"""
Return a singleton tuple.
INPUT:
- ``z`` -- a node
OUTPUT:
The singleton ``(z, )``.
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator( [[]],
....: lambda l: [l + [0], l + [1]] if len(l) < 15 else [])
sage: S.map_function([1, 0])
([1, 0],)
"""
return (z,)
reduce_init = tuple
def __iter__(self):
r"""
EXAMPLES::
sage: from sage.parallel.map_reduce import RESetParallelIterator
sage: S = RESetParallelIterator( [[]],
....: lambda l: [l + [0], l + [1]] if len(l) < 15 else [])
sage: it = iter(S)
sage: next(it) # random
[1, 1, 0]
sage: next(it) # random
[1, 1, 0, 1]
sage: sum(1 for _ in it)
65533
"""
self.setup_workers(reduce_locally=False)
self.start_workers()
active_proc = self._nprocess
while True:
newres = self._results.get()
if newres is not None:
logger.debug("Got some results")
for r in newres:
yield r
else:
active_proc -= 1
if active_proc == 0:
break
self.finish()
|
numpipe.py
|
"""
Defines the scheduler class, which does the following:
* keeps track of all cached functions
* parses arguments when the program is run
* executes functions (possibly in parallel)
* caches results as they come in
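A rough usage sketch (illustrative only; the ``@job.cache`` registration
decorator is assumed here and is not defined in this file, and the exact
shape of the cached data depends on the rest of numpipe):
    from numpipe import scheduler
    job = scheduler()                 # cache files go next to the script by default
    @job.cache                        # assumed decorator registering a cached function
    def simulate(n=100):
        yield dict(result=n * 2)      # assumption: yielded dicts are what gets cached
    if __name__ == '__main__':
        job.run()                     # execute blocks whose cache is missing or requested on the CLI
        data = job.load(simulate)     # later: load the cached output of `simulate`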
"""
import h5py
import os
import sys
import pathlib
import logging
from inspect import signature
from multiprocessing import Pool, Value
import threading
import socket
import pickle
import numpy as np
import subprocess
from time import sleep, time
from functools import partial
from typing import Iterable
import types
import matplotlib.pyplot as plt
import traceback
from copy import copy
import itertools
import warnings
import numpipe
from numpipe import slurm, display, notify, mpl_tools, config
from numpipe.execution import deferred_function, target, block, execute_block, execute_block_debug
from numpipe.utility import doublewrap
from numpipe.parser import run_parser
from numpipe.networking import recv_msg,send_msg
USE_SERVER = False
class scheduler:
"""Deferred function evaluation and access to cached function output"""
def __init__(self, dirpath=None):
warnings.simplefilter("default")
self.blocks = dict()
self.instances = dict()
self.instance_counts = dict()
self.instance_dependency = dict()
self.at_end_functions = dict()
self.animations = dict()
if dirpath is None:
self.dirpath = sys.path[0]
else:
if dirpath[0] not in ('/', '~', '$'):
self.dirpath = os.path.join(sys.path[0], dirpath)
else:
self.dirpath = dirpath
self.dirpath = os.path.expanduser(self.dirpath)
self.dirpath = os.path.expandvars(self.dirpath)
pathlib.Path(self.dirpath).mkdir(parents=False, exist_ok=True)
self.filename = os.path.splitext(os.path.basename(sys.argv[0]))[0]
if USE_SERVER:
address = ('localhost', 6000)
self.pipe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.pipe.connect(address)
send_msg(self.pipe, pickle.dumps(['new', 'ID']))
self.complete = False
self.notifications = []
#TODO implement load all, jdefer
def load(self, function=None, instance=None, defer=False):
"""
        Load cached symbols for a particular function
Arguments:
function name of cached function (if None: load all cached functions)
instance name of instance (if None: load all instances)
defer If True, defer loading
"""
func_name = function.__name__
if not isinstance(instance, str) and isinstance(instance, Iterable):
instance = '-'.join([str(x) for x in instance])
if func_name in self.instances.keys():
if instance is None:
class load_next:
def __init__(self, labels, blocks):
self.length = len(labels)
self.labels = iter(labels)
self.blocks = blocks
def __len__(self):
return self.length
def __iter__(self):
return self
def __next__(self):
label = next(self.labels)
name = label[label.find('-')+1:]
return (name, self.blocks[label].target.load())
labels = self.get_labels(func_name)
return load_next(labels, self.blocks)
else:
label = f'{func_name}-{instance}'
else:
label = func_name
return self.blocks[label].target.load()
def execute(self):
warnings.warn('use scheduler.run() instead of scheduler.execute()', DeprecationWarning)
self.run()
def run(self):
"""Run the requested cached functions and at-end functions"""
self._init_logging()
self.args = run_parser()
numpipe._pbars.mininterval = self.args.mininterval
numpipe._pbars.character = config.get_config()['progress']['character']
self.fix_block_names()
if self.args.notify_message is not None:
self.notifications.append(partial(notify.send_message_from, self.args.notify_message, self.filename))
### display only event
if self.args.action == 'display':
self.display_functions()
return
if self.args.action == 'clean':
self.clean()
return
if self.args.delete is not None:
self.delete()
return
self.num_blocks_executed = 0
if not self.args.at_end:
### determine which functions to execute based on file and command line
if self.args.rerun is None:
blocks_to_execute = {name: block for name, block in self.blocks.items() if not block.target.exists()}
elif len(self.args.rerun) == 0:
blocks_to_execute = self.blocks
else:
blocks_to_execute = dict()
for name in self.args.rerun:
labels = self.get_labels(name)
blocks_to_execute.update({label: self.blocks[label] for label in labels})
for name in self.args.exclude:
for key in self.get_labels(name):
blocks_to_execute.pop(key, 0)
self.resolve_dependencies_down(blocks_to_execute)
self.num_blocks_executed = len(blocks_to_execute)
            overwritten = self._overwrite([block.target for block in blocks_to_execute.values()])
            if not overwritten:
display.abort_message()
return
self.resolve_dependencies_up(blocks_to_execute)
if self.args.action == 'slurm':
slurm.create_lookup(self.filename, blocks_to_execute.keys())
sbatch_filename = slurm.create_sbatch(self.filename, blocks_to_execute.keys(),
time=self.args.time, memory=self.args.memory)
wall_time = slurm.wall_time(self.args.time)
display.slurm_message(sbatch_filename, wall_time, self.num_blocks_executed, self.args.no_submit)
return
### execute all items
if self.args.processes is None:
nprocs = min(os.cpu_count(), self.num_blocks_executed)
else:
nprocs = min(self.args.processes, self.num_blocks_executed)
numpipe._pbars.set_njobs(self.num_blocks_executed)
t_start = time()
if self.num_blocks_executed:
display.cached_function_message()
if self.args.debug:
remaining = list(blocks_to_execute.keys())
num_blocks_ran = 0
while remaining:
to_delete = []
for name in remaining:
block = blocks_to_execute[name]
if self.ready_to_run(block):
execute_block_debug(block, name, self.instances,
self.args.cache_time, num_blocks_ran, self.num_blocks_executed)
to_delete.append(name)
block.complete = True
num_blocks_ran += 1
for name in to_delete:
remaining.remove(name)
sleep(.1)
else:
with Pool(processes=nprocs) as pool:
results = dict()
remaining = list(blocks_to_execute.keys())
num_blocks_ran = 0
num_exceptions = 0
while remaining or results:
to_delete = []
for name in remaining:
block = blocks_to_execute[name]
if self.ready_to_run(block):
results[name] = pool.apply_async(execute_block,
(block, name, self.instances, self.args.cache_time, num_blocks_ran, self.num_blocks_executed))
to_delete.append(name)
num_blocks_ran += 1
for name in to_delete:
remaining.remove(name)
to_delete = []
for name, result in results.items():
if result.ready():
try:
result.get()
except Exception as err:
num_exceptions += 1
logging.error(err)
self.blocks[name].complete = True
to_delete.append(name)
for name in to_delete:
results.pop(name)
sleep(.1)
if USE_SERVER:
t = threading.Thread(target=self.listening_thread)
t.start()
pool.close()
pool.join()
self.complete = True
if blocks_to_execute:
self.notifications.append(partial(notify.send_finish_message,
filename=self.filename,
njobs=len(blocks_to_execute),
time=time() - t_start,
num_exceptions=num_exceptions))
if USE_SERVER:
t.join()
self.pipe.close()
display.cached_function_summary(self.num_blocks_executed, num_exceptions)
numpipe._pbars.set_njobs(1)
numpipe._pbars.reset()
numpipe._pbars.auto_serial = True
### At-end functions
if self.at_end_functions and not self.args.no_at_end:
display.at_end_message()
for func in self.at_end_functions.values():
func()
else:
if self.args.notify:
self.send_notifications(check_idle=False, idle=True)
def ready_to_run(self, block):
if block.dependencies is None:
return True
for D in block.dependencies:
if not self.blocks[D].complete:
return False
return True
def listening_thread(self):
while not self.complete:
print('waiting...')
request = recv_msg(self.pipe)
print('received')
if request == 'abort':
return
if request == 'progress':
int_dict = {}
for key,value in current_iteration.items():
int_dict[key] = value.value
# acquire lock on pipe
send_msg(self.pipe, pickle.dumps(int_dict))
print('progress sent')
# @static_vars(counter=0)
def add(self, _func, _instance_name=None, **kwargs):
"""
Add an instance (a function with specified kwargs)
"""
if _instance_name is None:
_instance_name = ''
def get_new_name(name, addons):
if name and not addons:
return name
elif name and addons:
return name + '-' + '-'.join(addons)
else:
return '-'.join(addons)
kwarg_params = dict()
kwarg_params_outer = dict()
for key, val in kwargs.items():
if isinstance(val, numpipe.parameter):
if val.outer:
kwarg_params_outer[key] = val
else:
kwarg_params[key] = val
if kwarg_params_outer and kwarg_params:
args1 = [p.arg for p in kwarg_params_outer.values()]
labels1 = itertools.product(*[p.labels for p in kwarg_params_outer.values()])
for vals1 in itertools.product(*args1):
post1 = list(filter(lambda x: x, next(labels1)))
args2 = [p.arg for p in kwarg_params.values()]
labels2 = zip(*[p.labels for p in kwarg_params.values()])
for vals2 in zip(*args2):
new_kwargs = copy(kwargs)
replace = dict(zip(kwarg_params.keys(), vals2))
replace.update(zip(kwarg_params_outer.keys(), vals1))
new_kwargs.update(replace)
post2 = post1 + list(filter(lambda x: x, next(labels2)))
new_name = get_new_name(_instance_name, post2)
self.add(_func, new_name, **new_kwargs)
return #TODO: return a block_collection that can call depends() on all or be indexed
elif kwarg_params_outer:
args = [p.arg for p in kwarg_params_outer.values()]
labels = itertools.product(*[p.labels for p in kwarg_params_outer.values()])
for vals in itertools.product(*args):
new_kwargs = copy(kwargs)
replace = dict(zip(kwarg_params_outer.keys(), vals))
new_kwargs.update(replace)
post = filter(lambda x: x, next(labels))
new_name = get_new_name(_instance_name, post)
self.add(_func, new_name, **new_kwargs)
return #TODO: return a block_collection that can call depends() on all or be indexed
elif kwarg_params:
args = [p.arg for p in kwarg_params.values()]
labels = zip(*[p.labels for p in kwarg_params.values()])
for vals in zip(*args):
new_kwargs = copy(kwargs)
replace = dict(zip(kwarg_params.keys(), vals))
new_kwargs.update(replace)
post = filter(lambda x: x, next(labels))
new_name = get_new_name(_instance_name, post)
self.add(_func, new_name, **new_kwargs)
return #TODO: return a block_collection that can call depends() on all or be indexed
if _instance_name in self.instance_counts[_func.__name__]:
self.instance_counts[_func.__name__][_instance_name] += 1
else:
self.instance_counts[_func.__name__][_instance_name] = 0
count = self.instance_counts[_func.__name__][_instance_name]
if _instance_name:
block_name = f'{_func.__name__}-{_instance_name}-{count}'
else:
block_name = f'{_func.__name__}-{count}'
filepath = f'{self.dirpath}/{self.filename}-{block_name}.h5'
self.blocks[block_name] = block(
deferred_function(_func, kwargs=kwargs, num_iterations=None),
target(filepath),
dependencies=self.instance_dependency.get(_func.__name__, None))
self.instances[_func.__name__].append(block_name)
return self.blocks[block_name]
def fix_block_names(self):
for func_name, D in self.instance_counts.items():
for name, counts in D.items():
if counts == 0:
old_block_name = f'{func_name}-{name}-0' if name else f'{func_name}-0'
new_block_name = f'{func_name}-{name}' if name else f'{func_name}'
self.blocks[new_block_name] = self.blocks[old_block_name]
self.blocks.pop(old_block_name)
index = self.instances[func_name].index(old_block_name)
self.instances[func_name][index] = new_block_name
filepath = f'{self.dirpath}/{self.filename}-{new_block_name}.h5'
self.blocks[new_block_name].target.filepath = filepath
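# For example (illustrative): if a cached function `sim` was added exactly once,
# fix_block_names collapses its block label from "sim-0" back to "sim" and renames
# the target filepath to match; instance names added more than once keep their counters.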
@doublewrap
def cache(self, func, depends=None):
"""decorator to add a cached function to be conditionally ran"""
sig = signature(func)
if len(sig.parameters) == 0:
filepath = f'{self.dirpath}/{self.filename}-{func.__name__}.h5'
self.blocks[func.__name__] = block(
deferred_function(func, num_iterations=None),
target(filepath),
dependencies=depends)
else:
self.instances[func.__name__] = []
self.instance_counts[func.__name__] = dict()
if depends is not None:
if isinstance(depends, str) or not isinstance(depends, Iterable):
self.instance_dependency[func.__name__] = [depends]
else:
self.instance_dependency[func.__name__] = depends
return func
def at_end(self, func):
"""decorator to add a function to be executed at the end"""
self.at_end_functions[func.__name__] = deferred_function(func)
return func
def plots(self, func):
"""decorator to add a function to be executed at the end for plotting purposes"""
def wrap():
show_copy = plt.show
plt.show = lambda: None
mpl_tools.set_theme(self.args.theme)
send_figures = True
try:
ret = func()
if isinstance(ret, types.GeneratorType):
for anim in ret:
self.add_animation(anim)
elif ret is not None:
self.add_animation(ret)
except Exception as err:
traceback.print_exception(type(err), err, err.__traceback__)
if not plt.get_fignums():
plt.figure()
send_figures = False
message = '`@plots` threw an error before any figures were created'
else:
message = '`@plots` threw an error, some images may not be sent'
self.notifications.append(partial(notify.send_message,
message=message))
animated_figs = self.animations.keys()
if self.args.save_figs != '' or self.args.save != '':
arg = self.args.save_figs if self.args.save_figs != '' else self.args.save
for ext in self.args.save_format:
mpl_tools.save_figures(self.filename, arg,
self.args.figures, exempt=animated_figs,
ext=ext)
if self.args.save_anims != '' or self.args.save != '':
arg = self.args.save_anims if self.args.save_anims != '' else self.args.save
for fignum, anim in self.animations.items():
filename = f'{self.filename}_vid{fignum}.mp4'
filepath = mpl_tools.get_filepath(filename, arg)
mpl_tools.save_animation(anim, filepath)
if (self.num_blocks_executed > 0 or self.args.notify) and send_figures:
self.notifications.append(partial(notify.send_images,
filename=self.filename, exempt=animated_figs))
self.notifications.append(partial(notify.send_videos,
anims=self.animations.values()))
if self.args.notify:
self.send_notifications(check_idle=False, idle=True)
else:
self.send_notifications()
plt.show = show_copy
if self.args.figures is not None:
[plt.close(plt.figure(i)) for i in plt.get_fignums() if i not in self.args.figures]
plt.show()
self.at_end_functions[func.__name__] = deferred_function(wrap)
return wrap
def add_animation(self, anim):
"""add an animation to the saved animations"""
def add_single_animation(anim):
key = anim._fig.number
if key in self.animations:
self.animations[key].append(anim)
else:
self.animations[key] = [anim]
if isinstance(anim, Iterable):
for a in anim:
add_single_animation(a)
else:
add_single_animation(anim)
def send_notifications(self, **kwargs):
t = threading.Thread(target=partial(notify.send_notifications,
notifications=self.notifications,
delay=self.args.notify_delay,
**kwargs))
t.start()
def shared(self, class_type):
"""decorator to add a class for shared variables"""
return class_type
def display_functions(self):
display.display_message(self.blocks, self.instances, self.at_end_functions)
def _clean(self, filepaths):
"""clean a set of filepaths
Arguments:
filepaths list of filepaths to hdf5 files
"""
if filepaths:
if not self.args.force:
delete = display.delete_message(filepaths)
if not delete:
return False
for filepath in filepaths:
os.remove(filepath)
return True
def _overwrite(self, targets):
"""Request if existing hdf5 file should be overwriten, return True if data is deleted
Argumnets:
targets list of targets to delete
"""
targets_to_delete = list(filter(lambda t: t.exists(), targets))
filepaths = [target.filepath for target in targets_to_delete]
if filepaths:
if not self.args.force:
delete = display.delete_message(filepaths)
if not delete:
return False
for target in targets_to_delete:
target.remove()
return True
def get_labels(self, name):
"""get a list of block labels for a given name"""
if name in self.blocks.keys():
return [name]
elif name in self.instances.keys():
return self.instances[name]
elif name[-3:] == '.h5':
actual_name = name[name.find('-')+1:-3]
if actual_name in self.blocks.keys():
return [actual_name]
raise ValueError(f"Invalid argument: function '{name}' does not correspond to any cached function")
def resolve_dependencies_down(self, blocks):
for label, block in self.blocks.items():
for D in copy(block.dependencies):
all_deps = self.instances.get(D)
if all_deps is not None:
block.dependencies.remove(D)
block.dependencies.extend(all_deps)
for label, block in self.blocks.items():
for D in block.dependencies:
if label not in self.blocks[D].children:
self.blocks[D].children.append(label)
if self.args.rerun is not None and len(self.args.rerun) != 0:
# DOWN the tree
block_dependencies = blocks
if not self.args.no_deps:
while block_dependencies:
new_blocks = dict()
for label, block in block_dependencies.items():
for child in block.children:
new_blocks[child] = self.blocks[child]
blocks.update(new_blocks)
block_dependencies = new_blocks
def resolve_dependencies_up(self, blocks):
# UP the tree
block_dependencies = blocks
while block_dependencies:
new_blocks = dict()
for label, block in block_dependencies.items():
for dependency in block.dependencies:
if not self.blocks[dependency].target.exists():
new_blocks[dependency] = self.blocks[dependency]
else:
self.blocks[dependency].complete = True
blocks.update(new_blocks)
block_dependencies = new_blocks
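# For example (illustrative): rerunning a block also queues every block that depends
# on it (resolve_dependencies_down walks the children, unless no-deps is requested),
# while resolve_dependencies_up additionally queues any upstream dependency whose hdf5
# target does not exist yet and marks already-cached dependencies as complete.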
def delete(self):
"""
delete target data
"""
targets_to_delete = []
if len(self.args.delete) == 0:
targets_to_delete.extend([block.target for block in self.blocks.values()])
else:
for name in self.args.delete:
labels = self.get_labels(name)
targets_to_delete.extend([self.blocks[label].target for label in labels])
overwritten = self._overwrite(targets_to_delete)
if not overwritten:
display.abort_message()
return
def clean(self):
pathlist = pathlib.Path(self.dirpath).glob(f'{self.filename}-*.h5')
current = [block.target.filepath for block in self.blocks.values()]
filepaths = []
for path in pathlist:
path_str = str(path)
if path_str not in current:
filepaths.append(path_str)
confirm = self._clean(filepaths)
if not confirm:
display.abort_message()
def _init_logging(self):
self.logfile = pathlib.Path(self.dirpath) / f'{self.filename}.log'
logging.basicConfig(filename=self.logfile, filemode='w', level=logging.INFO,
format='%(levelname)s: %(message)s')
logging.captureWarnings(True)
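# --- Illustrative usage sketch (not part of the original module) -----------------
# A minimal example of how the decorators and run() above are typically driven,
# assuming the enclosing class is the one exported by numpipe (instantiated here
# as `job`); the function names below are hypothetical.
#
#     from numpipe import scheduler
#
#     job = scheduler()
#
#     @job.cache
#     def simulate():
#         # the returned dict is assumed to be written to this block's hdf5 target
#         return dict(x=[1, 2, 3])
#
#     @job.plots
#     def visualize():
#         import matplotlib.pyplot as plt
#         plt.plot([1, 2, 3])
#
#     if __name__ == '__main__':
#         job.run()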
|
WebServer.py
|
import re
import socket
import threading
from time import sleep
from typing import Tuple
from PyQt5.QtCore import pyqtSignal
from Network import StopThreading
class WebLogic:
signal_write_msg = pyqtSignal(str)
def __init__(self):
self.tcp_socket = None
self.sever_th = None
self.dir = None
self.client_socket_list = list()
def web_server_start(self, port: int) -> None:
"""
Helper: start the WEB server.
"""
self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Allow address reuse so an actively closed socket does not linger in TIME_WAIT
self.tcp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Put the socket into non-blocking mode
self.tcp_socket.setblocking(False)
try:
self.tcp_socket.bind(("", port))
except Exception as ret:
msg = "请检查端口号\n"
self.signal_write_msg.emit(msg)
else:
self.tcp_socket.listen()
self.sever_th = threading.Thread(target=self.web_server_concurrency)
self.sever_th.start()
msg = "WEB服务端正在监听端口:%s\n" % str(port)
self.signal_write_msg.emit(msg)
def web_server_concurrency(self) -> None:
"""
Helper meant to run in a worker thread.
A child thread listens for and accepts connections so the main thread keeps running and stays responsive.
Non-blocking polling is used to receive client messages, reducing wasted system resources and keeping the program lightweight.
"""
while True:
try:
client_socket, client_address = self.tcp_socket.accept()
except Exception as ret:
sleep(0.002)
else:
client_socket.setblocking(False)
# Store the accepted client socket in the list
self.client_socket_list.append((client_socket, client_address))
msg = f"WEB服务端已连接浏览器,IP:{client_address[0]}端口:{client_address[1]}\n"
self.signal_write_msg.emit(msg)
# Poll the list of client sockets for incoming data
for client, address in self.client_socket_list:
try:
recv_msg = client.recv(1024)
except Exception as ret:
pass
else:
if recv_msg:
msg = recv_msg.decode("utf-8")
msg_lines = msg.splitlines()
msg_dir = re.match(r"[^/]+(/[^ ]*)", msg_lines[0])
msg_dir = msg_dir.group(1)
msg = "来自IP:{}端口:{}:\n请求路径:{}\n".format(
address[0], address[1], msg_dir
)
self.signal_write_msg.emit(msg)
self.web_send(client, msg_dir)
else:
client.close()
self.client_socket_list.remove((client, address))
def web_send_msg(self, msg_dir) -> Tuple[bytes, bytes]:
"""
Build the data returned in response to a browser request.
:param msg_dir: the path requested by the browser
:return: response header, response body
"""
# Map the root path to the home page
if str(msg_dir) == "/":
msg_dir = "/index.html"
dir = str(self.dir) + str(msg_dir)
else:
dir = str(self.dir) + str(msg_dir)
# Build the Content-Type header matching the type of the file being returned
file_header = self.web_file_header(msg_dir)
# Open and read the requested file
try:
with open(dir, "rb") as f:
file = f.read()
except Exception as ret:
# The file could not be opened
file = "The resource you requested could not be found".encode("utf-8")
response_header = (
"HTTP/1.1 404 NOT FOUND\r\n"
+ "Connection: Keep-Alive\r\n"
+ "Content-Length: %d\r\n" % len(file)
+ file_header
+ "\r\n"
)
else:
# The file was opened successfully
response_header = (
"HTTP/1.1 200 OK\r\n"
+ "Connection: Keep-Alive\r\n"
+ "Content-Length: %d\r\n" % len(file)
+ file_header
+ "\r\n"
)
response_body = file
return response_header.encode("utf-8"), response_body
@staticmethod
def web_file_header(msg_dir) -> str:
"""
Build the Content-Type header matching the type of the file being returned.
:param msg_dir: the path requested by the browser
:return: Content-Type header line
"""
try:
file_type = re.match(r"[^.]+\.(.*)$", msg_dir)
file_type = file_type.group(1)
if file_type == "png":
file_header = "Content-Type: image/%s; charset=utf-8\r\n" % file_type
elif file_type == "css" or file_type == "html":
file_header = "Content-Type: text/%s; charset=utf-8\r\n" % file_type
else:
file_header = "Content-Type: text/html; charset=utf-8\r\n"
except Exception as ret:
file_header = "Content-Type: text/html; charset=utf-8\r\n"
return file_header
else:
return file_header
def web_send(self, client, msg_dir) -> None:
"""
Send a response message from the WEB server.
"""
try:
# Build the header and body via the web_send_msg method
header, body = self.web_send_msg(msg_dir)
client.send(header)
client.send(body)
msg = "WEB服务端已回复\n"
self.signal_write_msg.emit(msg)
except Exception as ret:
print(ret)
msg = "发送失败\n"
self.signal_write_msg.emit(msg)
def web_close(self) -> None:
"""
Helper: close the network connection.
"""
try:
for client, address in self.client_socket_list:
client.close()
self.tcp_socket.close()
msg = "已断开网络\n"
self.signal_write_msg.emit(msg)
except Exception as ret:
pass
try:
StopThreading.stop_thread(self.sever_th)
except Exception:
pass
try:
StopThreading.stop_thread(self.client_th)
except Exception:
pass
NoLink = -1
WebServer = 4
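# --- Illustrative usage sketch (not part of the original file) --------------------
# WebLogic declares a pyqtSignal, so in practice it is meant to be combined with a
# QObject/QWidget subclass that connects signal_write_msg to a log widget. A minimal,
# hypothetical headless flow (the served directory below is an assumption):
#
#     logic = WebLogic()
#     logic.dir = "/var/www/html"      # web root; "/" is served as /index.html
#     logic.web_server_start(8080)     # bind, listen, and accept in a worker thread
#     # ... browse to http://localhost:8080/ ...
#     logic.web_close()                # close client sockets and stop the thread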
|
checkpoint_utils.py
|
"""Implements similar functionality as tf.train.Checkpoint and tf.train.CheckpointManager.
https://gist.github.com/kevinzakka/5d345421f7abefd5dbaf6a77f829e70a.
"""
import logging
import os
import os.path as osp
import queue
import re
import signal
import threading
from glob import glob
import numpy as np
import torch
from ludwig.utils.fs_utils import file_lock
def mkdir(s):
"""Create a directory if it doesn't already exist."""
if not osp.exists(s):
os.makedirs(s)
def get_files(d, pattern, sort=True):
"""Return a list of files in a given directory.
Args:
d (str): The path to the directory.
pattern (str): The wildcard to filter files with.
sort (bool): Whether to sort the returned list. Assumes filenames contain a number value to sort by (tmp-001).
"""
files = glob(osp.join(d, pattern))
files = [f for f in files if osp.isfile(f)]
if sort:
def filter_numeric(s):
return re.sub("[^0-9]", "", s)
files.sort(key=lambda x: int(filter_numeric(os.path.basename(x).split(".")[0])))
return files
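# For example (illustrative): with "000000002.ckpt" and "000000010.ckpt" in a
# directory d, get_files(d, "*.ckpt") returns them ordered by their numeric stem,
# i.e. [".../000000002.ckpt", ".../000000010.ckpt"].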
def trim_checkpoints_loop(q: queue.Queue, directory: str, max_to_keep: int):
"""Trim older checkpoints until `max_to_keep` remain."""
while True:
should_continue = q.get()
if should_continue is False:
return
with file_lock(directory, lock_file=".lock"):
# get a list of checkpoints in reverse
# chronological order
ckpts = get_files(directory, "*.ckpt")[::-1]
# remove until `max_to_keep` remain
num_remove = len(ckpts) - max_to_keep
while num_remove > 0:
ckpt_name = ckpts.pop()
os.remove(ckpt_name)
num_remove -= 1
class Checkpoint:
"""Save and restore model and optimizer states."""
def __init__(self, model, optimizer=None):
"""Constructor."""
self.model = model
self.optimizer = optimizer
def restore(self, save_path, device=None):
"""Restore a state from a saved checkpoint.
Args:
save_path (str): The filepath to the saved checkpoint.
device (torch.device): The device on which to
restore the state.
"""
try:
state = torch.load(save_path, map_location=device)
try:
self.model.load_state_dict(state["model_weights"])
if self.optimizer is not None:
self.optimizer.load_state_dict(state["optim_state"])
logging.info(f"Successfully loaded model weights from {save_path}.")
return True
except Exception as e:
# there was an issue loading the state which means
# either the model definition and saved weights
# do not agree or they were not saved in the first
# place.
# since this is a severe issue, we raise an error
# rather than allowing the program to proceed.
raise e
except FileNotFoundError as e:
logging.error(e)
return False
def save(self, save_path):
"""Save a state to disk.
Modified from brentyi/fannypack.
Args:
save_path (str): The name of the checkpoint to save.
"""
state = {"model_weights": self.model.state_dict()}
if self.optimizer is not None:
state["optim_state"] = self.optimizer.state_dict()
# ignore ctrl+c while saving
try:
orig_handler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, lambda _sig, _frame: None)
except ValueError:
# signal throws a ValueError if we're not in the main thread
orig_handler = None
# atomic save
save_dir = osp.dirname(save_path)
tmp_path = osp.join(save_dir, f"tmp-{np.random.randint(1e9)}.ckpt")
torch.save(state, tmp_path)
# replace is an atomic operation in python
# it is POSIX compliant according to docs
# https://docs.python.org/3/library/os.html#os.replace
os.replace(tmp_path, save_path)
logging.debug(f"Saved checkpoint at {save_path}.")
# restore SIGINT handler
if orig_handler is not None:
signal.signal(signal.SIGINT, orig_handler)
class CheckpointManager:
"""A model and optimizer checkpoint manager."""
def __init__(self, checkpoint, directory, device, max_to_keep=10):
"""Constructor.
Args:
checkpoint (Checkpoint): An instance of `Checkpoint`.
directory (str): The directory in which checkpoints will be saved.
device (torch.device): The computing device on which to restore
checkpoints.
max_to_keep (int): The maximum number of checkpoints to keep.
Amongst all saved checkpoints, checkpoints will be deleted
oldest first, until `max_to_keep` remain.
"""
assert max_to_keep > 0, "max_to_keep should be a positive integer."
self.checkpoint = checkpoint
self.directory = directory
self.max_to_keep = max_to_keep
self.device = device
self.latest_checkpoint = None
# create checkpoint directory if it doesn't
# already exist
mkdir(self.directory)
self.queue = queue.Queue()
self.trim_thread = threading.Thread(
target=trim_checkpoints_loop, args=(self.queue, self.directory, self.max_to_keep)
)
self.trim_thread.start()
def restore_or_initialize(self):
"""Restore items in checkpoint from the latest checkpoint file.
Returns:
The global iteration step. This is parsed from the latest
checkpoint file if one is found, else 0 is returned.
"""
ckpts = get_files(self.directory, "*.ckpt")
if ckpts:
last_ckpt = ckpts[-1]
status = self.checkpoint.restore(last_ckpt, self.device)
if not status:
logging.info("Could not restore latest checkpoint file.")
return 0
self.latest_checkpoint = last_ckpt
return int(osp.basename(last_ckpt).split(".")[0])
return 0
def save(self, global_step):
"""Create a new checkpoint.
Args:
global_step (int): The iteration number which will be used
to name the checkpoint.
"""
save_path = osp.join(self.directory, f"{global_step:09d}.ckpt")
self.checkpoint.save(save_path)
self.latest_checkpoint = save_path
self.queue.put(True)
def close(self):
self.queue.put(False)
self.trim_thread.join()
@staticmethod
def load_latest_checkpoint(checkpoint, directory, device):
ckpts = get_files(directory, "*.ckpt")
if ckpts:
last_ckpt = ckpts[-1]
checkpoint.restore(last_ckpt, device)
else:
logging.error(f"No checkpoints found in {directory}.")
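# --- Illustrative usage sketch (not part of the original module) ------------------
# How Checkpoint and CheckpointManager above fit together; the model, optimizer,
# directory, and step counts below are hypothetical.
#
#     model = torch.nn.Linear(4, 2)
#     optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#     manager = CheckpointManager(Checkpoint(model, optimizer), "checkpoints",
#                                 device=torch.device("cpu"), max_to_keep=3)
#     start_step = manager.restore_or_initialize()   # 0 on a fresh run
#     for step in range(start_step, start_step + 100):
#         pass                                       # ... training step ...
#         if step % 10 == 0:
#             manager.save(step)                     # older files trimmed to max_to_keep
#     manager.close()                                # stop the background trim thread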
|
main.py
|
#!/usr/bin/env python2.7
import argparse # new in Python2.7
import os
import time
import string
import atexit
import threading
import logging
import sys
import subprocess
from yapsy.PluginManager import PluginManager
# OPENBCI FUNCTIONS-------------------------------------------------------------
# Load the plugins from the plugin directory.
from user.User import User
manager = PluginManager()
manager.setPluginPlaces(["plugins"])
manager.collectPlugins()
# Type sudo python main.py -p /dev/ttyUSB0
def set_up_parser():
'''
Returns a parser object to set up OpenBCI.
:return: argparse.Namespace
'''
parser = argparse.ArgumentParser(description="OpenBCI 'user'")
parser.add_argument('--board', default=3, type=int)
parser.add_argument('-l', '--list', action='store_true',
help="List available plugins.")
parser.add_argument('-i', '--info', metavar='PLUGIN',
help="Show more information about a plugin.")
parser.add_argument('-p', '--port',
help="Port to connect to OpenBCI Dongle " +
"( ex /dev/ttyUSB0 or /dev/tty.usbserial-* )")
# baud rate is not currently used
parser.add_argument('-b', '--baud', default=115200, type=int,
help="Baud rate (not currently used)")
parser.add_argument('--no-filtering', dest='filtering',
action='store_false',
help="Disable notch filtering")
parser.set_defaults(filtering=True)
parser.add_argument('-d', '--daisy', dest='daisy',
action='store_true',
help="Force daisy mode (beta feature)")
# first argument: plugin name, then parameters for plugin
parser.add_argument('-a', '--add', metavar=('PLUGIN', 'PARAM'),
action='append', nargs='+',
help="Select which plugins to activate and set parameters.")
parser.add_argument('--log', dest='log', action='store_true',
help="Log program")
parser.set_defaults(daisy=False, log=False)
return parser
def set_logging(args):
'''
Sets up logging capability
:param args: argparse.Namespace
:return: None
'''
if args.log:
print("Logging Enabled: " + str(args.log))
logging.basicConfig(filename="OBCI.log",
format='%(asctime)s - %(levelname)s : %(message)s',
level=logging.DEBUG)
logging.getLogger('yapsy').setLevel(logging.DEBUG)
logging.info('---------LOG START-------------')
logging.info(args)
else:
print("main.py: Logging Disabled.")
def add_plugin(plugin_name, plugin_args, board, plug_list, callback_list):
plug_name = plugin_name
plug_args = plugin_args
plug = manager.getPluginByName(plug_name)
if plug == None:
# eg: if an import fail inside a plugin, yapsy skip it
print("Error: [", plug_name,
"] not found or could not be loaded. Check name and requirements.")
else:
print("\nActivating [", plug_name, "] plugin...")
if not plug.plugin_object.pre_activate(plug_args,
sample_rate=board.getSampleRate(),
eeg_channels=board.getNbEEGChannels(),
aux_channels=board.getNbAUXChannels()):
print("Error while activating [", plug_name,
"], check output for more info.")
else:
print("Plugin [", plug_name, "] added to the list")
plug_list.append(plug.plugin_object)
callback_list.append(plug.plugin_object)
def execute_plugins(board, objs_to_update):
'''
Streams data from OpenBCI and runs plugins
:param board: bci.OpenBCIBoard
:return:
'''
print("--------------INFO---------------")
print("User serial interface enabled...\n\
View command map at http://docs.openbci.com.\n\
Type /start to run -- and /stop before issuing new commands afterwards.\n\
Type /exit to exit. \n\
Board outputs are automatically printed as: \n\
% <tab> message\n\
$$$ biosignals end of message")
print("\n-------------BEGIN---------------")
# Init board state
# s: stop board streaming; v: soft reset of the 32-bit board (no effect with 8bit board)
s = 'sv'
# Tell the board to enable or not daisy module
if board.daisy:
s = s + 'C'
else:
s = s + 'c'
# d: Channels settings back to default
s = s + 'd'
while (s != "/exit"):
# Send char and wait for registers to set
if (not s):
pass
elif ("help" in s):
print("View command map at: \
http://docs.openbci.com/software/01-OpenBCI_SDK.\n\
For user interface: read README or view \
https://github.com/OpenBCI/OpenBCI_Python")
elif board.streaming and s != "/stop":
print(
"Error: the board is currently streaming data, please type '/stop' before issuing new commands.")
else:
# read silently incoming packet if set (used when stream is stopped)
flush = False
if ('/' == s[0]):
s = s[1:]
rec = False  # whether the current command is recognized or not
if ("T:" in s):
lapse = int(s[string.find(s, "T:") + 2:])
rec = True
elif ("t:" in s):
lapse = int(s[string.find(s, "t:") + 2:])
rec = True
else:
lapse = -1
if ("start" in s):
if (fun != None):
# start streaming in a separate thread so we could always send commands in here
boardThread = threading.Thread(
target=board.start_streaming, args=(fun, lapse,
objs_to_update))
boardThread.daemon = True # will stop on exit
try:
boardThread.start()
except:
raise
else:
print("No function loaded")
rec = True
elif ('test' in s):
test = int(s[s.find("test") + 4:])
board.test_signal(test)
rec = True
elif ('stop' in s):
board.stop()
rec = True
flush = True
if rec == False:
print("Command not recognized...")
elif s:
for c in s:
if sys.hexversion > 0x03000000:
board.ser.write(bytes(c, 'utf-8'))
else:
board.ser.write(bytes(c))
time.sleep(0.100)
line = ''
time.sleep(0.1) # Wait to see if the board has anything to report
while board.ser.inWaiting():
c = board.ser.read().decode('utf-8')
line += c
time.sleep(0.001)
if (c == '\n') and not flush:
print('%\t' + line[:-1])
line = ''
if not flush:
print(line)
# Take user input
# s = input('--> ')
if sys.hexversion > 0x03000000:
s = input('--> ')
else:
s = raw_input('--> ')
pass
if __name__ == '__main__':
print(" MAIN.py")
# ADD ARGUMENTS
parser = set_up_parser()
args = parser.parse_args()
if not (args.port or args.list or args.info):
parser.error(
'No action requested. Use `--port serial_port` to connect to the board; `--list` to show available plugins or `--info [plugin_name]` to get more information.')
# DECIDE WHICH VERSION OF OPENBCI BOARD IS BEING USED
if args.board == 3:
print("main.py: open_bci_v3...")
import open_bci_v3 as bci
elif args.board == 4:
print("main.py: open_bci_v_ganglion...")
import open_bci_v_ganglion as bci
else:
logging.warn('Board type not recognized')
# DISPLAY SETTINGS
print("\n------------SETTINGS-------------")
print("Notch filtering:", args.filtering)
# Logging
set_logging(args)
# SET UP BOARD
print("\n-------INSTANTIATING BOARD-------")
board = bci.OpenBCIBoard(port=args.port,
daisy=args.daisy,
filter_data=args.filtering,
scaled_output=True,
log=args.log)
# Info about effective number of channels and sampling rate
if board.daisy:
print("Force daisy mode:")
else:
print("No daisy:")
print(board.getNbEEGChannels(), "EEG channels and",
board.getNbAUXChannels(), "AUX channels at",
board.getSampleRate(), "Hz.")
# SET UP PLUGINS
print("\n------------PLUGINS--------------")
# Loop round the plugins and print their names.
# NOTE: IF A PREV INSTANCE OF MAIN.PY INIT-ED A PLUGIN, FUTURE INSTANCES
# REMEMBER!
print("Found plugins:")
for plugin in manager.getAllPlugins():
print("[", plugin.name, "]")
print()
# Fetch plugins THAT USER STATES, try to activate them, add to the list if OK
plug_list = []
callback_list = []
add_plugin('packets_to_csv', [], board, plug_list, callback_list)
if len(plug_list) == 0:
print(
"WARNING: no plugin selected, you will only be able to communicate with the board.")
fun = None
else:
fun = callback_list
def cleanUp():
board.disconnect()
print("Deactivating Plugins...")
for plug in plug_list:
plug.deactivate()
print("User.py exiting...")
atexit.register(cleanUp)
# SET UP USER---------------------------------------------------------------
# user = User('', '', '')
# VIEW----------------------------------------------------------------------
# Run Kivy in a separate place
import kivy_app
from kivy.clock import Clock
app = kivy_app.WallEEGApp()
def run(instance):
app.run()
# kivy_thread = threading.Thread(target=kivy_app.InSenseApp().run)
# try:
# kivy_thread.start()
# except:
# raise
# EXECUTE PLUGINS, STREAM FROM OPENBCI
openbci_update_thread = threading.Thread \
(target=execute_plugins, args=(board, [app.user.userECG]))
try:
openbci_update_thread.start()
except:
raise
run(0)
|
test_local_catalog.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Test behaviors specific to --use_local_catalog being enabled.
import pytest
import Queue
import random
import threading
import time
from multiprocessing.pool import ThreadPool
from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
RETRY_PROFILE_MSG = 'Retried query planning due to inconsistent metadata'
class TestCompactCatalogUpdates(CustomClusterTestSuite):
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--use_local_catalog=true",
catalogd_args="--catalog_topic_mode=minimal")
def test_minimal_topic_updates_sync_ddl(self, unique_database):
"""
Start Impala cluster with minimal catalog update topics and local catalog enabled.
Run some smoke tests for SYNC_DDL to ensure that invalidations are propagated.
"""
self._do_test_sync_ddl(unique_database)
def _make_per_impalad_args(local_catalog_enabled):
assert isinstance(local_catalog_enabled, list)
args = ['--use_local_catalog=%s' % str(e).lower()
for e in local_catalog_enabled]
return "--per_impalad_args=" + ";".join(args)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
start_args=_make_per_impalad_args([True, False]),
catalogd_args="--catalog_topic_mode=mixed")
def test_mixed_topic_updates_sync_ddl(self, unique_database):
"""
Same as above, but with 'mixed' mode catalog and different configs
on the two different impalads used by the test.
"""
self._do_test_sync_ddl(unique_database)
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
start_args=_make_per_impalad_args([False, True]),
catalogd_args="--catalog_topic_mode=mixed")
def test_mixed_topic_updates_sync_ddl_2(self, unique_database):
"""
Same as above, but with opposite configurations for the two
impalads used in the test.
"""
self._do_test_sync_ddl(unique_database)
def _do_test_sync_ddl(self, unique_database):
""" Implementation details for above two tests. """
try:
impalad1 = self.cluster.impalads[0]
impalad2 = self.cluster.impalads[1]
client1 = impalad1.service.create_beeswax_client()
client2 = impalad2.service.create_beeswax_client()
view = "%s.my_view" % unique_database
# Try to describe the view before it exists - should get an error.
# This should prime any caches in impalad2.
err = self.execute_query_expect_failure(client2, "describe %s" % view)
assert 'Could not resolve' in str(err)
# Create it with SYNC_DDL from client 1.
query_options = {"sync_ddl": 1}
self.execute_query_expect_success(client1, "create view %s as select 1" % view,
query_options)
# It should be immediately visible from client 2.
self.execute_query_expect_success(client2, "describe %s" % view)
finally:
client1.close()
client2.close()
# Global 'INVALIDATE METADATA' is not supported on any impalads running in
# local_catalog mode, but should work on impalads running in the default
# mode.
# TODO(IMPALA-7506): support this!
for impalad in self.cluster.impalads:
client = impalad.service.create_beeswax_client()
try:
if "--use_local_catalog=true" in impalad.cmd:
err = self.execute_query_expect_failure(client, 'INVALIDATE METADATA')
assert 'not supported' in str(err)
else:
self.execute_query_expect_success(client, "INVALIDATE METADATA")
finally:
client.close()
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--use_local_catalog=true",
catalogd_args="--catalog_topic_mode=minimal")
def test_restart_catalogd(self, unique_database):
"""
Tests for the behavior of LocalCatalog when catalogd restarts.
"""
try:
impalad = self.cluster.impalads[0]
client = impalad.service.create_beeswax_client()
view = "%s.my_view" % unique_database
self.execute_query_expect_success(client, "create view %s as select 1" % view)
self.execute_query_expect_success(client, "select * from %s" % view)
# Should not have any detected restarts, initially.
self.assert_impalad_log_contains('WARNING', 'Detected catalog service restart',
expected_count=0)
# Kill catalogd, and while it's down, drop the view via HMS.
self.cluster.catalogd.kill()
# Drop the view via hive to ensure that when catalogd restarts,
# the impalads see the dropped view.
self.hive_client.drop_table(unique_database, "my_view", True)
# Start catalogd again. We should see the view disappear once the
# catalog pushes a new topic update.
self.cluster.catalogd.start()
NUM_ATTEMPTS = 30
for attempt in xrange(NUM_ATTEMPTS):
try:
self.assert_impalad_log_contains('WARNING', 'Detected catalog service restart')
err = self.execute_query_expect_failure(client, "select * from %s" % view)
assert "Could not resolve table reference" in str(err)
break
except Exception, e:
assert attempt < NUM_ATTEMPTS - 1, str(e)
time.sleep(1)
finally:
client.close()
class TestLocalCatalogRetries(CustomClusterTestSuite):
def _check_metadata_retries(self, queries):
"""
Runs 'queries' concurrently, recording any inconsistent metadata exceptions.
'queries' is a list of query strings. The queries are run by two threads,
each one selecting a random query to run in a loop.
"""
# Tracks number of inconsistent metadata exceptions.
inconsistent_seen = [0]
inconsistent_seen_lock = threading.Lock()
# Tracks query failures for all other reasons.
failed_queries = Queue.Queue()
try:
client1 = self.cluster.impalads[0].service.create_beeswax_client()
client2 = self.cluster.impalads[1].service.create_beeswax_client()
def stress_thread(client):
# Loops, picks a random query in each iteration, runs it,
# and looks for retries and InconsistentMetadataFetchExceptions.
attempt = 0
while inconsistent_seen[0] == 0 and attempt < 200:
q = random.choice(queries)
attempt += 1
try:
ret = self.execute_query_unchecked(client, q)
except Exception, e:
if 'InconsistentMetadataFetchException' in str(e):
with inconsistent_seen_lock:
inconsistent_seen[0] += 1
else:
failed_queries.put((q, str(e)))
threads = [threading.Thread(target=stress_thread, args=(c,))
for c in [client1, client2]]
for t in threads:
t.start()
for t in threads:
# When there are failures, they're observed quickly.
t.join(30)
assert failed_queries.empty(),\
"Failed query count non zero: %s" % list(failed_queries.queue)
finally:
client1.close()
client2.close()
return inconsistent_seen[0]
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--use_local_catalog=true",
catalogd_args="--catalog_topic_mode=minimal")
def test_fetch_metadata_retry(self):
"""
Tests that operations that fetch metadata (excluding those fetches needed for
query planning) retry when they hit an InconsistentMetadataFetchException.
"""
queries = [
"show column stats functional.alltypes",
"show table stats functional.alltypes",
"describe extended functional.alltypes",
"show tables in functional like 'all*'",
"show files in functional.alltypes",
"refresh functional.alltypes"]
seen = self._check_metadata_retries(queries)
assert seen == 0, "Saw inconsistent metadata"
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--use_local_catalog=true --local_catalog_max_fetch_retries=0",
catalogd_args="--catalog_topic_mode=minimal")
def test_replan_limit(self):
"""
Tests that the flag to limit the number of retries works and that
an inconsistent metadata exception when running concurrent reads/writes
is seen. With the max retries set to 0, no retries are expected and with
the concurrent read/write workload, an inconsistent metadata exception is
expected.
"""
queries = [
'refresh functional.alltypes',
'refresh functional.alltypes partition (year=2009, month=4)',
'select count(*) from functional.alltypes where month=4']
seen = self._check_metadata_retries(queries)
assert seen > 0, "Did not observe inconsistent metadata"
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--use_local_catalog=true",
catalogd_args="--catalog_topic_mode=minimal")
def test_replan_on_stale_metadata(self, unique_database):
"""
Tests that when metadata is inconsistent while planning a query,
the query planner retries the query.
"""
try:
impalad1 = self.cluster.impalads[0]
impalad2 = self.cluster.impalads[1]
client1 = impalad1.service.create_beeswax_client()
client2 = impalad2.service.create_beeswax_client()
# Create a view in client 1, cache the table list including that view in
# client 2, and then drop it in client 1. While we've still cached the
# table list, try to describe the view from client 2 -- it should fail
# with the normal error message even though it had the inconsistent cache.
view = "%s.my_view" % unique_database
self.execute_query_expect_success(client1, "create view %s as select 1" % view)
self.execute_query_expect_success(client2, "show tables")
self.execute_query_expect_success(client1, "drop view %s" % view)
err = self.execute_query_expect_failure(client2, "describe %s" % view)
assert "Could not resolve path" in str(err)
# Run a mix of concurrent REFRESH and queries against different subsets
# of partitions. This causes partial views of the table to get cached,
# and then as the new partitions are loaded, we detect the version skew
# and issue re-plans. We run the concurrent workload until the profile
# indicates that a replan has happened.
# We expect stress_thread to cause a re-plan. The counter is stored in a
# mutable container so that stress_thread can update it.
# TODO: consolidate with _check_metadata_retries.
replans_seen = [0]
replans_seen_lock = threading.Lock()
# Queue to propagate exceptions from failed queries, if any.
failed_queries = Queue.Queue()
def stress_thread(client):
while replans_seen[0] == 0:
# TODO(todd) EXPLAIN queries don't currently yield a profile, so
# we have to actually run a COUNT query.
q = random.choice([
'invalidate metadata functional.alltypes',
'select count(*) from functional.alltypes where month=4',
'select count(*) from functional.alltypes where month=5'])
try:
ret = self.execute_query_expect_success(client, q)
except Exception as e:
failed_queries.put((q, str(e)))
continue
if RETRY_PROFILE_MSG in ret.runtime_profile:
with replans_seen_lock:
replans_seen[0] += 1
threads = [threading.Thread(target=stress_thread, args=(c,))
for c in [client1, client2]]
for t in threads:
t.start()
for t in threads:
t.join(30)
assert failed_queries.empty(), "Failed queries encountered: %s" %\
list(failed_queries.queue)
assert replans_seen[0] > 0, "Did not trigger any re-plans"
finally:
client1.close()
client2.close()
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--use_local_catalog=true --inject_latency_after_catalog_fetch_ms=50",
catalogd_args="--catalog_topic_mode=minimal",
cluster_size=1)
def test_invalidation_races(self, unique_database):
"""
Regression test for IMPALA-7534: races where invalidation of the table list
could be skipped, causing spurious "table not found" errors.
"""
test_self = self
class ThreadLocalClient(threading.local):
def __init__(self):
self.c = test_self.create_impala_client()
t = ThreadPool(processes=8)
tls = ThreadLocalClient()
def do_table(i):
for q in [
"create table {db}.t{i} (i int)",
"describe {db}.t{i}",
"drop table {db}.t{i}",
"create database {db}_{i}",
"show tables in {db}_{i}",
"drop database {db}_{i}"]:
self.execute_query_expect_success(tls.c, q.format(
db=unique_database, i=i))
# Prior to fixing IMPALA-7534, this test would fail within 20-30 iterations,
# so 100 should be quite reliable as a regression test.
NUM_ITERS = 100
for i in t.imap_unordered(do_table, xrange(NUM_ITERS)):
pass
class TestObservability(CustomClusterTestSuite):
def get_catalog_cache_metrics(self, impalad):
""" Returns catalog cache metrics as a dict by scraping the json metrics page on the
given impalad"""
child_groups =\
impalad.service.get_debug_webpage_json('metrics')['metric_group']['child_groups']
for group in child_groups:
if group['name'] != 'impala-server': continue
# Filter catalog cache metrics.
for child_group in group['child_groups']:
if child_group['name'] != 'catalog': continue
metrics_data = [(metric['name'], metric['value'])
for metric in child_group['metrics'] if 'catalog.cache' in metric['name']]
return dict(metrics_data)
assert False, "Catalog cache metrics not found in %s" % child_groups
@pytest.mark.execute_serially
@CustomClusterTestSuite.with_args(
impalad_args="--use_local_catalog=true",
catalogd_args="--catalog_topic_mode=minimal")
def test_cache_metrics(self, unique_database):
"""
Test that profile output includes impalad local cache metrics. Also verifies that
the daemon level metrics are updated between query runs.
"""
try:
impalad = self.cluster.impalads[0]
# Make sure local catalog mode is enabled and visible on web UI.
assert '(Local Catalog Mode)' in impalad.service.read_debug_webpage('/')
# Make sure /catalog_object endpoint is disabled on web UI.
assert "No URI handler for '/catalog_object'" \
in impalad.service.read_debug_webpage('/catalog_object')
client = impalad.service.create_beeswax_client()
cache_hit_rate_metric_key = "catalog.cache.hit-rate"
cache_miss_rate_metric_key = "catalog.cache.miss-rate"
cache_hit_count_metric_key = "catalog.cache.hit-count"
cache_request_count_metric_key = "catalog.cache.request-count"
cache_request_count_prev_run = 0
cache_hit_count_prev_run = 0
test_table_name = "%s.test_cache_metrics_test_tbl" % unique_database
# A mix of queries of various types.
queries_to_test = ["select count(*) from functional.alltypes",
"explain select count(*) from functional.alltypes",
"create table %s (a int)" % test_table_name,
"drop table %s" % test_table_name]
for _ in xrange(0, 10):
for query in queries_to_test:
ret = self.execute_query_expect_success(client, query)
assert ret.runtime_profile.count("Frontend:") == 1
assert ret.runtime_profile.count("CatalogFetch") > 1
cache_metrics = self.get_catalog_cache_metrics(impalad)
cache_hit_rate = cache_metrics[cache_hit_rate_metric_key]
cache_miss_rate = cache_metrics[cache_miss_rate_metric_key]
cache_hit_count = cache_metrics[cache_hit_count_metric_key]
cache_request_count = cache_metrics[cache_request_count_metric_key]
assert cache_hit_rate > 0.0 and cache_hit_rate < 1.0
assert cache_miss_rate > 0.0 and cache_miss_rate < 1.0
assert cache_hit_count > cache_hit_count_prev_run,\
"%s not updated between two query runs, query - %s"\
% (cache_hit_count_metric_key, query)
assert cache_request_count > cache_request_count_prev_run,\
"%s not updated betweeen two query runs, query - %s"\
% (cache_request_count_metric_key, query)
cache_hit_count_prev_run = cache_hit_count
cache_request_count_prev_run = cache_request_count
finally:
client.close()
|
GraphGadgetTest.py
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import imath
import inspect
import time
import IECore
import Gaffer
import GafferUI
import GafferTest
import GafferUITest
class NestedPlugTestNode( Gaffer.Node ) :
def __init__( self ) :
Gaffer.Node.__init__( self )
IECore.registerRunTimeTyped( NestedPlugTestNode )
Gaffer.Metadata.registerValue( NestedPlugTestNode, "c", "nodule:type", "GafferUI::CompoundNodule" )
class GraphGadgetTest( GafferUITest.TestCase ) :
def testRemovedNodesDontHaveGadgets( self ) :
s = Gaffer.ScriptNode()
g = GafferUI.GraphGadget( s )
n = GafferTest.AddNode()
s["add1"] = n
self.assertIsNotNone( g.nodeGadget( n ) )
s.deleteNodes( filter = Gaffer.StandardSet( [ n ] ) )
self.assertIsNone( g.nodeGadget( n ) )
def testRemovedNodesDontHaveConnections( self ) :
s = Gaffer.ScriptNode()
n = GafferTest.AddNode()
s["add1"] = n
s["add2"] = GafferTest.AddNode()
s["add1"]["op1"].setInput( s["add2"]["sum"] )
g = GafferUI.GraphGadget( s )
s.deleteNodes( filter = Gaffer.StandardSet( [ s["add1"] ] ) )
self.assertIsNone( g.connectionGadget( n["op1"] ) )
def testCreateWithFilter( self ) :
script = Gaffer.ScriptNode()
script["add1"] = GafferTest.AddNode()
script["add2"] = GafferTest.AddNode()
nodeFilter = Gaffer.StandardSet( [ script["add2"] ] )
g = GafferUI.GraphGadget( script, nodeFilter )
self.assertIsNone( g.nodeGadget( script["add1"] ) )
self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
def testEditFilter( self ) :
script = Gaffer.ScriptNode()
script["add1"] = GafferTest.AddNode()
script["add2"] = GafferTest.AddNode()
nodeFilter = Gaffer.StandardSet( script.children() )
g = GafferUI.GraphGadget( script, nodeFilter )
self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
nodeFilter.remove( script["add1"] )
self.assertIsNone( g.nodeGadget( script["add1"] ) )
self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
nodeFilter.remove( script["add2"] )
self.assertIsNone( g.nodeGadget( script["add1"] ) )
self.assertIsNone( g.nodeGadget( script["add2"] ) )
nodeFilter.add( script["add1"] )
self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
self.assertIsNone( g.nodeGadget( script["add2"] ) )
nodeFilter.add( script["add2"] )
self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
def testUnhidingConnectedDstNodes( self ) :
script = Gaffer.ScriptNode()
script["add1"] = GafferTest.AddNode()
script["add2"] = GafferTest.AddNode()
script["add2"]["op1"].setInput( script["add1"]["sum"] )
nodeFilter = Gaffer.StandardSet( [ script["add1"] ] )
g = GafferUI.GraphGadget( script, nodeFilter )
self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
self.assertIsNone( g.nodeGadget( script["add2"] ) )
self.assertIsNone( g.connectionGadget( script["add2"]["op1"] ) )
nodeFilter.add( script["add2"] )
self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
self.assertIsNotNone( g.connectionGadget( script["add2"]["op1"] ) )
def testCreatingWithHiddenSrcNodes( self ) :
script = Gaffer.ScriptNode()
script["add1"] = GafferTest.AddNode()
script["add2"] = GafferTest.AddNode()
script["add2"]["op1"].setInput( script["add1"]["sum"] )
nodeFilter = Gaffer.StandardSet( [ script["add2"] ] )
g = GafferUI.GraphGadget( script, nodeFilter )
self.assertIsNone( g.nodeGadget( script["add1"] ) )
self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
c = g.connectionGadget( script["add2"]["op1"] )
self.assertIsNotNone( c )
self.assertTrue( c.dstNodule().plug().isSame( script["add2"]["op1"] ) )
self.assertEqual( c.srcNodule(), None )
def testHidingConnectedDstNodes( self ) :
script = Gaffer.ScriptNode()
script["add1"] = GafferTest.AddNode()
script["add2"] = GafferTest.AddNode()
script["add2"]["op1"].setInput( script["add1"]["sum"] )
nodeFilter = Gaffer.StandardSet( script.children() )
g = GafferUI.GraphGadget( script, nodeFilter )
self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
self.assertIsNotNone( g.connectionGadget( script["add2"]["op1"] ) )
nodeFilter.remove( script["add2"] )
self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
self.assertIsNone( g.nodeGadget( script["add2"] ) )
self.assertIsNone( g.connectionGadget( script["add2"]["op1"] ) )
def testHidingConnectedSrcNodes( self ) :
script = Gaffer.ScriptNode()
script["add1"] = GafferTest.AddNode()
script["add2"] = GafferTest.AddNode()
script["add2"]["op1"].setInput( script["add1"]["sum"] )
nodeFilter = Gaffer.StandardSet( [ script["add1"], script["add2"] ] )
g = GafferUI.GraphGadget( script, nodeFilter )
self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
c = g.connectionGadget( script["add2"]["op1"] )
self.assertIsNotNone( c )
self.assertTrue( c.srcNodule().plug().isSame( script["add1"]["sum"] ) )
self.assertTrue( c.dstNodule().plug().isSame( script["add2"]["op1"] ) )
nodeFilter.remove( script["add1"] )
self.assertIsNone( g.nodeGadget( script["add1"] ) )
c = g.connectionGadget( script["add2"]["op1"] )
self.assertIsNotNone( c )
self.assertIsNone( c.srcNodule() )
self.assertTrue( c.dstNodule().plug().isSame( script["add2"]["op1"] ) )
def testConnectingInvisibleDstNodes( self ) :
script = Gaffer.ScriptNode()
script["add1"] = GafferTest.AddNode()
script["add2"] = GafferTest.AddNode()
nodeFilter = Gaffer.StandardSet( [ script["add1"] ] )
g = GafferUI.GraphGadget( script, nodeFilter )
self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
self.assertIsNone( g.nodeGadget( script["add2"] ) )
script["add2"]["op1"].setInput( script["add1"]["sum"] )
self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
self.assertIsNone( g.nodeGadget( script["add2"] ) )
self.assertIsNone( g.connectionGadget( script["add2"]["op1"] ) )
def testConnectingHiddenDstNodes( self ) :
script = Gaffer.ScriptNode()
script["add1"] = GafferTest.AddNode()
script["add2"] = GafferTest.AddNode()
nodeFilter = Gaffer.StandardSet( script.children() )
g = GafferUI.GraphGadget( script, nodeFilter )
self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
nodeFilter.remove( script["add2"] )
self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
self.assertIsNone( g.nodeGadget( script["add2"] ) )
script["add2"]["op1"].setInput( script["add1"]["sum"] )
self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
self.assertIsNone( g.nodeGadget( script["add2"] ) )
self.assertIsNone( g.connectionGadget( script["add2"]["op1"] ) )
def testConnectingHiddenSrcNodes( self ) :
script = Gaffer.ScriptNode()
script["add1"] = GafferTest.AddNode()
script["add2"] = GafferTest.AddNode()
nodeFilter = Gaffer.StandardSet( [ script["add2"] ] )
g = GafferUI.GraphGadget( script, nodeFilter )
self.assertIsNone( g.nodeGadget( script["add1"] ) )
self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
script["add2"]["op1"].setInput( script["add1"]["sum"] )
self.assertIsNone( g.nodeGadget( script["add1"] ) )
self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
c = g.connectionGadget( script["add2"]["op1"] )
self.assertIsNotNone( c )
self.assertIsNone( c.srcNodule() )
def testConnectingHiddenSrcNodesAndReshowing( self ) :
script = Gaffer.ScriptNode()
script["add1"] = GafferTest.AddNode()
script["add2"] = GafferTest.AddNode()
nodeFilter = Gaffer.StandardSet( [ script["add2"] ] )
g = GafferUI.GraphGadget( script, nodeFilter )
self.assertIsNone( g.nodeGadget( script["add1"] ) )
self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
script["add2"]["op1"].setInput( script["add1"]["sum"] )
self.assertIsNone( g.nodeGadget( script["add1"] ) )
self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
c = g.connectionGadget( script["add2"]["op1"] )
self.assertIsNotNone( c )
self.assertIsNone( c.srcNodule() )
nodeFilter.add( script["add1"] )
self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
c = g.connectionGadget( script["add2"]["op1"] )
self.assertIsNotNone( c )
self.assertTrue( c.srcNodule().plug().isSame( script["add1"]["sum"] ) )
def testChangingFilter( self ) :
script = Gaffer.ScriptNode()
script["add1"] = GafferTest.AddNode()
script["add2"] = GafferTest.AddNode()
nodeFilter = Gaffer.StandardSet( [ script["add1"] ] )
g = GafferUI.GraphGadget( script, nodeFilter )
self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
self.assertIsNone( g.nodeGadget( script["add2"] ) )
nodeFilter2 = Gaffer.StandardSet( [ script["add2"] ] )
g.setFilter( nodeFilter2 )
self.assertIsNone( g.nodeGadget( script["add1"] ) )
self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
def testChangingFilterAndEditingOriginal( self ) :
script = Gaffer.ScriptNode()
script["add1"] = GafferTest.AddNode()
script["add2"] = GafferTest.AddNode()
nodeFilter = Gaffer.StandardSet()
g = GafferUI.GraphGadget( script, nodeFilter )
self.assertIsNone( g.nodeGadget( script["add1"] ) )
self.assertIsNone( g.nodeGadget( script["add2"] ) )
nodeFilter2 = Gaffer.StandardSet( [ script["add2"] ] )
g.setFilter( nodeFilter2 )
self.assertIsNone( g.nodeGadget( script["add1"] ) )
self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
nodeFilter.add( script["add1"] )
self.assertIsNone( g.nodeGadget( script["add1"] ) )
self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
def testConnectionsForNestedPlugs( self ) :
script = Gaffer.ScriptNode()
script["n"] = NestedPlugTestNode()
script["n"]["c"] = Gaffer.Plug()
script["n"]["c"]["i"] = Gaffer.IntPlug()
script["n2"] = NestedPlugTestNode()
script["n2"]["c"] = Gaffer.Plug( direction = Gaffer.Plug.Direction.Out )
script["n2"]["c"]["o"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
script["n"]["c"]["i"].setInput( script["n2"]["c"]["o"] )
s = Gaffer.StandardSet( script.children() )
g = GafferUI.GraphGadget( script, s )
c = g.connectionGadget( script["n"]["c"]["i"] )
self.assertIsNotNone( c )
self.assertTrue( c.srcNodule().plug().isSame( script["n2"]["c"]["o"] ) )
self.assertTrue( c.dstNodule().plug().isSame( script["n"]["c"]["i"] ) )
s.remove( script["n2"] )
self.assertIsNone( g.nodeGadget( script["n2"] ) )
c = g.connectionGadget( script["n"]["c"]["i"] )
self.assertIsNotNone( c )
self.assertIsNone( c.srcNodule() )
self.assertTrue( c.dstNodule().plug().isSame( script["n"]["c"]["i"] ) )
s.add( script["n2"] )
self.assertIsNotNone( g.nodeGadget( script["n2"] ) )
c = g.connectionGadget( script["n"]["c"]["i"] )
self.assertIsNotNone( c )
self.assertTrue( c.srcNodule().plug().isSame( script["n2"]["c"]["o"] ) )
self.assertTrue( c.dstNodule().plug().isSame( script["n"]["c"]["i"] ) )
s.remove( script["n"] )
self.assertIsNone( g.nodeGadget( script["n"] ) )
self.assertIsNone( g.connectionGadget( script["n"]["c"]["i"] ) )
s.add( script["n"] )
self.assertIsNotNone( g.nodeGadget( script["n"] ) )
c = g.connectionGadget( script["n"]["c"]["i"] )
self.assertIsNotNone( c )
self.assertTrue( c.srcNodule().plug().isSame( script["n2"]["c"]["o"] ) )
self.assertTrue( c.dstNodule().plug().isSame( script["n"]["c"]["i"] ) )
def testRemovePlugWithInputConnection( self ) :
script = Gaffer.ScriptNode()
script["n1"] = Gaffer.Node()
script["n2"] = Gaffer.Node()
script["n1"]["o"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
script["n2"]["i"] = Gaffer.IntPlug()
script["n2"]["i"].setInput( script["n1"]["o"] )
g = GafferUI.GraphGadget( script )
self.assertIsNotNone( g.connectionGadget( script["n2"]["i"] ) )
with Gaffer.UndoScope( script ) :
removedPlug = script["n2"]["i"]
del script["n2"]["i"]
self.assertIsNone( g.connectionGadget( removedPlug ) )
script.undo()
self.assertIsNotNone( g.connectionGadget( script["n2"]["i"] ) )
def testRemovePlugWithOutputConnection( self ) :
script = Gaffer.ScriptNode()
script["n1"] = Gaffer.Node()
script["n2"] = Gaffer.Node()
script["n1"]["o"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
script["n2"]["i"] = Gaffer.IntPlug()
script["n2"]["i"].setInput( script["n1"]["o"] )
g = GafferUI.GraphGadget( script )
self.assertIsNotNone( g.connectionGadget( script["n2"]["i"] ) )
with Gaffer.UndoScope( script ) :
del script["n1"]["o"]
self.assertIsNone( g.connectionGadget( script["n2"]["i"] ) )
script.undo()
self.assertIsNotNone( g.connectionGadget( script["n2"]["i"] ) )
def testConnectionBound( self ) :
for i in range( 0, 100 ) :
script = Gaffer.ScriptNode()
script["n1"] = Gaffer.Node()
script["n2"] = Gaffer.Node()
script["n1"]["o"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
script["n2"]["i"] = Gaffer.IntPlug()
script["n2"]["i"].setInput( script["n1"]["o"] )
g = GafferUI.GraphGadget( script )
c = g.connectionGadget( script["n2"]["i"] )
gb = imath.Box3f()
gb.extendBy( g.nodeGadget( script["n1"] ).bound() )
gb.extendBy( g.nodeGadget( script["n2"] ).bound() )
gb.setMin( gb.min() - imath.V3f( 10 ) )
gb.setMax( gb.max() + imath.V3f( 10 ) )
b = c.bound()
self.assertFalse( b.isEmpty() )
self.assertTrue( IECore.BoxAlgo.contains( gb, b ) )
def testNoFilter( self ) :
s = Gaffer.ScriptNode()
s["n1"] = Gaffer.Node()
g = GafferUI.GraphGadget( s )
self.assertTrue( g.getRoot().isSame( s ) )
self.assertTrue( g.getFilter() is None )
self.assertTrue( g.nodeGadget( s["n1"] ) )
s["n2"] = Gaffer.Node()
self.assertTrue( g.nodeGadget( s["n1"] ) )
def testFilterIsChildSet( self ) :
s = Gaffer.ScriptNode()
s["n1"] = Gaffer.Node()
g = GafferUI.GraphGadget( s, Gaffer.ChildSet( s ) )
self.assertTrue( g.nodeGadget( s["n1"] ) )
l = len( g )
s["n2"] = Gaffer.Node()
self.assertTrue( g.nodeGadget( s["n2"] ) )
self.assertEqual( len( g ), l + 1 )
def testSetRoot( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = Gaffer.Node()
f = Gaffer.StandardSet( [ s["b"] ] )
g = GafferUI.GraphGadget( s, f )
self.assertTrue( g.nodeGadget( s["b"] ) )
self.assertFalse( g.nodeGadget( s["b"]["n"] ) )
g.setRoot( s["b"] )
self.assertTrue( g.getRoot().isSame( s["b"] ) )
self.assertEqual( g.getFilter(), None )
self.assertTrue( g.nodeGadget( s["b"]["n"] ) )
self.assertFalse( g.nodeGadget( s["b"] ) )
def testRootChangedSignal( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
roots = []
previousRoots = []
def f( gg, previousRoot ) :
self.assertTrue( gg.isSame( g ) )
roots.append( gg.getRoot() )
previousRoots.append( previousRoot )
g = GafferUI.GraphGadget( s )
g.rootChangedSignal().connect( f, scoped = False )
self.assertEqual( len( roots ), 0 )
self.assertEqual( len( previousRoots ), 0 )
g.setRoot( s["b"] )
self.assertEqual( len( roots ), 1 )
self.assertTrue( roots[0].isSame( s["b"] ) )
self.assertEqual( len( previousRoots ), 1 )
self.assertTrue( previousRoots[0].isSame( s ) )
g.setRoot( s["b"] )
self.assertEqual( len( roots ), 1 )
self.assertTrue( roots[0].isSame( s["b"] ) )
self.assertEqual( len( previousRoots ), 1 )
self.assertTrue( previousRoots[0].isSame( s ) )
g.setRoot( s )
self.assertEqual( len( roots ), 2 )
self.assertTrue( roots[1].isSame( s ) )
self.assertEqual( len( previousRoots ), 2 )
self.assertTrue( previousRoots[1].isSame( s["b"] ) )
def testSetNodePosition( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
g = GafferUI.GraphGadget( s )
self.assertFalse( g.hasNodePosition( s["n"] ) )
g.setNodePosition( s["n"], imath.V2f( -100, 2000 ) )
self.assertEqual( g.getNodePosition( s["n"] ), imath.V2f( -100, 2000 ) )
self.assertTrue( g.hasNodePosition( s["n"] ) )
def testPlugConnectionGadgets( self ) :
script = Gaffer.ScriptNode()
script["add1"] = GafferTest.AddNode()
script["add2"] = GafferTest.AddNode()
script["add3"] = GafferTest.AddNode()
script["add4"] = GafferTest.AddNode()
script["add2"]["op1"].setInput( script["add1"]["sum"] )
script["add3"]["op1"].setInput( script["add2"]["sum"] )
script["add4"]["op2"].setInput( script["add2"]["sum"] )
g = GafferUI.GraphGadget( script )
c = g.connectionGadgets( script["add1"]["sum"] )
self.assertEqual( len( c ), 1 )
self.assertTrue( c[0].srcNodule().plug().isSame( script["add1"]["sum"] ) )
self.assertTrue( c[0].dstNodule().plug().isSame( script["add2"]["op1"] ) )
c = g.connectionGadgets( script["add1"]["sum"], excludedNodes = Gaffer.StandardSet( [ script["add2"] ] ) )
self.assertEqual( len( c ), 0 )
c = g.connectionGadgets( script["add2"]["sum"] )
self.assertEqual( len( c ), 2 )
self.assertTrue( c[0].srcNodule().plug().isSame( script["add2"]["sum"] ) )
self.assertTrue( c[0].dstNodule().plug().isSame( script["add3"]["op1"] ) )
self.assertTrue( c[1].srcNodule().plug().isSame( script["add2"]["sum"] ) )
self.assertTrue( c[1].dstNodule().plug().isSame( script["add4"]["op2"] ) )
c = g.connectionGadgets( script["add2"]["sum"], excludedNodes = Gaffer.StandardSet( [ script["add3"] ] ) )
self.assertEqual( len( c ), 1 )
self.assertTrue( c[0].srcNodule().plug().isSame( script["add2"]["sum"] ) )
self.assertTrue( c[0].dstNodule().plug().isSame( script["add4"]["op2"] ) )
def testNodeConnectionGadgets( self ) :
script = Gaffer.ScriptNode()
script["add1"] = GafferTest.AddNode()
script["add2"] = GafferTest.AddNode()
script["add3"] = GafferTest.AddNode()
script["add4"] = GafferTest.AddNode()
script["add2"]["op1"].setInput( script["add1"]["sum"] )
script["add3"]["op1"].setInput( script["add2"]["sum"] )
script["add4"]["op2"].setInput( script["add2"]["sum"] )
g = GafferUI.GraphGadget( script )
c = g.connectionGadgets( script["add1"] )
self.assertEqual( len( c ), 1 )
self.assertTrue( c[0].srcNodule().plug().isSame( script["add1"]["sum"] ) )
self.assertTrue( c[0].dstNodule().plug().isSame( script["add2"]["op1"] ) )
c = g.connectionGadgets( script["add1"], excludedNodes = Gaffer.StandardSet( [ script["add2"] ] ) )
self.assertEqual( len( c ), 0 )
c = g.connectionGadgets( script["add2"] )
self.assertEqual( len( c ), 3 )
self.assertTrue( c[0].srcNodule().plug().isSame( script["add1"]["sum"] ) )
self.assertTrue( c[0].dstNodule().plug().isSame( script["add2"]["op1"] ) )
self.assertTrue( c[1].srcNodule().plug().isSame( script["add2"]["sum"] ) )
self.assertTrue( c[1].dstNodule().plug().isSame( script["add3"]["op1"] ) )
self.assertTrue( c[2].srcNodule().plug().isSame( script["add2"]["sum"] ) )
self.assertTrue( c[2].dstNodule().plug().isSame( script["add4"]["op2"] ) )
c = g.connectionGadgets( script["add2"], excludedNodes = Gaffer.StandardSet( [ script["add3"] ] ) )
self.assertEqual( len( c ), 2 )
self.assertTrue( c[0].srcNodule().plug().isSame( script["add1"]["sum"] ) )
self.assertTrue( c[0].dstNodule().plug().isSame( script["add2"]["op1"] ) )
self.assertTrue( c[1].srcNodule().plug().isSame( script["add2"]["sum"] ) )
self.assertTrue( c[1].dstNodule().plug().isSame( script["add4"]["op2"] ) )
def testInternalConnectionsNotShown( self ) :
# make sure they're not shown when they exist before graph visualisation
script = Gaffer.ScriptNode()
script["add1"] = GafferTest.AddNode()
script["add1"]["sum"].setInput( script["add1"]["op1"] )
script["add1"]["op1"].setInput( script["add1"]["op2"] )
g = GafferUI.GraphGadget( script )
self.assertEqual( len( g.connectionGadgets( script["add1"] ) ), 0 )
self.assertEqual( g.connectionGadget( script["add1"]["sum"] ), None )
self.assertEqual( g.connectionGadget( script["add1"]["op1"] ), None )
self.assertEqual( g.connectionGadget( script["add1"]["op2"] ), None )
# make sure they're not shown when they're made after graph visualisation
script = Gaffer.ScriptNode()
g = GafferUI.GraphGadget( script )
script["add1"] = GafferTest.AddNode()
script["add1"]["sum"].setInput( script["add1"]["op1"] )
script["add1"]["op1"].setInput( script["add1"]["op2"] )
self.assertEqual( len( g.connectionGadgets( script["add1"] ) ), 0 )
self.assertEqual( g.connectionGadget( script["add1"]["sum"] ), None )
self.assertEqual( g.connectionGadget( script["add1"]["op1"] ), None )
self.assertEqual( g.connectionGadget( script["add1"]["op2"] ), None )
def testConnectionMinimisedAccessors( self ) :
script = Gaffer.ScriptNode()
script["add1"] = GafferTest.AddNode()
script["add2"] = GafferTest.AddNode()
script["add3"] = GafferTest.AddNode()
script["add2"]["op1"].setInput( script["add1"]["sum"] )
script["add3"]["op1"].setInput( script["add2"]["sum"] )
g = GafferUI.GraphGadget( script )
self.assertFalse( g.getNodeInputConnectionsMinimised( script["add1"] ) )
self.assertFalse( g.getNodeInputConnectionsMinimised( script["add2"] ) )
self.assertFalse( g.getNodeInputConnectionsMinimised( script["add3"] ) )
self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add1"] ) )
self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add2"] ) )
self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add3"] ) )
g.setNodeInputConnectionsMinimised( script["add3"], True )
self.assertFalse( g.getNodeInputConnectionsMinimised( script["add1"] ) )
self.assertFalse( g.getNodeInputConnectionsMinimised( script["add2"] ) )
self.assertTrue( g.getNodeInputConnectionsMinimised( script["add3"] ) )
self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add1"] ) )
self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add2"] ) )
self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add3"] ) )
g.setNodeOutputConnectionsMinimised( script["add2"], True )
self.assertFalse( g.getNodeInputConnectionsMinimised( script["add1"] ) )
self.assertFalse( g.getNodeInputConnectionsMinimised( script["add2"] ) )
self.assertTrue( g.getNodeInputConnectionsMinimised( script["add3"] ) )
self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add1"] ) )
self.assertTrue( g.getNodeOutputConnectionsMinimised( script["add2"] ) )
self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add3"] ) )
g.setNodeOutputConnectionsMinimised( script["add2"], False )
self.assertFalse( g.getNodeInputConnectionsMinimised( script["add1"] ) )
self.assertFalse( g.getNodeInputConnectionsMinimised( script["add2"] ) )
self.assertTrue( g.getNodeInputConnectionsMinimised( script["add3"] ) )
self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add1"] ) )
self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add2"] ) )
self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add3"] ) )
g.setNodeInputConnectionsMinimised( script["add3"], False )
self.assertFalse( g.getNodeInputConnectionsMinimised( script["add1"] ) )
self.assertFalse( g.getNodeInputConnectionsMinimised( script["add2"] ) )
self.assertFalse( g.getNodeInputConnectionsMinimised( script["add3"] ) )
self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add1"] ) )
self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add2"] ) )
self.assertFalse( g.getNodeOutputConnectionsMinimised( script["add3"] ) )
def testConnectionMinimisation( self ) :
script = Gaffer.ScriptNode()
script["add1"] = GafferTest.AddNode()
script["add2"] = GafferTest.AddNode()
script["add3"] = GafferTest.AddNode()
g = GafferUI.GraphGadget( script )
g.setNodeOutputConnectionsMinimised( script["add1"], True )
script["add2"]["op1"].setInput( script["add1"]["sum"] )
c1 = g.connectionGadget( script["add2"]["op1"] )
self.assertTrue( c1.getMinimised() )
script["add3"]["op1"].setInput( script["add2"]["sum"] )
c2 = g.connectionGadget( script["add3"]["op1"] )
self.assertFalse( c2.getMinimised() )
g.setNodeInputConnectionsMinimised( script["add2"], True )
self.assertTrue( c1.getMinimised() )
self.assertFalse( c2.getMinimised() )
g.setNodeOutputConnectionsMinimised( script["add1"], False )
self.assertTrue( c1.getMinimised() )
self.assertFalse( c2.getMinimised() )
g.setNodeInputConnectionsMinimised( script["add2"], False )
self.assertFalse( c1.getMinimised() )
self.assertFalse( c2.getMinimised() )
def testNodeGadgetCreatorReturningNull( self ) :
class InvisibleNode( GafferTest.AddNode ) :
def __init__( self, name = "InvisibleNode" ) :
GafferTest.AddNode.__init__( self, name )
IECore.registerRunTimeTyped( InvisibleNode )
GafferUI.NodeGadget.registerNodeGadget( InvisibleNode, lambda node : None )
script = Gaffer.ScriptNode()
g = GafferUI.GraphGadget( script )
script["n1"] = InvisibleNode()
script["n2"] = InvisibleNode()
self.assertEqual( g.nodeGadget( script["n1"] ), None )
self.assertEqual( g.nodeGadget( script["n2"] ), None )
script["n2"]["op1"].setInput( script["n1"]["sum"] )
self.assertEqual( g.connectionGadget( script["n2"]["op1"] ), None )
# in case it wasn't clear, hiding the nodes has zero
# effect on their computations.
script["n1"]["op1"].setValue( 12 )
script["n1"]["op2"].setValue( 13 )
script["n2"]["op2"].setValue( 100 )
self.assertEqual( script["n2"]["sum"].getValue(), 125 )
def testUpstreamNodeGadgets( self ) :
script = Gaffer.ScriptNode()
# a -> b -> c -> e -> f
#           ^
#           |
#           d
script["a"] = GafferTest.AddNode()
script["b"] = GafferTest.AddNode()
script["c"] = GafferTest.AddNode()
script["d"] = GafferTest.AddNode()
script["e"] = GafferTest.AddNode()
script["f"] = GafferTest.AddNode()
script["b"]["op1"].setInput( script["a"]["sum"] )
script["c"]["op1"].setInput( script["b"]["sum"] )
script["c"]["op2"].setInput( script["d"]["sum"] )
script["e"]["op1"].setInput( script["c"]["sum"] )
script["f"]["op1"].setInput( script["e"]["sum"] )
g = GafferUI.GraphGadget( script )
u = [ x.node().relativeName( script ) for x in g.upstreamNodeGadgets( script["c"] ) ]
self.assertEqual( len( u ), 3 )
self.assertEqual( set( u ), set( [ "a", "b", "d" ] ) )
u = [ x.node().relativeName( script ) for x in g.upstreamNodeGadgets( script["f"] ) ]
self.assertEqual( len( u ), 5 )
self.assertEqual( set( u ), set( [ "a", "b", "d", "c", "e" ] ) )
# the degreesOfSeparation argument should limit the depth
# of the search.
u = [ x.node().relativeName( script ) for x in g.upstreamNodeGadgets( script["c"], degreesOfSeparation = 1 ) ]
self.assertEqual( len( u ), 2 )
self.assertEqual( set( u ), set( [ "b", "d" ] ) )
# filtered nodes should be ignored
g.setFilter( Gaffer.StandardSet( [ script["f"], script["e"], script["a"] ] ) )
u = [ x.node().relativeName( script ) for x in g.upstreamNodeGadgets( script["f"] ) ]
self.assertEqual( u, [ "e" ] )
def testDownstreamNodeGadgets( self ) :
script = Gaffer.ScriptNode()
# a -> b -> c -> e -> f
#           |
#           v
#           d
script["a"] = GafferTest.AddNode()
script["b"] = GafferTest.AddNode()
script["c"] = GafferTest.AddNode()
script["d"] = GafferTest.AddNode()
script["e"] = GafferTest.AddNode()
script["f"] = GafferTest.AddNode()
script["b"]["op1"].setInput( script["a"]["sum"] )
script["c"]["op1"].setInput( script["b"]["sum"] )
script["d"]["op1"].setInput( script["c"]["sum"] )
script["e"]["op1"].setInput( script["c"]["sum"] )
script["f"]["op1"].setInput( script["e"]["sum"] )
g = GafferUI.GraphGadget( script )
u = [ x.node().relativeName( script ) for x in g.downstreamNodeGadgets( script["b"] ) ]
self.assertEqual( len( u ), 4 )
self.assertEqual( set( u ), set( [ "c", "d", "e", "f" ] ) )
u = [ x.node().relativeName( script ) for x in g.downstreamNodeGadgets( script["e"] ) ]
self.assertEqual( len( u ), 1 )
self.assertEqual( set( u ), set( [ "f" ] ) )
u = [ x.node().relativeName( script ) for x in g.downstreamNodeGadgets( script["c"], degreesOfSeparation = 1 ) ]
self.assertEqual( len( u ), 2 )
self.assertEqual( set( u ), set( [ "d", "e" ] ) )
def testConnectedNodeGadgets( self ) :
script = Gaffer.ScriptNode()
# a -> b -> c -> e -> f
#           |
#           v
#           d
script["a"] = GafferTest.AddNode()
script["b"] = GafferTest.AddNode()
script["c"] = GafferTest.AddNode()
script["d"] = GafferTest.AddNode()
script["e"] = GafferTest.AddNode()
script["f"] = GafferTest.AddNode()
script["b"]["op1"].setInput( script["a"]["sum"] )
script["c"]["op1"].setInput( script["b"]["sum"] )
script["d"]["op1"].setInput( script["c"]["sum"] )
script["e"]["op1"].setInput( script["c"]["sum"] )
script["f"]["op1"].setInput( script["e"]["sum"] )
g = GafferUI.GraphGadget( script )
# test traversing in both directions
u = [ x.node().relativeName( script ) for x in g.connectedNodeGadgets( script["b"] ) ]
self.assertEqual( set( u ), set( [ "a", "c", "d", "e", "f" ] ) )
u = [ x.node().relativeName( script ) for x in g.connectedNodeGadgets( script["e"] ) ]
self.assertEqual( set( u ), set( [ "a", "b", "c", "d", "f" ] ) )
u = [ x.node().relativeName( script ) for x in g.connectedNodeGadgets( script["c"], degreesOfSeparation = 1 ) ]
self.assertEqual( set( u ), set( [ "b", "d", "e" ] ) )
# test traversing upstream
u = [ x.node().relativeName( script ) for x in g.connectedNodeGadgets( script["c"], direction = Gaffer.Plug.Direction.In ) ]
self.assertEqual( set( u ), set( [ "a", "b" ] ) )
u = [ x.node().relativeName( script ) for x in g.connectedNodeGadgets( script["c"], direction = Gaffer.Plug.Direction.In, degreesOfSeparation = 1 ) ]
self.assertEqual( set( u ), set( [ "b" ] ) )
# test traversing downstream
u = [ x.node().relativeName( script ) for x in g.connectedNodeGadgets( script["c"], direction = Gaffer.Plug.Direction.Out ) ]
self.assertEqual( set( u ), set( [ "d", "e", "f" ] ) )
u = [ x.node().relativeName( script ) for x in g.connectedNodeGadgets( script["c"], direction = Gaffer.Plug.Direction.Out, degreesOfSeparation = 1 ) ]
self.assertEqual( set( u ), set( [ "d", "e" ] ) )
# test that invisible nodes are ignored
g.setFilter( Gaffer.StandardSet( [ script["f"], script["e"], script["c"] ] ) )
u = [ x.node().relativeName( script ) for x in g.connectedNodeGadgets( script["e"] ) ]
self.assertEqual( set( u ), set( [ "f", "c" ] ) )
u = [ x.node().relativeName( script ) for x in g.connectedNodeGadgets( script["e"], direction = Gaffer.Plug.Direction.In ) ]
self.assertEqual( set( u ), set( [ "c" ] ) )
u = [ x.node().relativeName( script ) for x in g.connectedNodeGadgets( script["e"], direction = Gaffer.Plug.Direction.Out ) ]
self.assertEqual( set( u ), set( [ "f" ] ) )
def testSelectionHighlighting( self ) :
script = Gaffer.ScriptNode()
script["a"] = GafferTest.AddNode()
script["b"] = GafferTest.AddNode()
script.selection().add( script["a"] )
g = GafferUI.GraphGadget( script )
self.assertTrue( g.nodeGadget( script["a"] ).getHighlighted() )
self.assertFalse( g.nodeGadget( script["b"] ).getHighlighted() )
script.selection().add( script["b"] )
self.assertTrue( g.nodeGadget( script["a"] ).getHighlighted() )
self.assertTrue( g.nodeGadget( script["b"] ).getHighlighted() )
script.selection().remove( script["a"] )
self.assertFalse( g.nodeGadget( script["a"] ).getHighlighted() )
self.assertTrue( g.nodeGadget( script["b"] ).getHighlighted() )
script.selection().clear()
self.assertFalse( g.nodeGadget( script["a"] ).getHighlighted() )
self.assertFalse( g.nodeGadget( script["b"] ).getHighlighted() )
def testNoDuplicatePositionPlugsAfterPasting( self ) :
script = Gaffer.ScriptNode()
script["n"] = Gaffer.Node()
g = GafferUI.GraphGadget( script )
g.setNodePosition( script["n"], imath.V2f( 1, 2 ) )
self.assertTrue( g.hasNodePosition( script["n"] ) )
script.execute( script.serialise( script, Gaffer.StandardSet( [ script["n"] ] ) ) )
self.assertTrue( "__uiPosition" in script["n1"] )
self.assertFalse( "__uiPosition1" in script["n1"] )
def testErrorAndDelete( self ) :
# Create a script with a dodgy node,
# and a GraphGadget for displaying it.
script = Gaffer.ScriptNode()
script["n"] = GafferTest.BadNode()
graphGadget = GafferUI.GraphGadget( script )
# Arrange for the node to error on
# a background thread.
def f() :
with IECore.IgnoredExceptions( Exception ) :
script["n"]["out1"].getValue()
r = threading.Thread( target = f )
r.start()
r.join()
# Delete the node on the
# foreground thread - this will
# remove the NodeGadget inside
# the GraphGadget.
del script["n"]
# Run idle events. Woe betide any NodeGadget
# implementation assuming it will still be
# alive at arbitrary points in the future!
self.waitForIdle( 1000 )
def testMovePlugWithInputConnection( self ) :
script = Gaffer.ScriptNode()
script["n1"] = Gaffer.Node()
script["n1"]["p"] = Gaffer.Plug()
script["n2"] = Gaffer.Node()
script["n2"]["p"] = Gaffer.Plug()
script["n2"]["p"].setInput( script["n1"]["p"] )
g = GafferUI.GraphGadget( script )
script["n3"] = Gaffer.Node()
script["n3"]["p"] = script["n2"]["p"]
connection = g.connectionGadget( script["n3"]["p"] )
dstNodule = connection.dstNodule()
srcNodule = connection.srcNodule()
self.assertTrue( dstNodule.plug().isSame( script["n3"]["p"] ) )
self.assertTrue( srcNodule.plug().isSame( script["n1"]["p"] ) )
self.assertTrue( g.nodeGadget( script["n1"] ).isAncestorOf( srcNodule ) )
self.assertTrue( g.nodeGadget( script["n3"] ).isAncestorOf( dstNodule ) )
def testMovePlugWithInputConnectionOutsideGraph( self ) :
script = Gaffer.ScriptNode()
script["n1"] = Gaffer.Node()
script["n1"]["p"] = Gaffer.Plug()
script["n2"] = Gaffer.Node()
script["n2"]["p"] = Gaffer.Plug()
script["n2"]["p"].setInput( script["n1"]["p"] )
g = GafferUI.GraphGadget( script )
n3 = Gaffer.Node()
n3["p"] = script["n2"]["p"]
self.assertEqual( g.connectionGadget( n3["p"] ), None )
def testRemoveNoduleWithInputConnection( self ) :
script = Gaffer.ScriptNode()
script["n1"] = Gaffer.Node()
script["n1"]["p"] = Gaffer.Plug()
script["n2"] = Gaffer.Node()
script["n2"]["p"] = Gaffer.Plug()
script["n2"]["p"].setInput( script["n1"]["p"] )
g = GafferUI.GraphGadget( script )
self.assertTrue( g.nodeGadget( script["n2"] ).nodule( script["n2"]["p"] ) is not None )
self.assertTrue( g.connectionGadget( script["n2"]["p"] ) is not None )
Gaffer.Metadata.registerValue( script["n2"]["p"], "nodule:type", "" )
self.assertTrue( g.nodeGadget( script["n2"] ).nodule( script["n2"]["p"] ) is None )
self.assertTrue( g.connectionGadget( script["n2"]["p"] ) is None )
def testRemoveNoduleWithOutputConnections( self ) :
script = Gaffer.ScriptNode()
script["n1"] = Gaffer.Node()
script["n1"]["out"] = Gaffer.Plug( direction = Gaffer.Plug.Direction.Out )
script["n2"] = Gaffer.Node()
script["n2"]["in"] = Gaffer.Plug()
script["n2"]["in"].setInput( script["n1"]["out"] )
g = GafferUI.GraphGadget( script )
c = g.connectionGadget( script["n2"]["in"] )
self.assertTrue( c is not None )
self.assertTrue( c.srcNodule().plug().isSame( script["n1"]["out"] ) )
self.assertTrue( c.dstNodule().plug().isSame( script["n2"]["in"] ) )
Gaffer.Metadata.registerValue( script["n1"]["out"], "nodule:type", "" )
c = g.connectionGadget( script["n2"]["in"] )
self.assertTrue( c is not None )
self.assertTrue( c.srcNodule() is None )
self.assertTrue( c.dstNodule().plug().isSame( script["n2"]["in"] ) )
Gaffer.Metadata.registerValue( script["n1"]["out"], "nodule:type", "GafferUI::StandardNodule" )
c = g.connectionGadget( script["n2"]["in"] )
self.assertTrue( c is not None )
self.assertTrue( c.srcNodule().plug().isSame( script["n1"]["out"] ) )
self.assertTrue( c.dstNodule().plug().isSame( script["n2"]["in"] ) )
def testAddNoduleWithInputConnection( self ) :
script = Gaffer.ScriptNode()
script["n"] = Gaffer.Node()
script["n"]["in"] = Gaffer.Plug()
script["n"]["out"] = Gaffer.Plug( direction = Gaffer.Plug.Direction.Out )
script["n"]["out"].setInput( script["n"]["in"] )
Gaffer.Metadata.registerValue( script["n"]["out"], "nodule:type", "" )
g = GafferUI.GraphGadget( script )
self.assertTrue( g.nodeGadget( script["n"] ).nodule( script["n"]["out"] ) is None )
self.assertTrue( g.connectionGadget( script["n"]["out"] ) is None )
Gaffer.Metadata.registerValue( script["n"]["out"], "nodule:type", "GafferUI::StandardNodule" )
self.assertTrue( g.nodeGadget( script["n"] ).nodule( script["n"]["out"] ) is not None )
self.assertTrue( g.connectionGadget( script["n"]["out"] ) is None )
def testAddNoduleWithOutputConnection( self ) :
script = Gaffer.ScriptNode()
script["n1"] = Gaffer.Node()
script["n1"]["out"] = Gaffer.Plug( direction = Gaffer.Plug.Direction.Out )
script["n2"] = Gaffer.Node()
script["n2"]["in"] = Gaffer.Plug()
script["n2"]["in"].setInput( script["n1"]["out"] )
Gaffer.Metadata.registerValue( script["n1"]["out"], "nodule:type", "" )
g = GafferUI.GraphGadget( script )
self.assertTrue( g.nodeGadget( script["n1"] ).nodule( script["n1"]["out"] ) is None )
self.assertTrue( g.connectionGadget( script["n2"]["in"] ) is not None )
self.assertTrue( g.connectionGadget( script["n2"]["in"] ).srcNodule() is None )
Gaffer.Metadata.registerValue( script["n1"]["out"], "nodule:type", "GafferUI::StandardNodule" )
self.assertTrue( g.nodeGadget( script["n1"] ).nodule( script["n1"]["out"] ) is not None )
self.assertTrue( g.connectionGadget( script["n2"]["in"] ) is not None )
self.assertTrue( g.connectionGadget( script["n2"]["in"] ).srcNodule() is not None )
def testRemoveNonNodulePlug( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["p"] = Gaffer.Plug()
Gaffer.Metadata.registerValue( s["n"]["p"], "nodule:type", "" )
g = GafferUI.GraphGadget( s )
self.assertTrue( g.nodeGadget( s["n"] ).nodule( s["n"]["p"] ) is None )
# Once upon a time, this would crash.
del s["n"]["p"]
def testEnabledException( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferTest.AddNode()
s["e"] = Gaffer.Expression()
s["e"].setExpression( "parent['n']['enabled'] = undefinedVariable" )
g = GafferUI.GraphGadget( s )
self.assertTrue( g.nodeGadget( s["n"] ) is not None )
def testLayoutAccessors( self ) :
s = Gaffer.ScriptNode()
g = GafferUI.GraphGadget( s )
l = g.getLayout()
self.assertTrue( isinstance( l, GafferUI.StandardGraphLayout ) )
l2 = GafferUI.StandardGraphLayout()
g.setLayout( l2 )
self.assertTrue( g.getLayout().isSame( l2 ) )
g.setLayout( l )
self.assertTrue( g.getLayout().isSame( l ) )
def testUnpositionedNodeGadgets( self ) :
s = Gaffer.ScriptNode()
g = GafferUI.GraphGadget( s )
s["n"] = Gaffer.Node()
self.assertEqual( g.unpositionedNodeGadgets(), [ g.nodeGadget( s["n"] ) ] )
g.setNodePosition( s["n"], imath.V2f( 0 ) )
self.assertEqual( g.unpositionedNodeGadgets(), [] )
def testInputConnectionMaintainedOnNoduleMove( self ) :
s = Gaffer.ScriptNode()
g = GafferUI.GraphGadget( s )
s["n1"] = GafferTest.AddNode()
s["n2"] = GafferTest.AddNode()
s["n2"]["op1"].setInput( s["n1"]["sum"] )
self.assertTrue( g.connectionGadget( s["n2"]["op1"] ) is not None )
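# Move the destination nodule to each side of the node; the connection gadget
# should be recreated each time with both endpoints intact.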
for section in ( "top", "bottom", "top", "left", "right", "left", "bottom", "right" ) :
Gaffer.Metadata.registerValue( s["n2"]["op1"], "noduleLayout:section", section )
connection = g.connectionGadget( s["n2"]["op1"] )
self.assertTrue( connection is not None )
self.assertTrue( connection.srcNodule() is not None )
self.assertTrue( connection.srcNodule().isSame( g.nodeGadget( s["n1"] ).nodule( s["n1"]["sum"] ) ) )
self.assertTrue( connection.dstNodule().isSame( g.nodeGadget( s["n2"] ).nodule( s["n2"]["op1"] ) ) )
def testOutputConnectionMaintainedOnNoduleMove( self ) :
s = Gaffer.ScriptNode()
g = GafferUI.GraphGadget( s )
s["n1"] = GafferTest.AddNode()
s["n2"] = GafferTest.AddNode()
s["n2"]["op1"].setInput( s["n1"]["sum"] )
self.assertTrue( g.connectionGadget( s["n2"]["op1"] ) is not None )
for section in ( "top", "bottom", "top", "left", "right", "left", "bottom", "right" ) :
Gaffer.Metadata.registerValue( s["n1"]["sum"], "noduleLayout:section", section )
connection = g.connectionGadget( s["n2"]["op1"] )
self.assertTrue( connection is not None )
self.assertTrue( connection.srcNodule() is not None )
self.assertTrue( connection.srcNodule().isSame( g.nodeGadget( s["n1"] ).nodule( s["n1"]["sum"] ) ) )
self.assertTrue( connection.dstNodule().isSame( g.nodeGadget( s["n2"] ).nodule( s["n2"]["op1"] ) ) )
def testInputConnectionMaintainedOnNestedNoduleMove( self ) :
s = Gaffer.ScriptNode()
g = GafferUI.GraphGadget( s )
s["n1"] = GafferTest.AddNode()
s["n2"] = GafferTest.ArrayPlugNode()
Gaffer.Metadata.registerValue( s["n2"]["in"], "nodule:type", "GafferUI::CompoundNodule" )
s["n2"]["in"][0].setInput( s["n1"]["sum"] )
self.assertTrue( g.connectionGadget( s["n2"]["in"][0] ) is not None )
for section in ( "top", "bottom", "top", "left", "right", "left", "bottom", "right" ) :
Gaffer.Metadata.registerValue( s["n2"]["in"], "noduleLayout:section", section )
connection = g.connectionGadget( s["n2"]["in"][0] )
self.assertTrue( connection is not None )
self.assertTrue( connection.srcNodule() is not None )
self.assertTrue( connection.srcNodule().isSame( g.nodeGadget( s["n1"] ).nodule( s["n1"]["sum"] ) ) )
self.assertTrue( connection.dstNodule().isSame( g.nodeGadget( s["n2"] ).nodule( s["n2"]["in"][0] ) ) )
def testNodeGadgetMetadataChanges( self ) :
s = Gaffer.ScriptNode()
g = GafferUI.GraphGadget( s )
s["n1"] = GafferTest.AddNode()
s["n2"] = GafferTest.AddNode()
s["n2"]["op1"].setInput( s["n1"]["sum"] )
def assertBothVisible() :
ng1 = g.nodeGadget( s["n1"] )
ng2 = g.nodeGadget( s["n2"] )
c = g.connectionGadget( s["n2"]["op1"] )
self.assertTrue( isinstance( ng1, GafferUI.StandardNodeGadget ) )
self.assertTrue( isinstance( ng2, GafferUI.StandardNodeGadget ) )
self.assertTrue( isinstance( c, GafferUI.StandardConnectionGadget ) )
self.assertTrue( c.srcNodule().isSame( ng1.nodule( s["n1"]["sum"] ) ) )
self.assertTrue( c.dstNodule().isSame( ng2.nodule( s["n2"]["op1"] ) ) )
assertBothVisible()
Gaffer.Metadata.registerValue( s["n1"], "nodeGadget:type", "" )
def assertN1Hidden() :
ng1 = g.nodeGadget( s["n1"] )
ng2 = g.nodeGadget( s["n2"] )
c = g.connectionGadget( s["n2"]["op1"] )
self.assertTrue( ng1 is None )
self.assertTrue( isinstance( ng2, GafferUI.StandardNodeGadget ) )
self.assertTrue( isinstance( c, GafferUI.StandardConnectionGadget ) )
self.assertTrue( c.srcNodule() is None )
self.assertTrue( c.dstNodule().isSame( ng2.nodule( s["n2"]["op1"] ) ) )
assertN1Hidden()
Gaffer.Metadata.registerValue( s["n2"], "nodeGadget:type", "" )
def assertBothHidden() :
self.assertTrue( g.nodeGadget( s["n1"] ) is None )
self.assertTrue( g.nodeGadget( s["n2"] ) is None )
self.assertTrue( g.connectionGadget( s["n2"]["op1"] ) is None )
assertBothHidden()
Gaffer.Metadata.registerValue( s["n2"], "nodeGadget:type", "GafferUI::StandardNodeGadget" )
assertN1Hidden()
Gaffer.Metadata.registerValue( s["n1"], "nodeGadget:type", "GafferUI::StandardNodeGadget" )
assertBothVisible()
def testConnectionGadgetsIncludesDanglingConnections( self ) :
s = Gaffer.ScriptNode()
s["n1"] = Gaffer.Node()
s["n1"]["c"] = Gaffer.Color3fPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n2"] = Gaffer.Node()
s["n2"]["c"] = Gaffer.Color3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n2"]["c"]["r"].setInput( s["n1"]["c"]["r"] )
Gaffer.Metadata.registerValue( s["n2"]["c"], "compoundNumericNodule:childrenVisible", True )
g = GafferUI.GraphGadget( s )
c = g.connectionGadgets( s["n2"]["c"]["r"] )
self.assertEqual( len( c ), 1 )
self.assertEqual( c[0].dstNodule(), g.nodeGadget( s["n2"] ).nodule( s["n2"]["c"]["r"] ) )
self.assertIsNone( c[0].srcNodule() )
def testChangeNodeGadgetForUnviewedNode( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["n"] = Gaffer.Node()
g = GafferUI.GraphGadget( s )
self.assertIsNotNone( g.nodeGadget( s["b"] ) )
self.assertIsNone( g.nodeGadget( s["b"]["n"] ) )
Gaffer.Metadata.registerValue( s["b"]["n"], "nodeGadget:type", "GafferUI::AuxiliaryNodeGadget" )
self.assertIsNone( g.nodeGadget( s["b"]["n"] ) )
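# _activePlugsAndNodes() returns the upstream plugs and nodes that are active when
# evaluating the given plug in the given context. The tests below exercise this for
# switches, expressions, boxes and loops.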
def testActivePlugsAndNodes( self ) :
s = Gaffer.ScriptNode()
# Basic network with single and multiple inputs
s["add1"] = GafferTest.AddNode()
s["add2"] = GafferTest.AddNode()
s["add3"] = GafferTest.AddNode()
s["add4"] = GafferTest.AddNode()
s["add1"]["op1"].setInput( s["add2"]["sum"] )
s["add1"]["op2"].setInput( s["add3"]["sum"] )
s["add2"]["op1"].setInput( s["add4"]["sum"] )
c = Gaffer.Context()
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["add1"]["sum"], c )
self.assertEqual( set( plugs ), set( [ s["add1"]["op1"], s["add1"]["op2"], s["add2"]["op1"] ] ) )
self.assertEqual( set( nodes ), set( [ s["add1"], s["add2"], s["add3"], s["add4"] ] ) )
# Test disabling
s["add2"]["op1"].setInput( None )
s["add1"]["enabled"].setValue( False )
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["add1"]["sum"], c )
self.assertEqual( set( plugs ), set( [ s["add1"]["op1"] ] ) )
self.assertEqual( set( nodes ), set( [ s["add1"], s["add2"] ] ) )
s["add1"]["expr"] = Gaffer.Expression()
s["add1"]["expr"].setExpression( 'parent["enabled"] = True', "python" )
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["add1"]["sum"], c )
self.assertEqual( set( plugs ), set( [ s["add1"]["op1"], s["add1"]["op2"], s["add1"]["enabled"] ] ) )
self.assertEqual( set( nodes ), set( [ s["add1"], s["add2"], s["add3"], s["add1"]["expr"] ] ) )
s["add1"]["expr"].setExpression( 'parent["enabled"] = False', "python" )
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["add1"]["sum"], c )
self.assertEqual( set( plugs ), set( [ s["add1"]["op1"], s["add1"]["enabled"] ] ) )
self.assertEqual( set( nodes ), set( [ s["add1"], s["add2"], s["add1"]["expr"] ] ) )
del s["add1"]["expr"]
s["add1"]["enabled"].setValue( True )
# Setup switch instead
s["add1"]["op1"].setInput( None )
s["add1"]["op2"].setInput( None )
s["switch"] = Gaffer.Switch()
s["switch"].setup( Gaffer.FloatPlug() )
s["switch"]["in"][0].setInput( s["add3"]["sum"] )
s["switch"]["in"][1].setInput( s["add4"]["sum"] )
# Test with index set to a hardcoded value, which will just follow the connections created inside
# the Switch when the index isn't computed.
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["switch"]["out"], c )
self.assertEqual( set( plugs ), set( [ s["switch"]["in"][0], s["switch"]["out"] ] ) )
self.assertEqual( set( nodes ), set( [ s["switch"], s["add3"] ] ) )
s["switch"]["index"].setValue( 1 )
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["switch"]["out"], c )
self.assertEqual( set( plugs ), set( [ s["switch"]["in"][1], s["switch"]["out"] ] ) )
self.assertEqual( set( nodes ), set( [ s["switch"], s["add4"] ] ) )
s["switch"]["enabled"].setValue( False )
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["switch"]["out"], c )
self.assertEqual( set( plugs ), set( [ s["switch"]["in"][0], s["switch"]["out"] ] ) )
self.assertEqual( set( nodes ), set( [ s["switch"], s["add3"] ] ) )
s["switch"]["enabled"].setValue( True )
# Now set with an expression, so that there is actually a compute happening on the Switch to track
s["switch"]["expr"] = Gaffer.Expression()
s["switch"]["expr"].setExpression( 'parent["index"] = context["foo"]', "python" )
# Error during evaluation falls back to taking all inputs
with IECore.CapturingMessageHandler() as mh :
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["switch"]["out"], c )
self.assertEqual( len( mh.messages ), 1 )
self.assertEqual( mh.messages[0].level, IECore.Msg.Level.Warning )
self.assertEqual( mh.messages[0].context, "Gaffer" )
self.assertEqual( mh.messages[0].message, 'Error during graph active state visualisation: switch.expr.__execute : Context has no variable named "foo"' )
self.assertEqual( set( plugs ), set( [ s["switch"]["in"][0], s["switch"]["in"][1], s["switch"]["index"] ] ) )
self.assertEqual( set( nodes ), set( [ s["switch"], s["add3"], s["add4"], s["switch"]["expr"] ] ) )
# Test setting switch index from context
c["foo"] = 0
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["switch"]["out"], c )
self.assertEqual( set( plugs ), set( [ s["switch"]["in"][0], s["switch"]["index"] ] ) )
self.assertEqual( set( nodes ), set( [ s["switch"], s["add3"], s["switch"]["expr"] ] ) )
c["foo"] = 1
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["switch"]["out"], c )
self.assertEqual( set( plugs ), set( [ s["switch"]["in"][1], s["switch"]["index"] ] ) )
self.assertEqual( set( nodes ), set( [ s["switch"], s["add4"], s["switch"]["expr"] ] ) )
# Also test disabling
s["switch"]["expr2"] = Gaffer.Expression()
s["switch"]["expr2"].setExpression( 'parent["enabled"] = False', "python" )
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["switch"]["out"], c )
self.assertEqual( set( plugs ), set( [ s["switch"]["in"][0], s["switch"]["enabled"] ] ) )
self.assertEqual( set( nodes ), set( [ s["switch"], s["add3"], s["switch"]["expr2"] ] ) )
s["switch"]["expr2"].setExpression( 'parent["enabled"] = True', "python" )
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["switch"]["out"], c )
self.assertEqual( set( plugs ), set( [ s["switch"]["in"][1], s["switch"]["enabled"], s["switch"]["index"] ] ) )
self.assertEqual( set( nodes ), set( [ s["switch"], s["add4"], s["switch"]["expr2"], s["switch"]["expr"] ] ) )
# And NameSwitch
s["nameSwitch"] = Gaffer.NameSwitch()
s["nameSwitch"].setup( Gaffer.FloatPlug() )
s["nameSwitch"]["in"].resize( 3 )
s["nameSwitch"]["in"][0]["value"].setInput( s["add3"]["sum"] ) # Default
s["nameSwitch"]["in"][1]["name"].setValue( "AAA" )
s["nameSwitch"]["in"][1]["value"].setInput( s["add1"]["sum"] )
s["nameSwitch"]["in"][2]["value"].setInput( s["add2"]["sum"] )
s["nameSwitch"]["expr"] = Gaffer.Expression()
s["nameSwitch"]["expr"].setExpression( 'parent["selector"] = context["foo"]', "python" )
# Set one name with an expression - the output will always depend on this name
s["nameSwitch"]["expr2"] = Gaffer.Expression()
s["nameSwitch"]["expr2"].setExpression( 'parent["in"]["in2"]["name"] = "BBB"', "python" )
# Test default output
c["foo"] = "XXX"
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["nameSwitch"]["out"], c )
self.assertEqual( set( plugs ), set( [ s["nameSwitch"]["in"]["in0"]["value"], s["nameSwitch"]["selector"], s["nameSwitch"]["in"]["in2"]["name"], s["nameSwitch"]["__index"] ] ) )
self.assertEqual( set( nodes ), set( [ s["nameSwitch"], s["add3"], s["nameSwitch"]["expr"], s["nameSwitch"]["expr2"] ] ) )
# Test first input
c["foo"] = "AAA"
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["nameSwitch"]["out"], c )
self.assertEqual( set( plugs ), set( [ s["nameSwitch"]["in"]["in1"]["value"], s["nameSwitch"]["selector"], s["nameSwitch"]["in"]["in2"]["name"], s["nameSwitch"]["__index"] ] ) )
self.assertEqual( set( nodes ), set( [ s["nameSwitch"], s["add1"], s["nameSwitch"]["expr"], s["nameSwitch"]["expr2"] ] ) )
# Test second input
c["foo"] = "BBB"
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["nameSwitch"]["out"], c )
self.assertEqual( set( plugs ), set( [ s["nameSwitch"]["in"]["in2"]["value"], s["nameSwitch"]["selector"], s["nameSwitch"]["in"]["in2"]["name"], s["nameSwitch"]["__index"] ] ) )
self.assertEqual( set( nodes ), set( [ s["nameSwitch"], s["add2"], s["nameSwitch"]["expr"], s["nameSwitch"]["expr2"] ] ) )
# Test same result if just querying value of output
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["nameSwitch"]["out"]["value"], c )
self.assertEqual( set( plugs ), set( [ s["nameSwitch"]["in"]["in2"]["value"], s["nameSwitch"]["selector"], s["nameSwitch"]["in"]["in2"]["name"], s["nameSwitch"]["__index"] ] ) )
self.assertEqual( set( nodes ), set( [ s["nameSwitch"], s["add2"], s["nameSwitch"]["expr"], s["nameSwitch"]["expr2"] ] ) )
# The name of the output only depends on the names of the inputs
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["nameSwitch"]["out"]["name"], c )
self.assertEqual( set( plugs ), set( [ s["nameSwitch"]["selector"], s["nameSwitch"]["in"]["in2"]["name"], s["nameSwitch"]["__index"] ] ) )
self.assertEqual( set( nodes ), set( [ s["nameSwitch"], s["nameSwitch"]["expr"], s["nameSwitch"]["expr2"] ] ) )
# Test ContextProcessor
s["contextVariables"] = Gaffer.ContextVariables()
s["contextVariables"].setup( Gaffer.FloatPlug() )
s["contextVariables"]["variables"]["setVar"] = Gaffer.NameValuePlug( "foo", Gaffer.StringPlug( "value", defaultValue = 'AAA' ), True )
s["contextVariables"]["in"].setInput( s["nameSwitch"]["out"]["value"] )
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["contextVariables"]["out"], c )
self.assertEqual( set( plugs ), set( [ s["contextVariables"]["in"], s["nameSwitch"]["in"]["in1"]["value"], s["nameSwitch"]["selector"], s["nameSwitch"]["in"]["in2"]["name"], s["nameSwitch"]["__index"] ] ) )
self.assertEqual( set( nodes ), set( [ s["contextVariables"], s["nameSwitch"], s["add1"], s["nameSwitch"]["expr"], s["nameSwitch"]["expr2"] ] ) )
s["contextVariables"]["enabled"].setValue( False )
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["contextVariables"]["out"], c )
self.assertEqual( set( plugs ), set( [ s["contextVariables"]["in"], s["nameSwitch"]["in"]["in2"]["value"], s["nameSwitch"]["selector"], s["nameSwitch"]["in"]["in2"]["name"], s["nameSwitch"]["__index"] ] ) )
self.assertEqual( set( nodes ), set( [ s["contextVariables"], s["nameSwitch"], s["add2"], s["nameSwitch"]["expr"], s["nameSwitch"]["expr2"] ] ) )
# The behaviour of a Box is just a consequence of the previous behaviour, but it's somewhat complex
# and confusing, so I've got some specific tests for it
s["add5"] = GafferTest.AddNode()
s["add6"] = GafferTest.AddNode()
s["add3"]["op1"].setInput( s["add5"]["sum"] )
s["add4"]["op1"].setInput( s["add6"]["sum"] )
s["add1"]["op1"].setInput( s["add3"]["sum"] )
s["add2"]["op1"].setInput( s["add4"]["sum"] )
box = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["add3"], s["add4"] ] ) )
# The box itself counts as active, plus the input which is used by the internal network
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["add1"]["sum"], c )
self.assertEqual( set( plugs ), set( [ s["add1"]["op1"], box["add3"]["op1"], box["op1"], box["sum"] ] ) )
self.assertEqual( set( nodes ), set( [ box, s["add1"], box["add3"], s["add5"] ] ) )
# Add BoxIO nodes
Gaffer.BoxIO.insert( box )
# And add a passThrough connection
box["BoxOut1"]["passThrough"].setInput( box["BoxIn"]["out"] )
# Now we've got a bunch of intermediate BoxIO and Switch nodes
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["add2"]["sum"], c )
self.assertEqual( set( plugs ), set( [ s["add2"]["op1"], box["add4"]["op1"], box["op2"], box["sum1"], box["BoxIn1"]["__in"], box["BoxOut1"]["in"], box["BoxOut1"]["__out"], box["BoxOut1"]["__switch"]["in"]["in1"], box["BoxOut1"]["__switch"]["out"], box["BoxIn1"]["out"] ] ) )
self.assertEqual( set( nodes ), set( [ box, s["add2"], box["add4"], s["add6"], box["BoxOut1"]["__switch"], box["BoxOut1"], box["BoxIn1"] ] ) )
# Disabling uses the passThrough instead
box["enabled"].setValue( False )
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["add2"]["sum"], c )
self.assertEqual( set( plugs ), set( [ s["add2"]["op1"], box["op1"], box["sum1"], box["BoxIn"]["__in"], box["BoxOut1"]["passThrough"], box["BoxOut1"]["__out"], box["BoxOut1"]["__switch"]["in"]["in0"], box["BoxOut1"]["__switch"]["out"], box["BoxIn"]["out"] ] ) )
self.assertEqual( set( nodes ), set( [ box, s["add2"], s["add5"], box["BoxOut1"]["__switch"], box["BoxOut1"], box["BoxIn"] ] ) )
# If we disconnect the passThrough, nothing gets through
box["BoxOut1"]["passThrough"].setInput( None )
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["add2"]["sum"], c )
self.assertEqual( set( plugs ), set( [ s["add2"]["op1"], box["sum1"], box["BoxOut1"]["__out"], box["BoxOut1"]["__switch"]["in"]["in0"], box["BoxOut1"]["__switch"]["out"] ] ) )
self.assertEqual( set( nodes ), set( [ box, s["add2"], box["BoxOut1"]["__switch"], box["BoxOut1"] ] ) )
# Test a box with promoted array plug
s["add7"] = GafferTest.AddNode()
s["add1"]["op1"].setInput( s["add7"]["sum"] )
box2 = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["add7"] ] ) )
box2["add7"]["arrayInput"] = Gaffer.ArrayPlug( "arrayInput", Gaffer.Plug.Direction.In, Gaffer.FloatPlug() )
box2.promotePlug( box2["add7"]["arrayInput"] )
box2["arrayInput"][0].setInput( s["add5"]["sum"] )
box2["arrayInput"][1].setInput( s["add6"]["sum"] )
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["add1"]["sum"], c )
self.assertEqual( set( plugs ), set( [ s["add1"]["op1"], box2["sum"], box2["add7"]["arrayInput"], box2["arrayInput"][0], box2["arrayInput"][1] ] ) )
self.assertEqual( set( nodes ), set( [ s["add1"], box2, box2["add7"], s["add5"], s["add6"] ] ) )
# Test Loop
s["loopSwitch"] = Gaffer.Switch()
s["loopSwitch"].setup( Gaffer.FloatPlug() )
for i in range( 5 ):
s["loopSwitchIn%i" % i ] = GafferTest.AddNode()
s["loopSwitchIn%i" % i ]["op1"].setValue( (i+1) * 10 ** i )
s["loopSwitch"]["in"][-1].setInput( s["loopSwitchIn%i" % i ]["sum"] )
s["loopSwitch"]["expr"] = Gaffer.Expression()
s["loopSwitch"]["expr"].setExpression( 'parent["index"] = context.get("loop:index", 4)', "python" )
s["start"] = GafferTest.AddNode()
s["start"]["op1"].setValue( 900000 )
s["loop"] = Gaffer.Loop()
s["loop"].setup( Gaffer.FloatPlug() )
s["loop"]["in"].setInput( s["start"]["sum"] )
s["merge"] = GafferTest.AddNode()
s["merge"]["op1"].setInput( s["loop"]["previous"] )
s["loop"]["next"].setInput( s["merge"]["sum"] )
s["merge"]["op2"].setInput( s["loopSwitch"]["out"] )
s["loop"]["iterations"].setValue( 4 )
self.assertEqual( s["loop"]["out"].getValue(), 904321.0 )
# We don't track through the loop for every separate loop context, we just fall back to a naive
# traversal that takes every input
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["loop"]["out"], c )
self.assertEqual( set( nodes ), set( [ s["loop"], s["start"], s["merge"], s["loopSwitch"], s["loopSwitch"]["expr"] ] + [ s["loopSwitchIn%i"%i] for i in range(5) ] ) )
# Same deal if we start from a node in the middle of the loop iteration
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["merge"]["sum"], c )
self.assertEqual( set( nodes ), set( [ s["loop"], s["start"], s["merge"], s["loopSwitch"], s["loopSwitch"]["expr"] ] + [ s["loopSwitchIn%i"%i] for i in range(5) ] ) )
def testActiveCompoundPlugs( self ):
s = Gaffer.ScriptNode()
c = Gaffer.Context()
p = Gaffer.Plug()
p.addChild( Gaffer.StringPlug() )
p.addChild( Gaffer.FloatPlug() )
s["contextVariables"] = Gaffer.ContextVariables()
s["contextVariables"].setup( p )
s["dummyNode"] = Gaffer.ComputeNode()
s["dummyNode"]["in"] = p.createCounterpart( "in", Gaffer.Plug.Direction.In )
s["dummyNode"]["out"] = p.createCounterpart( "out", Gaffer.Plug.Direction.Out )
s["dummyNode"]["in"].setInput( s["contextVariables"]["out"] )
# Check that we don't explicitly include child connections when the parents are connected
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["dummyNode"]["out"], c )
self.assertEqual( set( plugs ), set( [ s["dummyNode"]["in"] ] ) )
self.assertEqual( set( nodes ), set( [ s["contextVariables"], s["dummyNode"] ] ) )
s["switch"] = Gaffer.Switch()
s["switch"].setup( p )
s["switch"]["in"][0].setInput( s["dummyNode"]["out"] )
s["switch"]["in"][1].setInput( s["contextVariables"]["out"] )
s["switch"]["expr"] = Gaffer.Expression()
s["switch"]["expr"].setExpression( 'parent["index"] = 1' )
# Check that we take only the children of active inputs
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["switch"]["out"], c )
self.assertEqual( set( plugs ), set( [ s["switch"]["in"]["in1"], s["switch"]["index"] ] ) )
self.assertEqual( set( nodes ), set( [ s["contextVariables"], s["switch"], s["switch"]["expr"] ] ) )
# Test that if a compound plug gets split, we take the children, even from one of the special case nodes
s["dummyNode"]["in"]["FloatPlug"].setInput( None )
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["dummyNode"]["out"], c )
self.assertEqual( set( plugs ), set( [ s["dummyNode"]["in"]["StringPlug"] ] ) )
self.assertEqual( set( nodes ), set( [ s["contextVariables"], s["dummyNode"] ] ) )
s["switch"]["in"][1]["StringPlug"].setInput( None )
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["switch"]["out"], c )
self.assertEqual( set( plugs ), set( [ s["switch"]["in"]["in1"]["FloatPlug"], s["switch"]["index"] ] ) )
self.assertEqual( set( nodes ), set( [ s["contextVariables"], s["switch"], s["switch"]["expr"] ] ) )
s["dummyNode"]["in"].setInput( None )
s["contextVariables"]["in"]["StringPlug"].setInput( s["dummyNode"]["out"]["StringPlug"] )
plugs, nodes = GafferUI.GraphGadget._activePlugsAndNodes( s["contextVariables"]["out"], c )
self.assertEqual( set( plugs ), set( [ s["contextVariables"]["in"]["StringPlug"] ] ) )
self.assertEqual( set( nodes ), set( [ s["contextVariables"], s["dummyNode"] ] ) )
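# Condition used by testActivePlugsAndNodesCancellation to synchronise the main
# thread with the expression evaluating on the background thread.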
testActivePlugsAndNodesCancellationCondition = threading.Condition()
def testActivePlugsAndNodesCancellation( self ) :
def testThreadFunc( plug, canceller, testInstance ):
with testInstance.assertRaises( IECore.Cancelled ):
GafferUI.GraphGadget._activePlugsAndNodes( plug, Gaffer.Context( Gaffer.Context(), canceller ) )
s = Gaffer.ScriptNode()
# Test a computation that supports cancellation
s["add"] = GafferTest.AddNode()
s["addExpr"] = Gaffer.Expression()
s["addExpr"].setExpression( inspect.cleandoc(
"""
import time
import Gaffer
import GafferUITest.GraphGadgetTest
with GafferUITest.GraphGadgetTest.testActivePlugsAndNodesCancellationCondition:
GafferUITest.GraphGadgetTest.testActivePlugsAndNodesCancellationCondition.notify()
while True:
IECore.Canceller.check( context.canceller() )
parent["add"]["enabled"] = True
"""
) )
canceller = IECore.Canceller()
with GraphGadgetTest.testActivePlugsAndNodesCancellationCondition:
t = Gaffer.ParallelAlgo.callOnBackgroundThread(
s["add"]["sum"], lambda : testThreadFunc( s["add"]["sum"], canceller, self )
)
GraphGadgetTest.testActivePlugsAndNodesCancellationCondition.wait()
canceller.cancel()
t.wait()
def assertHighlighting( self, graphGadget, expectedState ) :
# Highlighting is performed as a background task, so we have
# to wait for it to finish. We allow up to 2s for this to happen,
# to account for CI workers under heavy load.
timeout = time.time() + 2
while True :
self.waitForIdle()
actualState = {
k : not graphGadget.nodeGadget( graphGadget.getRoot()[k] ).getContents().getDimmed()
for k in expectedState.keys()
}
if actualState == expectedState :
return
elif time.time() > timeout :
# Emit descriptive failure
self.assertEqual( actualState, expectedState )
def testDirtyTrackingForInitialFocusNode( self ) :
script = Gaffer.ScriptNode()
script["add1"] = GafferTest.AddNode()
script["add2"] = GafferTest.AddNode()
script["switch"] = Gaffer.Switch()
script["switch"].setup( script["add1"]["op1"] )
script["switch"]["in"][0].setInput( script["add1"]["sum"] )
script["switch"]["in"][1].setInput( script["add2"]["sum"] )
script.setFocus( script["switch"] )
with GafferUI.Window() as window :
graphGadget = GafferUI.GraphGadget( script )
gadgetWidget = GafferUI.GadgetWidget( graphGadget )
# Initially we expect the left branch of the switch to be highlighted.
window.setVisible( True )
self.assertHighlighting( graphGadget, { "switch" : True, "add1" : True, "add2" : False } )
# If we switch to the right branch, we expect the highlighting to
# follow suit.
script["switch"]["index"].setValue( 1 )
self.assertHighlighting( graphGadget, { "switch" : True, "add1" : False, "add2" : True } )
if __name__ == "__main__":
unittest.main()
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import threading
import time
import ast
from urllib.parse import urlparse
from urllib.request import urlopen
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from functools import reduce
from nacl import encoding, public
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.mgmt.web.models import KeyInfo
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait, get_file_json
from azure.cli.core.util import get_az_user_agent, send_raw_request
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.core.azclierror import (ResourceNotFoundError, RequiredArgumentMissingError, ValidationError,
CLIInternalError, UnclassifiedUserFault, AzureResponseError,
ArgumentUsageError, MutuallyExclusiveArgumentError)
from .tunnel import TunnelServer
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES
from ._client_factory import web_client_factory, ex_handler_factory, providers_client_factory
from ._appservice_utils import _generic_site_operation, _generic_settings_operation
from .utils import (_normalize_sku,
get_sku_name,
retryable_method,
raise_missing_token_suggestion,
_get_location_from_resource_group,
_list_app,
_rename_server_farm_props,
_get_location_from_webapp, _normalize_location)
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
check_resource_group_exists, set_location, get_site_availability, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src, get_current_stack_from_runtime, generate_default_app_name)
from ._constants import (FUNCTIONS_STACKS_API_JSON_PATHS, FUNCTIONS_STACKS_API_KEYS,
FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX,
NODE_EXACT_VERSION_DEFAULT, RUNTIME_STACKS, FUNCTIONS_NO_V2_REGIONS, PUBLIC_CLOUD,
LINUX_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH, WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH)
from ._github_oauth import (get_github_access_token)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities.
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements,too-many-branches
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
using_webapp_up=False, language=None, assign_identities=None,
role='Contributor', scope=None, vnet=None, subnet=None):
from azure.mgmt.web.models import Site
SiteConfig, SkuDescription, NameValuePair = cmd.get_models(
'SiteConfig', 'SkuDescription', 'NameValuePair')
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
client = web_client_factory(cmd.cli_ctx)
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(name=plan, resource_group_name=resource_group_name)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist in the resource group '{}".format(plan, resource_group_name))
is_linux = plan_info.reserved
node_default_version = NODE_EXACT_VERSION_DEFAULT
location = plan_info.location
    # Keep the existing app settings when 'create' is re-run against an existing webapp name.
name_validation = get_site_availability(cmd, name)
if not name_validation.name_available:
if name_validation.reason == 'Invalid':
raise CLIError(name_validation.message)
logger.warning("Webapp '%s' already exists. The command will use the existing app's settings.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that "
"the app is a part of the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp '{}' exists in resource group '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
existing_app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name,
name, 'list_application_settings')
settings = []
for k, v in existing_app_settings.properties.items():
settings.append(NameValuePair(name=k, value=v))
site_config = SiteConfig(app_settings=settings)
else:
site_config = SiteConfig(app_settings=[])
if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
'B1', 'B2', 'B3', 'BASIC']:
site_config.always_on = True
if subnet or vnet:
subnet_info = _get_subnet_info(cmd=cmd,
resource_group_name=resource_group_name,
subnet=subnet,
vnet=vnet)
_validate_vnet_integration_location(cmd=cmd, webapp_location=plan_info.location,
subnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"])
_vnet_delegation_check(cmd, subnet_subscription_id=subnet_info["subnet_subscription_id"],
vnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
subnet_name=subnet_info["subnet_name"])
site_config.vnet_route_all_enabled = True
subnet_resource_id = subnet_info["subnet_resource_id"]
else:
subnet_resource_id = None
webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
https_only=using_webapp_up, virtual_network_subnet_id=subnet_resource_id)
helper = _StackRuntimeHelper(cmd, client, linux=is_linux)
if runtime:
runtime = helper.remove_delimiters(runtime)
current_stack = None
if is_linux:
if not validate_container_app_create_options(runtime, deployment_container_image_name,
multicontainer_config_type, multicontainer_config_file):
raise CLIError("usage error: --runtime | --deployment-container-image-name |"
" --multicontainer-config-type TYPE --multicontainer-config-file FILE")
if startup_file:
site_config.app_command_line = startup_file
if runtime:
match = helper.resolve(runtime)
if not match:
raise CLIError("Linux Runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
match['setter'](cmd=cmd, stack=match, site_config=site_config)
elif deployment_container_image_name:
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
value="false"))
elif multicontainer_config_type and multicontainer_config_file:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
elif plan_info.is_xenon: # windows container webapp
if deployment_container_image_name:
site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
# set the needed app settings for container image validation
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_USERNAME",
value=docker_registry_server_user))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_PASSWORD",
value=docker_registry_server_password))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_URL",
value=docker_registry_server_url))
elif runtime: # windows webapp with runtime specified
if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
"--multicontainer-config-type and --multicontainer-config-file is "
"only appliable on linux webapp")
match = helper.resolve(runtime)
if not match:
raise CLIError("Windows runtime '{}' is not supported. "
"Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
match['setter'](cmd=cmd, stack=match, site_config=site_config)
# TODO: Ask Calvin the purpose of this - seems like unneeded set of calls
        # portal uses the current_stack property in metadata to display stack for windows apps
current_stack = get_current_stack_from_runtime(runtime)
else: # windows webapp without runtime specified
if name_validation.name_available: # If creating new webapp
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
    if using_webapp_up:  # when the routine is invoked as a helper method for 'az webapp up'
if name_validation.name_available:
logger.info("will set appsetting for enabling build")
site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
if language is not None and language.lower() == 'dotnetcore':
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
value='https://{}.scm.azurewebsites.net/detectors'
.format(name)))
poller = client.web_apps.begin_create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation(cmd.cli_ctx)(poller)
# TO DO: (Check with Calvin) This seems to be something specific to portal client use only & should be removed
if current_stack:
_update_webapp_current_stack_property_if_needed(cmd, resource_group_name, name, current_stack)
# Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
_set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
if deployment_container_image_name:
logger.info("Updating container settings")
update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password=docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
webapp.identity = identity
return webapp
def _validate_vnet_integration_location(cmd, subnet_resource_group, vnet_name, webapp_location):
vnet_client = network_client_factory(cmd.cli_ctx).virtual_networks
vnet_location = vnet_client.get(resource_group_name=subnet_resource_group,
virtual_network_name=vnet_name).location
vnet_location = _normalize_location(cmd, vnet_location)
asp_location = _normalize_location(cmd, webapp_location)
if vnet_location != asp_location:
raise ArgumentUsageError("Unable to create webapp: vnet and App Service Plan must be in the same location. "
"vnet location: {}. Plan location: {}.".format(vnet_location, asp_location))
def _get_subnet_info(cmd, resource_group_name, vnet, subnet):
from azure.cli.core.commands.client_factory import get_subscription_id
subnet_info = {"vnet_name": None,
"subnet_name": None,
"resource_group_name": None,
"subnet_resource_id": None,
"subnet_subscription_id": None,
"vnet_resource_id": None}
if is_valid_resource_id(subnet):
if vnet:
logger.warning("--subnet argument is a resource ID. Ignoring --vnet argument.")
parsed_sub_rid = parse_resource_id(subnet)
subnet_info["vnet_name"] = parsed_sub_rid["name"]
subnet_info["subnet_name"] = parsed_sub_rid["resource_name"]
subnet_info["resource_group_name"] = parsed_sub_rid["resource_group"]
subnet_info["subnet_resource_id"] = subnet
subnet_info["subnet_subscription_id"] = parsed_sub_rid["subscription"]
vnet_fmt = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}"
subnet_info["vnet_resource_id"] = vnet_fmt.format(parsed_sub_rid["subscription"],
parsed_sub_rid["resource_group"],
parsed_sub_rid["name"])
return subnet_info
subnet_name = subnet
if is_valid_resource_id(vnet):
parsed_vnet = parse_resource_id(vnet)
subnet_rg = parsed_vnet["resource_group"]
vnet_name = parsed_vnet["name"]
subscription_id = parsed_vnet["subscription"]
subnet_info["vnet_resource_id"] = vnet
else:
logger.warning("Assuming subnet resource group is the same as webapp. "
"Use a resource ID for --subnet or --vnet to use a different resource group.")
subnet_rg = resource_group_name
vnet_name = vnet
subscription_id = get_subscription_id(cmd.cli_ctx)
vnet_fmt = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}"
subnet_info["vnet_resource_id"] = vnet_fmt.format(subscription_id,
subnet_rg,
vnet)
subnet_id_fmt = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}"
subnet_rid = subnet_id_fmt.format(subscription_id, subnet_rg, vnet_name, subnet_name)
subnet_info["vnet_name"] = vnet_name
subnet_info["subnet_name"] = subnet_name
subnet_info["resource_group_name"] = subnet_rg
subnet_info["subnet_resource_id"] = subnet_rid
subnet_info["subnet_subscription_id"] = subscription_id
return subnet_info
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
multicontainer_config_type=None, multicontainer_config_file=None):
if bool(multicontainer_config_type) != bool(multicontainer_config_file):
return False
opts = [runtime, deployment_container_image_name, multicontainer_config_type]
    return len([x for x in opts if x]) == 1  # only one of these options may be specified
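# A minimal sketch of the combinations validate_container_app_create_options accepts,
# assuming the function above is called as-is (argument values are illustrative only):
#   validate_container_app_create_options(runtime='PYTHON|3.9')                        -> True
#   validate_container_app_create_options(deployment_container_image_name='nginx')     -> True
#   validate_container_app_create_options(multicontainer_config_type='COMPOSE',
#                                          multicontainer_config_file='compose.yml')    -> True
#   validate_container_app_create_options(runtime='PYTHON|3.9',
#                                          deployment_container_image_name='nginx')     -> False  (two options picked)
#   validate_container_app_create_options(multicontainer_config_type='COMPOSE')         -> False  (config file missing)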
def parse_docker_image_name(deployment_container_image_name):
if not deployment_container_image_name:
return None
slash_ix = deployment_container_image_name.rfind('/')
docker_registry_server_url = deployment_container_image_name[0:slash_ix]
if slash_ix == -1 or ("." not in docker_registry_server_url and ":" not in docker_registry_server_url):
return None
return docker_registry_server_url
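# Illustrative behavior of parse_docker_image_name (a hedged sketch; the image names are examples):
# a registry host is returned only when the part before the last '/' looks like a host
# (contains '.' or ':'); otherwise None is returned and Docker Hub is assumed.
#   parse_docker_image_name('myregistry.azurecr.io/myimage:v1')  -> 'myregistry.azurecr.io'
#   parse_docker_image_name('localhost:5000/myimage')            -> 'localhost:5000'
#   parse_docker_image_name('library/nginx')                     -> None
#   parse_docker_image_name('nginx')                             -> None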
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_application_settings', slot)
result, slot_result = {}, {}
# pylint: disable=too-many-nested-blocks
for src, dest, setting_type in [(settings, result, "Settings"), (slot_settings, slot_result, "SlotSettings")]:
for s in src:
try:
temp = shell_safe_json_parse(s)
if isinstance(temp, list): # a bit messy, but we'd like accept the output of the "list" command
for t in temp:
if 'slotSetting' in t.keys():
slot_result[t['name']] = t['slotSetting']
if setting_type == "SlotSettings":
slot_result[t['name']] = True
result[t['name']] = t['value']
else:
dest.update(temp)
except CLIError:
setting_name, value = s.split('=', 1)
dest[setting_name] = value
result.update(dest)
for setting_name, value in result.items():
app_settings.properties[setting_name] = value
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings, slot, client)
app_settings_slot_cfg_names = []
if slot_result:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
# Slot settings logic to add a new setting(s) or remove an existing setting(s)
for slot_setting_name, value in slot_result.items():
if value and slot_setting_name not in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.append(slot_setting_name)
elif not value and slot_setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(slot_setting_name)
app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
share_name, access_key, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
if custom_id in azure_storage_accounts.properties:
raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
"Use 'az webapp config storage-account update' to update an existing "
"Azure storage account configuration.".format(custom_id))
azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
share_name=share_name, access_key=access_key,
mount_path=mount_path)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
if not existing_account_config:
raise CLIError("No Azure storage account configuration found with the id '{}'. "
"Use 'az webapp config storage-account add' to add a new "
"Azure storage account configuration.".format(custom_id))
new_account_config = AzureStorageInfoValue(
type=storage_type or existing_account_config.type,
account_name=account_name or existing_account_config.account_name,
share_name=share_name or existing_account_config.share_name,
access_key=access_key or existing_account_config.access_key,
mount_path=mount_path or existing_account_config.mount_path
)
azure_storage_accounts.properties[custom_id] = new_account_config
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
app = client.web_apps.get(resource_group_name, name)
if app is None:
raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. '
'Please make sure these values are correct.'.format(name, resource_group_name))
parse_plan_id = parse_resource_id(app.server_farm_id)
plan_info = None
retry_delay = 10 # seconds
    # We need to retry getting the plan because, when the plan was just created as part of the function app,
    # it can take a few attempts before the plan is returned
for _ in range(5):
plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
parse_plan_id['name'])
if plan_info is not None:
break
time.sleep(retry_delay)
if build_remote and not app.reserved:
raise CLIError('Remote build is only available on Linux function apps')
is_consumption = is_plan_consumption(cmd, plan_info)
if (not build_remote) and is_consumption and app.reserved:
return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
if build_remote:
add_remote_build_app_settings(cmd, resource_group_name, name, slot)
else:
remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
logger.warning("Getting scm site credentials for zip deployment")
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
try:
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
except ValueError:
raise CLIError('Failed to fetch scm url for function app')
zip_url = scm_url + '/api/zipdeploy?isAsync=true'
deployment_status_url = scm_url + '/api/deployments/latest'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['Content-Type'] = 'application/octet-stream'
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
import requests
import os
from azure.cli.core.util import should_disable_connection_verify
# Read file content
with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
zip_content = fs.read()
logger.warning("Starting zip deployment. This operation can take a while to complete ...")
res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
logger.warning("Deployment endpoint responded with status code %d", res.status_code)
# check if there's an ongoing process
if res.status_code == 409:
raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. "
"Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting "
"is removed. Use 'az webapp config appsettings list --name MyWebapp --resource-group "
"MyResourceGroup --subscription MySubscription' to list app settings and 'az webapp "
"config appsettings delete --name MyWebApp --resource-group MyResourceGroup "
"--setting-names <setting-names> to delete them.".format(deployment_status_url))
# check the status of async deployment
response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
authorization, timeout)
return response
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
website_run_from_package = None
enable_oryx_build = None
app_settings_should_not_have = []
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
website_run_from_package = value
if keyval['name'] == 'ENABLE_ORYX_BUILD':
enable_oryx_build = value
if scm_do_build_during_deployment is not True:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=true"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true'
if website_run_from_package:
logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
delete_app_settings(cmd, resource_group_name, name, [
"WEBSITE_RUN_FROM_PACKAGE"
], slot)
app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE')
if enable_oryx_build:
logger.warning("Removing ENABLE_ORYX_BUILD app setting")
delete_app_settings(cmd, resource_group_name, name, [
"ENABLE_ORYX_BUILD"
], slot)
app_settings_should_not_have.append('ENABLE_ORYX_BUILD')
# Wait for scm site to get the latest app settings
if app_settings_should_not_have or app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain,
should_not_have=app_settings_should_not_have)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site.")
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if scm_do_build_during_deployment is not False:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=false"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false'
# Wait for scm site to get the latest app settings
if app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site")
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
storage_connection = None
for keyval in settings:
if keyval['name'] == 'AzureWebJobsStorage':
storage_connection = str(keyval['value'])
if storage_connection is None:
        raise CLIError('Could not find an \'AzureWebJobsStorage\' application setting')
container_name = "function-releases"
blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
block_blob_service = BlockBlobService(connection_string=storage_connection)
if not block_blob_service.exists(container_name):
block_blob_service.create_container(container_name)
# https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
def progress_callback(current, total):
total_length = 30
filled_length = int(round(total_length * current) / float(total))
percents = round(100.0 * current / float(total), 1)
progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
cmd.cli_ctx.get_progress_controller().add(message=progress_message)
block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
progress_callback=progress_callback)
now = datetime.datetime.utcnow()
blob_start = now - datetime.timedelta(minutes=10)
blob_end = now + datetime.timedelta(weeks=520)
BlobPermissions = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlobPermissions')
blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
blob_name,
permission=BlobPermissions(read=True),
expiry=blob_end,
start=blob_start)
blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
client = web_client_factory(cmd.cli_ctx)
try:
logger.info('\nSyncing Triggers...')
if slot is not None:
client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
else:
client.web_apps.sync_function_triggers(resource_group_name, name)
except CloudError as ex:
# This SDK function throws an error if Status Code is 200
if ex.status_code != 200:
raise ex
except Exception as ex: # pylint: disable=broad-except
if ex.response.status_code != 200:
raise ex
def show_webapp(cmd, resource_group_name, name, slot=None):
return _show_app(cmd, resource_group_name, name, "webapp", slot)
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None, # pylint: disable=unused-argument
skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs): # pylint: disable=unused-argument
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
updater = client.web_apps.begin_create_or_update_slot if slot else client.web_apps.begin_create_or_update
kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance)
if slot:
kwargs['slot'] = slot
return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
if 'function' in instance.kind:
raise CLIError("please use 'az functionapp update' to update this function app")
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
if https_only is not None:
instance.https_only = https_only == 'true'
return instance
def update_functionapp(cmd, instance, plan=None, force=False):
client = web_client_factory(cmd.cli_ctx)
if plan is not None:
if is_valid_resource_id(plan):
dest_parse_result = parse_resource_id(plan)
dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'],
dest_parse_result['name'])
else:
dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
if dest_plan_info is None:
raise ResourceNotFoundError("The plan '{}' doesn't exist".format(plan))
validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info, force)
instance.server_farm_id = dest_plan_info.id
return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance, force):
    general_switch_msg = 'Currently the switch is only allowed between Consumption and Elastic Premium plans.'
src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
src_parse_result['name'])
if src_plan_info is None:
raise ResourceNotFoundError('Could not determine the current plan of the functionapp')
# Ensure all plans involved are windows. Reserved = true indicates Linux.
if src_plan_info.reserved or dest_plan_instance.reserved:
raise ValidationError('This feature currently supports windows to windows plan migrations. For other '
'migrations, please redeploy.')
src_is_premium = is_plan_elastic_premium(cmd, src_plan_info)
dest_is_consumption = is_plan_consumption(cmd, dest_plan_instance)
if not (is_plan_consumption(cmd, src_plan_info) or src_is_premium):
raise ValidationError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' +
general_switch_msg)
if not (dest_is_consumption or is_plan_elastic_premium(cmd, dest_plan_instance)):
raise ValidationError('You are trying to move to a plan that is not a Consumption or an '
'Elastic Premium plan. ' +
general_switch_msg)
if src_is_premium and dest_is_consumption:
logger.warning('WARNING: Moving a functionapp from Premium to Consumption might result in loss of '
'functionality and cause the app to break. Please ensure the functionapp is compatible '
'with a Consumption plan and is not using any features only available in Premium.')
if not force:
raise RequiredArgumentMissingError('If you want to migrate a functionapp from a Premium to Consumption '
'plan, please re-run this command with the \'--force\' flag.')
def set_functionapp(cmd, resource_group_name, name, **kwargs):
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.begin_create_or_update(resource_group_name, name, site_envelope=instance)
def get_functionapp(cmd, resource_group_name, name, slot=None):
function_app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not function_app or 'function' not in function_app.kind:
raise ResourceNotFoundError("Unable to find App {} in resource group {}".format(name, resource_group_name))
return function_app
def show_functionapp(cmd, resource_group_name, name, slot=None):
return _show_app(cmd, resource_group_name, name, 'functionapp', slot)
def list_webapp(cmd, resource_group_name=None):
full_list = _list_app(cmd.cli_ctx, resource_group_name)
    # skip apps with kind==null and function apps; return only webapps
return list(filter(lambda x: x.kind is not None and "function" not in x.kind.lower(), full_list))
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
return sorted(result, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_restore_from_deleted_app',
slot, request)
def list_function_app(cmd, resource_group_name=None):
return list(filter(lambda x: x.kind is not None and "function" in x.kind.lower(),
_list_app(cmd.cli_ctx, resource_group_name)))
def _show_app(cmd, resource_group_name, name, cmd_app_type, slot=None):
app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not app:
raise ResourceNotFoundError("Unable to find {} '{}', in RG '{}'.".format(
cmd_app_type, name, resource_group_name))
app_type = _kind_to_app_type(app.kind) if app else None
if app_type != cmd_app_type:
raise ResourceNotFoundError(
"Unable to find {} '{}', in RG '{}'".format(cmd_app_type.value, name, resource_group_name),
"Use 'az {} show' to show {}s".format(app_type.value, app_type.value))
app.site_config = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
_rename_server_farm_props(app)
_fill_ftp_publishing_url(cmd, app, resource_group_name, name, slot)
return app
def _kind_to_app_type(kind):
if "workflow" in kind:
return "logicapp"
if "function" in kind:
return "functionapp"
return "webapp"
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
client = web_client_factory(cli_ctx)
locations = _get_deleted_apps_locations(cli_ctx)
result = []
for location in locations:
result = result + list(client.deleted_web_apps.list_by_location(location))
if resource_group_name:
result = [r for r in result if r.resource_group == resource_group_name]
if name:
result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
if slot:
result = [r for r in result if r.slot.lower() == slot.lower()]
return result
def _build_identities_info(identities):
from ._appservice_utils import MSI_LOCAL_ID
identities = identities or []
identity_types = []
if not identities or MSI_LOCAL_ID in identities:
identity_types.append('SystemAssigned')
external_identities = [x for x in identities if x != MSI_LOCAL_ID]
if external_identities:
identity_types.append('UserAssigned')
identity_types = ','.join(identity_types)
info = {'type': identity_types}
if external_identities:
info['userAssignedIdentities'] = {e: {} for e in external_identities}
return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)
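# A hedged sketch of what _build_identities_info returns; the user-assigned identity resource ID
# below is a made-up example:
#   _build_identities_info(None)
#     -> ({'type': 'SystemAssigned'}, 'SystemAssigned', [], True)
#   _build_identities_info(['/subscriptions/xxx/.../userAssignedIdentities/myid'])
#     -> ({'type': 'UserAssigned',
#          'userAssignedIdentities': {'/subscriptions/xxx/.../userAssignedIdentities/myid': {}}},
#         'UserAssigned', ['/subscriptions/xxx/.../userAssignedIdentities/myid'], False)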
def assign_identity(cmd, resource_group_name, name, assign_identities=None, role='Contributor', slot=None, scope=None):
ManagedServiceIdentity, ResourceIdentityType = cmd.get_models('ManagedServiceIdentity',
'ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
_, _, external_identities, enable_local_identity = _build_identities_info(assign_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
if webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned_user_assigned:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned and external_identities:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities:
identity_types = ResourceIdentityType.user_assigned
else:
identity_types = ResourceIdentityType.system_assigned
if webapp.identity:
webapp.identity.type = identity_types
else:
webapp.identity = ManagedServiceIdentity(type=identity_types)
if external_identities:
if not webapp.identity.user_assigned_identities:
webapp.identity.user_assigned_identities = {}
for identity in external_identities:
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update',
extra_parameter=webapp, slot=slot)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
web_app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not web_app:
raise ResourceNotFoundError("Unable to find App {} in resource group {}".format(name, resource_group_name))
return web_app.identity
def remove_identity(cmd, resource_group_name, name, remove_identities=None, slot=None):
IdentityType = cmd.get_models('ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
_, _, external_identities, remove_local_identity = _build_identities_info(remove_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
if webapp.identity is None:
return webapp
to_remove = []
existing_identities = {x.lower() for x in list((webapp.identity.user_assigned_identities or {}).keys())}
if external_identities:
to_remove = {x.lower() for x in external_identities}
non_existing = to_remove.difference(existing_identities)
if non_existing:
raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
if not list(existing_identities - to_remove):
if webapp.identity.type == IdentityType.user_assigned:
webapp.identity.type = IdentityType.none
elif webapp.identity.type == IdentityType.system_assigned_user_assigned:
webapp.identity.type = IdentityType.system_assigned
webapp.identity.user_assigned_identities = None
if remove_local_identity:
webapp.identity.type = (IdentityType.none
if webapp.identity.type == IdentityType.system_assigned or
webapp.identity.type == IdentityType.none
else IdentityType.user_assigned)
if webapp.identity.type not in [IdentityType.none, IdentityType.system_assigned]:
webapp.identity.user_assigned_identities = {}
if to_remove:
for identity in list(existing_identities - to_remove):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
else:
for identity in list(existing_identities):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter)
return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def is_auth_runtime_version_valid(runtime_version=None):
if runtime_version is None:
return True
if runtime_version.startswith("~") and len(runtime_version) > 1:
try:
int(runtime_version[1:])
except ValueError:
return False
return True
split_versions = runtime_version.split('.')
if len(split_versions) != 3:
return False
for version in split_versions:
try:
int(version)
except ValueError:
return False
return True
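# Accepted --runtime-version formats, per is_auth_runtime_version_valid above (illustrative values):
#   is_auth_runtime_version_valid(None)      -> True   (not specified)
#   is_auth_runtime_version_valid('~2')      -> True   (tilde followed by an integer)
#   is_auth_runtime_version_valid('1.4.0')   -> True   (exact three-part version)
#   is_auth_runtime_version_valid('1.4')     -> False  (must have three parts)
#   is_auth_runtime_version_valid('~beta')   -> False  (not an integer after '~')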
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, runtime_version=None, # pylint: disable=unused-argument
token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
client_secret_certificate_thumbprint=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
# validate runtime version
if not is_auth_runtime_version_valid(runtime_version):
raise CLIError('Usage Error: --runtime-version set to invalid value')
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
    # note: getargvalues is already used in azure.cli.core.commands,
    # and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[2:]:
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_instances(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_instance_identifiers', slot)
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with stacks EOL schedule.
def list_runtimes(cmd, linux=False):
client = web_client_factory(cmd.cli_ctx)
runtime_helper = _StackRuntimeHelper(cmd=cmd, client=client, linux=linux)
return [s['displayName'] for s in runtime_helper.stacks]
def list_runtimes_hardcoded(linux=False):
if linux:
return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['linux']]
return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['windows']]
def delete_function_app(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None): # pylint: disable=unused-argument
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.delete_slot(resource_group_name, name, slot,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None)
else:
client.web_apps.delete(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
# Check whether the app settings have propagated correctly to the Kudu site by calling its api/settings endpoint
# should_have [] is a list of app settings which are expected to be set
# should_not_have [] is a list of app settings which are expected to be absent
# should_contain {} is a dictionary of app settings which are expected to be set to precise values
# Returns True if validation succeeded
def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None,
should_have=None, should_not_have=None, should_contain=None):
scm_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot)
scm_setting_keys = set(scm_settings.keys())
if should_have and not set(should_have).issubset(scm_setting_keys):
return False
if should_not_have and set(should_not_have).intersection(scm_setting_keys):
return False
temp_setting = scm_settings.copy()
temp_setting.update(should_contain or {})
if temp_setting != scm_settings:
return False
return True
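# Sketch of how the validation above composes (argument values are illustrative only):
#   validate_app_settings_in_scm(cmd, 'my-rg', 'my-app',
#                                should_contain={'SCM_DO_BUILD_DURING_DEPLOYMENT': 'true'},
#                                should_not_have=['WEBSITE_RUN_FROM_PACKAGE'])
# returns True only when the Kudu api/settings payload already has SCM_DO_BUILD_DURING_DEPLOYMENT
# set to 'true' and no WEBSITE_RUN_FROM_PACKAGE key; add_remote_build_app_settings polls this in a
# retry loop until it succeeds or gives up.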
@retryable_method(3, 5)
def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
settings_url = '{}/api/settings'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
headers = {
'Content-Type': 'application/octet-stream',
'Cache-Control': 'no-cache',
'User-Agent': get_az_user_agent()
}
import requests
response = requests.get(settings_url, headers=headers, auth=(username, password), timeout=3)
return response.json() or {}
def get_connection_strings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
result = [{'name': p,
'value': result.properties[p].value,
'type':result.properties[p].type,
'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.azure_storage_config_names or []
return [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
try:
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
except StopIteration:
pass
return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
lower_custom_image_name = custom_image_name.lower()
if "https://" in lower_custom_image_name or "http://" in lower_custom_image_name:
custom_image_name = lower_custom_image_name.replace("https://", "").replace("http://", "")
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
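# Illustrative fx_version strings produced by _format_fx_version (image names are examples):
#   _format_fx_version('nginx')                       -> 'DOCKER|nginx'
#   _format_fx_version('DOCKER|nginx')                -> 'DOCKER|nginx'   (already prefixed)
#   _format_fx_version('<base64 config>', 'COMPOSE')  -> 'COMPOSE|<base64 config>'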
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_fx_version(custom_image_name)
web_app = get_webapp(cmd, resource_group_name, name, slot)
if not web_app:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
linux_fx = fx_version if (web_app.reserved or not web_app.is_xenon) else None
windows_fx = fx_version if web_app.is_xenon else None
return update_site_configs(cmd, resource_group_name, name,
linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
site_config = get_site_configs(cmd, resource_group_name, name, slot)
return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
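# url_validator treats a string as a URL only when it has a scheme, a host and a path
# (sketch with made-up inputs):
#   url_validator('https://example.com/docker-compose.yml')  -> True
#   url_validator('https://example.com')                     -> False  (no path component)
#   url_validator('docker-compose.yml')                      -> False  (treated as a local file elsewhere)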
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
from base64 import b64decode
linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
if not any(linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES):
raise CLIError("Cannot decode config that is not one of the"
" following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
from base64 import b64encode
config_file_bytes = None
if url_validator(file_name):
response = urlopen(file_name, context=_ssl_context())
config_file_bytes = response.read()
else:
with open(file_name, 'rb') as f:
config_file_bytes = f.read()
    # Base64-encode the config file bytes and return the result as a UTF-8 string
return b64encode(config_file_bytes).decode('utf-8')
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
python_version=None, net_framework_version=None,
java_version=None, java_container=None, java_container_version=None,
remote_debugging_enabled=None, web_sockets_enabled=None,
always_on=None, auto_heal_enabled=None,
use32_bit_worker_process=None,
min_tls_version=None,
http20_enabled=None,
app_command_line=None,
ftps_state=None,
vnet_route_all_enabled=None,
generic_configurations=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
if pre_warmed_instance_count is not None:
pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
min_val=0, max_val=20)
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled', 'vnet_route_all_enabled']
int_flags = ['pre_warmed_instance_count', 'number_of_workers']
    # note: getargvalues is already used in azure.cli.core.commands,
    # and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[3:]:
if arg in int_flags and values[arg] is not None:
values[arg] = validate_and_convert_to_int(arg, values[arg])
if arg != 'generic_configurations' and values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
generic_configurations = generic_configurations or []
# https://github.com/Azure/azure-cli/issues/14857
updating_ip_security_restrictions = False
result = {}
for s in generic_configurations:
try:
json_object = get_json_object(s)
for config_name in json_object:
if config_name.lower() == 'ip_security_restrictions':
updating_ip_security_restrictions = True
result.update(json_object)
except CLIError:
config_name, value = s.split('=', 1)
result[config_name] = value
for config_name, value in result.items():
if config_name.lower() == 'ip_security_restrictions':
updating_ip_security_restrictions = True
setattr(configs, config_name, value)
if not updating_ip_security_restrictions:
setattr(configs, 'ip_security_restrictions', None)
setattr(configs, 'scm_ip_security_restrictions', None)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
app_settings.properties.pop(setting_name, None)
if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings, slot, client)
return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
azure_storage_accounts.properties.pop(custom_id, None)
if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.remove(custom_id)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts,
slot, client)
return result.properties
def _ssl_context():
    if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
slot_cfg_names = slot_cfg_names or []
return [{'name': p,
'value': app_settings[p],
'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
settings=None, slot=None, slot_settings=None):
from azure.mgmt.web.models import ConnStringValueTypePair
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
for name_value in settings + slot_settings:
# split at the first '=', connection string should not have '=' in the name
conn_string_name, value = name_value.split('=', 1)
if value[0] in ["'", '"']: # strip away the quots used as separators
value = value[1:-1]
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
type=connection_string_type)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings, slot, client)
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
slot_cfg_names.connection_string_names += new_slot_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
conn_strings.properties.pop(setting_name, None)
if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
slot_cfg_names.connection_string_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings, slot, client)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
websites_enable_app_service_storage=None, docker_registry_server_password=None,
multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
settings = []
if docker_registry_server_url is not None:
settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
if (not docker_registry_server_user and not docker_registry_server_password and
docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
parsed = urlparse(docker_registry_server_url)
registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
try:
docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
if docker_registry_server_user is not None:
settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
if docker_registry_server_password is not None:
settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
if websites_enable_app_service_storage:
settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
update_app_settings(cmd, resource_group_name, name, settings, slot)
settings = get_app_settings(cmd, resource_group_name, name, slot)
if docker_custom_image_name is not None:
_add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
if multicontainer_config_file and multicontainer_config_type:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
elif multicontainer_config_file or multicontainer_config_type:
logger.warning('Must specify both --multicontainer-config-file FILE and --multicontainer-config-type TYPE')
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
docker_registry_server_password=None, slot=None):
return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
docker_custom_image_name, docker_registry_server_user, None,
docker_registry_server_password, multicontainer_config_type=None,
multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.cli.core.commands.parameters import get_resources_in_subscription
client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
result = [item for item in result if item.name.lower() == registry_name]
if not result or len(result) > 1:
raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
resource_group_name = parse_resource_id(result[0].id)['resource_group']
registry = client.get(resource_group_name, registry_name)
if registry.admin_user_enabled: # pylint: disable=no-member
cred = client.list_credentials(resource_group_name, registry_name)
return cred.username, cred.passwords[0].value
raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
"credentials or run 'az acr update -n {} --admin-enabled true' to enable "
"admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
_delete_linux_fx_version(cmd, resource_group_name, name, slot)
delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config, slot))
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config=None, slot=None):
result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES]
fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
if fx_version:
added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME',
'value': fx_version}
result.append(added_image_name)
if show_multicontainer_config:
decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
'value': decoded_value}
result.append(decoded_image_name)
return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]:
settings[x] = None
return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
from azure.mgmt.web.models import HostNameBinding
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
binding = HostNameBinding(site_name=webapp.name)
if slot is None:
return client.web_apps.create_or_update_host_name_binding(resource_group_name=resource_group_name,
name=webapp.name, host_name=hostname,
host_name_binding=binding)
return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name=resource_group_name,
name=webapp.name, host_name=hostname,
slot=slot, host_name_binding=binding)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'list_host_name_bindings', slot))
for r in result:
r.name = r.name.split('/')[-1]
return result
def get_external_ip(cmd, resource_group_name, webapp_name):
SslState = cmd.get_models('SslState')
# the logic here is ported from the portal
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
if webapp.hosting_environment_profile:
address = client.app_service_environments.list_vips(
resource_group_name, webapp.hosting_environment_profile.name)
if address.internal_ip_address:
ip_address = address.internal_ip_address
else:
vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
ip_address = vip.virtual_ip if vip else address.service_ip_address
else:
ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
Site, SiteConfig, NameValuePair = cmd.get_models('Site', 'SiteConfig', 'NameValuePair')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, webapp)
site_config = get_site_configs(cmd, resource_group_name, webapp, None)
if not site:
raise CLIError("'{}' app doesn't exist".format(webapp))
if 'functionapp' in site.kind:
raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
slot_def.site_config = SiteConfig()
# if it is a Windows Container site, at least pass the necessary
# app settings to perform the container image validation:
if configuration_source and site_config.windows_fx_version:
# get settings from the source
clone_from_prod = configuration_source.lower() == webapp.lower()
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings', src_slot)
settings = []
for k, v in app_settings.properties.items():
if k in ("DOCKER_REGISTRY_SERVER_USERNAME", "DOCKER_REGISTRY_SERVER_PASSWORD",
"DOCKER_REGISTRY_SERVER_URL"):
settings.append(NameValuePair(name=k, value=v))
slot_def.site_config = SiteConfig(app_settings=settings)
poller = client.web_apps.begin_create_or_update_slot(resource_group_name, webapp, site_envelope=slot_def, slot=slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
Site = cmd.get_models('Site')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' function app doesn't exist".format(name))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
poller = client.web_apps.begin_create_or_update_slot(resource_group_name, name, site_envelope=slot_def, slot=slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
clone_from_prod = configuration_source.lower() == webapp.lower()
site_config = get_site_configs(cmd, resource_group_name, webapp,
None if clone_from_prod else configuration_source)
_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_configuration', slot, site_config)
# slot create doesn't clone over the app-settings and connection-strings, so we do it here
# also make sure slot settings don't get propagated.
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings',
src_slot)
for a in slot_cfg_names.app_setting_names or []:
app_settings.properties.pop(a, None)
connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_connection_strings',
src_slot)
for a in slot_cfg_names.connection_string_names or []:
connection_strings.properties.pop(a, None)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_application_settings',
app_settings, slot, client)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_connection_strings',
connection_strings, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
manual_integration=None, git_token=None, slot=None, github_action=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
from azure.mgmt.web.models import SiteSourceControl, SourceControl
if git_token:
sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
client.update_source_control('GitHub', sc)
source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
is_manual_integration=manual_integration,
is_mercurial=(repository_type != 'git'), is_git_hub_action=bool(github_action))
# SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
for i in range(5):
try:
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'begin_create_or_update_source_control',
slot, source_control)
return LongRunningOperation(cmd.cli_ctx)(poller)
except Exception as ex: # pylint: disable=broad-except
import re
ex = ex_handler_factory(no_throw=True)(ex)
# for non-server errors (anything other than 50x), just throw; otherwise retry up to 4 times
if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
raise
logger.warning('retrying %s/4', i + 1)
time.sleep(5) # retry in a moment
def update_git_token(cmd, git_token=None):
'''
Update source control token cached in Azure app service. If no token is provided,
the command will clean up the existing token.
'''
client = web_client_factory(cmd.cli_ctx)
from azure.mgmt.web.models import SourceControl
sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
site_config = get_site_configs(cmd, resource_group_name, name, slot)
site_config.scm_type = 'LocalGit'
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update_configuration', slot, site_config)
return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
except CloudError as ex: # Because of bad spec, sdk throws on 200. We capture it here
if ex.status_code not in [200, 204]:
raise ex
def list_app_service_plans(cmd, resource_group_name=None):
client = web_client_factory(cmd.cli_ctx)
if resource_group_name is None:
plans = list(client.app_service_plans.list(detailed=True)) # enables querying "numberOfSites"
else:
plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
for plan in plans:
# prune a few useless fields
del plan.geo_region
del plan.subscription
return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
app_service_environment=None, sku='B1', number_of_workers=None, location=None,
tags=None, no_wait=False):
HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
_validate_asp_sku(app_service_environment, sku)
if is_linux and hyper_v:
raise MutuallyExclusiveArgumentError('Usage error: --is-linux and --hyper-v cannot be used together.')
client = web_client_factory(cmd.cli_ctx)
if app_service_environment:
if hyper_v:
raise ArgumentUsageError('Windows containers is not yet supported in app service environment')
ase_list = client.app_service_environments.list()
ase_found = False
ase = None
for ase in ase_list:
if ase.name.lower() == app_service_environment.lower() or ase.id.lower() == app_service_environment.lower():
ase_def = HostingEnvironmentProfile(id=ase.id)
location = ase.location
ase_found = True
break
if not ase_found:
err_msg = "App service environment '{}' not found in subscription.".format(app_service_environment)
raise ResourceNotFoundError(err_msg)
else: # Non-ASE
ase_def = None
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
# the api is odd on parameter naming, have to live with it for now
sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
return sdk_no_wait(no_wait, client.app_service_plans.begin_create_or_update, name=name,
resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
if number_of_workers is None and sku is None:
logger.warning('No update is done. Specify --sku and/or --number-of-workers.')
sku_def = instance.sku
if sku is not None:
sku = _normalize_sku(sku)
sku_def.tier = get_sku_name(sku)
sku_def.name = sku
if number_of_workers is not None:
sku_def.capacity = number_of_workers
instance.sku = sku_def
return instance
def show_plan(cmd, resource_group_name, name):
from azure.cli.core.commands.client_factory import get_subscription_id
client = web_client_factory(cmd.cli_ctx)
serverfarm_url_base = 'subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}?api-version={}'
subscription_id = get_subscription_id(cmd.cli_ctx)
serverfarm_url = serverfarm_url_base.format(subscription_id, resource_group_name, name, client.DEFAULT_API_VERSION)
request_url = cmd.cli_ctx.cloud.endpoints.resource_manager + serverfarm_url
response = send_raw_request(cmd.cli_ctx, "GET", request_url)
return response.json()
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
instance = update_app_service_plan(instance, sku, number_of_workers)
if max_burst is not None:
if not is_plan_elastic_premium(cmd, instance):
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
instance.maximum_elastic_worker_count = max_burst
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
number_of_workers, min_val=0, max_val=20)
return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups', slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
db_name=None, db_type=None,
db_connection_string=None, backup_name=None, slot=None):
BackupRequest = cmd.get_models('BackupRequest')
client = web_client_factory(cmd.cli_ctx)
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_request = BackupRequest(backup_name=backup_name,
storage_account_url=storage_account_url, databases=db_setting)
if slot:
return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
frequency=None, keep_at_least_one_backup=None,
retention_period_in_days=None, db_name=None,
db_connection_string=None, db_type=None, backup_name=None, slot=None):
BackupSchedule, BackupRequest = cmd.get_models('BackupSchedule', 'BackupRequest')
configuration = None
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
if not backup_name:
backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
try:
configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
# No configuration set yet
if not all([storage_account_url, frequency, retention_period_in_days,
keep_at_least_one_backup]):
raise CLIError('No backup configuration found. A configuration must be created. ' +
'Usage: --container-url URL --frequency TIME --retention DAYS ' +
'--retain-one TRUE/FALSE')
# If arguments were not specified, use the values in the current backup schedule
if storage_account_url is None:
storage_account_url = configuration.storage_account_url
if retention_period_in_days is None:
retention_period_in_days = configuration.backup_schedule.retention_period_in_days
if keep_at_least_one_backup is None:
keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
else:
keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
if frequency:
# Parse schedule frequency
frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
else:
frequency_num = configuration.backup_schedule.frequency_interval
frequency_unit = configuration.backup_schedule.frequency_unit
if configuration and configuration.databases:
db = configuration.databases[0]
db_type = db_type or db.database_type
db_name = db_name or db.name
db_connection_string = db_connection_string or db.connection_string
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
keep_at_least_one_backup=keep_at_least_one_backup,
retention_period_in_days=retention_period_in_days)
backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
enabled=True, storage_account_url=storage_account_url,
databases=db_setting)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
db_name=None, db_type=None, db_connection_string=None,
target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
RestoreRequest = cmd.get_models('RestoreRequest')
client = web_client_factory(cmd.cli_ctx)
storage_blob_name = backup_name
if not storage_blob_name.lower().endswith('.zip'):
storage_blob_name += '.zip'
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
restore_request = RestoreRequest(storage_account_url=storage_account_url,
blob_name=storage_blob_name, overwrite=overwrite,
site_name=target_name, databases=db_setting,
ignore_conflicting_host_names=ignore_hostname_conflict)
if slot:
return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name
source_resource_group=None, source_name=None, source_slot=None):
from azure.cli.core.commands.client_factory import get_subscription_id
SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
client = web_client_factory(cmd.cli_ctx)
recover_config = not restore_content_only
if all([source_resource_group, source_name]):
# Restore from source app to target app
sub_id = get_subscription_id(cmd.cli_ctx)
source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
"/providers/Microsoft.Web/sites/" + source_name
if source_slot:
source_id = source_id + "/slots/" + source_slot
source = SnapshotRecoverySource(id=source_id)
request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
if any([source_resource_group, source_name]):
raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
# Overwrite app with its own snapshot
request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(cmd, frequency):
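# illustrative examples of the expected input format (values are hypothetical):
#   '7d'  -> (7, FrequencyUnit.day)
#   '12h' -> (12, FrequencyUnit.hour)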
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num < 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
def _get_deleted_apps_locations(cli_ctx):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
web_provider = client.providers.get('Microsoft.Web')
del_sites_resource = next((x for x in web_provider.resource_types if x.resource_type == 'deletedSites'), None)
if del_sites_resource:
return del_sites_resource.locations
return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
user = client.get_publishing_user()
result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
parsed = urlparse(result.repo_url)
return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
parsed.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
from azure.mgmt.web.models import HostType
app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
for host in app.host_name_ssl_states or []:
if host.host_type == HostType.repository:
return "https://{}".format(host.name)
# this should not happen, but throw anyway
raise ValueError('Failed to retrieve Scm Uri')
def get_publishing_user(cmd):
client = web_client_factory(cmd.cli_ctx)
return client.get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
'''
Update deployment credentials. (Note: all webapps in your subscription will be impacted.)
'''
User = cmd.get_models('User')
client = web_client_factory(cmd.cli_ctx)
user = User(publishing_user_name=user_name)
if password is None:
try:
password = prompt_pass(msg='Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
user.publishing_password = password
return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'begin_list_publishing_credentials', slot)
return content.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None, xml=False):
import xmltodict
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_profile_xml_with_secrets', slot, {"format": "WebDeploy"})
full_xml = ''
for f in content:
full_xml += f.decode()
if not xml:
profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
converted = []
if not isinstance(profiles, list):
profiles = [profiles]
for profile in profiles:
new = {}
for key in profile:
# strip the leading '@' that xmltodict adds for XML attributes
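# e.g. (illustrative key, not from the original source) '@publishMethod' -> 'publishMethod'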
new[key.lstrip('@')] = profile[key]
converted.append(new)
return converted
cmd.cli_ctx.invocation.data['output'] = 'tsv'
return full_xml
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
settings = []
settings.append("DOCKER_ENABLE_CI=" + enable)
update_app_settings(cmd, resource_group_name, name, settings, slot)
return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
docker_enabled = False
for setting in settings:
if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
docker_enabled = True
break
cd_settings = {}
cd_settings['DOCKER_ENABLE_CI'] = docker_enabled
if docker_enabled:
credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
if credentials:
cd_url = credentials.scm_uri + '/docker/hook'
cd_settings['CI_CD_URL'] = cd_url
else:
cd_settings['CI_CD_URL'] = ''
return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
url = _get_url(cmd, resource_group_name, name, slot)
open_page_in_browser(url)
if logs:
get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
SslState = cmd.get_models('SslState')
site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
url = site.enabled_host_names[0] # picks the custom domain URL in case a domain is assigned
ssl_host = next((h for h in site.host_name_ssl_states
if h.ssl_state != SslState.disabled), None)
return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
application_logging=None, web_server_logging=None,
docker_container_logging=None, detailed_error_messages=None,
failed_request_tracing=None, slot=None):
from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
AzureBlobStorageApplicationLogsConfig, SiteLogsConfig,
HttpLogsConfig, FileSystemHttpLogsConfig,
EnabledConfig)
client = web_client_factory(cmd.cli_ctx)
# TODO: ensure we call get_site only once
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
location = site.location
application_logs = None
if application_logging:
fs_log = None
blob_log = None
level = level if application_logging != 'off' else False
level = True if level is None else level
if application_logging in ['filesystem', 'off']:
fs_log = FileSystemApplicationLogsConfig(level=level)
if application_logging in ['azureblobstorage', 'off']:
blob_log = AzureBlobStorageApplicationLogsConfig(level=level, retention_in_days=3,
sas_url=None)
application_logs = ApplicationLogsConfig(file_system=fs_log,
azure_blob_storage=blob_log)
http_logs = None
server_logging_option = web_server_logging or docker_container_logging
if server_logging_option:
# TODO: az blob storage log config currently not in use, will be implemented later.
# Tracked as Issue: #4764 on Github
filesystem_log_config = None
turned_on = server_logging_option != 'off'
if server_logging_option in ['filesystem', 'off']:
# 100 MB max log size, retention lasts 3 days. Yes, we hard-code it; the portal does too
filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
enabled=turned_on)
http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
detailed_error_messages_logs = (None if detailed_error_messages is None
else EnabledConfig(enabled=detailed_error_messages))
failed_request_tracing_logs = (None if failed_request_tracing is None
else EnabledConfig(enabled=failed_request_tracing))
site_log_config = SiteLogsConfig(location=location,
application_logs=application_logs,
http_logs=http_logs,
failed_requests_tracing=failed_request_tracing_logs,
detailed_error_messages=detailed_error_messages_logs)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def show_deployment_log(cmd, resource_group, name, slot=None, deployment_id=None):
import urllib3
import requests
scm_url = _get_scm_url(cmd, resource_group, name, slot)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
deployment_log_url = ''
if deployment_id:
deployment_log_url = '{}/api/deployments/{}/log'.format(scm_url, deployment_id)
else:
deployments_url = '{}/api/deployments/'.format(scm_url)
response = requests.get(deployments_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployments_url, response.status_code, response.reason))
sorted_logs = sorted(
response.json(),
key=lambda x: x['start_time'],
reverse=True
)
if sorted_logs and sorted_logs[0]:
deployment_log_url = sorted_logs[0].get('log_url', '')
if deployment_log_url:
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployment_log_url, response.status_code, response.reason))
return response.json()
return []
def list_deployment_logs(cmd, resource_group, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group, name, slot)
deployment_log_url = '{}/api/deployments/'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
import urllib3
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
import requests
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
scm_url, response.status_code, response.reason))
return response.json() or []
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
client = web_client_factory(cmd.cli_ctx)
site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp, 'update_configuration', slot, site_config)
def list_slots(cmd, resource_group_name, webapp):
client = web_client_factory(cmd.cli_ctx)
slots = list(client.web_apps.list_slots(resource_group_name, webapp))
for slot in slots:
slot.name = slot.name.split('/')[-1]
setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
del slot.server_farm_id
return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, preserve_vnet=None, action='swap'):
client = web_client_factory(cmd.cli_ctx)
# Default isPreserveVnet to 'True' if preserve_vnet is 'None'
isPreserveVnet = preserve_vnet if preserve_vnet is not None else 'true'
# conversion from string to Boolean
isPreserveVnet = bool(isPreserveVnet == 'true')
CsmSlotEntity = cmd.get_models('CsmSlotEntity')
slot_swap_entity = CsmSlotEntity(target_slot=target_slot or 'production', preserve_vnet=isPreserveVnet)
if action == 'swap':
poller = client.web_apps.begin_swap_slot(resource_group_name, webapp, slot, slot_swap_entity)
return poller
if action == 'preview':
if slot is None:
result = client.web_apps.apply_slot_config_to_production(resource_group_name, webapp, slot_swap_entity)
else:
result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp, slot, slot_swap_entity)
return result
# 'reset' action: revert the slot configuration changes applied during a swap preview
if target_slot is None:
client.web_apps.reset_production_slot_config(resource_group_name, webapp)
else:
client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
return None
def delete_slot(cmd, resource_group_name, webapp, slot):
client = web_client_factory(cmd.cli_ctx)
# TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
client.web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
RampUpRule = cmd.get_models('RampUpRule')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
configs = get_site_configs(cmd, resource_group_name, name)
host_name_split = site.default_host_name.split('.', 1)
host_name_suffix = '.' + host_name_split[1]
host_name_val = host_name_split[0]
configs.experiments.ramp_up_rules = []
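# each distribution entry is expected in the form '<slot>=<percentage>', e.g. 'staging=20'
# (illustrative example; the slot name and percentage are hypothetical)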
for r in distribution:
slot, percentage = r.split('=')
action_host_name_slot = host_name_val + "-" + slot
configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=action_host_name_slot + host_name_suffix,
reroute_percentage=float(percentage),
name=slot))
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
configs = get_site_configs(cmd, resource_group_name, name)
return configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
from azure.mgmt.web.models import CorsSettings
configs = get_site_configs(cmd, resource_group_name, name, slot)
if not configs.cors:
configs.cors = CorsSettings()
configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if configs.cors:
if allowed_origins:
configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
else:
configs.cors.allowed_origins = []
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
return configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
streaming_url = scm_url + '/logstream'
if provider:
streaming_url += ('/' + provider.lstrip('/'))
user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
t.daemon = True
t.start()
while True:
time.sleep(100) # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
url = scm_url.rstrip('/') + '/dump'
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
_get_log(url, user_name, password, log_file)
logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'begin_list_publishing_credentials', slot)
creds = creds.result()
return (creds.publishing_user_name, creds.publishing_password)
def _get_log(url, user_name, password, log_file=None):
import certifi
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
if log_file: # download logs
with open(log_file, 'wb') as f:
while True:
data = r.read(1024)
if not data:
break
f.write(data)
else: # streaming
std_encoding = sys.stdout.encoding
for chunk in r.stream():
if chunk:
# extra encode()/decode() round-trip for stdout encodings that do not support 'utf-8'
logger.warning(chunk.decode(encoding='utf-8', errors='replace')
.encode(std_encoding, errors='replace')
.decode(std_encoding, errors='replace')
.rstrip('\n\r')) # each line of log has CRLF.
r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
with open(certificate_file, 'rb') as cert_file:
    cert_contents = cert_file.read()
hosting_environment_profile_param = (webapp.hosting_environment_profile.name
if webapp.hosting_environment_profile else '')
thumb_print = _get_cert(certificate_password, certificate_file)
cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
webapp.location, resource_group_name)
cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
location=webapp.location, server_farm_id=webapp.server_farm_id)
return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
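# illustrative example (hypothetical values): _generate_cert_name('AB12CD', 'my-ase', 'westus2', 'my-rg')
# returns 'AB12CD_my-ase_westus2_my-rg'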
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
''' Decrypts the .pfx file '''
with open(certificate_file, 'rb') as f:
    p12 = OpenSSL.crypto.load_pkcs12(f.read(), certificate_password)
cert = p12.get_certificate()
digest_algorithm = 'sha1'
thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
return thumbprint
def list_ssl_certs(cmd, resource_group_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.list_by_resource_group(resource_group_name)
def show_ssl_cert(cmd, resource_group_name, certificate_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.get(resource_group_name, certificate_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
client = web_client_factory(cmd.cli_ctx)
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
return client.certificates.delete(resource_group_name, webapp_cert.name)
raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
server_farm_id = webapp.server_farm_id
location = webapp.location
kv_id = None
if not is_valid_resource_id(key_vault):
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
key_vaults = kv_client.vaults.list_by_subscription()
for kv in key_vaults:
if key_vault == kv.name:
kv_id = kv.id
break
else:
kv_id = key_vault
if kv_id is None:
kv_msg = 'The Key Vault {0} was not found in the subscription in context. ' \
'If your Key Vault is in a different subscription, please specify the full Resource ID: ' \
'\naz .. ssl import -n {1} -g {2} --key-vault-certificate-name {3} ' \
'--key-vault /subscriptions/[sub id]/resourceGroups/[rg]/providers/Microsoft.KeyVault/' \
'vaults/{0}'.format(key_vault, name, resource_group_name, key_vault_certificate_name)
logger.warning(kv_msg)
return
kv_id_parts = parse_resource_id(kv_id)
kv_name = kv_id_parts['name']
kv_resource_group_name = kv_id_parts['resource_group']
kv_subscription = kv_id_parts['subscription']
# If in the public cloud, check if the certificate is an app service certificate, in the same or a different
# subscription
kv_secret_name = None
cloud_type = cmd.cli_ctx.cloud.name
from azure.cli.core.commands.client_factory import get_subscription_id
subscription_id = get_subscription_id(cmd.cli_ctx)
if cloud_type.lower() == PUBLIC_CLOUD.lower():
if kv_subscription.lower() != subscription_id.lower():
diff_subscription_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_APPSERVICE,
subscription_id=kv_subscription)
ascs = diff_subscription_client.app_service_certificate_orders.list()
else:
ascs = client.app_service_certificate_orders.list()
kv_secret_name = None
for asc in ascs:
if asc.name == key_vault_certificate_name:
kv_secret_name = asc.certificates[key_vault_certificate_name].key_vault_secret_name
# if kv_secret_name is not populated, it is not an App Service certificate; fall back to the Key Vault certificate name
if not kv_secret_name:
kv_secret_name = key_vault_certificate_name
cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
lnk_msg = 'Find more details here: {}'.format(lnk)
if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription):
logger.warning('Unable to verify Key Vault permissions.')
logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
logger.warning(lnk_msg)
kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
key_vault_secret_name=kv_secret_name, server_farm_id=server_farm_id)
return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
certificate_envelope=kv_cert_def)
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
Certificate = cmd.get_models('Certificate')
hostname = hostname.lower()
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
slot_text = "Deployment slot {} in ".format(slot) if slot else ''
raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name))
parsed_plan_id = parse_resource_id(webapp.server_farm_id)
plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
if plan_info.sku.tier.upper() == 'FREE' or plan_info.sku.tier.upper() == 'SHARED':
raise CLIError('Managed Certificate is not supported on Free and Shared tier.')
if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
slot_text = " --slot {}".format(slot) if slot else ""
raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
"Use 'az webapp config hostname add --resource-group {2} "
"--webapp-name {1}{3} --hostname {0}' "
"to register the hostname.".format(hostname, name, resource_group_name, slot_text))
server_farm_id = webapp.server_farm_id
location = webapp.location
easy_cert_def = Certificate(location=location, canonical_name=hostname,
server_farm_id=server_farm_id, password='')
# TODO: Update manual polling to use LongRunningOperation once backend API & new SDK supports polling
try:
return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
certificate_envelope=easy_cert_def)
except Exception as ex:
poll_url = ex.response.headers['Location'] if 'Location' in ex.response.headers else None
if ex.response.status_code == 202 and poll_url:
r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
poll_timeout = time.time() + 60 * 2 # 2 minute timeout
while r.status_code != 200 and time.time() < poll_timeout:
time.sleep(5)
r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
if r.status_code == 200:
try:
return r.json()
except ValueError:
return r.text
logger.warning("Managed Certificate creation in progress. Please use the command "
"'az webapp config ssl show -g %s --certificate-name %s' "
" to view your certificate once it is created", resource_group_name, hostname)
return
raise CLIError(ex)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription):
from azure.cli.command_modules.role._client_factory import _graph_client_factory
from azure.graphrbac.models import GraphErrorException
from azure.cli.core.commands.client_factory import get_subscription_id
subscription = get_subscription_id(cmd.cli_ctx)
# Cannot check if key vault is in another subscription
if subscription != key_vault_subscription:
return False
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
vault = kv_client.vaults.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
# Check for Microsoft.Azure.WebSites app registration
AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
for policy in vault.properties.access_policies:
try:
sp = graph_sp_client.get(policy.object_id)
if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
for perm in policy.permissions.secrets:
if perm == "Get":
return True
except GraphErrorException:
pass # Lookup will fail for non service principals (users, groups, etc.)
return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
host_name, ssl_state, thumbprint, slot=None):
Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
ssl_state=ssl_state,
thumbprint=thumbprint,
to_update=True)],
location=webapp.location, tags=webapp.tags)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'begin_create_or_update',
slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise ResourceNotFoundError("'{}' app doesn't exist".format(name))
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
found_cert = None
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
found_cert = webapp_cert
if not found_cert:
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
found_cert = webapp_cert
if found_cert:
if len(found_cert.host_names) == 1 and not found_cert.host_names[0].startswith('*'):
return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
found_cert.host_names[0], ssl_type,
certificate_thumbprint, slot)
query_result = list_hostnames(cmd, resource_group_name, name, slot)
hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
to_update = _match_host_names_from_cert(found_cert.host_names, hostnames_in_webapp)
for h in to_update:
_update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
h, ssl_type, certificate_thumbprint, slot)
return show_webapp(cmd, resource_group_name, name, slot)
raise ResourceNotFoundError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name,
certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host names like 'admin.foo.com', 'logs.foo.com', etc.
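# illustrative example (hypothetical names): a cert issued for '*.foo.com' matches the webapp hostnames
# 'admin.foo.com' and 'logs.foo.com' (same suffix after the first dot), but not the bare 'foo.com'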
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# helper class that handles runtime stacks in formats like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper:
def __init__(self, cmd, client, linux=False):
self._cmd = cmd
self._client = client
self._linux = linux
self._stacks = []
@staticmethod
def remove_delimiters(runtime):
import re
# delimiters allowed: '|', ':'
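# illustrative examples (hypothetical runtime values): 'node:12-lts' -> 'node|12-lts'; 'php|7.4' is returned unchanged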
if '|' in runtime:
runtime = re.split('[|]', runtime)
elif ':' in runtime:
runtime = re.split('[:]', runtime)
else:
runtime = [runtime]
return '|'.join(filter(None, runtime))
def resolve(self, display_name):
self._load_stacks_hardcoded()
return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
None)
@property
def stacks(self):
self._load_stacks_hardcoded()
return self._stacks
@staticmethod
def update_site_config(stack, site_config, cmd=None):
for k, v in stack['configs'].items():
setattr(site_config, k, v)
return site_config
@staticmethod
def update_site_appsettings(cmd, stack, site_config):
NameValuePair = cmd.get_models('NameValuePair')
if site_config.app_settings is None:
site_config.app_settings = []
for k, v in stack['configs'].items():
already_in_appsettings = False
for app_setting in site_config.app_settings:
if app_setting.name == k:
already_in_appsettings = True
app_setting.value = v
if not already_in_appsettings:
site_config.app_settings.append(NameValuePair(name=k, value=v))
return site_config
def _load_stacks_hardcoded(self):
if self._stacks:
return
result = []
if self._linux:
result = get_file_json(RUNTIME_STACKS)['linux']
for r in result:
r['setter'] = _StackRuntimeHelper.update_site_config
else: # Windows stacks
result = get_file_json(RUNTIME_STACKS)['windows']
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with stacks EOL schedule.
def _load_stacks(self):
if self._stacks:
return
os_type = ('Linux' if self._linux else 'Windows')
raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
bytes_value = raw_stacks._get_next().content # pylint: disable=protected-access
json_value = bytes_value.decode('utf8')
json_stacks = json.loads(json_value)
stacks = json_stacks['value']
result = []
if self._linux:
for properties in [(s['properties']) for s in stacks]:
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
})
else: # Windows stacks
config_mappings = {
'node': 'WEBSITE_NODE_DEFAULT_VERSION',
'python': 'python_version',
'php': 'php_version',
'aspnet': 'net_framework_version'
}
# get all stack versions except 'java'
for stack in stacks:
if stack['name'] not in config_mappings:
continue
name, properties = stack['name'], stack['properties']
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': name + '|' + major['displayVersion'],
'configs': {
config_mappings[name]: (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
}
})
# deal with java, which pairs with java container version
java_stack = next((s for s in stacks if s['name'] == 'java'))
java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
for java_version in java_stack['properties']['majorVersions']:
for fx in java_container_stack['properties']['frameworks']:
for fx_version in fx['majorVersions']:
result.append({
'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
fx['display'],
fx_version['displayVersion']),
'configs': {
'java_version': java_version['runtimeVersion'],
'java_container': fx['name'],
'java_container_version': fx_version['runtimeVersion']
}
})
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
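# Helper: look up an Application Insights component and return its instrumentation key.
# Raises CLIError if the component (or its key) cannot be found in the given resource group.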
def get_app_insights_key(cli_ctx, resource_group, name):
appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
appinsights = appinsights_client.components.get(resource_group, name)
if appinsights is None or appinsights.instrumentation_key is None:
raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
return appinsights.instrumentation_key
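# Creates (or updates) an App Service plan for a function app. --max-burst is only valid for
# Elastic Premium (EP) plans; both max_burst and the worker count are validated to the 0-20
# range before the ARM call is made.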
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
number_of_workers=None, max_burst=None, location=None, tags=None):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
tier = get_sku_name(sku)
if max_burst is not None:
if tier.lower() != "elasticpremium":
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
number_of_workers, min_val=0, max_val=20)
client = web_client_factory(cmd.cli_ctx)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
hyper_v=None, name=name)
return client.app_service_plans.begin_create_or_update(resource_group_name, name, plan_def)
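# A plan is a consumption plan when its SKU tier is 'dynamic' (case-insensitive);
# Elastic Premium plans report the 'ElasticPremium' tier (see is_plan_elastic_premium below).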
def is_plan_consumption(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier.lower() == 'dynamic'
return False
def is_plan_elastic_premium(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier == 'ElasticPremium'
return False
def validate_and_convert_to_int(flag, val):
try:
return int(val)
except ValueError:
raise CLIError("Usage error: {} is expected to have an int value.".format(flag))
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
value = validate_and_convert_to_int(flag_name, value)
if min_val > value or value > max_val:
raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
max_val))
return value
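# Creates a function app on either a consumption plan (--consumption-plan-location) or an existing
# App Service plan (--plan), wiring up storage, runtime stack settings, optional VNet integration,
# container settings and Application Insights. Illustrative invocation (flag names as used in the
# error messages below):
#   az functionapp create -g myRG -n myApp --storage-account mystorage \
#       --consumption-plan-location westus --runtime python --functions-version 3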
def create_functionapp(cmd, resource_group_name, name, storage_account, plan=None,
os_type=None, functions_version=None, runtime=None, runtime_version=None,
consumption_plan_location=None, app_insights=None, app_insights_key=None,
disable_app_insights=None, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None,
docker_registry_server_password=None, docker_registry_server_user=None,
deployment_container_image_name=None, tags=None, assign_identities=None,
role='Contributor', scope=None, vnet=None, subnet=None):
# pylint: disable=too-many-statements, too-many-branches
if functions_version is None:
logger.warning("No functions version specified so defaulting to 3. In the future, specifying a version will "
"be required. To create a 3.x function you would pass in the flag `--functions-version 3`")
functions_version = '3'
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
if bool(plan) == bool(consumption_plan_location):
raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
from azure.mgmt.web.models import Site
SiteConfig, NameValuePair = cmd.get_models('SiteConfig', 'NameValuePair')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
disable_app_insights = (disable_app_insights == "true")
site_config = SiteConfig(app_settings=[])
client = web_client_factory(cmd.cli_ctx)
if vnet or subnet:
if plan:
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
webapp_location = plan_info.location
else:
webapp_location = consumption_plan_location
subnet_info = _get_subnet_info(cmd=cmd,
resource_group_name=resource_group_name,
subnet=subnet,
vnet=vnet)
_validate_vnet_integration_location(cmd=cmd, webapp_location=webapp_location,
subnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"])
_vnet_delegation_check(cmd, subnet_subscription_id=subnet_info["subnet_subscription_id"],
vnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
subnet_name=subnet_info["subnet_name"])
site_config.vnet_route_all_enabled = True
subnet_resource_id = subnet_info["subnet_resource_id"]
else:
subnet_resource_id = None
functionapp_def = Site(location=None, site_config=site_config, tags=tags,
virtual_network_subnet_id=subnet_resource_id)
KEYS = FUNCTIONS_STACKS_API_KEYS()
plan_info = None
if runtime is not None:
runtime = runtime.lower()
if consumption_plan_location:
locations = list_consumption_locations(cmd)
location = next((loc for loc in locations if loc['name'].lower() == consumption_plan_location.lower()), None)
if location is None:
raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
functionapp_def.location = consumption_plan_location
functionapp_def.kind = 'functionapp'
# if os_type is None, the os type is windows
is_linux = os_type and os_type.lower() == 'linux'
else: # apps with SKU based plan
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
location = plan_info.location
is_linux = plan_info.reserved
functionapp_def.server_farm_id = plan
functionapp_def.location = location
if functions_version == '2' and functionapp_def.location in FUNCTIONS_NO_V2_REGIONS:
raise CLIError("2.x functions are not supported in this region. To create a 3.x function, "
"pass in the flag '--functions-version 3'")
if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
raise CLIError(
"usage error: --runtime RUNTIME required for linux functions apps without custom image.")
runtime_stacks_json = _load_runtime_stacks_json_functionapp(is_linux)
if runtime is None and runtime_version is not None:
raise CLIError('Must specify --runtime to use --runtime-version')
# get the matching runtime stack object
runtime_json = _get_matching_runtime_json_functionapp(runtime_stacks_json, runtime if runtime else 'dotnet')
if not runtime_json:
# no matching runtime for os
os_string = "linux" if is_linux else "windows"
supported_runtimes = list(map(lambda x: x[KEYS.NAME], runtime_stacks_json))
raise CLIError("usage error: Currently supported runtimes (--runtime) in {} function apps are: {}."
.format(os_string, ', '.join(supported_runtimes)))
runtime_version_json = _get_matching_runtime_version_json_functionapp(runtime_json,
functions_version,
runtime_version,
is_linux)
if not runtime_version_json:
supported_runtime_versions = list(map(lambda x: x[KEYS.DISPLAY_VERSION],
_get_supported_runtime_versions_functionapp(runtime_json,
functions_version)))
if runtime_version:
if runtime == 'dotnet':
raise CLIError('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined '
'by --functions-version. Dotnet version {} is not supported by Functions version {}.'
.format(runtime_version, functions_version))
raise CLIError('--runtime-version {} is not supported for the selected --runtime {} and '
'--functions-version {}. Supported versions are: {}.'
.format(runtime_version,
runtime,
functions_version,
', '.join(supported_runtime_versions)))
# if runtime_version was not specified, then that runtime is not supported for that functions version
raise CLIError('no supported --runtime-version found for the selected --runtime {} and '
'--functions-version {}'
.format(runtime, functions_version))
if runtime == 'dotnet':
logger.warning('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined by '
'--functions-version. Dotnet version will be %s for this function app.',
runtime_version_json[KEYS.DISPLAY_VERSION])
if runtime_version_json[KEYS.IS_DEPRECATED]:
logger.warning('%s version %s has been deprecated. In the future, this version will be unavailable. '
'Please update your command to use a more recent version. For a list of supported '
'--runtime-versions, run \"az functionapp create -h\"',
runtime_json[KEYS.PROPERTIES][KEYS.DISPLAY], runtime_version_json[KEYS.DISPLAY_VERSION])
site_config_json = runtime_version_json[KEYS.SITE_CONFIG_DICT]
app_settings_json = runtime_version_json[KEYS.APP_SETTINGS_DICT]
con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
if is_linux:
functionapp_def.kind = 'functionapp,linux'
functionapp_def.reserved = True
is_consumption = consumption_plan_location is not None
if not is_consumption:
site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
value=str(hexlify(urandom(32)).decode()).upper()))
if deployment_container_image_name:
functionapp_def.kind = 'functionapp,linux,container'
site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
value=deployment_container_image_name))
site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='false'))
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
# clear all runtime specific configs and settings
site_config_json = {KEYS.USE_32_BIT_WORKER_PROC: False}
app_settings_json = {}
# ensure that app insights is created if not disabled
runtime_version_json[KEYS.APPLICATION_INSIGHTS] = True
else:
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='true'))
else:
functionapp_def.kind = 'functionapp'
# set site configs
for prop, value in site_config_json.items():
snake_case_prop = _convert_camel_to_snake_case(prop)
setattr(site_config, snake_case_prop, value)
# temporary workaround for dotnet-isolated linux consumption apps
if is_linux and consumption_plan_location is not None and runtime == 'dotnet-isolated':
site_config.linux_fx_version = ''
# adding app settings
for app_setting, value in app_settings_json.items():
site_config.app_settings.append(NameValuePair(name=app_setting, value=value))
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
value=_get_extension_version_functionapp(functions_version)))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
# If plan is not consumption or elastic premium, we need to set always on
if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
site_config.always_on = True
# If plan is elastic premium or consumption, we need these app settings
if is_plan_elastic_premium(cmd, plan_info) or consumption_plan_location is not None:
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=_get_content_share_name(name)))
create_app_insights = False
if app_insights_key is not None:
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=app_insights_key))
elif app_insights is not None:
instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=instrumentation_key))
elif disable_app_insights or not runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
# set up dashboard if no app insights
site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
elif not disable_app_insights and runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
create_app_insights = True
poller = client.web_apps.begin_create_or_update(resource_group_name, name, functionapp_def)
functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
if consumption_plan_location and is_linux:
logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully "
"created but is not active until content is published using "
"Azure Portal or the Functions Core Tools.", name)
else:
_set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
if create_app_insights:
try:
try_create_application_insights(cmd, functionapp)
except Exception: # pylint: disable=broad-except
logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
'Please use the Azure Portal to create and configure the Application Insights, if needed.')
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['AzureWebJobsDashboard={}'.format(con_string)])
if deployment_container_image_name:
update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
functionapp.identity = identity
return functionapp
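# Loads the bundled Functions stacks JSON (FUNCTIONS_STACKS_API_JSON_PATHS) for the requested OS
# and returns the entries keyed by FUNCTIONS_STACKS_API_KEYS().VALUE.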
def _load_runtime_stacks_json_functionapp(is_linux):
KEYS = FUNCTIONS_STACKS_API_KEYS()
if is_linux:
return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['linux'])[KEYS.VALUE]
return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['windows'])[KEYS.VALUE]
def _get_matching_runtime_json_functionapp(stacks_json, runtime):
KEYS = FUNCTIONS_STACKS_API_KEYS()
matching_runtime_json = list(filter(lambda x: x[KEYS.NAME] == runtime, stacks_json))
if matching_runtime_json:
return matching_runtime_json[0]
return None
def _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
KEYS = FUNCTIONS_STACKS_API_KEYS()
extension_version = _get_extension_version_functionapp(functions_version)
supported_versions_list = []
for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
if extension_version in runtime_version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]:
supported_versions_list.append(runtime_version_json)
return supported_versions_list
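# Resolves the runtime version entry to use: when --runtime-version is given, it must match both the
# display version and the selected Functions extension version; otherwise the default entry with the
# highest parsed version number among the supported ones is returned.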
def _get_matching_runtime_version_json_functionapp(runtime_json, functions_version, runtime_version, is_linux):
KEYS = FUNCTIONS_STACKS_API_KEYS()
extension_version = _get_extension_version_functionapp(functions_version)
if runtime_version:
for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
if (runtime_version_json[KEYS.DISPLAY_VERSION] == runtime_version and
extension_version in runtime_version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]):
return runtime_version_json
return None
# find the matching default runtime version
supported_versions_list = _get_supported_runtime_versions_functionapp(runtime_json, functions_version)
default_version_json = {}
default_version = 0.0
for current_runtime_version_json in supported_versions_list:
if current_runtime_version_json[KEYS.IS_DEFAULT]:
current_version = _get_runtime_version_functionapp(current_runtime_version_json[KEYS.RUNTIME_VERSION],
is_linux)
if not default_version_json or default_version < current_version:
default_version_json = current_runtime_version_json
default_version = current_version
return default_version_json
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
def _get_app_setting_set_functionapp(site_config, app_setting):
return list(filter(lambda x: x.name == app_setting, site_config.app_settings))
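# e.g. _convert_camel_to_snake_case('linuxFxVersion') returns 'linux_fx_version'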
def _convert_camel_to_snake_case(text):
return reduce(lambda x, y: x + ('_' if y.isupper() else '') + y, text).lower()
def _get_runtime_version_functionapp(version_string, is_linux):
import re
windows_match = re.fullmatch(FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX, version_string)
if windows_match:
return float(windows_match.group(1))
linux_match = re.fullmatch(FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, version_string)
if linux_match:
return float(linux_match.group(1))
try:
return float(version_string)
except ValueError:
return 0
def _get_content_share_name(app_name):
# the content share name should be up to 63 characters long, lowercase letters and digits only, and unique,
# so take the first 50 characters of the app name and append the last 12 hex characters of a random uuid
share_name = app_name[0:50]
suffix = str(uuid.uuid4()).split('-')[-1]
return share_name.lower() + suffix
def try_create_application_insights(cmd, functionapp):
creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
'Please use the Azure Portal to manually create and configure the Application Insights, ' \
'if needed.'
ai_resource_group_name = functionapp.resource_group
ai_name = functionapp.name
ai_location = functionapp.location
app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
ai_properties = {
"name": ai_name,
"location": ai_location,
"kind": "web",
"properties": {
"Application_Type": "web"
}
}
appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties)
if appinsights is None or appinsights.instrumentation_key is None:
logger.warning(creation_failed_warn)
return
# We emit this success message as a warning so it does not interfere with regular JSON output in stdout
logger.warning('Application Insights \"%s\" was created for this Function App. '
'You can visit https://portal.azure.com/#resource%s/overview to view your '
'Application Insights component', appinsights.name, appinsights.id)
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None):
if deployment_source_url:
logger.warning("Linking to git repository '%s'", deployment_source_url)
try:
config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
deployment_source_branch, manual_integration=True)
except Exception as ex: # pylint: disable=broad-except
ex = ex_handler_factory(no_throw=True)(ex)
logger.warning("Link to git repository failed due to error '%s'", ex)
if deployment_local_git:
local_git_info = enable_local_git(cmd, resource_group_name, name)
logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
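# Validates that the storage account exposes blob/queue/table endpoints and uses an allowed SKU,
# then builds the 'AzureWebJobsStorage'-style connection string from its primary key and the
# cloud's storage endpoint suffix.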
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
sa_resource_group = resource_group_name
if is_valid_resource_id(storage_account):
sa_resource_group = parse_resource_id(storage_account)['resource_group']
storage_account = parse_resource_id(storage_account)['name']
storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
storage_account)
error_message = ''
endpoints = storage_properties.primary_endpoints
sku = storage_properties.sku.name
allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS', 'Standard_GZRS'] # pylint: disable=line-too-long
for e in ['blob', 'queue', 'table']:
if not getattr(endpoints, e, None):
error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long
if sku not in allowed_storage_types:
error_message += 'Storage type {} is not allowed'.format(sku)
if error_message:
raise CLIError(error_message)
obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
"https",
endpoint_suffix,
storage_account,
keys[0]) # pylint: disable=no-member
return connection_string
def list_consumption_locations(cmd):
client = web_client_factory(cmd.cli_ctx)
regions = client.list_geo_regions(sku='Dynamic')
return [{'name': x.name.lower().replace(' ', '')} for x in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
web_client = web_client_factory(cmd.cli_ctx)
full_sku = get_sku_name(sku)
web_client_geo_regions = web_client.list_geo_regions(sku=full_sku, linux_workers_enabled=linux_workers_enabled)
providers_client = providers_client_factory(cmd.cli_ctx)
providers_client_locations_list = getattr(providers_client.get('Microsoft.Web'), 'resource_types', [])
for resource_type in providers_client_locations_list:
if resource_type.resource_type == 'sites':
providers_client_locations_list = resource_type.locations
break
return [geo_region for geo_region in web_client_geo_regions if geo_region.name in providers_client_locations_list]
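# Polls the Kudu deployment status endpoint every 2 seconds (450 trials, roughly 15 minutes, when no
# timeout is given); status 3 means the deployment failed, status 4 means it completed.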
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
import requests
from azure.cli.core.util import should_disable_connection_verify
total_trials = (int(timeout) // 2) if timeout else 450
num_trials = 0
while num_trials < total_trials:
time.sleep(2)
response = requests.get(deployment_status_url, headers=authorization,
verify=not should_disable_connection_verify())
try:
res_dict = response.json()
except json.decoder.JSONDecodeError:
logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
res_dict = {}
finally:
num_trials = num_trials + 1
if res_dict.get('status', 0) == 3:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("Zip deployment failed. {}. Please run the command az webapp log deployment show "
"-n {} -g {}".format(res_dict, name, rg_name))
if res_dict.get('status', 0) == 4:
break
if 'progress' in res_dict:
logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing
# if the deployment is taking longer than expected
if res_dict.get('status', 0) != 4:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Timeout reached by the command, however, the deployment operation
is still on-going. Navigate to your scm site to check the deployment status""")
return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
else:
listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
# reformats hybrid connection, to prune unnecessary fields
mod_list = []
for x in listed_vals.additional_properties["value"]:
properties = x["properties"]
resourceGroup = x["id"].split("/")
mod_hc = {
"id": x["id"],
"location": x["location"],
"name": x["name"],
"properties": {
"hostname": properties["hostname"],
"port": properties["port"],
"relayArmUri": properties["relayArmUri"],
"relayName": properties["relayName"],
"serviceBusNamespace": properties["serviceBusNamespace"],
"serviceBusSuffix": properties["serviceBusSuffix"]
},
"resourceGroup": resourceGroup[4],
"type": x["type"]
}
mod_list.append(mod_hc)
return mod_list
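# Adds a Service Bus Relay hybrid connection to a web app (or slot): resolves the relay namespace,
# ensures a 'defaultSender' authorization rule with Send rights exists, reads the endpoint host/port
# from the relay metadata, sets the HybridConnection on the app and returns a pruned summary.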
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
HybridConnection = cmd.get_models('HybridConnection')
web_client = web_client_factory(cmd.cli_ctx)
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
hy_co_id = ''
for n in namespace_client.list():
logger.warning(n.name)
if n.name == namespace:
hy_co_id = n.id
if hy_co_id == '':
raise ResourceNotFoundError('Azure Service Bus Relay namespace {} was not found.'.format(namespace))
i = 0
hy_co_resource_group = ''
hy_co_split = hy_co_id.split("/")
for z in hy_co_split:
if z == "resourceGroups":
hy_co_resource_group = hy_co_split[i + 1]
i = i + 1
# calling the relay API to get information about the hybrid connection
hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_info = hy_co.id
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
id_parameters = hy_co_info.split("/")
# populate object with information from the hybrid connection, and set it
# on webapp
hc = HybridConnection(service_bus_namespace=id_parameters[8],
relay_name=hybrid_connection,
relay_arm_uri=hy_co_info,
hostname=hostname,
port=port,
send_key_name="defaultSender",
send_key_value=hy_co_keys.primary_key,
service_bus_suffix=".servicebus.windows.net")
if slot is None:
return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
hybrid_connection, hc)
else:
return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot, hc)
# reformats hybrid connection, to prune unnecessary fields
resourceGroup = return_hc.id.split("/")
mod_hc = {
"hostname": return_hc.hostname,
"id": return_hc.id,
"location": return_hc.additional_properties["location"],
"name": return_hc.name,
"port": return_hc.port,
"relayArmUri": return_hc.relay_arm_uri,
"resourceGroup": resourceGroup[4],
"serviceBusNamespace": return_hc.service_bus_namespace,
"serviceBusSuffix": return_hc.service_bus_suffix
}
return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
HybridConnection = cmd.get_models('HybridConnection')
web_client = web_client_factory(cmd.cli_ctx)
# extract the hybrid connection resource group
asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
arm_uri = asp_hy_co.relay_arm_uri
split_uri = arm_uri.split("resourceGroups/")
resource_group_strings = split_uri[1].split('/')
relay_resource_group = resource_group_strings[0]
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
# calling the relay function to obtain information about the hc in question
hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
key = "empty"
if key_type.lower() == "primary":
key = hy_co_keys.primary_key
elif key_type.lower() == "secondary":
key = hy_co_keys.secondary_key
# ensures the input is correct
if key == "empty":
logger.warning("Key type is invalid - must be primary or secondary")
return
apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
hybrid_connection)
# changes the key for every app that uses that hybrid connection
for x in apps:
app_info = ast.literal_eval(x)
app_name = app_info["name"]
app_id = app_info["id"]
id_split = app_id.split("/")
app_resource_group = id_split[4]
hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
send_key_value=key)
web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
hybrid_connection, hc)
return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
web_client = web_client_factory(cmd.cli_ctx)
return web_client.app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_hc = client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
else:
return_hc = client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot)
return return_hc
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
result = list(client.web_apps.list_vnet_connections(resource_group_name, name))
else:
result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
mod_list = []
# reformats the vnet entry, removing unnecessary information
for x in result:
# removes GUIDs from name and id
longName = x.name
if '_' in longName:
usIndex = longName.index('_')
shortName = longName[usIndex + 1:]
else:
shortName = longName
v_id = x.id
lastSlash = v_id.rindex('/')
shortId = v_id[:lastSlash] + '/' + shortName
# extracts desired fields
certThumbprint = x.cert_thumbprint
location = x.additional_properties["location"]
v_type = x.type
vnet_resource_id = x.vnet_resource_id
id_strings = v_id.split('/')
resourceGroup = id_strings[4]
routes = x.routes
vnet_mod = {"certThumbprint": certThumbprint,
"id": shortId,
"location": location,
"name": shortName,
"resourceGroup": resourceGroup,
"routes": routes,
"type": v_type,
"vnetResourceId": vnet_resource_id}
mod_list.append(vnet_mod)
return mod_list
def add_webapp_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None, skip_delegation_check=False):
return _add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot, skip_delegation_check, True)
def add_functionapp_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None,
skip_delegation_check=False):
return _add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot, skip_delegation_check, False)
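# Shared implementation for webapp/functionapp VNet integration: validates that the plan and the
# VNet are in the same location, optionally checks/creates the Microsoft.Web/serverFarms subnet
# delegation, patches the site with the subnet resource id and enables vnet_route_all_enabled.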
def _add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None, skip_delegation_check=False,
is_webapp=True):
from azure.mgmt.web.models import SitePatchResource
subnet_info = _get_subnet_info(cmd=cmd,
resource_group_name=resource_group_name,
subnet=subnet,
vnet=vnet)
client = web_client_factory(cmd.cli_ctx)
if is_webapp:
app = show_webapp(cmd, resource_group_name, name, slot)
else:
app = show_functionapp(cmd, resource_group_name, name, slot)
parsed_plan = parse_resource_id(app.app_service_plan_id)
plan_info = client.app_service_plans.get(parsed_plan['resource_group'], parsed_plan["name"])
_validate_vnet_integration_location(cmd=cmd, webapp_location=plan_info.location,
subnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"])
if skip_delegation_check:
logger.warning('Skipping delegation check. Ensure that subnet is delegated to Microsoft.Web/serverFarms.'
' Missing delegation can cause "Bad Request" error.')
else:
_vnet_delegation_check(cmd, subnet_subscription_id=subnet_info["subnet_subscription_id"],
vnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
subnet_name=subnet_info["subnet_name"])
subnet_id = subnet_info["subnet_resource_id"]
if not slot:
client.web_apps.update(resource_group_name=resource_group_name,
name=name,
site_envelope=SitePatchResource(virtual_network_subnet_id=subnet_id))
else:
client.web_apps.update_slot(resource_group_name=resource_group_name,
name=name,
slot=slot,
site_envelope=SitePatchResource(virtual_network_subnet_id=subnet_id))
# Enable Route All configuration
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.vnet_route_all_enabled is not True:
config = update_site_configs(cmd, resource_group_name, name, slot=slot, vnet_route_all_enabled='true')
return {
"id": subnet_info["vnet_resource_id"],
"location": plan_info.location, # must be the same as vnet location bc of validation check
"name": subnet_info["vnet_name"],
"resourceGroup": subnet_info["resource_group_name"],
"subnetResourceId": subnet_info["subnet_resource_id"]
}
def _vnet_delegation_check(cmd, subnet_subscription_id, vnet_resource_group, vnet_name, subnet_name):
from azure.cli.core.commands.client_factory import get_subscription_id
Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
vnet_client = network_client_factory(cmd.cli_ctx)
if get_subscription_id(cmd.cli_ctx).lower() != subnet_subscription_id.lower():
logger.warning('Cannot validate subnet in other subscription for delegation to Microsoft.Web/serverFarms.'
' Missing delegation can cause "Bad Request" error.')
logger.warning('To manually add a delegation, use the command: az network vnet subnet update '
'--resource-group %s '
'--name %s '
'--vnet-name %s '
'--delegations Microsoft.Web/serverFarms', vnet_resource_group, subnet_name, vnet_name)
else:
subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet_name, subnet_name)
delegations = subnetObj.delegations
delegated = False
for d in delegations:
if d.service_name.lower() == "microsoft.web/serverfarms".lower():
delegated = True
if not delegated:
subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
vnet_client.subnets.begin_create_or_update(vnet_resource_group, vnet_name, subnet_name,
subnet_parameters=subnetObj)
def _validate_subnet(cli_ctx, subnet, vnet, resource_group_name):
subnet_is_id = is_valid_resource_id(subnet)
if subnet_is_id:
subnet_id_parts = parse_resource_id(subnet)
vnet_name = subnet_id_parts['name']
if not (vnet_name.lower() == vnet.lower() or subnet.startswith(vnet)):
logger.warning('Subnet ID is valid. Ignoring vNet input.')
return subnet
vnet_is_id = is_valid_resource_id(vnet)
if vnet_is_id:
vnet_id_parts = parse_resource_id(vnet)
return resource_id(
subscription=vnet_id_parts['subscription'],
resource_group=vnet_id_parts['resource_group'],
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_id_parts['name'],
child_type_1='subnets',
child_name_1=subnet)
# Reuse logic from existing command to stay backwards compatible
vnet_client = network_client_factory(cli_ctx)
list_all_vnets = vnet_client.virtual_networks.list_all()
vnets = []
for v in list_all_vnets:
if vnet in (v.name, v.id):
vnet_details = parse_resource_id(v.id)
vnet_resource_group = vnet_details['resource_group']
vnets.append((v.id, v.name, vnet_resource_group))
if not vnets:
return logger.warning("The virtual network %s was not found in the subscription.", vnet)
# If more than one vnet, try to use one from same resource group. Otherwise, use first and log the vnet resource id
found_vnet = [v for v in vnets if v[2].lower() == resource_group_name.lower()]
if not found_vnet:
found_vnet = [vnets[0]]
(vnet_id, vnet, vnet_resource_group) = found_vnet[0]
if len(vnets) > 1:
logger.warning("Multiple virtual networks of name %s were found. Using virtual network with resource ID: %s. "
"To use a different virtual network, specify the virtual network resource ID using --vnet.",
vnet, vnet_id)
vnet_id_parts = parse_resource_id(vnet_id)
return resource_id(
subscription=vnet_id_parts['subscription'],
resource_group=vnet_id_parts['resource_group'],
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_id_parts['name'],
child_type_1='subnets',
child_name_1=subnet)
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_vnet = client.web_apps.delete_swift_virtual_network(resource_group_name, name)
else:
return_vnet = client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
return return_vnet
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
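# 'az webapp up'-style one-shot deployment: detects (or takes) the OS/runtime from the source
# directory, creates or reuses the resource group, plan and web app, zips the contents of the
# current directory and zip-deploys them, then persists the chosen defaults in the CLI config.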
def webapp_up(cmd, name=None, resource_group_name=None, plan=None, location=None, sku=None, # pylint: disable=too-many-statements,too-many-branches
os_type=None, runtime=None, dryrun=False, logs=False, launch_browser=False, html=False,
app_service_environment=None):
if not name:
name = generate_default_app_name(cmd)
import os
AppServicePlan = cmd.get_models('AppServicePlan')
src_dir = os.getcwd()
_src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
client = web_client_factory(cmd.cli_ctx)
user = get_profile_username()
_create_new_rg = False
_site_availability = get_site_availability(cmd, name)
_create_new_app = _site_availability.name_available
os_name = os_type if os_type else detect_os_form_src(src_dir, html)
_is_linux = os_name.lower() == 'linux'
if runtime and html:
raise CLIError('Conflicting parameters: cannot have both --runtime and --html specified.')
if runtime:
helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
runtime = helper.remove_delimiters(runtime)
match = helper.resolve(runtime)
if not match:
if _is_linux:
raise CLIError("Linux runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
raise CLIError("Windows runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
language = runtime.split('|')[0]
version_used_create = '|'.join(runtime.split('|')[1:])
detected_version = '-'
else:
# detect the version
_lang_details = get_lang_from_content(src_dir, html)
language = _lang_details.get('language')
_data = get_runtime_version_details(_lang_details.get('file_loc'), language)
version_used_create = _data.get('to_create')
detected_version = _data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
site_config = None
if not _create_new_app: # App exists, or App name unavailable
if _site_availability.reason == 'Invalid':
raise CLIError(_site_availability.message)
# Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
logger.warning("Webapp '%s' already exists. The command will deploy contents to the existing app.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that the app "
"is a part of the current subscription if updating an existing app. If creating "
"a new app, app names must be globally unique. Please try a more unique name or "
"leave unspecified to receive a randomly generated name.".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp '{}' exists in ResourceGroup '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
rg_name = resource_group_name or current_rg
if location is None:
loc = app_details.location.replace(" ", "").lower()
else:
loc = location.replace(" ", "").lower()
plan_details = parse_resource_id(app_details.server_farm_id)
current_plan = plan_details['name']
if plan is not None and current_plan.lower() != plan.lower():
raise CLIError("The plan name entered '{}' does not match the plan name that the webapp is hosted in '{}'."
"Please check if you have configured defaults for plan name and re-run command."
.format(plan, current_plan))
plan = plan or plan_details['name']
plan_info = client.app_service_plans.get(plan_details['resource_group'], plan)
sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
current_os = 'Linux' if plan_info.reserved else 'Windows'
# Raise error if current OS of the app is different from the current one
if current_os.lower() != os_name.lower():
raise CLIError("The webapp '{}' is a {} app. The code detected at '{}' will default to "
"'{}'. Please create a new app "
"to continue this operation. For more information on default behaviors, "
"see https://docs.microsoft.com/cli/azure/webapp?view=azure-cli-latest#az_webapp_up."
.format(name, current_os, src_dir, os_name))
_is_linux = plan_info.reserved
# for an existing app check if the runtime version needs to be updated
# Get site config to check the runtime version
site_config = client.web_apps.get_configuration(rg_name, name)
else: # need to create a new app; check if we need to use the default RG or user-entered values
logger.warning("The webapp '%s' doesn't exist", name)
sku = get_sku_to_use(src_dir, html, sku, runtime)
loc = set_location(cmd, sku, location)
rg_name = get_rg_to_use(user, resource_group_name)
_create_new_rg = not check_resource_group_exists(cmd, rg_name)
plan = get_plan_to_use(cmd=cmd,
user=user,
loc=loc,
sku=sku,
create_rg=_create_new_rg,
resource_group_name=rg_name,
plan=plan)
dry_run_str = r""" {
"name" : "%s",
"appserviceplan" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"runtime_version_detected": "%s",
"runtime_version": "%s"
}
""" % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
logger.warning("Web app will be created with the below configuration,re-run command "
"without the --dryrun flag to create & deploy a new app")
return create_json
if _create_new_rg:
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, loc)
logger.warning("Resource group creation complete")
# create ASP
logger.warning("Creating AppServicePlan '%s' ...", plan)
# we always call the ASP create-or-update API so that, in case of re-deployment, any updated SKU or
# plan settings are applied
try:
create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
number_of_workers=1 if _is_linux else None, location=loc,
app_service_environment=app_service_environment)
except Exception as ex: # pylint: disable=broad-except
if ex.response.status_code == 409: # catch 409 conflict when trying to create existing ASP in diff location
try:
response_content = json.loads(ex.response._content.decode('utf-8')) # pylint: disable=protected-access
except Exception: # pylint: disable=broad-except
raise CLIInternalError(ex)
raise UnclassifiedUserFault(response_content['error']['message'])
raise AzureResponseError(ex)
if _create_new_app:
logger.warning("Creating webapp '%s' ...", name)
create_webapp(cmd, rg_name, name, plan, runtime_version if not html else None,
using_webapp_up=True, language=language)
_configure_default_logging(cmd, rg_name, name)
else: # for an existing app we might need to update the stack runtime settings
helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
match = helper.resolve(runtime_version)
if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
if match and site_config.linux_fx_version != match['configs']['linux_fx_version']:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, match['configs']['linux_fx_version'])
update_site_configs(cmd, rg_name, name, linux_fx_version=match['configs']['linux_fx_version'])
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
elif not match:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
elif os_name.lower() == 'windows':
# may need to update stack runtime settings. For node it is site_config.app_settings, otherwise site_config
if match:
_update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version)
create_json['runtime_version'] = runtime_version
# Zip contents & Deploy
logger.warning("Creating zip with contents of dir %s ...", src_dir)
# zip contents & deploy
zip_file_path = zip_contents_from_dir(src_dir, language)
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
if launch_browser:
logger.warning("Launching app using default browser")
view_in_browser(cmd, rg_name, name, None, logs)
else:
_url = _get_url(cmd, rg_name, name)
logger.warning("You can launch the app at %s", _url)
create_json.update({'URL': _url})
if logs:
_configure_default_logging(cmd, rg_name, name)
return get_streaming_log(cmd, rg_name, name)
with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
cmd.cli_ctx.config.set_value('defaults', 'location', loc)
cmd.cli_ctx.config.set_value('defaults', 'web', name)
return create_json
def _update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version):
update_needed = False
if 'node' in runtime_version:
settings = []
for k, v in match['configs'].items():
for app_setting in site_config.app_settings:
if app_setting.name == k and app_setting.value != v:
update_needed = True
settings.append('{}={}'.format(k, v))
if update_needed:
logger.warning('Updating runtime version to %s', runtime_version)
update_app_settings(cmd, rg_name, name, settings=settings, slot=None, slot_settings=None)
else:
for k, v in match['configs'].items():
if getattr(site_config, k, None) != v:
update_needed = True
setattr(site_config, k, v)
if update_needed:
logger.warning('Updating runtime version to %s', runtime_version)
update_site_configs(cmd,
rg_name,
name,
net_framework_version=site_config.net_framework_version,
php_version=site_config.php_version,
python_version=site_config.python_version,
java_version=site_config.java_version,
java_container=site_config.java_container,
java_container_version=site_config.java_container_version)
current_stack = get_current_stack_from_runtime(runtime_version)
_update_webapp_current_stack_property_if_needed(cmd, rg_name, name, current_stack)
if update_needed:
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
def _update_webapp_current_stack_property_if_needed(cmd, resource_group, name, current_stack):
if not current_stack:
return
# portal uses this current_stack value to display correct runtime for windows webapps
client = web_client_factory(cmd.cli_ctx)
app_metadata = client.web_apps.list_metadata(resource_group, name)
if 'CURRENT_STACK' not in app_metadata.properties or app_metadata.properties["CURRENT_STACK"] != current_stack:
app_metadata.properties["CURRENT_STACK"] = current_stack
client.web_apps.update_metadata(resource_group, name, metadata=app_metadata)
def _ping_scm_site(cmd, resource_group, name, instance=None):
from azure.cli.core.util import should_disable_connection_verify
# wake up Kudu by making an SCM call
import requests
# workaround until the timeout-limits issue for Linux is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
cookies = {}
if instance is not None:
cookies['ARRAffinity'] = instance
requests.get(scm_url + '/api/settings', headers=authorization, verify=not should_disable_connection_verify(),
cookies=cookies)
def is_webapp_up(tunnel_server):
return tunnel_server.is_webapp_up()
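# Creates (but does not start) a TunnelServer to the app's SCM/Kudu site using the publishing
# profile credentials; only Linux plans are supported and, when an instance name is given, it is
# validated against the app's current instances.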
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None, instance=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
profile_user_name = next(p['userName'] for p in profiles)
profile_user_password = next(p['userPWD'] for p in profiles)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
# Validate that we have a known instance (case-sensitive)
if instance is not None:
instances = list_instances(cmd, resource_group_name, name, slot=slot)
instance_names = set(i.name for i in instances)
if instance not in instance_names:
if slot is not None:
raise CLIError("The provided instance '{}' is not valid for this webapp and slot.".format(instance))
raise CLIError("The provided instance '{}' is not valid for this webapp.".format(instance))
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password, instance)
_ping_scm_site(cmd, resource_group_name, name, instance=instance)
_wait_for_webapp(tunnel_server)
return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
else:
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
while t.is_alive():
time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
s = threading.Thread(target=_start_ssh_session,
args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
s.daemon = True
s.start()
if timeout:
time.sleep(int(timeout))
else:
while s.is_alive() and t.is_alive():
time.sleep(5)
def perform_onedeploy(cmd,
resource_group_name,
name,
src_path=None,
src_url=None,
target_path=None,
artifact_type=None,
is_async=None,
restart=None,
clean=None,
ignore_stack=None,
timeout=None,
slot=None):
params = OneDeployParams()
params.cmd = cmd
params.resource_group_name = resource_group_name
params.webapp_name = name
params.src_path = src_path
params.src_url = src_url
params.target_path = target_path
params.artifact_type = artifact_type
params.is_async_deployment = is_async
params.should_restart = restart
params.is_clean_deployment = clean
params.should_ignore_stack = ignore_stack
params.timeout = timeout
params.slot = slot
return _perform_onedeploy_internal(params)
# Class for OneDeploy parameters
# pylint: disable=too-many-instance-attributes,too-few-public-methods
class OneDeployParams:
def __init__(self):
self.cmd = None
self.resource_group_name = None
self.webapp_name = None
self.src_path = None
self.src_url = None
self.artifact_type = None
self.is_async_deployment = None
self.target_path = None
self.should_restart = None
self.is_clean_deployment = None
self.should_ignore_stack = None
self.timeout = None
self.slot = None
# pylint: enable=too-many-instance-attributes,too-few-public-methods
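# Builds the Kudu OneDeploy URL from the deployment parameters, e.g.
# <scm_url>/api/publish?type=zip&async=True&clean=True (only the options that were set are appended).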
def _build_onedeploy_url(params):
scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
deploy_url = scm_url + '/api/publish?type=' + params.artifact_type
if params.is_async_deployment is not None:
deploy_url = deploy_url + '&async=' + str(params.is_async_deployment)
if params.should_restart is not None:
deploy_url = deploy_url + '&restart=' + str(params.should_restart)
if params.is_clean_deployment is not None:
deploy_url = deploy_url + '&clean=' + str(params.is_clean_deployment)
if params.should_ignore_stack is not None:
deploy_url = deploy_url + '&ignorestack=' + str(params.should_ignore_stack)
if params.target_path is not None:
deploy_url = deploy_url + '&path=' + params.target_path
return deploy_url
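# Illustrative example (not from the original source): for a .jar deployed asynchronously to a
# custom path, the URL built above would look roughly like
#   https://<app-name>.scm.azurewebsites.net/api/publish?type=jar&async=True&path=webapps/ROOT.jar
# assuming the usual *.scm.azurewebsites.net Kudu endpoint returned by _get_scm_url.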
def _get_onedeploy_status_url(params):
scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
return scm_url + '/api/deployments/latest'
def _get_basic_headers(params):
import urllib3
user_name, password = _get_site_credential(params.cmd.cli_ctx, params.resource_group_name,
params.webapp_name, params.slot)
if params.src_path:
content_type = 'application/octet-stream'
elif params.src_url:
content_type = 'application/json'
else:
raise CLIError('Unable to determine source location of the artifact being deployed')
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
headers['Content-Type'] = content_type
return headers
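# Note on the credentials above: urllib3.util.make_headers(basic_auth='user:pass') returns a dict
# such as {'authorization': 'Basic dXNlcjpwYXNz'}, so the site credentials are sent as a standard
# HTTP Basic Authorization header alongside the Cache-Control, User-Agent and Content-Type headers.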
def _get_onedeploy_request_body(params):
import os
if params.src_path:
logger.info('Deploying from local path: %s', params.src_path)
try:
with open(os.path.realpath(os.path.expanduser(params.src_path)), 'rb') as fs:
body = fs.read()
except Exception as e: # pylint: disable=broad-except
raise CLIError("Either '{}' is not a valid local file path or you do not have permissions to access it"
.format(params.src_path)) from e
elif params.src_url:
logger.info('Deploying from URL: %s', params.src_url)
body = json.dumps({
"packageUri": params.src_url
})
else:
raise CLIError('Unable to determine source location of the artifact being deployed')
return body
def _update_artifact_type(params):
import ntpath
if params.artifact_type is not None:
return
# Interpret deployment type from the file extension if the type parameter is not passed
file_name = ntpath.basename(params.src_path)
file_extension = file_name.split(".", 1)[1]
if file_extension in ('war', 'jar', 'ear', 'zip'):
params.artifact_type = file_extension
elif file_extension in ('sh', 'bat'):
params.artifact_type = 'startup'
else:
params.artifact_type = 'static'
logger.warning("Deployment type: %s. To override deloyment type, please specify the --type parameter. "
"Possible values: war, jar, ear, zip, startup, script, static", params.artifact_type)
def _make_onedeploy_request(params):
import requests
from azure.cli.core.util import (
should_disable_connection_verify,
)
# Build the request body, headers, API URL and status URL
body = _get_onedeploy_request_body(params)
headers = _get_basic_headers(params)
deploy_url = _build_onedeploy_url(params)
deployment_status_url = _get_onedeploy_status_url(params)
logger.info("Deployment API: %s", deploy_url)
response = requests.post(deploy_url, data=body, headers=headers, verify=not should_disable_connection_verify())
# For debugging purposes only, you can change the async deployment into a sync deployment by polling the API status
# For that, set poll_async_deployment_for_debugging=True
poll_async_deployment_for_debugging = True
# check the status of async deployment
if response.status_code == 202 or response.status_code == 200:
response_body = None
if poll_async_deployment_for_debugging:
logger.info('Polling the status of async deployment')
response_body = _check_zip_deployment_status(params.cmd, params.resource_group_name, params.webapp_name,
deployment_status_url, headers, params.timeout)
logger.info('Async deployment complete. Server response: %s', response_body)
return response_body
# API not available yet!
if response.status_code == 404:
raise CLIError("This API isn't available in this environment yet!")
# check if there's an ongoing process
if response.status_code == 409:
raise CLIError("Another deployment is in progress. Please wait until that process is complete before "
"starting a new deployment. You can track the ongoing deployment at {}"
.format(deployment_status_url))
    # check if an error occurred during deployment
    if response.status_code:
        raise CLIError("An error occurred during deployment. Status Code: {}, Details: {}"
                       .format(response.status_code, response.text))
# OneDeploy
def _perform_onedeploy_internal(params):
# Update artifact type, if required
_update_artifact_type(params)
# Now make the OneDeploy API call
logger.info("Initiating deployment")
response = _make_onedeploy_request(params)
logger.info("Deployment has completed successfully")
return response
def _wait_for_webapp(tunnel_server):
tries = 0
while True:
if is_webapp_up(tunnel_server):
break
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError('SSH timeout, your app must be running before'
' it can accept SSH connections. '
'Use `az webapp log tail` to review the app startup logs.')
tries = tries + 1
logger.warning('.')
time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
tries = 0
while True:
try:
c = Connection(host=hostname,
port=port,
user=username,
# connect_timeout=60*10,
connect_kwargs={"password": password})
break
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
try:
c.run('cat /etc/motd', pty=True)
c.run('source /etc/profile; exec $SHELL -l', pty=True)
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
finally:
c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None): # pylint: disable=too-many-statements
import platform
if platform.system() == "Windows":
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise ValidationError("Only Linux App Service Plans supported, found a Windows App Service Plan")
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
if not instance:
open_page_in_browser(scm_url + '/webssh/host')
else:
open_page_in_browser(scm_url + '/webssh/host?instance={}'.format(instance))
else:
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
raise ValidationError('Remote debugging is enabled, please disable')
create_tunnel_and_session(
cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout, instance=instance)
def _configure_default_logging(cmd, rg_name, name):
logger.warning("Configuring default logging for the app, if not already enabled")
return config_diagnostics(cmd, rg_name, name,
application_logging=True, web_server_logging='filesystem',
docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
ase_is_id = is_valid_resource_id(ase)
if ase_is_id:
return ase
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Web',
type='hostingEnvironments',
name=ase)
def _validate_asp_sku(app_service_environment, sku):
# Isolated SKU is supported only for ASE
if sku.upper() in ['I1', 'I2', 'I3', 'I1V2', 'I2V2', 'I3V2']:
if not app_service_environment:
raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/azure/app-service/overview-hosting-plans")
else:
if app_service_environment:
raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/azure/app-service/overview-hosting-plans")
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
key_vault_is_id = is_valid_resource_id(key_vault)
if key_vault_is_id:
return key_vault
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.KeyVault',
type='vaults',
name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
hostname_bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_host_name_bindings', slot)
verified_hostname_found = False
for hostname_binding in hostname_bindings:
binding_name = hostname_binding.name.split('/')[-1]
if binding_name.lower() == hostname and (hostname_binding.host_name_type == 'Verified' or
hostname_binding.host_name_type == 'Managed'):
verified_hostname_found = True
return verified_hostname_found
def update_host_key(cmd, resource_group_name, name, key_type, key_name, key_value=None, slot=None):
# pylint: disable=protected-access
key_info = KeyInfo(name=key_name, value=key_value)
KeyInfo._attribute_map = {
'name': {'key': 'properties.name', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.create_or_update_host_secret_slot(resource_group_name,
name,
key_type,
key_name,
slot, key=key_info)
return client.web_apps.create_or_update_host_secret(resource_group_name,
name,
key_type,
key_name, key=key_info)
def list_host_keys(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_host_keys_slot(resource_group_name, name, slot)
return client.web_apps.list_host_keys(resource_group_name, name)
def delete_host_key(cmd, resource_group_name, name, key_type, key_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_host_secret_slot(resource_group_name, name, key_type, key_name, slot)
return client.web_apps.delete_host_secret(resource_group_name, name, key_type, key_name)
def show_function(cmd, resource_group_name, name, function_name):
client = web_client_factory(cmd.cli_ctx)
result = client.web_apps.get_function(resource_group_name, name, function_name)
if result is None:
return "Function '{}' does not exist in app '{}'".format(function_name, name)
return result
def delete_function(cmd, resource_group_name, name, function_name):
client = web_client_factory(cmd.cli_ctx)
result = client.web_apps.delete_function(resource_group_name, name, function_name)
return result
def update_function_key(cmd, resource_group_name, name, function_name, key_name, key_value=None, slot=None):
# pylint: disable=protected-access
key_info = KeyInfo(name=key_name, value=key_value)
KeyInfo._attribute_map = {
'name': {'key': 'properties.name', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.create_or_update_function_secret_slot(resource_group_name,
name,
function_name,
key_name,
slot,
key_info)
return client.web_apps.create_or_update_function_secret(resource_group_name,
name,
function_name,
key_name,
key_info)
def list_function_keys(cmd, resource_group_name, name, function_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_function_keys_slot(resource_group_name, name, function_name, slot)
return client.web_apps.list_function_keys(resource_group_name, name, function_name)
def delete_function_key(cmd, resource_group_name, name, key_name, function_name=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_function_secret_slot(resource_group_name, name, function_name, key_name, slot)
return client.web_apps.delete_function_secret(resource_group_name, name, function_name, key_name)
def add_github_actions(cmd, resource_group, name, repo, runtime=None, token=None, slot=None, # pylint: disable=too-many-statements,too-many-branches
branch='master', login_with_github=False, force=False):
if not token and not login_with_github:
raise_missing_token_suggestion()
elif not token:
scopes = ["admin:repo_hook", "repo", "workflow"]
token = get_github_access_token(cmd, scopes)
elif token and login_with_github:
logger.warning("Both token and --login-with-github flag are provided. Will use provided token")
# Verify resource group, app
site_availability = get_site_availability(cmd, name)
if site_availability.name_available or (not site_availability.name_available and
site_availability.reason == 'Invalid'):
raise ResourceNotFoundError(
"The Resource 'Microsoft.Web/sites/%s' under resource group '%s' "
"was not found." % (name, resource_group))
app_details = get_app_details(cmd, name)
if app_details is None:
raise ResourceNotFoundError(
"Unable to retrieve details of the existing app %s. Please check that the app is a part of "
"the current subscription" % name)
current_rg = app_details.resource_group
if resource_group is not None and (resource_group.lower() != current_rg.lower()):
raise ResourceNotFoundError("The webapp %s exists in ResourceGroup %s and does not match the "
"value entered %s. Please re-run command with the correct "
"parameters." % (name, current_rg, resource_group))
parsed_plan_id = parse_resource_id(app_details.server_farm_id)
client = web_client_factory(cmd.cli_ctx)
plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
is_linux = plan_info.reserved
# Verify github repo
from github import Github, GithubException
from github.GithubException import BadCredentialsException, UnknownObjectException
if repo.strip()[-1] == '/':
repo = repo.strip()[:-1]
g = Github(token)
github_repo = None
try:
github_repo = g.get_repo(repo)
try:
github_repo.get_branch(branch=branch)
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
logger.warning('Verified GitHub repo and branch')
except BadCredentialsException:
raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
"the --token argument. Run 'az webapp deployment github-actions add --help' "
"for more information.")
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} repo".format(repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
# Verify runtime
app_runtime_info = _get_app_runtime_info(
cmd=cmd, resource_group=resource_group, name=name, slot=slot, is_linux=is_linux)
app_runtime_string = None
if(app_runtime_info and app_runtime_info['display_name']):
app_runtime_string = app_runtime_info['display_name']
github_actions_version = None
if (app_runtime_info and app_runtime_info['github_actions_version']):
github_actions_version = app_runtime_info['github_actions_version']
if runtime and app_runtime_string:
if app_runtime_string.lower() != runtime.lower():
            logger.warning('The app runtime: %s does not match the runtime specified: %s. '
                           'Using the specified runtime %s.', app_runtime_string, runtime, runtime)
app_runtime_string = runtime
elif runtime:
app_runtime_string = runtime
if not app_runtime_string:
raise CLIError('Could not detect runtime. Please specify using the --runtime flag.')
if not _runtime_supports_github_actions(runtime_string=app_runtime_string, is_linux=is_linux):
raise CLIError("Runtime %s is not supported for GitHub Actions deployments." % app_runtime_string)
# Get workflow template
logger.warning('Getting workflow template using runtime: %s', app_runtime_string)
workflow_template = _get_workflow_template(github=g, runtime_string=app_runtime_string, is_linux=is_linux)
# Fill workflow template
guid = str(uuid.uuid4()).replace('-', '')
publish_profile_name = "AzureAppService_PublishProfile_{}".format(guid)
logger.warning(
'Filling workflow template with name: %s, branch: %s, version: %s, slot: %s',
name, branch, github_actions_version, slot if slot else 'production')
completed_workflow_file = _fill_workflow_template(content=workflow_template.decoded_content.decode(), name=name,
branch=branch, slot=slot, publish_profile=publish_profile_name,
version=github_actions_version)
completed_workflow_file = completed_workflow_file.encode()
# Check if workflow exists in repo, otherwise push
if slot:
file_name = "{}_{}({}).yml".format(branch.replace('/', '-'), name.lower(), slot)
else:
file_name = "{}_{}.yml".format(branch.replace('/', '-'), name.lower())
dir_path = "{}/{}".format('.github', 'workflows')
file_path = "/{}/{}".format(dir_path, file_name)
try:
existing_workflow_file = github_repo.get_contents(path=file_path, ref=branch)
existing_publish_profile_name = _get_publish_profile_from_workflow_file(
workflow_file=str(existing_workflow_file.decoded_content))
if existing_publish_profile_name:
completed_workflow_file = completed_workflow_file.decode()
completed_workflow_file = completed_workflow_file.replace(
publish_profile_name, existing_publish_profile_name)
completed_workflow_file = completed_workflow_file.encode()
publish_profile_name = existing_publish_profile_name
logger.warning("Existing workflow file found")
if force:
logger.warning("Replacing the existing workflow file")
github_repo.update_file(path=file_path, message="Update workflow using Azure CLI",
content=completed_workflow_file, sha=existing_workflow_file.sha, branch=branch)
else:
option = prompt_y_n('Replace existing workflow file?')
if option:
logger.warning("Replacing the existing workflow file")
github_repo.update_file(path=file_path, message="Update workflow using Azure CLI",
content=completed_workflow_file, sha=existing_workflow_file.sha,
branch=branch)
else:
logger.warning("Use the existing workflow file")
if existing_publish_profile_name:
publish_profile_name = existing_publish_profile_name
except UnknownObjectException:
logger.warning("Creating new workflow file: %s", file_path)
github_repo.create_file(path=file_path, message="Create workflow using Azure CLI",
content=completed_workflow_file, branch=branch)
# Add publish profile to GitHub
logger.warning('Adding publish profile to GitHub')
_add_publish_profile_to_github(cmd=cmd, resource_group=resource_group, name=name, repo=repo,
token=token, github_actions_secret_name=publish_profile_name,
slot=slot)
# Set site source control properties
_update_site_source_control_properties_for_gh_action(
cmd=cmd, resource_group=resource_group, name=name, token=token, repo=repo, branch=branch, slot=slot)
github_actions_url = "https://github.com/{}/actions".format(repo)
return github_actions_url
def remove_github_actions(cmd, resource_group, name, repo, token=None, slot=None, # pylint: disable=too-many-statements
branch='master', login_with_github=False):
if not token and not login_with_github:
raise_missing_token_suggestion()
elif not token:
scopes = ["admin:repo_hook", "repo", "workflow"]
token = get_github_access_token(cmd, scopes)
elif token and login_with_github:
logger.warning("Both token and --login-with-github flag are provided. Will use provided token")
# Verify resource group, app
site_availability = get_site_availability(cmd, name)
if site_availability.name_available or (not site_availability.name_available and
site_availability.reason == 'Invalid'):
raise CLIError("The Resource 'Microsoft.Web/sites/%s' under resource group '%s' was not found." %
(name, resource_group))
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app %s. "
"Please check that the app is a part of the current subscription" % name)
current_rg = app_details.resource_group
if resource_group is not None and (resource_group.lower() != current_rg.lower()):
raise CLIError("The webapp %s exists in ResourceGroup %s and does not match "
"the value entered %s. Please re-run command with the correct "
"parameters." % (name, current_rg, resource_group))
# Verify github repo
from github import Github, GithubException
from github.GithubException import BadCredentialsException, UnknownObjectException
if repo.strip()[-1] == '/':
repo = repo.strip()[:-1]
g = Github(token)
github_repo = None
try:
github_repo = g.get_repo(repo)
try:
github_repo.get_branch(branch=branch)
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
logger.warning('Verified GitHub repo and branch')
except BadCredentialsException:
raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
"the --token argument. Run 'az webapp deployment github-actions add --help' "
"for more information.")
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} repo".format(repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
# Check if workflow exists in repo and remove
file_name = "{}_{}({}).yml".format(
branch.replace('/', '-'), name.lower(), slot) if slot else "{}_{}.yml".format(
branch.replace('/', '-'), name.lower())
dir_path = "{}/{}".format('.github', 'workflows')
file_path = "/{}/{}".format(dir_path, file_name)
existing_publish_profile_name = None
try:
existing_workflow_file = github_repo.get_contents(path=file_path, ref=branch)
existing_publish_profile_name = _get_publish_profile_from_workflow_file(
workflow_file=str(existing_workflow_file.decoded_content))
logger.warning("Removing the existing workflow file")
github_repo.delete_file(path=file_path, message="Removing workflow file, disconnecting github actions",
sha=existing_workflow_file.sha, branch=branch)
except UnknownObjectException as e:
error_msg = "Error when removing workflow file."
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
# Remove publish profile from GitHub
if existing_publish_profile_name:
logger.warning('Removing publish profile from GitHub')
_remove_publish_profile_from_github(cmd=cmd, resource_group=resource_group, name=name, repo=repo, token=token,
github_actions_secret_name=existing_publish_profile_name, slot=slot)
# Remove site source control properties
delete_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
slot=slot)
return "Disconnected successfully."
def _get_publish_profile_from_workflow_file(workflow_file):
import re
publish_profile = None
regex = re.search(r'publish-profile: \$\{\{ secrets\..*?\}\}', workflow_file)
if regex:
publish_profile = regex.group()
publish_profile = publish_profile.replace('publish-profile: ${{ secrets.', '')
publish_profile = publish_profile[:-2]
if publish_profile:
return publish_profile.strip()
return None
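# Illustrative example (hypothetical workflow content): a line such as
#   publish-profile: ${{ secrets.AzureAppService_PublishProfile_abc123 }}
# is matched by the regex above, and the function returns 'AzureAppService_PublishProfile_abc123'.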
def _update_site_source_control_properties_for_gh_action(cmd, resource_group, name, token, repo=None,
branch="master", slot=None):
if repo:
repo_url = 'https://github.com/' + repo
else:
repo_url = None
site_source_control = show_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
slot=slot)
if site_source_control:
if not repo_url:
repo_url = site_source_control.repo_url
delete_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
slot=slot)
config_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
repo_url=repo_url,
repository_type='github',
github_action=True,
branch=branch,
git_token=token,
slot=slot)
def _get_workflow_template(github, runtime_string, is_linux):
from github import GithubException
from github.GithubException import BadCredentialsException
file_contents = None
template_repo_path = 'Azure/actions-workflow-templates'
template_file_path = _get_template_file_path(runtime_string=runtime_string, is_linux=is_linux)
try:
template_repo = github.get_repo(template_repo_path)
file_contents = template_repo.get_contents(template_file_path)
except BadCredentialsException:
raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
"the --token argument. Run 'az webapp deployment github-actions add --help' "
"for more information.")
except GithubException as e:
error_msg = "Encountered GitHub error when retrieving workflow template"
if e.data and e.data['message']:
error_msg += ": {}".format(e.data['message'])
raise CLIError(error_msg)
return file_contents
def _fill_workflow_template(content, name, branch, slot, publish_profile, version):
if not slot:
slot = 'production'
content = content.replace('${web-app-name}', name)
content = content.replace('${branch}', branch)
content = content.replace('${slot-name}', slot)
content = content.replace('${azure-webapp-publish-profile-name}', publish_profile)
content = content.replace('${AZURE_WEBAPP_PUBLISH_PROFILE}', publish_profile)
content = content.replace('${dotnet-core-version}', version)
content = content.replace('${java-version}', version)
content = content.replace('${node-version}', version)
content = content.replace('${python-version}', version)
return content
def _get_template_file_path(runtime_string, is_linux):
if not runtime_string:
raise CLIError('Unable to retrieve workflow template')
runtime_string = runtime_string.lower()
runtime_stack = runtime_string.split('|')[0]
template_file_path = None
if is_linux:
template_file_path = LINUX_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH.get(runtime_stack, None)
else:
# Handle java naming
if runtime_stack == 'java':
java_container_split = runtime_string.split('|')
            if java_container_split and len(java_container_split) >= 3:
if java_container_split[2] == 'tomcat':
runtime_stack = 'tomcat'
elif java_container_split[2] == 'java se':
runtime_stack = 'java'
template_file_path = WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH.get(runtime_stack, None)
if not template_file_path:
raise CLIError('Unable to retrieve workflow template.')
return template_file_path
def _add_publish_profile_to_github(cmd, resource_group, name, repo, token, github_actions_secret_name, slot=None):
# Get publish profile with secrets
import requests
logger.warning("Fetching publish profile with secrets for the app '%s'", name)
publish_profile_bytes = _generic_site_operation(
cmd.cli_ctx, resource_group, name, 'list_publishing_profile_xml_with_secrets',
slot, {"format": "WebDeploy"})
publish_profile = list(publish_profile_bytes)
if publish_profile:
publish_profile = publish_profile[0].decode('ascii')
else:
raise CLIError('Unable to retrieve publish profile.')
# Add publish profile with secrets as a GitHub Actions Secret in the repo
headers = {}
headers['Authorization'] = 'Token {}'.format(token)
headers['Content-Type'] = 'application/json;'
headers['Accept'] = 'application/json;'
public_key_url = "https://api.github.com/repos/{}/actions/secrets/public-key".format(repo)
public_key = requests.get(public_key_url, headers=headers)
if not public_key.ok:
raise CLIError('Request to GitHub for public key failed.')
public_key = public_key.json()
encrypted_github_actions_secret = _encrypt_github_actions_secret(public_key=public_key['key'],
secret_value=str(publish_profile))
payload = {
"encrypted_value": encrypted_github_actions_secret,
"key_id": public_key['key_id']
}
store_secret_url = "https://api.github.com/repos/{}/actions/secrets/{}".format(repo, github_actions_secret_name)
stored_secret = requests.put(store_secret_url, data=json.dumps(payload), headers=headers)
if str(stored_secret.status_code)[0] != '2':
raise CLIError('Unable to add publish profile to GitHub. Request status code: %s' % stored_secret.status_code)
def _remove_publish_profile_from_github(cmd, resource_group, name, repo, token, github_actions_secret_name, slot=None):
headers = {}
headers['Authorization'] = 'Token {}'.format(token)
import requests
store_secret_url = "https://api.github.com/repos/{}/actions/secrets/{}".format(repo, github_actions_secret_name)
requests.delete(store_secret_url, headers=headers)
def _runtime_supports_github_actions(runtime_string, is_linux):
if is_linux:
stacks = get_file_json(RUNTIME_STACKS)['linux']
else:
stacks = get_file_json(RUNTIME_STACKS)['windows']
supports = False
for stack in stacks:
if stack['displayName'].lower() == runtime_string.lower():
if 'github_actions_properties' in stack and stack['github_actions_properties']:
supports = True
return supports
def _get_app_runtime_info(cmd, resource_group, name, slot, is_linux):
app_settings = None
app_runtime = None
if is_linux:
app_metadata = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
app_runtime = getattr(app_metadata, 'linux_fx_version', None)
return _get_app_runtime_info_helper(app_runtime, "", is_linux)
app_metadata = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'list_metadata', slot)
app_metadata_properties = getattr(app_metadata, 'properties', {})
if 'CURRENT_STACK' in app_metadata_properties:
app_runtime = app_metadata_properties['CURRENT_STACK']
if app_runtime and app_runtime.lower() == 'node':
app_settings = get_app_settings(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
for app_setting in app_settings:
if 'name' in app_setting and app_setting['name'] == 'WEBSITE_NODE_DEFAULT_VERSION':
app_runtime_version = app_setting['value'] if 'value' in app_setting else None
if app_runtime_version:
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
elif app_runtime and app_runtime.lower() == 'python':
app_settings = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
app_runtime_version = getattr(app_settings, 'python_version', '')
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
elif app_runtime and app_runtime.lower() == 'dotnetcore':
        app_runtime_version = ""
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
elif app_runtime and app_runtime.lower() == 'java':
app_settings = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
app_runtime_version = "{java_version}, {java_container}, {java_container_version}".format(
java_version=getattr(app_settings, 'java_version', '').lower(),
java_container=getattr(app_settings, 'java_container', '').lower(),
java_container_version=getattr(app_settings, 'java_container_version', '').lower()
)
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
def _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux):
if is_linux:
stacks = get_file_json(RUNTIME_STACKS)['linux']
for stack in stacks:
if 'github_actions_properties' in stack and stack['github_actions_properties']:
if stack['displayName'].lower() == app_runtime.lower():
return {
"display_name": stack['displayName'],
"github_actions_version": stack['github_actions_properties']['github_actions_version']
}
else:
stacks = get_file_json(RUNTIME_STACKS)['windows']
for stack in stacks:
if 'github_actions_properties' in stack and stack['github_actions_properties']:
if (stack['github_actions_properties']['app_runtime'].lower() == app_runtime.lower() and
stack['github_actions_properties']['app_runtime_version'].lower() ==
app_runtime_version.lower()):
return {
"display_name": stack['displayName'],
"github_actions_version": stack['github_actions_properties']['github_actions_version']
}
return None
def _encrypt_github_actions_secret(public_key, secret_value):
# Encrypt a Unicode string using the public key
from base64 import b64encode
public_key = public.PublicKey(public_key.encode("utf-8"), encoding.Base64Encoder())
sealed_box = public.SealedBox(public_key)
encrypted = sealed_box.encrypt(secret_value.encode("utf-8"))
return b64encode(encrypted).decode("utf-8")
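# Note: 'public' and 'encoding' are presumably provided by PyNaCl ('from nacl import encoding, public')
# imported elsewhere in this module; GitHub's Actions secrets API expects values encrypted with the
# repository public key using a libsodium sealed box, which is what SealedBox performs here.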
|
Melee Code Manager.py
|
#!/usr/bin/python
# This Python file uses the following encoding: utf-8
# ------------------------------------------------------------------- #
# ~ ~ Written by DRGN of SmashBoards (Daniel R. Cappel); June, 2015 ~ ~ #
# - - [Python v2.7.9 & 2.7.12] - - #
# ------------------------------------------------------------------- #
programVersion = '4.4'
# # Find the official thread here:
# http://smashboards.com/threads/melee-code-manager-easily-add-codes-to-your-game.416437/
# Primary logic.
import os # For various file and directory operations.
import time # For performance testing
import json # For opening/parsing JSON config files (used for alternate mod folder structure syntax)
import errno
import codecs
import sys, csv
import datetime
#import win32api # Needs to be installed via "pip install pywin32"
import webbrowser # Used to open a web page (the Melee Workshop, from the createNewDolMod function) or text file.
import subprocess, math # For communication with command line, and rounding operations, respectively.
import struct, binascii # For converting byte strings to integers. And binascii for the file hash function.
from decimal import Decimal # For the Code Creation tab's number conversions
from string import hexdigits # For checking that a string only consists of hexadecimal characters.
from binascii import hexlify
from collections import OrderedDict
from sets import Set # Used for duplicate mod detection and other unique lists
try: from cStringIO import StringIO # For compiling/decompiling. cStringIO referred for performance.
except: from StringIO import StringIO
# GUI stuff
from Tkinter import Tk, Toplevel, Frame, StringVar, IntVar, BooleanVar, Label, OptionMenu, Spinbox, Button, Menu, Scrollbar, Canvas
from commonGuiModules import getWindowGeometry, basicWindow, CopyableMsg, PopupEntryWindow, PopupScrolledTextWindow, ToolTip
import Tkinter, ttk, tkMessageBox, tkFileDialog, tkFont
from ScrolledText import ScrolledText
from PIL import Image, ImageTk # For working with more image formats than just GIF.
from urlparse import urlparse # For validating and security checking URLs
from threading import Thread
from newTkDnD import TkDND # Access files given (drag-and-dropped) onto the running program GUI.
import pyaudio, wave
import webbrowser # Used for opening web pages
# User defined settings / persistent memory.
import settings as settingsFile
import ConfigParser
settings = ConfigParser.SafeConfigParser()
settings.optionxform = str # Tells the parser to preserve case sensitivity (for camelCase).
# Set up some useful globals
scriptHomeFolder = os.path.abspath( os.path.dirname(sys.argv[0]) )
dolsFolder = scriptHomeFolder + '\\Original DOLs'
imageBank = {} # Populates with key=fileNameWithoutExt, value=ImageTk.PhotoImage object
soundBank = {} # Populates with key=fileNameWithoutExt, value=fullFilePath
genGlobals = { # General Globals (migrate other globals to here)
'optionsFilePath': scriptHomeFolder + '\\options.ini',
'allModNames': Set(), # Will contain only unique mod names (used for duplicate mod detection)
'allMods': [],
'allStandaloneFunctions': {},
'originalDolRevisions': [],
'modifiedRegions': []
}
# The following are saved in the options.ini file (if it exists). To access them within the program, use:
# settings.get( 'General Settings', [valueName] )
# settings.set( 'General Settings', [valueName], [newValue] )
# And save them to file with a call to saveOptions()
generalOptionDefaults = {
'modsFolderPath': scriptHomeFolder + '\\Mods Library',
'modsFolderIndex': '0',
'defaultSearchDirectory': os.path.expanduser( '~' ),
'defaultFileFormat': 'iso',
'onlyUpdateGameSettings': 'False',
'hexEditorPath': '',
# 'altFontColor': '#d1cede', # A shade of silver; useful for high-contrast system themes
'offsetView': 'ramAddress', # alternate acceptable value=dolOffset (not case sensitive or affected by spaces)
'summaryOffsetView': 'dolOffset',
'sortSummaryByOffset': 'False'
}
overwriteOptions = OrderedDict() # Populates with key=customCodeRegionName, value=BooleanVar (to describe whether the regions should be used.)
# The following two dictionaries are only used for SSBM, for the game's default settings.
# Key = target widget, value = tuple of "(tableOffset, gameDefault, tourneyDefault [, str translations])".
# The gameDefault and tourneyDefault integers are indexes, relating to the string portion of the tuple (if it has a string portion).
# If the tuple doesn't have a string portion, then the values are instead the direct value for the setting (e.g. 3 stock or 4 stock).
settingsTableOffset = { 'NTSC 1.00': 0x3CFB90, 'NTSC 1.01': 0x3D0D68, 'NTSC 1.02': 0x3D1A48, 'NTSC 1.03': 0x3D1A48, 'PAL 1.00': 0x3D20C0 }
gameSettingsTable = {
'gameModeSetting': (2, 0, 1, 'Time', 'Stock', 'Coin', 'Bonus'), # AA
'gameTimeSetting': (3, 2, 2), # BB
'stockCountSetting': (4, 3, 4), # CC
'handicapSetting': (5, 0, 0, 'Off', 'Auto', 'On'), # DD
'damageRatioSetting': (6, 1.0, 1.0), # EE
'stageSelectionSetting': (7, 0, 0, 'On', 'Random', 'Ordered', 'Turns', 'Loser'), # FF
'stockTimeSetting': (8, 0, 8), # GG
'friendlyFireSetting': (9, 0, 1, 'Off', 'On'), # HH
'pauseSetting': (10, 1, 0, 'Off', 'On'), # II
'scoreDisplaySetting': (11, 0, 1, 'Off', 'On'), # JJ
'selfDestructsSetting': (12, 0, 0, '-1', '0', '-2'), # KK
'itemFrequencySetting': (24, 3, 0, 'None', 'Very Low', 'Low',
'Medium', 'High', 'Very High', 'Extremely High'), # PP
'itemToggleSetting': (36, 'FFFFFFFF', '00000000'),
'p1RumbleSetting': (40, 1, 0, 'Off', 'On'), # R1
'p2RumbleSetting': (41, 1, 0, 'Off', 'On'), # R2
'p3RumbleSetting': (42, 1, 0, 'Off', 'On'), # R3
'p4RumbleSetting': (43, 1, 0, 'Off', 'On'), # R4
#'soundBalanceSetting': (44, 0, 0), # MM
#'deflickerSetting': (45, 1, 1, 'Off', 'On'), # SS
#'languageSetting': (46, ), # Game ignores this in favor of system default? # LL
'stageToggleSetting': (48, 'FFFFFFFF', 'E70000B0') # TT
#'bootToSetting': ()
#'dbLevelSetting': ()
} # The variables that the program and GUI use to track the user's changes to these settings are contained in 'currentGameSettingsValues'
#=========================#
# ~ ~ General Functions ~ ~ #
#=========================#
def msg( *args ):
if len(args) > 2: tkMessageBox.showinfo( message=args[0], title=args[1], parent=args[2] )
elif len(args) > 1: tkMessageBox.showinfo( message=args[0], title=args[1] )
else: tkMessageBox.showinfo( message=args[0] )
def toInt( input ): # Converts 1, 2, and 4 byte bytearray values to integers.
try:
byteLength = len( input )
if byteLength == 1: return struct.unpack( '>B', input )[0] # big-endian unsigned char (1 byte)
elif byteLength == 2: return struct.unpack( '>H', input )[0] # big-endian unsigned short (2 bytes)
else: return struct.unpack( '>I', input )[0] # big-endian unsigned int (4 bytes)
except:
raise ValueError( 'toInt was not able to convert the ' + str(type(input))+' type' )
def isNaN( var ): ## Test if a variable 'is Not a Number'
try:
float( var )
return False
except ValueError:
return True
def isEven( inputVal ):
if inputVal % 2 == 0: return True # Non-zero modulus indicates an odd number.
else: return False
def roundTo32( x, base=32 ): # Rounds up to nearest increment of 32.
return int( base * math.ceil(float(x) / base) )
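# Illustrative examples (not from the original source): roundTo32( 33 ) returns 64; roundTo32( 32 ) returns 32.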
def uHex( integer ): # Quick conversion to have a hex function which uses uppercase characters.
if integer == 0: return '0'
else: return '0x' + hex( integer )[2:].upper()
def toHex( intOrStr, padTo ): # Creates a hex string (from an int or int string), zero-padded to the number of hex digits (nibbles) given by the second parameter.
return "{0:0{1}X}".format( int( intOrStr ), padTo )
def grammarfyList( theList ): # For example, the list [apple, pear, banana, melon] becomes the string 'apple, pear, banana, and melon'.
if len(theList) == 1: return str(theList[0])
elif len(theList) == 2: return str(theList[0]) + ' and ' + str(theList[1])
else:
string = ', '.join( theList )
indexOfLastComma = string.rfind(',')
return string[:indexOfLastComma] + ', and ' + string[indexOfLastComma + 2:]
def convertCamelCase( originalString ): # Expands camelCase strings for reading; e.g. "thisIsCamelCase" becomes "This Is Camel Case"
stringList = []
for character in originalString:
if stringList == []: stringList.append( character.upper() ) # Capitalize the first character in the string
elif character.isupper(): stringList.append( ' ' + character )
else: stringList.append( character )
return ''.join( stringList )
def humansize( nbytes ): # Used for converting file sizes, in terms of human readability.
suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
if nbytes == 0: return '0 B'
i = 0
while nbytes >= 1024 and i < len(suffixes)-1:
nbytes /= 1024.
i += 1
f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
return '%s %s' % (f, suffixes[i])
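# Illustrative examples (not from the original source): humansize( 0 ) returns '0 B'; humansize( 2048 ) returns '2 KB'.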
def createFolders( folderPath ):
try:
os.makedirs( folderPath )
except OSError as exc: # Suppresses error if the directory already exists. Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir( folderPath ):
pass
else: raise
def findAll( stringToLookIn, subString, charIncrement=2 ): # Finds ALL instances of a string in another string
matches = []
i = stringToLookIn.find( subString )
while i >= 0:
matches.append( i )
i = stringToLookIn.find( subString, i + charIncrement ) # Change 2 to 1 if not going by bytes.
return matches
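# Illustrative example (not from the original source): findAll( '00ff00ff', 'ff' ) returns [2, 6];
# the default charIncrement of 2 steps by character pairs, since DOL data is handled here as a hex string.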
def getType( object ):
return str(type(object)).replace("<type '", '').replace("'>", '')
# def CRC32_from_file( filepath ):
# #buf = open( filepath, 'rb').read()
# with open( filepath, 'rb' ) as file: buf = file.read()
# buf = (binascii.crc32( buf ) & 0xFFFFFFFF)
# return "%08X" % buf
def validHex( offset ): # Accepts a string.
if offset == '': return False
return all(char in hexdigits for char in offset) # Returns Boolean
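# Illustrative examples (not from the original source): validHex( '1A3F' ) returns True;
# validHex( '0x1A' ) returns False, because the '0x' prefix is not made of hex digits.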
def copyToClipboard( text ):
root.clipboard_clear()
root.clipboard_append( text )
def rgb2hex( color ): return '#{:02x}{:02x}{:02x}'.format( color[0], color[1], color[2]) # input can be RGBA, but output will still be RGB
def hex2rgb( hexColor ): # Expects '#RRGGBB' or '#RRGGBBAA'
hexColor = hexColor.replace( '#', '' )
channelsList = []
if len( hexColor ) % 2 != 0: # Checks whether the string is an odd number of characters
raise ValueError( 'Input to hex2rgb must be an even number of characters!' )
else:
for i in xrange( 0, len(hexColor), 2 ): # Iterate by 2 over the length of the input string
channelByte = hexColor[i:i+2]
channelsList.append( int( channelByte, 16 ) )
return tuple(channelsList)
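# Illustrative examples (not from the original source): rgb2hex( (255, 128, 0) ) returns '#ff8000',
# and hex2rgb( '#ff8000' ) returns (255, 128, 0).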
def name2rgb( name ):
""" Converts a Tkinter color name to an RGB tuple. """
colorChannels = root.winfo_rgb( name ) # Returns a 16-bit RGB tuple (0-65535)
return tuple( [channel / 256 for channel in colorChannels] ) # Converts to an 8-bit RGB tuple (0-255)
def openFolder( folderPath, fileToSelect='' ): # Opens a folder in Explorer; uses a slower method if a file needs to be selected/highlighted
folderPath = os.path.abspath( folderPath ) # Turns relative to absolute paths, and normalizes them (switches / for \, etc.)
if os.path.exists( folderPath ):
if fileToSelect: # Slow method, but can select/highlight items in the folder
if not os.path.exists( folderPath + '\\' + fileToSelect ):
print( 'Could not find this file: \n\n' + fileToSelect )
return
command = '"C:\\Windows\\explorer.exe" /select, \"{}\\{}\"'.format( folderPath, fileToSelect )
try:
                outputStream = subprocess.check_output(command, shell=False, stderr=subprocess.STDOUT, creationflags=0x08000000) # creationflags=0x08000000 (CREATE_NO_WINDOW) prevents a console window from appearing.
except subprocess.CalledProcessError as error:
outputStream = str(error.output)
if len(outputStream) != 0:
exitCode = str(error.returncode)
print( 'IPC error: \n\n' + outputStream + '\n\nErrorlevel ' + exitCode )
except Exception as generalError:
print( 'IPC error: \n\n' + generalError )
else: # Fast method, but cannot select files
os.startfile( folderPath )
else: print( 'Could not find this folder: \n\n' + folderPath )
#=====================================#
# ~ ~ Initialization & File Reading ~ ~ #
#=====================================#
class dolInitializer( object ):
def __init__( self ):
self.reset()
def reset( self ):
self.path = ''
self.type = '' # Essentially the file type/extension. Expected to be 'dol', 'iso', or 'gcm'
self.gameId = ''
self.discVersion = ''
self.region = ''
self.offset = 0
self.version = ''
self.revision = ''
self.isMelee = False # Will only be true for Super Smash Bros. Melee
self.is20XX = False
self.data = '' # Future #todo: upgrade to a bytearray, below
#self.bytes = bytearray()
self.sectionInfo = OrderedDict()
self.maxDolOffset = 0
self.maxRamAddress = 0
self.customCodeRegions = OrderedDict()
self.isLoading = False
def checkIfMelee( self ):
# Check the DOL for a string of "Super Smash Bros. Melee" at specific locations
self.isMelee = True
ssbmStringBytes = bytearray()
ssbmStringBytes.extend( "Super Smash Bros. Melee" )
dataBytes = bytearray.fromhex( self.data )
if dataBytes[0x3B78FB:0x3B7912] == ssbmStringBytes: self.region = 'NTSC'; self.version = '1.02' # most common, so checking for it first
elif dataBytes[0x3B6C1B:0x3B6C32] == ssbmStringBytes: self.region = 'NTSC'; self.version = '1.01'
elif dataBytes[0x3B5A3B:0x3B5A52] == ssbmStringBytes: self.region = 'NTSC'; self.version = '1.00'
elif dataBytes[0x3B75E3:0x3B75FA] == ssbmStringBytes: self.region = 'PAL'; self.version = '1.00'
else:
self.region = self.version = ''
self.isMelee = False
def getDolVersion( self ):
# The range 0xE4 to 0x100 in the DOL is normally unused padding. This can be used to specify a DOL version.
customDolVersionRange = bytearray.fromhex( self.data )[0xE4:0x100]
customVersionString = customDolVersionRange.split(b'\x00')[0].decode( 'ascii' )
# If a custom string exists, validate and use that, or else prompt the user (using disc region/version for predictors)
if customVersionString and ' ' in customVersionString: # Highest priority for determining version
apparentRegion, apparentVersion = normalizeRegionString( customVersionString ).split() # Should never return 'ALL' in this case
if ( apparentRegion == 'NTSC' or apparentRegion == 'PAL' ) and apparentVersion.find( '.' ) != -1:
self.region, self.version = apparentRegion, apparentVersion
if not self.region or not self.version:
# Check the filepath; if this is an original DOL in the Original DOLs folder, use the file name to determine version
if self.type == 'dol' and os.path.dirname( self.path ) == dolsFolder:
self.region, self.version = normalizeRegionString( os.path.basename(self.path) ).split()
return
# Attempt to predict details based on the disc, if present
regionSuggestion = 'NTSC'
versionSuggestion = '02'
if self.type == 'iso' or self.type == 'gcm':
if self.region == 'NTSC' or self.region == 'PAL':
regionSuggestion = self.region
if '.' in self.discVersion:
versionSuggestion = self.discVersion.split('.')[1]
userMessage = ( "The revision of the DOL within this disc is being predicted from the disc's details. Please verify them below. "
"(If this disc has not been altered, these predictions can be trusted.)" )
else:
userMessage = "This DOL's revision could not be determined. Please select a region and game version below."
userMessage += ' Note that codes may not be able to be installed or detected properly if these are set incorrectly.'
revisionWindow = RevisionPromptWindow( userMessage, regionSuggestion, versionSuggestion )
if revisionWindow.region and revisionWindow.version: # Save the user-confirmed revision
self.region = revisionWindow.region
self.version = revisionWindow.version
self.writeSig()
# Write the new DOL data to file/disc
if self.type == 'dol':
with open( self.path, 'wb') as dolBinary:
dolBinary.write( bytearray.fromhex(self.data) )
elif ( self.type == 'iso' or self.type == 'gcm' ) and self.offset != 0:
with open( self.path, 'r+b') as isoBinary:
isoBinary.seek( self.offset )
isoBinary.write( bytearray.fromhex(self.data) )
        else: # No confirmation from the user; assume the suggested revision
self.region = regionSuggestion
self.version = '1.' + versionSuggestion
msg( 'Without confirmation, the DOL file will be assumed as ' + self.region + ' ' + self.version + '. '
'If this is incorrect, you may run into problems detecting currently installed mods or with adding/removing mods. '
'And installing mods may break game functionality.', 'Revision Uncertainty', root )
def checkIf20XX( self ):
""" Checks the DOL file size, and looks for a 4-byte value (which is also the file size)
at the end of the DOL file, in order to determine whether it's a DOL for 20XX HP.
Note that the file data is actually a string to make certain things easier (and because
of how this program was initially written). """
if len( self.data ) / 2 == 0x438800 and self.data[-8:] == '00438800':
self.is20XX = True
def writeSig( self ):
""" Saves the DOL's determined revision to the file in an unused area,
so that subsequent loads don't have to ask about it. """
# Create the hex string; [DOL Revision]00[Program Version]
revisionDataHex = ( self.region + ' ' + self.version ).encode("hex")
revisionDataHex += '00' + ( 'MCM v' + programVersion ).encode("hex") # Adds a stop byte and MCM's version number
# Ensure the data length never exceeds 0x1C bytes
nibLength = len( revisionDataHex )
if nibLength > 0x38: # 0x38 = 0x1C * 2
revisionDataHex = revisionDataHex[:0x38]
padding = ''
else:
# Fill the rest of the section with zeroes
padding = '0' * ( 0x38 - nibLength )
# Write the string to the file data
self.data = self.data[:0x1C8] + revisionDataHex + padding + self.data[0x200:] # Static values doubled to count by nibbles; 0x1C8 = 0xE4 * 2
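    # Illustrative example (not from the original source): for an NTSC 1.02 DOL and MCM v4.4, the bytes
    # written at 0xE4 would read 'NTSC 1.02' + '\x00' + 'MCM v4.4', zero-padded out to offset 0x100.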
def getDolDataFromIso( self, isoPath ):
with open( isoPath, 'rb') as isoBinary:
# Collect info on the disc
self.gameId = isoBinary.read(6).decode( 'ascii' ) # First 6 bytes
isoBinary.seek( 1, 1 ) # Second arg means to seek relative to current position
versionHex = isoBinary.read(1).encode("hex")
regionCode = self.gameId[3]
ntscRegions = ( 'A', 'E', 'J', 'K', 'R', 'W' )
if regionCode in ntscRegions: self.region = 'NTSC'
else: self.region = 'PAL'
self.discVersion = '1.' + versionHex
# Get the DOL's ISO file offset, length, and data.
isoBinary.seek( 0x0420 )
self.offset = toInt( isoBinary.read(4) ) # Should be 0x1E800 for SSBM NTSC v1.02
tocOffset = toInt( isoBinary.read(4) )
dolLength = tocOffset - self.offset # Should be 0x438600 for SSBM NTSC v1.02
isoBinary.seek( self.offset )
self.data = isoBinary.read( dolLength ).encode("hex")
def load( self, filepath ):
self.reset()
self.isLoading = True
# Initialize the file path and type
self.path = filepath
self.type = os.path.splitext( filepath )[1].lower().replace( '.', '' )
# Validate the given path
if not filepath or not os.path.exists( filepath ):
            msg( 'The received filepath was invalid, or the file could not be found.' )
return
# Get the DOL's file data
if self.type == 'iso' or self.type == 'gcm':
self.getDolDataFromIso( filepath )
elif self.type == 'dol':
with open( filepath, 'rb') as binaryFile:
self.data = binaryFile.read().encode("hex")
else:
msg( "The given file doesn't appear to be an ISO/GCM or DOL file. \nIf it in fact is, "
"you'll need to rename it with a file extension of '.iso/.gcm' or '.dol'.", 'Incorrect file type.' )
return
# Check if this is a revision of Super Smash Bros. Melee for the Nintendo GameCube.
self.checkIfMelee()
# General check for DOL revision (region + version). This will prompt the user if it cannot be determined.
self.getDolVersion()
self.checkIf20XX()
if ( self.region == 'NTSC' or self.region == 'PAL' ) and '.' in self.version:
self.revision = self.region + ' ' + self.version
self.parseHeader()
self.isLoading = False
def parseHeader( self ):
headerData = bytearray.fromhex( self.data )[:0x100]
# Separate the section information
textFileOffsets = headerData[:0x1C]
dataFileOffsets = headerData[0x1C:0x48]
textMemAddresses = headerData[0x48:0x64]
dataMemAddresses = headerData[0x64:0x90]
textSizes = headerData[0x90:0xAC]
dataSizes = headerData[0xAC:0xD8]
self.bssMemAddress = toInt( headerData[0xD8:0xDC] )
self.bssSize = toInt( headerData[0xDC:0xE0] )
self.entryPoint = toInt( headerData[0xE0:0xE4] )
self.maxDolOffset = 0
self.maxRamAddress = 0
# Combine data points into a single definition for each text section
for i in xrange( 6 ): # No more than 6 possible
listIndex = i * 4
fileOffset = toInt( textFileOffsets[listIndex:listIndex+4] )
memAddress = toInt( '\x00' + textMemAddresses[listIndex+1:listIndex+4] )
size = toInt( textSizes[listIndex:listIndex+4] )
# If any of the above values are 0, there are no more sections
if fileOffset == 0 or memAddress == 0 or size == 0: break
self.sectionInfo['text'+str(i)] = ( fileOffset, memAddress, size )
# Find the max possible dol offset and ram address for this game's dol
if fileOffset + size > self.maxDolOffset: self.maxDolOffset = fileOffset + size
if memAddress + size > self.maxRamAddress: self.maxRamAddress = memAddress + size
# Combine data points into a single definition for each data section
for i in xrange( 10 ): # No more than 10 possible
listIndex = i * 4
fileOffset = toInt( dataFileOffsets[listIndex:listIndex+4] )
memAddress = toInt( '\x00' + dataMemAddresses[listIndex+1:listIndex+4] )
size = toInt( dataSizes[listIndex:listIndex+4] )
if fileOffset == 0 or memAddress == 0 or size == 0: break
self.sectionInfo['data'+str(i)] = ( fileOffset, memAddress, size )
# Find the max possible dol offset and ram address for this game's dol
if fileOffset + size > self.maxDolOffset: self.maxDolOffset = fileOffset + size
if memAddress + size > self.maxRamAddress: self.maxRamAddress = memAddress + size
def loadCustomCodeRegions( self ):
""" Loads and validates the custom code regions available for this DOL revision.
Filters out regions pertaining to other revisions, and those that fail basic validation. """
incompatibleRegions = []
print '\nLoading custom code regions....'
# Load all regions applicable to this DOL (even if disabled in options)
for fullRegionName, regions in settingsFile.customCodeRegions.items():
revisionList, regionName = parseSettingsFileRegionName( fullRegionName )
# Skip recent 20XX regions if this is not a recent 20XX DOL (v4.07++ or higher)
if not self.is20XX and regionName.startswith( '20XXHP' ):
continue
            # Check if the region/version of these regions are relevant to the currently loaded DOL revision
elif 'ALL' in revisionList or self.revision in revisionList:
# Validate the regions; perform basic checks that they're valid ranges for this DOL
for i, ( regionStart, regionEnd ) in enumerate( regions, start=1 ):
# Check that the region start is actually smaller than the region end
if regionStart >= regionEnd:
                        msg( 'Warning! The starting offset for region ' + str(i) + ' of "' + regionName + '" for ' + self.revision + ' is greater than or '
                            "equal to the ending offset. A region's starting offset must be smaller than the ending offset.", 'Invalid Custom Code Region' )
incompatibleRegions.append( regionName )
break
# Check that the region start is within the DOL's code or data sections
elif regionStart < 0x100 or regionStart >= self.maxDolOffset:
print "Region start (0x{:X}) for {} is outside of the DOL's code\\data sections.".format( regionStart, regionName )
incompatibleRegions.append( regionName )
break
# Check that the region end is within the DOL's code or data sections
elif regionEnd > self.maxDolOffset:
print "Region end (0x{:X}) for {} is outside of the DOL's code\\data sections.".format( regionEnd, regionName )
incompatibleRegions.append( regionName )
break
# Regions validated; allow them to show up in the GUI (Code-Space Options)
if regionName not in incompatibleRegions:
self.customCodeRegions[regionName] = regions
if incompatibleRegions:
print ( '\nThe following regions are incompatible with the ' + self.revision + ' DOL, '
'because one or both offsets fall outside of the offset range of the file:\n\n\t' + '\n\t'.join(incompatibleRegions) + '\n' )
class geckoInitializer( object ):
""" Validates Gecko configuration settings in the settings.py file, collects info, and ensures Gecko codes can be utilzed. """
def __init__( self ):
self.reset()
def reset( self ):
self.environmentSupported = False
self.hookOffset = -1
self.codelistRegion = ''
self.codelistRegionStart = self.codelistRegionEnd = -1
self.codehandlerRegion = ''
self.codehandlerRegionStart = self.codehandlerRegionEnd = -1
self.codehandler = bytearray()
self.codehandlerLength = 0
self.spaceForGeckoCodelist = 0
self.spaceForGeckoCodehandler = 0
#self.geckoConfigWarnings = [] # todo: better practice to use this to remember them until the user
# tries to enable Gecko codes (instead of messaging the user immediately)
# Check for a dictionary on Gecko configuration settings; this doesn't exist in pre-v4.0 settings files
self.geckoConfig = getattr( settingsFile, "geckoConfiguration", {} )
def checkSettings( self ):
self.reset()
if not dol.revision: return
self.setGeckoHookOffset()
if self.hookOffset == -1:
            msg( 'Warning! No geckoConfiguration properties could be found in the settings.py file for DOL revision "' + dol.revision + '". '
                'Gecko codes cannot be used until this is resolved.', 'Gecko Misconfiguration' )
return
self.codelistRegionStart, self.codelistRegionEnd = self.getCodelistRegion()
if self.codelistRegionStart == -1: return
self.codehandlerRegionStart, self.codehandlerRegionEnd = self.getCodehandlerRegion()
if self.codehandlerRegionStart == -1: return
self.codehandler = self.getGeckoCodehandler()
self.spaceForGeckoCodelist = self.codelistRegionEnd - self.codelistRegionStart
self.spaceForGeckoCodehandler = self.codehandlerRegionEnd - self.codehandlerRegionStart
# Check that the codehandler can fit in the space defined for it
if self.codehandlerLength > self.spaceForGeckoCodehandler:
msg( 'Warning! The region designated to store the Gecko codehandler is too small! The codehandler is ' + uHex( self.codehandlerLength ) + ' bytes '
'in size, while the region defined for it is ' + uHex( self.spaceForGeckoCodehandler) + ' bytes long (only the first section among those '
'regions will be used). Gecko codes cannot be used until this is resolved.', 'Gecko Misconfiguration' )
else:
# If this has been reached, everything seems to check out.
self.environmentSupported = True
# Set the maximum value for the gecko code fill meter
freeGeckoSpaceIndicator['maximum'] = self.spaceForGeckoCodelist
def setGeckoHookOffset( self ):
""" Checks for the geckoConfiguration dictionary in the config file, and gets the hook offset for the current revision. """
if not self.geckoConfig: # For backwards compatibility with pre-v4.0 config files.
oldHooksDict = getattr( settingsFile, "geckoHookOffsets", {} )
if not oldHooksDict:
self.hookOffset = -1
else:
if dol.region == 'PAL':
self.hookOffset = oldHooksDict['PAL']
else: self.hookOffset = oldHooksDict[dol.version]
# The usual expectation for v4.0+ settings files
elif self.geckoConfig:
self.hookOffset = self.geckoConfig['hookOffsets'].get( dol.revision, -1 ) # Assigns -1 if this dol revision isn't found
def getCodelistRegion( self ):
# Get the region defined in settings.py that is to be used for the Gecko codelist
if not self.geckoConfig and dol.isMelee: # For backwards compatibility with pre-v4.0 config files.
self.codelistRegion = 'DebugModeRegion'
elif self.geckoConfig:
self.codelistRegion = self.geckoConfig['codelistRegion']
# Check for the codelist region among the defined regions, and get its first DOL area
if self.codelistRegion in dol.customCodeRegions:
return dol.customCodeRegions[self.codelistRegion][0]
else:
msg( 'Warning! The region assigned for the Gecko codelist (under geckoConfiguration in the settings.py file) could not '
'be found among the code regions defined for ' + dol.revision + '. Double check the spelling, '
'and keep in mind that the strings are case-sensitive. Gecko codes cannot be used until this is resolved.', 'Gecko Misconfiguration' )
self.codelistRegion = ''
return ( -1, -1 )
def getCodehandlerRegion( self ):
# Get the region defined in settings.py that is to be used for the Gecko codehandler
if not self.geckoConfig and dol.isMelee: # For backwards compatibility with pre-v4.0 config files.
self.codehandlerRegion = 'AuxCodeRegions'
elif self.geckoConfig:
self.codehandlerRegion = self.geckoConfig['codehandlerRegion']
# Check for the codehandler region among the defined regions, and get its first DOL area
if self.codehandlerRegion in dol.customCodeRegions:
return dol.customCodeRegions[self.codehandlerRegion][0]
else:
msg( 'Warning! The region assigned for the Gecko codehandler (under geckoConfiguration in the settings.py file) could not '
'be found among the code regions defined for ' + dol.revision + '. Double check the spelling, '
'and keep in mind that the strings are case-sensitive. Gecko codes cannot be used until this is resolved.', 'Gecko Misconfiguration' )
self.codehandlerRegion = ''
return ( -1, -1 )
def getGeckoCodehandler( self ):
# Get the Gecko codehandler from an existing .bin file if present, or from the settings.py file
if os.path.exists( scriptHomeFolder + '\\codehandler.bin' ):
with open( scriptHomeFolder + '\\codehandler.bin', 'rb' ) as binFile:
geckoCodehandler = bytearray( binFile.read() )
else:
geckoCodehandler = bytearray.fromhex( settingsFile.geckoCodehandler )
# Append any padding needed to align its length to 4 bytes (so that any codes applied after it will be aligned).
self.codehandlerLength = len( geckoCodehandler )
totalRequiredCodehandlerSpace = roundTo32( self.codehandlerLength, base=4 ) # Rounds up to closest multiple of 4 bytes
paddingLength = totalRequiredCodehandlerSpace - self.codehandlerLength # in bytes
padding = bytearray( paddingLength )
geckoCodehandler.extend( padding ) # Extend returns nothing
return geckoCodehandler
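# Padding example (hypothetical length): a codehandler of 0xA29 bytes would be rounded up to 0xA2C,
# so 3 bytes of null padding are appended; a length that is already a multiple of 4 (e.g. 0xA2C)
# gets no padding.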
def loadGeneralOptions():
""" Check for user defined settings / persistent memory, from the "options.ini" file.
If values don't exist in it for particular settings, defaults are loaded from the 'generalOptionDefaults' dictionary. """
# Read the file if it exists (this should create it if it doesn't)
if os.path.exists( genGlobals['optionsFilePath'] ):
settings.read( genGlobals['optionsFilePath'] )
# Add the 'General Settings' section if it's not already present
if not settings.has_section( 'General Settings' ):
settings.add_section( 'General Settings' )
# Set default [hardcoded] settings only if their values don't exist in the options file; don't want to modify them if they're already set.
for key, option in generalOptionDefaults.items():
if not settings.has_option( 'General Settings', key ): settings.set( 'General Settings', key, option )
onlyUpdateGameSettings.set( settings.getboolean( 'General Settings', 'onlyUpdateGameSettings' ) ) # Sets a booleanVar for the GUI
def getModsFolderPath( getAll=False ):
pathsString = settings.get( 'General Settings', 'modsFolderPath' )
pathsList = csv.reader( [pathsString] ).next()
if getAll:
return pathsList
pathIndex = int( settings.get('General Settings', 'modsFolderIndex') )
if pathIndex < 0 or pathIndex >= len( pathsList ):
print 'Invalid mods library path index:', pathIndex
return pathsList[0]
return pathsList[pathIndex]
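# Example (hypothetical paths): if the saved 'modsFolderPath' option is the CSV string
# 'C:\Mods Library,D:\Other Mods' and 'modsFolderIndex' is 1, this returns 'D:\Other Mods';
# an out-of-range index falls back to the first path in the list.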
def loadRegionOverwriteOptions():
""" Checks saved options (the options.ini file) for whether or not custom code regions are selected for use (i.e. can be overwritten).
This is called just before scanning/parsing mod libraries, checking for enabled codes, or installing a mods list.
Creates new BooleanVars only on first run, which should exist for the life of the program (they will only be updated after that). """
# Check for options file / persistent memory.
if os.path.exists( genGlobals['optionsFilePath'] ): settings.read( genGlobals['optionsFilePath'] )
if not settings.has_section( 'Region Overwrite Settings' ): settings.add_section( 'Region Overwrite Settings' )
# Create BooleanVars for each defined region. These will be used to track option changes
for regionName in dol.customCodeRegions.keys():
# Create a new boolVar entry for this region.
if regionName not in overwriteOptions: # This function may have already been called (i.e. this might not be the first run)
overwriteOptions[ regionName ] = BooleanVar()
# If the options file contains an option for this region, use it.
if settings.has_option( 'Region Overwrite Settings', regionName ):
overwriteOptions[ regionName ].set( settings.getboolean( 'Region Overwrite Settings', regionName ) )
# Otherwise, set a default for this region's use.
else: overwriteOptions[ regionName ].set( False )
# Set the option for allowing Gecko codes, if it doesn't already exist (initial program load)
if 'EnableGeckoCodes' not in overwriteOptions:
overwriteOptions[ 'EnableGeckoCodes' ] = BooleanVar()
# First check whether a setting already exists for this in the options file
if settings.has_option( 'Region Overwrite Settings', 'EnableGeckoCodes' ):
allowGeckoCodes = settings.getboolean( 'Region Overwrite Settings', 'EnableGeckoCodes' )
else: # The option doesn't exist in the file
allowGeckoCodes = False
if not dol.data:
overwriteOptions[ 'EnableGeckoCodes' ].set( allowGeckoCodes )
return # No file loaded; can't confirm gecko settings are valid
# Check that the gecko configuration in settings.py are valid
if not allowGeckoCodes or not gecko.environmentSupported:
overwriteOptions[ 'EnableGeckoCodes' ].set( False )
else: # Make sure the appropriate regions required for gecko codes are also set.
if overwriteOptions[ gecko.codelistRegion ].get() and overwriteOptions[ gecko.codehandlerRegion ].get():
overwriteOptions[ 'EnableGeckoCodes' ].set( True )
else:
promptToUser = ( 'The option to enable Gecko codes is set, however the regions required for them, '
'the ' + gecko.codelistRegion + ' and ' + gecko.codehandlerRegion + ', are not set for use (i.e. for partial or full overwriting).'
'\n\nDo you want to allow use of these regions \nin order to allow Gecko codes?' )
willUserAllowGecko( promptToUser, False, root ) # Will prompt the user with the above message and set the overwriteOption
def loadImageBank():
""" Loads and stores images required by the GUI. This allows all of the images to be
stored together in a similar manner, and ensures references to all of the loaded
images are stored, which prevents them from being garbage collected (which would
otherwise cause them to disappear from the GUI after rendering is complete). The
images are only loaded when first requested, and then kept for future reference. """
loadFailed = []
for item in os.listdir( os.path.join( scriptHomeFolder, 'imgs' ) ):
if not item.endswith( '.png' ): continue
filepath = os.path.join( scriptHomeFolder, 'imgs', item )
try:
imageBank[item[:-4]] = ImageTk.PhotoImage( Image.open(filepath) )
except: loadFailed.append( filepath )
if loadFailed:
msg( 'Unable to load some images:\n\n' + '\n'.join(loadFailed) )
def openFileByField( event ):
""" Called by the user pressing Enter in the "ISO / DOL" field. Simply attempts
to load whatever path is in the text field. """
readRecievedFile( openedFilePath.get().replace('"', '') )
playSound( 'menuChange' )
# Move the cursor position to the end of the field (default is the beginning)
event.widget.icursor( Tkinter.END )
def openFileByButton():
""" Called by the "Open" button. Prompts the user for a file to open, and then
attempts to open it. """
# Set the default file formats to choose from in the file chooser dialog box
defaultFileFormat = settings.get( 'General Settings', 'defaultFileFormat' )
if defaultFileFormat.lower() == 'dol' or defaultFileFormat.lower() == '.dol':
filetypes = [ ('Melee executable', '*.dol'), ('Disc image files', '*.iso *.gcm'), ('all files', '*.*') ]
else: filetypes = [ ('Disc image files', '*.iso *.gcm'), ('Melee executable', '*.dol'), ('all files', '*.*') ]
# Present a file chooser dialog box to the user
filepath = tkFileDialog.askopenfilename(
title="Choose a DOL or disc image file to open.",
initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
defaultextension=defaultFileFormat,
filetypes=filetypes
)
if filepath:
readRecievedFile( filepath )
playSound( 'menuChange' )
def restoreOriginalDol():
if problemWithDol(): return
elif not dol.revision:
msg( 'The revision of the currently loaded DOL could not be determined.' )
return
restoreConfirmed = tkMessageBox.askyesno( 'Restoration Confirmation', 'This will revert the currently loaded DOL to be practically '
'identical to a vanilla ' + dol.revision + ' DOL (loaded from the "Original DOLs" folder). '
'"Free space" regions selected for use will still be zeroed-out. This process does not preserve '
'a copy of the current DOL, and any current changes will be lost.\n\nAre you sure you want to do this?' )
if restoreConfirmed:
vanillaDol = loadVanillaDol() # Should prompt the user with details if there's a problem here
if vanillaDol and vanillaDol.data:
# Seems that the original DOL was loaded successfully. Perform the data replacement.
dol.data = vanillaDol.data # Will be re-signed when the user saves.
# Rescan for mods
checkForEnabledCodes()
# Ensure that the save buttons are enabled
saveChangesBtn.config( state='normal' )
saveChangesBtn2.config( state='normal' )
# Provide user feedback
playSound( 'menuChange' )
programStatus.set( 'Restoration Successful' )
else:
programStatus.set( 'Restoration Unsuccessful' )
def readRecievedFile( filepath, defaultProgramStatus='', checkForCodes=True ):
if dol.isLoading: # Simple failsafe
discVersion.set( '' )
dolVersion.set( 'File already loading!' )
return
# Reset/clear the gui.
discVersion.set( '' )
programStatus.set( defaultProgramStatus )
openedFilePath.set( '' )
dolVersion.set( 'Nothing Loaded' )
showRegionOptionsBtn.config( state='disabled' )
exportFileBtn.config( state='disabled' )
importFileBtn.config( state='disabled' )
restoreDolBtn.config( state='disabled' )
# Validate the given path, and update the default search directory and file format settings
normalizedPath = os.path.normpath( filepath ).replace('{', '').replace('}', '')
if not normalizedPath or not os.path.exists( normalizedPath ):
saveChangesBtn.config( state='disabled' )
saveChangesBtn2.config( state='disabled' )
saveChangesAsBtn.config( state='disabled' )
saveChangesAsBtn2.config( state='disabled' )
msg( 'Unable to find this file: "' + normalizedPath + '".', 'Invalid Filepath' )
return
else: # File seems good
saveChangesAsBtn.config( state='normal' )
saveChangesAsBtn2.config( state='normal' )
# The other standard save buttons will become enabled once changes are detected.
# Load the DOL file to be modded (from a disc or DOL file path)
# tic = time.clock()
dol.load( normalizedPath )
# toc = time.clock()
# print '\nDOL load time:', toc-tic
dol.loadCustomCodeRegions()
# Remember the given path and file type for later defaults
settings.set( 'General Settings', 'defaultSearchDirectory', os.path.dirname( normalizedPath ).encode('utf-8').strip() )
settings.set( 'General Settings', 'defaultFileFormat', os.path.splitext( normalizedPath )[1].replace('.', '').encode('utf-8').strip() )
saveOptions()
# Update the GUI with the ISO/DOL's details
if dol.revision: dolVersion.set( dol.version + ' DOL Detected' )
else: dolVersion.set( 'Unknown DOL Revision' )
openedFilePath.set( normalizedPath.replace('"', '') )
showRegionOptionsBtn.config( state='normal' )
if dol.type == 'iso' or dol.type == 'gcm':
if dol.discVersion: discVersion.set( dol.discVersion + ' Disc,' )
exportFileBtn.config( state='normal' )
elif dol.type == 'dol': importFileBtn.config( state='normal' )
restoreDolBtn.config( state='normal' )
# Enable buttons on the Summary tab
for widget in summaryTabFirstRow.rightColumnFrame.winfo_children():
widget['state'] = 'normal'
# Validate the settings.py file for parameters on the installation of Gecko codes
gecko.checkSettings()
# Output some info to console
if 1:
print '\nPath:', '\t', dol.path
print 'File Type:', '\t', dol.type
print 'Disc Version:', '\t', dol.discVersion
print 'DOL Offset:', '\t', hex( dol.offset )
print 'DOL Size:', '\t', hex( len(dol.data) /2 )
print 'DOL Region:', '\t', dol.region
print 'DOL Version:', '\t', dol.version
print 'Is Melee:', '\t', dol.isMelee
print 'Is 20XX: ', '\t', dol.is20XX
print 'Max DOL Offset:', hex( dol.maxDolOffset )
print 'bssMemAddress:', hex( dol.bssMemAddress ).replace('L', '')
print 'bssSize:', hex( dol.bssSize )
print 'entryPoint:', hex( dol.entryPoint ).replace('L', '')
if 1: # DOL Section printout
print '\n\tfO, RAM, size'
for sectionName, ( fileOffset, memAddress, size ) in dol.sectionInfo.items():
print sectionName + ':', hex(fileOffset), hex(memAddress), hex(size)
for regionName, regions in dol.customCodeRegions.items():
print '\n\t', regionName + ':\t', [(hex(start), hex(end)) for start, end in regions]
# Gecko configuration printout
print '\nGecko configuration:'
for key, value in gecko.__dict__.items():
if key == 'codehandler': continue
if type( value ) == int:
print '\t', key + ':', hex(value)
else:
print '\t', key + ':', value
print ''
if checkForCodes and dol.data:
collectAllStandaloneFunctions()
checkForEnabledCodes()
def isSpecialBranchSyntax( code ):
""" Identifies syntaxes such as "bl 0x800948a8" or "bl <functionName>".
Comments should already have been removed by this point. """
lineParts = code.split()
if code.lower().startswith( 'b' ) and len( lineParts ) == 2:
targetDescriptor = lineParts[1]
if targetDescriptor.startswith('0x8') and len( targetDescriptor ) == 10: return True # Using a RAM address
elif isStandaloneFunctionHeader( targetDescriptor ): return True # Using a function name
return False
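# Examples: 'bl 0x800948a8' and 'b <ShineActionState>' return True;
# 'blr' (no target) and 'bl someLabel' (neither a RAM address nor a <functionName>) return False.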
def isStandaloneFunctionHeader( targetDescriptor ):
""" Identifies a string representing a standalone function. Usually a text line for a mod description
header, but may also be used to help recognize the latter half (i.e. target descriptor) of a
special branch syntax (such as the '<ShineActionState>' from 'bl <ShineActionState>').
Comments should already have been removed by this point. """
if targetDescriptor.startswith('<') and '>' in targetDescriptor and ' ' not in targetDescriptor.split( '>' )[0]:
return True
else:
return False
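# Example: '<ShineActionState>' returns True; a plain label such as 'ShineActionState'
# (no angle brackets) returns False.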
def containsPointerSymbol( codeLine ): # Comments should already be excluded
""" Returns a list of names, but can also just be evaluated as True/False. """
symbolNames = []
if '<<' in codeLine and '>>' in codeLine:
for block in codeLine.split( '<<' )[1:]: # Skips first block (will never be a symbol)
if '>>' in block:
for potentialName in block.split( '>>' )[:-1]: # Skips last block (will never be a symbol)
if potentialName != '' and ' ' not in potentialName:
symbolNames.append( potentialName )
return symbolNames
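# Example: the line 'lis r3, <<SomeFunction>>@h' would return ['SomeFunction'], which also
# evaluates as True; a line with no '<<name>>' pair returns an empty list (False).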
def isGeckoCodeHeader( codeline ): # Should return True for short header lines such as '1.02', 'PAL', 'ALL', etc (old syntaxes), or 'NTSC 1.02', 'PAL 1.00', etc (new syntaxes)
if codeline == '': return False
isGeckoHeader = False
if len( codeline ) < 10 and not '<' in codeline:
codeline = codeline.upper()
# Check for the old formatting first ('PAL'), or a header designating a Gecko code for all revisions ('ALL')
if codeline == 'PAL' or codeline == 'ALL': isGeckoHeader = True
# All other conditions should include a version number (e.g. '1.02')
elif '.' in codeline[1:]: # Excludes first character, which may be a period indicating an assembly directive
# Check for a short version number string (still old formatting), e.g. '1.00', '1.01'
if len( codeline ) == 4: isGeckoHeader = True
elif 'NTSC' in codeline or 'PAL' in codeline: isGeckoHeader = True # Should be the new syntax, e.g. 'NTSC 1.02'
return isGeckoHeader
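# Examples: '1.02', 'PAL', 'ALL', and 'NTSC 1.02' are treated as Gecko code headers;
# longer lines, or lines containing '<' (standalone function headers), are not.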
def normalizeRegionString( revisionString ):
""" Ensures consistency in revision strings. Should produce something like 'NTSC 1.02', 'PAL 1.00', etc., or 'ALL'. """
revisionString = revisionString.upper()
if 'ALL' in revisionString: return 'ALL'
# Check for a game/dol version
verIdPosition = revisionString.find( '.' )
if verIdPosition == -1: ver = '1.00' # The default assumption
else: ver = revisionString[verIdPosition-1:verIdPosition+3]
# Check for the region
if 'PAL' in revisionString: region = 'PAL '
else: region = 'NTSC ' # The default assumption
return region + ver
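# Examples: 'ntsc 1.02' -> 'NTSC 1.02'; 'PAL' -> 'PAL 1.00'; '1.01' -> 'NTSC 1.01'; 'all' -> 'ALL'.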
def parseSettingsFileRegionName( fullRegionName ):
revisionList = []
regionName = ''
if '|' in fullRegionName: # Using new naming convention (MCM version 4+); should start with something like 'NTSC 1.02', or 'PAL 1.00', or 'NTSC 1.02, PAL 1.00'
revisionString, regionName = fullRegionName.split( '|' )
revisionStringList = revisionString.split( ',' )
for revisionString in revisionStringList:
normalizedRevisionString = normalizeRegionString( revisionString )
if normalizedRevisionString == 'ALL':
revisionList = [ 'ALL' ]
break
else:
revisionList.append( normalizedRevisionString.upper() )
# Attempt to match using the old (MCM v3) naming convention
elif fullRegionName.startswith( 'v10' ):
revisionList = [ 'NTSC 1.' + fullRegionName[2:4] ]
regionName = fullRegionName[4:]
elif fullRegionName.startswith( 'vPAL' ): # Using the old naming convention (MCM version 3.x)
revisionList = [ 'PAL 1.00' ]
regionName = fullRegionName[4:]
elif fullRegionName.startswith( 'vAll' ):
revisionList = [ 'ALL' ]
regionName = fullRegionName[4:]
else: msg( 'Warning! Invalid code region name, "' + fullRegionName + '", defined in the settings.py file.' )
return revisionList, regionName
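# Examples (region names are hypothetical):
#   'NTSC 1.02, PAL 1.00|ScreenshotRegion' -> ( ['NTSC 1.02', 'PAL 1.00'], 'ScreenshotRegion' )
#   'v102DebugModeRegion' (old MCM v3 style)  -> ( ['NTSC 1.02'], 'DebugModeRegion' )
#   'vAllAuxCodeRegions'                      -> ( ['ALL'], 'AuxCodeRegions' )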
def parseModsLibraryFile( filepath, modModulesParent, includePaths ):
""" This is the main parsing function for the Mods Library, which reads a single text file and creates modModules out of the mods found.
These modules are then attached to the GUI within the given parent, a VerticalScrolledFrame. """
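# Rough sketch of the text format parsed below (a hypothetical mod; the parsing logic itself is the
# authoritative reference). Mods are separated by '-==-'; the first non-blank line is the name,
# '[...]' holds the author(s), '#' begins a comment, and each code change starts with a revision
# header line, e.g.:
#
#   Example Mod Name
#   A line or two of description.
#   [Author Name]
#   1.02 ------ 0x80081FC8 ---- 7C0802A6 -> branch
#   lis r3, 0x8045
#   ...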
geckoCodesAllowed = overwriteOptions[ 'EnableGeckoCodes' ].get()
# Open the text file and get its contents, creating a list of raw chunks of text for each mod
with open( filepath, 'r' ) as modFile:
mods = modFile.read().split( '-==-' )
# Parse each chunk of text for each mod, to get its info and code changes
for fileIndex, mod in enumerate( mods ):
if mod.strip() == '' or mod.lstrip()[0] == '!': continue # Skip this mod.
if modsLibraryNotebook.stopToRescan: break
else:
modName = modAuth = currentRevision = ''
modDesc = []
customCode = []
standaloneName = origHex = newHex = ''
longStaticOriginal = []
isInjectionCode = isGeckoCode = isLongStaticOriginal = isLongStaticNew = False
currentStandaloneRevisions = []
modType = 'static'
basicInfoCollected = parsingErrorOccurred = False
offsetString = ''
missingIncludes = False
assemblyError = False
webLinks = [] # Will be a list of tuples, of the form (urlparseObject, comments)
modData = {} # A dictionary that will be populated by lists of "codeChange" tuples
# Iterate over the text/code lines for this mod
for rawLine in mod.splitlines():
# Separate out comments for parsing purposes.
if '##' in rawLine:
rawLine = rawLine.split( '##' )[0].strip() # Comments with these, '##' (hard comments), are totally ignored by the parser.
# Separate lines of description or code text from comments
if '#' in rawLine:
lineParts = rawLine.split( '#' )
# Check if this line is a url containing a fragment identifier
if lineParts[0].lstrip().startswith( '<' ):
# Look for the ending '>' character. Put everything to the left of it into 'line', and everything else should be a comment
for i, part in enumerate( lineParts, start=1 ):
if part.rstrip().endswith( '>' ):
line = '#'.join( lineParts[:i] ).strip()
lineComments = ' #' + '#'.join( lineParts[i:] )
break
else: # No ending '>'; guess this wasn't a url
line = lineParts[0].strip() # Remove whitespace from start and end of line
lineComments = ' #' + '#'.join( lineParts[1:] )
else:
line = lineParts[0].strip() # Remove whitespace from start and end of line
lineComments = ' #' + '#'.join( lineParts[1:] )
else:
line = rawLine.strip()
lineComments = ''
if not basicInfoCollected: # The purpose of this flag is to avoid all of the extra checks in this block once this info is collected.
# Capture the first non-blank, non-commented line for the name of the mod.
if modName == '' and line != '':
modName = rawLine
continue
if line.startswith( '<' ) and line.endswith( '>' ):
potentialLink = line[1:-1].replace( '"', '' ) # Removes beginning/ending angle brackets
webLinks.append( (potentialLink, lineComments) ) # Will be validated on GUI creation
elif line.startswith( '[' ) and line.endswith( ']' ):
modAuth = line.split(']')[0].replace( '[', '' )
basicInfoCollected = True
else: # Assume all other lines are more description text
modDesc.append( rawLine )
elif ( line.startswith('Version') or line.startswith('Revision') ) and 'Hex to Replace' in line: continue
else: # If this is reached, the name, description, and author have been parsed.
isVersionHeader = False
headerStringStart = ''
# Check if this is the start of a new code change
if '---' in line:
headerStringStart = line.split('---')[0].rstrip().replace(' ', '') # Left-hand strip of whitespace has already been done
if headerStringStart: # i.e. not an empty string
isVersionHeader = True
# Check if it's a Gecko codes header (the version or revision should be the only thing on that line), but don't set that flag yet
elif isGeckoCodeHeader( line ):
isVersionHeader = True
headerStringStart = line
# If this is a header line (marking the start of a new code change), check for lingering collected codes that must first be added to the previous code change.
if ( isVersionHeader or '---' in line or isStandaloneFunctionHeader(line) ) and customCode != []:
# Perform a quick check
# returnCode = 2
# if len( customCode ) == 1:
# customCodeLine = customCode[0].strip()
# if len( customCodeLine ) == 8 and validHex( customCodeLine ):
# rawCustomCode = preProcessedCustomCode = customCodeLine
# returnCode = 0
# #print 'skipped', customCode
# if returnCode != 0:
rawCustomCode = '\n'.join( customCode ).strip() # Collapses the list of collected code lines into one string, removing leading & trailing whitespace
returnCode, preProcessedCustomCode = customCodeProcessor.preAssembleRawCode( customCode, includePaths, suppressWarnings=True )
#print 'still processed', customCode
# If pre-processing was successful, assemble collected info into a "code change" tuple,
# which are of the form: ( changeType, customCodeLength, offset, originalCode, customCode, preProcessedCustomCode )
if returnCode == 0 and preProcessedCustomCode:
customCodeLength = getCustomCodeLength( preProcessedCustomCode )
if isInjectionCode:
modData[currentRevision].append( ( 'injection', customCodeLength, offsetString, origHex, rawCustomCode, preProcessedCustomCode ) )
isInjectionCode = False
elif isGeckoCode:
modData[currentRevision].append( ( 'gecko', customCodeLength, '', '', rawCustomCode, preProcessedCustomCode ) )
isGeckoCode = False
elif standaloneName != '':
for revision in currentStandaloneRevisions:
if revision not in modData: modData[revision] = []
modData[revision].append( ( 'standalone', customCodeLength, standaloneName, '', rawCustomCode, preProcessedCustomCode ) )
currentStandaloneRevisions = []
standaloneName = ''
elif isLongStaticNew:
modData[currentRevision].append( ( 'static', customCodeLength, offsetString, '\n'.join(longStaticOriginal), rawCustomCode, preProcessedCustomCode ) )
longStaticOriginal = []
isLongStaticNew = False
else: # Standard static overwrite
if not origHex or not customCode:
cmsg( '\nProblem detected while parsing "' + modName + '" in the mod library file "'
+ os.path.basename( filepath ) + '" (index ' + str(fileIndex+1) + ').\n\n'
'One of the inputs for a static overwrite (the original or new custom code) was found to be empty, which means that '
'the mod is probably not formatted correctly.', 'Incorrect Mod Formatting (Error Code 04.4)' )
parsingErrorOccurred = True
customCode = []
break
elif getCustomCodeLength( origHex ) != customCodeLength: # Not pre-processing in this case since it should only be one line
cmsg( '\nProblem detected while parsing "' + modName + '" in the mod library file "'
+ os.path.basename( filepath ) + '" (index ' + str(fileIndex+1) + ').\n\n'
'Inputs for static overwrites (the original code and custom code) should be the same '
'length. Original code:\n' + origHex + '\n\nCustom code:\n' + rawCustomCode, 'Incorrect Mod Formatting (Error Code 04.3)' )
parsingErrorOccurred = True
customCode = []
break
else:
modData[currentRevision].append( ( 'static', customCodeLength, offsetString, origHex, rawCustomCode, preProcessedCustomCode ) )
elif returnCode == 2:
assemblyError = True
currentStandaloneRevisions = []
break
elif returnCode == 3: # preProcessedCustomCode should be the missing include file name
missingIncludes = True
currentStandaloneRevisions = []
break
customCode = []
elif line == '->': # Divider between long static overwrite original and new code.
if isLongStaticOriginal and longStaticOriginal != []:
isLongStaticOriginal = False
isLongStaticNew = True
continue
if isVersionHeader:
# Track the version that subsequent code lines are for
currentRevision = normalizeRegionString( headerStringStart )
if currentRevision not in modData: modData[currentRevision] = []
if isGeckoCodeHeader( line ):
isGeckoCode = True # True for just this code change, not necessarily for the whole mod, like the variable below
modType = 'gecko'
elif isStandaloneFunctionHeader( line ):
modType = 'standalone'
standaloneName, revisionIdentifiers = line.lstrip()[1:].split( '>' )
# Parse content after the header (on the same line) to see if they are identifiers for what game version to add this for.
if revisionIdentifiers.strip() == '':
currentStandaloneRevisions = [ 'ALL' ]
else:
for revision in revisionIdentifiers.split(','):
thisRevision = normalizeRegionString( revision )
if thisRevision == 'ALL': # Override any versions that might already be accumulated, and set the list to just ALL
currentStandaloneRevisions = [ 'ALL' ]
break
else: currentStandaloneRevisions.append( thisRevision )
continue
if currentRevision != '' or currentStandaloneRevisions != []: # At least one of these should be set, even for subsequent lines that don't have a version header.
if line and '---' in line: # Ensures the line isn't blank, and it is the start of a new code change definition
hexCodes = [ item.strip() for item in line.replace('->', '--').split('-') if item ] # Breaks down the line into a list of values.
if isVersionHeader: hexCodes.pop(0) # Removes the revision indicator.
offsetString = hexCodes[0]
totalValues = len( hexCodes )
if totalValues == 1: # This is the header for a Long static overwrite (e.g. "1.02 ----- 0x804d4d90 ---"). (hexCodes would actually be just ["0x804d4d90"])
isLongStaticOriginal = True
elif totalValues == 2: # Should have an offset and an origHex value; e.g. from a line like "1.02 ------ 804D7A4C --- 00000000 ->"
origHex = ''.join( hexCodes[1].replace('0x', '').split() ) # Remove whitespace
if not validHex( origHex ): # This is the game's original code, so it should just be hex.
msg( 'Problem detected while parsing "' + modName + '" in the mod library file "'
+ os.path.basename( filepath ) + '" (index ' + str(fileIndex+1) + ').\n\n'
'There is an invalid (non-hex) original hex value: ' + origHex, 'Incorrect Mod Formatting (Error Code 04.2)' )
parsingErrorOccurred = True
customCode = []
break
elif totalValues > 2: # Could be a standard static overwrite (1-liner), long static overwrite, or an injection mod
origHex = ''.join( hexCodes[1].replace('0x', '').split() ) # Remove whitespace
newHex = hexCodes[2]
if newHex.lower() == 'branch':
isInjectionCode = True # Will later be switched back off, which is why this is separate from the modType variable below
if modType == 'static': modType = 'injection' # 'static' is the only type that 'injection' can override.
else:
# If the values exist and are valid, add a codeChange tuple to the current game version changes list.
if not validHex( offsetString.replace( '0x', '' ) ): # Should just be a hex offset.
msg( 'Problem detected while parsing "' + modName + '" in the mod library file "'
+ os.path.basename( filepath ) + '" (index ' + str(fileIndex+1) + ').\n\n'
'There is an invalid (non-hex) offset value: ' + offsetString, 'Incorrect Mod Formatting (Error Code 04.1)' )
parsingErrorOccurred = True
customCode = []
break
elif not validHex( origHex ): # This is the game's original code, so it should just be hex.
msg( 'Problem detected while parsing "' + modName + '" in the mod library file "'
+ os.path.basename( filepath ) + '" (index ' + str(fileIndex+1) + ').\n\n'
'There is an invalid (non-hex) original hex value: ' + origHex, 'Incorrect Mod Formatting (Error Code 04.2)' )
parsingErrorOccurred = True
customCode = []
break
else:
customCode.append( newHex + lineComments )
if not offsetString.startswith( '0x' ):
offsetString = '0x' + offsetString
elif not isVersionHeader:
if isLongStaticOriginal:
longStaticOriginal.append( line )
else: # This may be an empty line/whitespace. Only adds this if there is already custom code accumulating for something.
customCode.append( rawLine )
# End of per-line loop for the current mod (all lines have now been gone through).
# If there is any code left, save it to the last version codeset.
if customCode != [] or currentStandaloneRevisions != []:
rawCustomCode = '\n'.join( customCode ).strip()
returnCode, preProcessedCustomCode = customCodeProcessor.preAssembleRawCode( customCode, includePaths, suppressWarnings=True )
if returnCode == 0 and preProcessedCustomCode:
customCodeLength = getCustomCodeLength( preProcessedCustomCode )
if isInjectionCode: modData[currentRevision].append( ( 'injection', customCodeLength, offsetString, origHex, rawCustomCode, preProcessedCustomCode ) )
elif isGeckoCode: modData[currentRevision].append( ( 'gecko', customCodeLength, '', '', rawCustomCode, preProcessedCustomCode ) )
elif standaloneName:
for revision in currentStandaloneRevisions:
if revision not in modData: modData[revision] = []
modData[revision].append( ( 'standalone', customCodeLength, standaloneName, '', rawCustomCode, preProcessedCustomCode ) )
elif isLongStaticNew:
modData[currentRevision].append( ( 'static', customCodeLength, offsetString, '\n'.join(longStaticOriginal), rawCustomCode, preProcessedCustomCode ) )
else: # standard static (1-liner)
if not origHex or not newHex:
cmsg( '\nProblem detected while parsing "' + modName + '" in the mod library file "'
+ os.path.basename( filepath ) + '" (index ' + str(fileIndex+1) + ').\n\n'
'One of the inputs for a static overwrite (origHex or newHex) was found to be empty, which means that '
'the mod is probably not formatted correctly.', 'Incorrect Mod Formatting (Error Code 04.4)' )
parsingErrorOccurred = True
customCode = []
break
elif getCustomCodeLength( origHex ) != customCodeLength:
cmsg( '\nProblem detected while parsing "' + modName + '" in the mod library file "'
+ os.path.basename( filepath ) + '" (index ' + str(fileIndex+1) + ').\n\n'
'Inputs for static overwrites (the original code and custom code) should be the same '
'length. Original code:\n' + origHex + '\n\nCustom code:\n' + newHex, 'Incorrect Mod Formatting (Error Code 04.3)' )
parsingErrorOccurred = True
customCode = []
break
else:
modData[currentRevision].append( ( 'static', customCodeLength, offsetString, origHex, rawCustomCode, preProcessedCustomCode ) )
elif returnCode == 2:
assemblyError = True
elif returnCode == 3:
missingIncludes = True
# If codes were found for the current mod, create a gui element for it and populate it with the mod's details.
if not modsLibraryNotebook.stopToRescan: # This might be queued in order to halt and restart the scan
# Create a new module in the GUI, both for user interaction and data storage
newModModule = ModModule( modModulesParent, modName, '\n'.join(modDesc).strip(), modAuth, modData, modType, webLinks )
newModModule.pack( fill='x', expand=1 )
newModModule.sourceFile = filepath
newModModule.fileIndex = fileIndex
newModModule.includePaths = includePaths
genGlobals['allMods'].append( newModModule )
# Set the mod widget's status and add it to the global allModNames list
if modData == {}: newModModule.setState( 'unavailable', specialStatusText='Missing mod data.' )
elif parsingErrorOccurred: newModModule.setState( 'unavailable', specialStatusText='Error detected during parsing.' )
elif assemblyError: newModModule.setState( 'unavailable', specialStatusText='Error during assembly' )
elif missingIncludes: newModModule.setState( 'unavailable', specialStatusText='Missing include file: ' + preProcessedCustomCode )
else:
# No problems detected so far. Check if this is a duplicate mod, and add it to the list of all mods if it's not
if modName in genGlobals['allModNames']:
newModModule.setState( 'unavailable', specialStatusText='Duplicate Mod' )
else:
genGlobals['allModNames'].add( modName )
if modType == 'gecko' and not geckoCodesAllowed:
newModModule.setState( 'unavailable' )
elif settingsFile.alwaysEnableCrashReports and modName == "Enable OSReport Print on Crash":
newModModule.setState( 'pendingEnable' )
class CodeMod( object ):
""" Basically just a data container for now. So far only used for the new ASM Mod Folder Structure (AMFS).
For parsing of MCM's usual format, see the parseModsLibraryFile function above. """
def __init__( self, name, auth='', desc='', srcPath='', isAmfs=False ):
self.name = name
self.auth = auth
self.desc = desc
self.type = 'static'
self.data = {} # A dictionary that will be populated by lists of "codeChange" tuples
self.path = srcPath # Root folder path that contains this mod
self.isAmfs = isAmfs
self.webLinks = []
self.includePaths = []
class ModsLibraryParser():
""" So far only used for the new ASM Mod Folder Structure (AMFS). For parsing
of MCM's usual format, see the parseModsLibraryFile function above. """
def __init__( self, modModulesParent, includePaths ):
self.modModulesParent = modModulesParent
self.includePaths = includePaths
self.errors = []
def parseAmfs( self, folderName, fullFolderPath, jsonContents ):
""" This method is the primary handler of the ASM Mod Folder Structure (AMFS). This will
create a mod container object to store the mod's code changes and other data, and
step through each code change dictionary in the JSON file's build list. """
codeSection = jsonContents.get( 'codes' )
if codeSection:
for codeset in jsonContents['codes']:
# Typecast the authors and description lists to strings
authors = ', '.join( codeset['authors'] )
description = '\n'.join( codeset['description'] )
mod = CodeMod( codeset['name'], authors, description, fullFolderPath, True )
# Set the revision (region/version)
revision = codeset.get( 'revision', 'NTSC 1.02' )
mod.revision = normalizeRegionString( revision ) # Normalize it
mod.data[mod.revision] = [] # Initialize a codeChange list for it, keyed by the normalized revision (which later appends use)
# Load a vanilla DOL for the above revision (will only actually be loaded once)
mod.vanillaDol = loadVanillaDol( revision ) # This will report any errors it has
mod.assemblyError = False
mod.parsingError = False
mod.missingVanillaHex = False
mod.missingIncludes = ''
mod.includePaths = self.includePaths
mod.webLinks = codeset.get( 'webLinks', () )
buildSet = codeset.get( 'build' )
if buildSet:
for codeChangeDict in buildSet:
codeType = codeChangeDict['type']
if codeType == 'replace': # Static Overwrite; basically an 04 Gecko codetype (hex from json)
self.parseAmfsReplace( codeChangeDict, mod )
if mod.assemblyError or mod.missingVanillaHex or mod.missingIncludes: break
elif codeType == 'inject': # Standard code injection
self.parseAmfsInject( codeChangeDict, mod )
if mod.assemblyError or mod.parsingError or mod.missingVanillaHex or mod.missingIncludes: break
elif mod.type == 'static': mod.type = 'injection' # 'static' is the only type that 'injection' can override.
elif codeType == 'replaceCodeBlock': # Static overwrite of variable length (hex from file)
self.parseAmfsReplaceCodeBlock( codeChangeDict, mod )
if mod.assemblyError or mod.missingVanillaHex or mod.missingIncludes: break
elif codeType == 'branch' or codeType == 'branchAndLink':
self.errors.append( 'The ' + codeType + ' AMFS code type is not yet supported' )
elif codeType == 'injectFolder':
self.parseAmfsInjectFolder( codeChangeDict, mod )
if mod.assemblyError or mod.parsingError or mod.missingVanillaHex or mod.missingIncludes: break
elif mod.type == 'static': mod.type = 'injection' # 'static' is the only type that 'injection' can override.
elif codeType == 'replaceBinary':
self.errors.append( 'The replaceBinary AMFS code type is not yet supported' )
elif codeType == 'binary':
self.errors.append( 'The binary AMFS code type is not yet supported' )
else:
self.errors.append( 'Unrecognized AMFS code type: ' + codeType )
# Create a new code module, and add it to the GUI
self.buildCodeModule( mod )
else: # Build all subfolders/files
self.errors.append( "No 'build' section found in codes.json" )
else: # Grab everything from the current folder (and subfolders). Assume .s are static overwrites, and .asm are injections
# Typecast the authors and description lists to strings
# authors = ', '.join( codeset['authors'] )
# description = '\n'.join( codeset['description'] )
# mod = CodeMod( codeset['name'], authors, description, fullFolderPath, True )
self.errors.append( "No 'codes' section found in codes.json" ) #todo
def buildCodeModule( self, mod ):
""" Builds a code module for the GUI, sets its status, and adds it to the interface. """
# Create a new module in the GUI, both for user interaction and data storage
newModModule = ModModule( self.modModulesParent, mod.name, mod.desc, mod.auth, mod.data, mod.type, mod.webLinks )
newModModule.pack( fill='x', expand=1 )
newModModule.sourceFile = mod.path
newModModule.fileIndex = 0
newModModule.includePaths = mod.includePaths
genGlobals['allMods'].append( newModModule )
# Set the mod widget's status and add it to the global allModNames list
if mod.data == {}: newModModule.setState( 'unavailable', specialStatusText='Missing mod data.' )
elif mod.parsingError: newModModule.setState( 'unavailable', specialStatusText='Error detected during parsing.' )
elif mod.missingVanillaHex: newModModule.setState( 'unavailable', specialStatusText='Unable to get vanilla hex' )
elif mod.assemblyError: newModModule.setState( 'unavailable', specialStatusText='Error during assembly' )
elif mod.missingIncludes: newModModule.setState( 'unavailable', specialStatusText='Missing include file: ' + mod.missingIncludes )
else:
# No problems detected so far. Check if this is a duplicate mod, and add it to the list of all mods if it's not
if mod.name in genGlobals['allModNames']:
newModModule.setState( 'unavailable', specialStatusText='Duplicate Mod' )
else:
genGlobals['allModNames'].add( mod.name )
if mod.type == 'gecko' and not overwriteOptions[ 'EnableGeckoCodes' ].get():
newModModule.setState( 'unavailable' )
elif settingsFile.alwaysEnableCrashReports and mod.name == "Enable OSReport Print on Crash":
newModModule.setState( 'pendingEnable' )
if self.errors:
print '\nFinal errors:', '\n'.join( self.errors )
def parseAmfsReplace( self, codeChangeDict, mod ):
""" AMFS Static Overwrite of 4 bytes; custom hex code sourced from json file. """
# Pre-process the custom code (make sure there's no whitespace, and/or assemble it)
customCode = codeChangeDict['value']
returnCode, preProcessedCustomCode = customCodeProcessor.preAssembleRawCode( customCode, mod.includePaths, suppressWarnings=True )
if returnCode in ( 1, 2 ):
mod.assemblyError = True
self.errors.append( "Encountered a problem while assembling a 'replace' code change" )
return
elif returnCode == 3: # Missing an include file
mod.missingIncludes = preProcessedCustomCode # The custom code string will be the name of the missing include file
self.errors.append( "Unable to find this include file: " + preProcessedCustomCode )
return
# Get the offset of the code change, and the original code at that location
offset = codeChangeDict['address']
dolOffset = normalizeDolOffset( offset, dolObj=mod.vanillaDol )
origHex = getVanillaHex( dolOffset, revision=mod.revision, suppressWarnings=False )
if not origHex:
mod.missingVanillaHex = True
self.errors.append( "Unable to get original code for a 'replace' code change" )
return
# Preserve the annotation using a comment
annotation = codeChangeDict.get( 'annotation', None )
if annotation:
customCode += ' # ' + annotation
mod.data[mod.revision].append( ('static', 4, offset, origHex, customCode, preProcessedCustomCode) )
def readInjectionAddressHeader( self, asmFile ):
# Parse the first line to get an injection site (offset) for this code
headerLine = asmFile.readline()
# Check for the original 1-line format
if headerLine.startswith( '#' ) and 'inserted' in headerLine:
return headerLine.split()[-1] # Splits by whitespace and gets the resulting last item
# Check for the multi-line format
elif headerLine.startswith( '#######' ):
while True:
line = asmFile.readline()
if 'Address:' in line:
return line.split()[2]
elif line.startswith( '#######' ) or not line: # Also bail out at end-of-file to avoid an infinite loop
return -1 # Failsafe; reached the end of the header without finding the address
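# The two header forms handled above look roughly like this (addresses hypothetical):
#   1-line format:   # To be inserted at 0x800948a8
#   Multi-line:      #######################################
#                    # Address: 0x800948a8
#                    #######################################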
def getCustomCodeFromFile( self, fullAsmFilePath, mod, parseOffset=False, annotation='' ):
""" Gets custom code from a given file and pre-processes it (removes whitespace, and/or assembles it).
If parseOffset is False, the offset in the file header isn't needed because the calling function
already has it from a codeChange dictionary. If it does need to be parsed, the calling function
only had a sourceFile for reference (most likely through a injectFolder code type). """
if not annotation: # Use the file name for the annotation (without file extension)
annotation = os.path.splitext( os.path.basename(fullAsmFilePath) )[0]
# Get the custom code, and the address/offset if needed
try:
# Open the file in byte-reading mode (rb). Strings will then need to be encoded.
with codecs.open( fullAsmFilePath, encoding='utf-8' ) as asmFile: # Using a different read method for UTF-8 encoding
if parseOffset:
offset = self.readInjectionAddressHeader( asmFile )
decodedString = asmFile.read().encode( 'utf-8' )
customCode = '# {}\n{}'.format( annotation, decodedString )
else:
offset = ''
# Clean up the header line (changing first line's "#To" to "# To")
firstLine = asmFile.readline()
if firstLine.startswith( '#' ):
customCode = '# {}\n# {}\n{}'.format( annotation, firstLine.lstrip( '# '), asmFile.read().encode( 'utf-8' ) )
else:
customCode = '# {}\n{}\n{}'.format( annotation, firstLine, asmFile.read().encode( 'utf-8' ) )
except IOError as err: # File couldn't be found
mod.parsingError = True
print err
self.errors.append( "Unable to find the file " + os.path.basename(fullAsmFilePath) )
return 4, '', '', ''
except Exception as err: # Unknown error
mod.parsingError = True
print err
self.errors.append( 'Encountered an error while parsing {}: {}'.format(os.path.basename(fullAsmFilePath), err) )
return 5, '', '', ''
# Pre-process the custom code (make sure there's no whitespace, and/or assemble it)
returnCode, preProcessedCustomCode = customCodeProcessor.preAssembleRawCode( customCode, [os.path.dirname(fullAsmFilePath)] + mod.includePaths, suppressWarnings=True )
# Check for errors
if returnCode in ( 1, 2 ):
mod.assemblyError = True
self.errors.append( 'Encountered a problem while assembling ' + os.path.basename(fullAsmFilePath) )
return returnCode, '', '', ''
elif returnCode == 3: # Missing an include file
mod.missingIncludes = preProcessedCustomCode # The custom code string will be the name of the missing include file
self.errors.append( 'Unable to find this include file: ' + preProcessedCustomCode )
return 3, '', '', ''
return 0, offset, customCode, preProcessedCustomCode
def parseAmfsInject( self, codeChangeDict, mod, sourceFile='' ):
""" AMFS Injection; custom code sourced from an assembly file. """
if not sourceFile:
sourceFile = codeChangeDict['sourceFile'] # Relative path
fullAsmFilePath = os.path.join( mod.path, sourceFile )
offset = codeChangeDict['address']
annotation = codeChangeDict.get( 'annotation', '' )
else: # No codeChangeDict if a source file was provided (this is an inject folder being processed)
fullAsmFilePath = sourceFile # This will be a full path in this case
offset = ''
annotation = ''
# Get the custom code from the ASM file and pre-process it (make sure there's no whitespace, and/or assemble it)
returnCode, offset, customCode, preProcessedCustomCode = self.getCustomCodeFromFile( fullAsmFilePath, mod, True, annotation )
# Check for errors
if returnCode != 0:
return
elif not offset:
mod.parsingError = True
self.errors.append( 'Unable to find an offset/address for ' + sourceFile )
return
# Normalize the offset of the code change, and get the game's original code at that location
dolOffset = normalizeDolOffset( offset, dolObj=mod.vanillaDol )
origHex = getVanillaHex( dolOffset, revision=mod.revision, suppressWarnings=False )
if not origHex:
mod.missingVanillaHex = True
self.errors.append( 'Unable to find vanilla hex for {}. Found in {}.'.format(offset, sourceFile) )
return
# Get the custom code's length, and store the info for this code change
customCodeLength = getCustomCodeLength( preProcessedCustomCode )
if customCodeLength == 4:
mod.data[mod.revision].append( ('static', customCodeLength, offset, origHex, customCode, preProcessedCustomCode) )
else:
mod.data[mod.revision].append( ('injection', customCodeLength, offset, origHex, customCode, preProcessedCustomCode) )
def parseAmfsReplaceCodeBlock( self, codeChangeDict, mod ):
""" AMFS Long Static Overwrite of variable length. """
# Pre-process the custom code (make sure there's no whitespace, and/or assemble it)
sourceFile = codeChangeDict['sourceFile'] # Expected to be there
annotation = codeChangeDict.get( 'annotation', '' ) # May not be there
fullAsmFilePath = '\\\\?\\' + os.path.normpath( os.path.join( mod.path, sourceFile ))
# Get the custom code from the ASM file and pre-process it (make sure there's no whitespace, and/or assemble it)
returnCode, offset, customCode, preProcessedCustomCode = self.getCustomCodeFromFile( fullAsmFilePath, mod, False, annotation )
if returnCode != 0: return
# Get the offset of the code change, and the original code at that location
offset = codeChangeDict['address']
dolOffset = normalizeDolOffset( offset, dolObj=mod.vanillaDol )
origHex = getVanillaHex( dolOffset, revision=mod.revision, suppressWarnings=False )
if not origHex:
mod.missingVanillaHex = True
self.errors.append( 'Unable to find vanilla hex for ' + offset )
return
# Get the custom code's length, and store the info for this code change
customCodeLength = getCustomCodeLength( preProcessedCustomCode )
mod.data[mod.revision].append( ('static', customCodeLength, offset, origHex, customCode, preProcessedCustomCode) )
def processAmfsInjectSubfolder( self, fullFolderPath, mod, isRecursive ):
""" Processes all files/folders in a directory """
try:
for item in os.listdir( fullFolderPath ):
itemPath = os.path.join( fullFolderPath, item )
if os.path.isdir( itemPath ) and isRecursive:
self.processAmfsInjectSubfolder( itemPath, mod, isRecursive )
elif itemPath.endswith( '.asm' ):
self.parseAmfsInject( None, mod, sourceFile=itemPath )
except WindowsError as err:
mod.parsingError = True
self.errors.append( 'Unable to find the folder "{}"'.format(fullFolderPath) )
print err
def parseAmfsInjectFolder( self, codeChangeDict, mod ):
# Get/construct the root folder path
sourceFolder = codeChangeDict['sourceFolder']
sourceFolderPath = os.path.join( mod.path, sourceFolder )
# try:
self.processAmfsInjectSubfolder( sourceFolderPath, mod, codeChangeDict['isRecursive'] )
# except WindowsError as err:
# # Try again with extended path formatting
# print 'second try for', sourceFolderPath
# self.processAmfsInjectSubfolder( '\\\\?\\' + os.path.normpath(sourceFolderPath), mod, codeChangeDict['isRecursive'] )
def clearModsLibraryTab():
""" Clears the Mods Library tab's GUI (removes buttons and deletes mod modules) """
# Remove the Mods Library selection button from the GUI
librarySelectionLabel.place_forget()
# Delete all mods currently populated in the GUI (by deleting the associated tab),
# and remove any other current widgets/labels in the main notebook
for child in modsLibraryNotebook.winfo_children():
child.destroy()
# Remove any description text ('Click this button to....')
for child in modsLibraryTab.mainRow.winfo_children():
if child.winfo_class() == 'TLabel' and child != librarySelectionLabel:
child.destroy()
def placeModLibrarySelectionBtn( placeLibrarySelectionBtnDescriptor ):
if len( modsLibraryNotebook.tabs() ) > 0: # There is space in the usual place
librarySelectionLabel['bg'] = 'SystemButtonFace'
librarySelectionLabel.place( anchor='e', relx=1, x=-13, y=16 )
else: # No tabs means no usual space for this button
librarySelectionLabel['bg'] = 'white'
librarySelectionLabel.place( anchor='e', relx=1, x=-64, y=66 )
if placeLibrarySelectionBtnDescriptor:
btnDescriptor = ttk.Label( modsLibraryTab.mainRow, text='Click this button to select a directory for your Mods Library -->', background='white' )
btnDescriptor.place( anchor='e', relx=1, x=-120, y=66 )
def scanModsLibrary( playAudio=True ):
""" The primary function for processing a Mods Library. Will identify and parse the standard .txt file mod format,
as well as the AMFS structure. The primary .include paths for import statements are also set here.
Include Path Priority:
1) The current working directory (usually the MCM root folder)
2) Directory of the mod's code file (or the code's root folder with AMFS)
3) The current Mods Library's ".include" directory
4) The MCM root folder's ".include" directory """
# If this scan is triggered while it is already running, queue/wait for the previous iteration to cancel and re-run
if modsLibraryNotebook.isScanning:
modsLibraryNotebook.stopToRescan = True
return
tic = time.clock()
# Remember the currently selected tab and its scroll position.
currentTab = getCurrentModsLibraryTab()
lastSelectedTabFileSource = ''
if currentTab != 'emptyNotebook':
frameForBorder = currentTab.winfo_children()[0]
modsPanelInterior = frameForBorder.winfo_children()[0].interior # frameForBorder -> modsPanel.interior
lastSelectedTabFileSource = modsPanelInterior.winfo_children()[0].sourceFile # Checking the first mod of the mods panel (should all have same source file)
sliderYPos = frameForBorder.winfo_children()[0].vscrollbar.get()[0] # .get() returns e.g. (0.49505277044854884, 0.6767810026385225)
genGlobals['allMods'] = []
genGlobals['allModNames'] = Set()
modsLibraryFolder = getModsFolderPath()
# Build the list of paths for .include script imports (this will be prepended by the folder housing each mod text file)
includePaths = [ os.path.join(modsLibraryFolder, '.include'), os.path.join(scriptHomeFolder, '.include') ]
clearModsLibraryTab()
# Validate the current Mods Library folder
if not os.path.exists( modsLibraryFolder ):
msg( 'Unable to find the "' + os.path.split(modsLibraryFolder)[1] + '" folder.' )
ttk.Label( modsLibraryNotebook, text='No Mods Library directory found.', background='white' ).place( relx=.5, rely=.5, anchor='center' )
ttk.Label( modsLibraryNotebook, image=imageBank['randall'], background='white' ).place( relx=0.5, rely=0.5, anchor='n', y=15 ) # y not :P
placeModLibrarySelectionBtn( True )
return
modsLibraryNotebook.isScanning = True
parentFrames = {} # Tracks tabs that mod modules are attached to (their parents), so the frames can be added to at any time
# Load settings from the settings.py file
loadRegionOverwriteOptions()
def processItemInDir( parentDirectory, parentNotebook, fileOrFolderItem ):
itemPath = os.path.normpath( os.path.join(parentDirectory, fileOrFolderItem) )
# Create a new tab for the parent notebook for this item (folder or text file).
newTab = Frame( parentNotebook )
if os.path.isdir( itemPath ):
# Scan this folder to determine whether it uses the new ASM Mod Folder Structure (AMFS)
folderItems = os.listdir( itemPath )
if 'codes.json' in folderItems:
# Open the json file and get its file contents (need to do this early so we can check for a mod category)
with open( os.path.join(itemPath, 'codes.json'), 'r') as jsonFile:
jsonContents = json.load( jsonFile )
# Check for a mod category
category = jsonContents.get( 'category', 'Uncategorized' )
if category in parentFrames:
ModsLibraryParser( parentFrames[category], [itemPath] + includePaths ).parseAmfs( fileOrFolderItem, itemPath, jsonContents )
else: # Need to create a new parent frame and notebook
parentNotebook.add( newTab, text=category )
# Create a space for new modules, and then parse the file to populate it.
frameForBorder = Frame( newTab, borderwidth=2, relief='groove' )
modsPanel = VerticalScrolledFrame( frameForBorder )
# Code modules will go here, as children of a modsPanel.
modsPanel.pack( side='left', fill='both', expand=1 )
frameForBorder.place( x=0, y=0, relwidth=.65, relheight=1.0 )
ModsLibraryParser( modsPanel.interior, [itemPath] + includePaths ).parseAmfs( fileOrFolderItem, itemPath, jsonContents )
parentFrames[category] = modsPanel.interior
# If this tab was selected before this scan, reselect it and restore the previous scroll position.
if itemPath == lastSelectedTabFileSource:
if selectModLibraryTab( newTab ): # Ensures all tabs leading to this tab are all selected (multiple may be nested)
modsPanel.canvas.yview_moveto( sliderYPos )
else:
parentNotebook.add( newTab, image=imageBank['folderIcon'], text=fileOrFolderItem, compound='left' )
# Create a new Notebook object (tab group) for this directory.
newNotebook = ttk.Notebook( newTab )
newNotebook.pack( fill='both', expand=1 )
newNotebook.bind( '<<NotebookTabChanged>>', onTabChange )
processDirectory( itemPath, newNotebook ) # Recursive fun!
elif itemPath.lower().endswith('.txt'): # This tab will be for a file.
tabName = fileOrFolderItem[:-4] # Removes '.txt'/'.TXT' from the string.
parentNotebook.add( newTab, text=tabName )
# Create a space for new modules, and then parse the file to populate it.
frameForBorder = Frame( newTab, borderwidth=2, relief='groove' )
modsPanel = VerticalScrolledFrame( frameForBorder )
# Code modules will go here, as children of a modsPanel.
modsPanel.pack( side='left', fill='both', expand=1 )
frameForBorder.place( x=0, y=0, relwidth=.65, relheight=1.0 )
# Add all mod definitions from this file to the GUI
parseModsLibraryFile( itemPath, modsPanel.interior, [parentDirectory] + includePaths )
parentFrames[tabName] = modsPanel.interior
# Update the GUI
modsLibraryNotebook.update()
# If this tab was selected before this scan, reselect it and restore the previous scroll position.
if itemPath == lastSelectedTabFileSource:
if selectModLibraryTab( newTab ): # Ensures all tabs leading to this tab are all selected (multiple may be nested)
modsPanel.canvas.yview_moveto( sliderYPos )
def processDirectory( parentDirectory, parentNotebook ):
itemsInDir = os.listdir( parentDirectory )
somethingCreated = False
# Check if there are any items in this folder to be processed exclusively (item starting with '+')
for fileOrFolderItem in itemsInDir:
if fileOrFolderItem.startswith( '+' ):
processItemInDir( parentDirectory, parentNotebook, fileOrFolderItem )
somethingCreated = True
break
else: # Loop above didn't break; no item with '+' found, so process everything in this folder
for fileOrFolderItem in itemsInDir:
if modsLibraryNotebook.stopToRescan:
break
elif fileOrFolderItem.startswith( '!' ) or fileOrFolderItem.startswith( '.' ):
continue # User can optionally exclude these folders from parsing
else:
processItemInDir( parentDirectory, parentNotebook, fileOrFolderItem )
somethingCreated = True
if not somethingCreated:
# Nothing in this folder. Add a label to convey this.
Label( parentNotebook, text='No text files found here.', bg='white' ).place( relx=.5, rely=.5, anchor='center' )
ttk.Label( parentNotebook, image=imageBank['randall'], background='white' ).place( relx=0.5, rely=0.5, anchor='n', y=15 ) # y not :P
processDirectory( modsLibraryFolder, modsLibraryNotebook )
def restartScan( playAudio ):
time.sleep( .2 ) # Give a moment to allow for current settings to be saved via saveOptions.
modsLibraryNotebook.isScanning = False
modsLibraryNotebook.stopToRescan = False
scanModsLibrary( playAudio )
if modsLibraryNotebook.stopToRescan:
restartScan( playAudio )
else:
toc = time.clock()
print 'library parsing time:', toc - tic
# Add the Mods Library selection button to the GUI
placeModLibrarySelectionBtn( False )
totalModsInLibraryLabel.set( 'Total Mods in Library: ' + str(len( genGlobals['allMods'] )) ) # todo: refactor code to count mods in the modsPanels instead
#totalSFsInLibraryLabel.set( 'Total Standalone Functions in Library: ' + str(len( collectAllStandaloneFunctions(genGlobals['allMods'], forAllRevisions=True) )) )
realignControlPanel()
root.bind_class( 'moduleClickTag', '<1>', modModuleClicked )
if dol.data:
collectAllStandaloneFunctions()
checkForEnabledCodes()
if playAudio: playSound( 'menuChange' )
# Check once more if another scan is queued. (e.g. if the scan mods button was pressed again while checking for installed mods)
if modsLibraryNotebook.stopToRescan:
restartScan( playAudio )
else:
modsLibraryNotebook.isScanning = False
modsLibraryNotebook.stopToRescan = False
def parseGeckoCode( gameRevision, codeText ):
""" Returns a tuple of 'title', 'author(s)', and the mod's code changes'.
The code changes list is in the same format as those parsed by codes in MCM's
usual format, and created internally as 'ModModule' modData dictionary entries. """
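# The block below is an illustrative sketch (not taken from any real codeset) of the Gecko code text this
# function expects; the mod name, author, addresses, and values are hypothetical:
#
#	$Some Mod Name [Some Author]
#	* A description line
#	04123456 60000000		<- 32-bit static overwrite at RAM address 0x80123456
#	C2234560 00000002		<- injection at 0x80234560; the value is the number of 8-byte code lines that follow
#	3C608001 38630100
#	60000000 00000000
#
# Note that only the first title line is parsed; if a second '$'/'[...]' title is encountered, parsing stops there.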
title = authors = ''
description = []
codeChanges = [] # A Codechange = a tuple of ( changeType, customCodeLength, offset, originalCode, customCode, preProcessedCustomCode )
codeBuffer = [] # Temp staging area while code lines are collected, before they are submitted to the above codeChanges list.
# Load the DOL for this revision (if one is not already loaded), for original/vanilla code look-ups
vanillaDol = loadVanillaDol( gameRevision )
for line in codeText.splitlines():
if not line: continue
elif line.startswith( '*' ): # A description line (another form of comment)
description.append( line[1:] )
elif line.startswith( '$' ) or ( '[' in line and ']' in line ):
line = line.lstrip( '$' )
# Sanity check; the buffer should be empty if a new code is starting
if codeBuffer:
print 'Warning! Gecko code parsing ran into an error or an invalid code!'
print 'The code buffer was not emptied before a new code was encountered.'
codeBuffer = []
if title: # It's already been set, meaning this is another separate code
break
if '[' in line and ']' in line:
titleParts = line.split( '[' )
authors = titleParts[-1].split( ']' )[0]
title = '['.join( titleParts[:-1] )
else:
title = line
elif codeBuffer: # Multi-line code collection is in-progress
changeType, customCodeLength, ramAddress, _, collectedCodeLength = codeBuffer
newHex = ''.join( line.split( '#' )[0].split() ) # Should remove all comments and whitespace
newHexLength = len( newHex ) / 2 # Divide by 2 to count by bytes rather than nibbles
if collectedCodeLength + newHexLength < customCodeLength:
codeBuffer[3].append( newHex )
codeBuffer[4] += newHexLength
else: # Last line to collect from for this code change
# Collect the remaining new hex and consolidate it
bytesRemaining = customCodeLength - collectedCodeLength
codeBuffer[3].append( newHex[:bytesRemaining*2] ) # x2 to count by nibbles
rawCustomCode = ''.join( codeBuffer[3] ) # no whitespace
customCode = customCodeProcessor.beautifyHex( rawCustomCode ) # Formats to 8 bytes per line
# Get the original/vanilla code
intRamAddress = int( ramAddress[3:], 16 ) # Trims off leading 0x8 before conversion
dolOffset = offsetInDOL( intRamAddress, vanillaDol.sectionInfo )
if dolOffset == -1: originalCode = ''
else: originalCode = vanillaDol.data[dolOffset*2:(dolOffset+4)*2].upper() # At the injection point
# Add the finished code change to the list, and reset the buffer
codeChanges.append( (changeType, customCodeLength, ramAddress, originalCode, customCode, rawCustomCode) )
codeBuffer = []
elif line.startswith( '04' ): # A Static Overwrite
ramAddress = '0x80' + line[2:8]
customCode = line.replace( ' ', '' )[8:16]
# Get the vanilla code from the DOL
dolOffset = offsetInDOL( int(line[2:8], 16), vanillaDol.sectionInfo )
if dolOffset == -1: originalCode = ''
else: originalCode = vanillaDol.data[dolOffset*2:(dolOffset+4)*2].upper()
codeChanges.append( ('static', 4, ramAddress, originalCode, customCode, customCode) )
elif line.startswith( '06' ): # A Long Static Overwrite (string write)
ramAddress = '0x80' + line[2:8]
totalBytes = int( line.replace( ' ', '' )[8:16], 16 )
# Set up the code buffer, which will be filled with data until it's gathered all the bytes
codeBuffer = [ 'static', totalBytes, ramAddress, [], 0 ]
elif line.upper().startswith( 'C2' ): # An Injection
ramAddress = '0x80' + line[2:8]
totalBytes = int( line.replace( ' ', '' )[8:16], 16 ) * 8 # The count in the C2 line is a number of lines, where each line should be 8 bytes
# Set up the code buffer, which will be filled with data until it's gathered all the bytes
codeBuffer = [ 'injection', totalBytes, ramAddress, [], 0 ]
return title, authors, '\n'.join( description ), codeChanges
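# Note on the gameSettingsTable entry layout assumed by the look-ups below (inferred from this function, not an
# authoritative definition): index 0 is the setting's offset relative to the game settings table in the DOL,
# index 1 is the vanilla Melee default, index 2 is the tournament default, and indices 3+ (when present) are
# display strings selected by the setting's numeric value.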
def getGameSettingValue( widgetSettingID, settingsSource ):
if settingsSource == 'fromDOL' and not dol.revision:
settingsSource = 'vMeleeDefaults' # No DOL has been loaded; can't read from it
if settingsSource == 'fromDOL': # Get the setting's current value from the DOL.
# Determine the offset for the setting's value
relOffset = gameSettingsTable[widgetSettingID][0]
offset = ( settingsTableOffset[dol.revision] + relOffset ) *2 # Doubled to count by nibbles rather than bytes.
if widgetSettingID == 'damageRatioSetting': value = int( dol.data[offset:offset+2], 16 ) / 10.0
elif widgetSettingID == 'stageToggleSetting' or widgetSettingID == 'itemToggleSetting': value = dol.data[offset:offset+8]
elif widgetSettingID == 'itemFrequencySetting':
value = dol.data[offset:offset+2]
if value.upper() == 'FF': value = 0
else: value = int( value, 16 ) + 1 # Accounts for skipping 'None'
else: value = int( dol.data[offset:offset+2], 16 )
elif settingsSource == 'vMeleeDefaults': value = gameSettingsTable[widgetSettingID][1]
else: value = gameSettingsTable[widgetSettingID][2]
# If the value needs to be one of the strings, use the provided value to get the position of the needed string.
if len( gameSettingsTable[widgetSettingID] ) > 3: value = gameSettingsTable[widgetSettingID][3+value]
return str( value )
def updateMeleeToVanillaGameSettings():
updateDefaultGameSettingsTab( 'vMeleeDefaults' )
checkForPendingChanges()
playSound( 'menuChange' )
def updateMeleeToTournamentGameSettings():
updateDefaultGameSettingsTab( 'tourneyDefaults' )
checkForPendingChanges()
playSound( 'menuChange' )
def updateDefaultGameSettingsTab( settingsSource ):
""" Called when an ISO/DOL is loaded, or a set of default settings are selected, also
called by checkForEnabledCodes or the buttons for loading default setting presets.
"settingsSource" may be 'fromDOL', 'vMeleeDefaults', or 'tourneyDefaults'. """
for widgetSettingID in gameSettingsTable:
value = getGameSettingValue( widgetSettingID, settingsSource )
# Update the textvariable that will be used to track changes (both internally, and for the GUI)
currentGameSettingsValues[widgetSettingID].set( value ) # Will be converted to a string if it was an int
# Update the control widget's appearance.
if settingsSource != 'fromDOL':
widgetControlID = widgetSettingID[:-7] + 'Control' # Each of these strings corresponds to a widget
updateDefaultGameSettingWidget( widgetControlID, value, False )
# Update the elements in the stage/item selections windows.
if settingsSource == 'fromDOL': fromDOL = True
else: fromDOL = False
if root.stageSelectionsWindow: root.stageSelectionsWindow.updateStates( currentGameSettingsValues['stageToggleSetting'].get(), fromDOL, False )
if root.itemSelectionsWindow: root.itemSelectionsWindow.updateStates( currentGameSettingsValues['itemToggleSetting'].get(), fromDOL, False )
def getVanillaHex( offset, byteCount=4, revision='', suppressWarnings=True ):
""" Gets the original game code at a specified offset, based on the given game region and version (revision).
The game code comes from the DOLs available in the "Original DOLs" folder. """
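# Usage sketch (the offset and revision here are hypothetical): the return value is a hex string of
# byteCount bytes (byteCount*2 characters), or an empty string if no original DOL could be loaded.
#	originalHex = getVanillaHex( 0x3000, byteCount=4, revision='NTSC 1.02' )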
if not revision:
revision = dol.revision
vanillaDol = loadVanillaDol( revision=revision, suppressWarnings=suppressWarnings )
if vanillaDol and vanillaDol.data:
vanillaCode = vanillaDol.data[offset*2:(offset+byteCount)*2] # Doubling offsets to count by nibbles (since it's a hex string)
else: vanillaCode = ''
return vanillaCode
def loadVanillaDol( revision='', suppressWarnings=False ):
""" This will load and return the specified vanilla or "Original" DOL into memory (from those available in the "Original DOLs" folder),
used for various text/data section checks and/or vanilla code look-ups. May be called multiple times, but a DOL for
a given revision is only loaded once. The revision of the DOL currently loaded for editing is the default revision to load if one is not specified. """
if not revision:
revision = dol.revision
if 'ALL' in revision and len( originalDols ) > 0: # DOL loading not required, as the code request should be the same for any revision (so we can use any already loaded).
return next( originalDols.itervalues() ) # Just get any, doesn't matter which
elif revision in originalDols: # originalDols is a global dictionary, containing DOL data for vanilla code lookups
return originalDols[revision]
else:
if 'ALL' in revision:
# Grab any valid DOL found, or actually look for ALL.dol if none were found (latter case not really expected)
validDols = listValidOriginalDols()
if validDols: filename = listValidOriginalDols()[0] + '.dol'
else: filename = revision + '.dol'
dolPath = dolsFolder + '\\' + filename
else:
filename = revision + '.dol'
dolPath = dolsFolder + '\\' + filename
if not os.path.exists( dolPath ):
if not suppressWarnings:
msg( 'An original DOL for the DOL revision of ' + revision + ' was not found in the original DOLs folder. '
'In order for the program to restore regions and look up vanilla code or properties, you must place an original copy of the '
'DOL here:\n\n' + dolsFolder + '\n\nThe filename should be "[region] [version].dol", for example, "NTSC 1.02.dol"', 'Unable to find original DOL' )
return None
originalDols[revision] = dolInitializer()
originalDols[revision].load( dolPath )
return originalDols[revision]
def listValidOriginalDols():
""" Returns a list of what original DOLs are available in the "Original DOLs" folder. """
dolVariations = []
for file in os.listdir( dolsFolder ):
filenameWithExt = os.path.basename( file )
filename, ext = os.path.splitext( filenameWithExt )
if ext.lower() == '.dol':
regionInfo = filename.split() # splits on whitespace by default
if len( regionInfo ) > 1:
region, version = regionInfo[0].upper(), regionInfo[1]
# Validate the name to make sure it's something the program is familiar with.
if ( region == 'NTSC' or region == 'PAL' ) and version.startswith( '1.' ): # Parentheses needed so the version check applies to both regions
dolVariations.append( region + ' ' + version )
return dolVariations
def problemWithDol():
""" Makes sure there is a DOL (or disc) file loaded, and that the path to it
is still valid. Returns True if a problem is encountered. """
if not dol.path or not dol.data:
msg( 'No ISO or DOL file has been loaded.' )
return True
elif not os.path.exists( dol.path ):
msg( 'Unable to locate the '+ dol.type.upper() + '! Be sure that it has not been moved or deleted.' )
return True
else: return False
def regionsOverlap( regionList ):
""" Checks selected custom code regions to make sure they do not overlap one another. """
overlapDetected = False
# Compare each region to every other region.
for i, regionEndPoints in enumerate( regionList, start=1 ):
regionStart, regionEnd = regionEndPoints
# Loop over the remaining items in the list (starting from second entry on first iteration, third entry from second iteration, etc),
# so as not to compare to itself, or make any repeated comparisons.
for nextRegionStart, nextRegionEnd in regionList[i:]:
# Check if these two regions overlap by any amount
if nextRegionStart < regionEnd and regionStart < nextRegionEnd: # The regions overlap by some amount.
overlapDetected = True
# Determine the names of the overlapping regions, and report this to the user
msg( 'Warning! One or more regions enabled for use overlap each other. The first overlapping areas detected are (' + hex(regionStart) + ', ' + hex(regionEnd) + ') and '
'(' + hex(nextRegionStart) + ', ' + hex(nextRegionEnd) + '). (There may be more; resolve this case and try again to find others.) '
'\n\nThese regions cannot be used in tandem. In the Code-Space Options window, please choose other regions, or deselect one of '
'the regions that uses one of the areas shown above.', 'Region Overlap Detected' )
break
if overlapDetected: break
return overlapDetected
#=================================#
# ~ ~ Mod Analysis & Processing ~ ~ #
#=================================#
def modModuleClicked( event ):
""" Handles click events on mod modules, and toggles their install state
(i.e. whether or not it should be installed when the user hits save). """
# Get the widget of the main frame for the module (i.e. the modModule frame, "self")
modState = None
mod = event.widget
failsafe = 0
while not modState:
mod = mod.master # Move upward through the GUI hierarchy until the mod module is found
modState = getattr( mod, 'state', None )
assert failsafe < 3, 'Unable to process click event on modModule; no mod module found.'
failsafe += 1
# Toggle the state of the module
if modState and modState != 'unavailable':
# Offer a warning if the user is trying to disable the crash printout code
if settingsFile.alwaysEnableCrashReports and mod.name == "Enable OSReport Print on Crash" and (modState == 'pendingEnable' or modState == 'enabled'):
if not tkMessageBox.askyesno( 'Confirm Disabling Crash Printout', 'This mod is very useful for debugging crashes '
'and is therefore enabled by default. You can easily disable this behavior by opening the '
'"settings.py" file in a text editor and setting the option "alwaysEnableCrashReports" '
'to False, or by removing the "Enable OSReport Print on Crash" code from your library '
"(or comment it out so it's not picked up by MCM).\n\nAre you sure you'd like to disable this mod?" ):
return # Return if the user hit "No" (they don't want to disable the mod)
if modState == 'pendingEnable': state = 'disabled'
elif modState == 'pendingDisable':
if mod.type == 'gecko' and not overwriteOptions[ 'EnableGeckoCodes' ].get():
msg( 'This mod includes a Gecko code, which are disabled.' )
return # Exits now; don't change the state or check for changes
else: state = 'enabled'
elif modState == 'enabled': state = 'pendingDisable'
elif modState == 'disabled': state = 'pendingEnable'
else: state = 'disabled' # Failsafe reset.
playSound( 'menuChange' )
mod.setState( state )
checkForPendingChanges()
def selectAllMods( event ):
""" Called by the 'Select All' button on the GUI. Only applies to the current tab. """
currentTab = getCurrentModsLibraryTab()
if currentTab != 'emptyNotebook':
frameForBorder = currentTab.winfo_children()[0]
scrollingFrame = frameForBorder.winfo_children()[0]
geckoCodesEnabled = overwriteOptions[ 'EnableGeckoCodes' ].get()
for mod in scrollingFrame.interior.winfo_children():
if mod.state == 'pendingDisable':
if geckoCodesEnabled or mod.type != 'gecko': # Skips Gecko codes if they're disabled.
mod.setState( 'enabled' )
elif mod.state == 'disabled': mod.setState( 'pendingEnable' )
playSound( 'menuChange' )
checkForPendingChanges()
def deselectAllMods( event ):
""" Called by the 'Deselect All' button on the GUI. Only applies to the current tab. """
currentTab = getCurrentModsLibraryTab()
if currentTab != 'emptyNotebook':
frameForBorder = currentTab.winfo_children()[0]
scrollingFrame = frameForBorder.winfo_children()[0]
for mod in scrollingFrame.interior.winfo_children():
if mod.state == 'pendingEnable': mod.setState( 'disabled' )
elif mod.state == 'enabled': mod.setState( 'pendingDisable' )
playSound( 'menuChange' )
checkForPendingChanges()
def selectWholeLibrary( event ):
""" Recursively scans all Mods Library tabs (notebook widgets), and selects all
mods that are not currently 'unavailable'. """
geckoCodesEnabled = overwriteOptions[ 'EnableGeckoCodes' ].get()
for mod in genGlobals['allMods']:
if mod.state == 'pendingDisable':
if geckoCodesEnabled or mod.type != 'gecko': # Skips Gecko codes if they're disabled.
mod.setState( 'enabled' )
if mod.state == 'disabled': mod.setState( 'pendingEnable' )
playSound( 'menuChange' )
checkForPendingChanges()
def deselectWholeLibrary( event ):
""" Recursively scans all Mods Library tabs (notebook widgets), and deselects all
mods that are not currently 'unavailable'. """
for mod in genGlobals['allMods']:
if mod.state == 'pendingEnable': mod.setState( 'disabled' )
elif mod.state == 'enabled': mod.setState( 'pendingDisable' )
playSound( 'menuChange' )
checkForPendingChanges()
def offsetInEnabledRegions( dolOffset ):
""" Checks if a DOL offset falls within an area reserved for custom code.
Returns tuple( bool:inEnabledRegion, string:regionNameFoundIn ) """
inEnabledRegion = False
regionNameFoundIn = ''
for regionName, regions in dol.customCodeRegions.items():
# Scan the regions for the offset
for regionStart, regionEnd in regions:
if dolOffset < regionEnd and dolOffset >= regionStart: # Target region found
if not inEnabledRegion:
inEnabledRegion = overwriteOptions[regionName].get()
regionNameFoundIn = regionName
# In a perfect world, we could break from the loop here, but it may still
# be in another region that is enabled (i.e. there's some region overlap).
return ( inEnabledRegion, regionNameFoundIn )
def getModCodeChanges( mod, forAllRevisions=False ):
""" Gets all code changes required for a mod to be installed. """
codeChanges = []
if forAllRevisions:
for changes in mod.data.values():
codeChanges.extend( changes )
else:
# Get code changes that are applicable to all revisions, as well as those applicable to just the currently loaded revision
codeChanges.extend( mod.data.get('ALL', []) )
codeChanges.extend( mod.data.get(dol.revision, []) )
return codeChanges
def codeIsAssembly( codeLines ):
""" For purposes of final code processing (resolving custom syntaxes), special syntaxes
will be resolved to assembly, so they will also count as assembly here. """
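# Illustrative classifications (hypothetical lines): '7C0802A6 38600001' is valid hex, so it does not mark the
# code as assembly; 'mflr r0' is not valid hex, so it does; a code composed solely of special syntaxes, such as
# 'b <someFunction>', is treated as assembly since those lines resolve to assembly later.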
isAssembly = False
onlySpecialSyntaxes = True
for wholeLine in codeLines:
# Strip off and ignore comments
line = wholeLine.split( '#' )[0].strip()
if line == '': continue
elif isSpecialBranchSyntax( line ) or containsPointerSymbol( line ):
continue # These will later be resolved to assembly
onlySpecialSyntaxes = False
if not validHex( ''.join(line.split()) ): # Whitespace is excluded from the check
isAssembly = True
if onlySpecialSyntaxes:
isAssembly = True
return isAssembly
def getCustomCodeLength( customCode, preProcess=False, includePaths=None ):
""" Returns a byte count for custom code, though it is first calculated here in terms of nibbles. Custom syntaxes may be included.
Processing is easiest with hex strings (without whitespace), though input can be ASM if preProcess=True.
Example inputs:
'3C60801A60634340'
or
'3C60801A60634340|S|sbs__b <someFunction>|S|3C60801A48006044' <- includes some special branch syntax. """
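# For the docstring examples above: the first input is 16 hex characters -> 8 bytes; the second input counts
# 16 + 8 (special branch placeholder, assembled to 4 bytes) + 16 nibbles -> 20 bytes.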
if preProcess: # The input is assembly and needs to be assembled into hexadecimal, or it has whitespace that needs removal
customCode = customCodeProcessor.preAssembleRawCode( customCode, includePaths )[1]
if '|S|' in customCode: # Indicates a special syntax is present
length = 0
for section in customCode.split( '|S|' ):
if section.startswith( 'sbs__' ) or section.startswith( 'sym__' ): length += 8 # expected to be 4 bytes once assembled
else: length += len( section )
else:
length = len( customCode )
return length / 2
def collectAllStandaloneFunctions( forAllRevisions=False ): # deprecate forAllRevisions arg?
""" Gets all standalone functions in the mods library (across all codes).
This is dependent on the Mods Library as well as the currently loaded DOL's revision.
Storage is in the form of a dictionary, as key=functionName, value=(offset, customCode, preProcessedCustomCode) """
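# e.g. (hypothetical entry) functionsDict['SomeFunction'] = ( -1, customCode, preProcessedCustomCode );
# the -1 offset is a placeholder, since a standalone function's location isn't known until installation.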
functionsDict = {}
for mod in genGlobals['allMods']:
if mod.type == 'standalone': # Has at least one SF; skip mods that aren't of this type
for codeChange in getModCodeChanges( mod, forAllRevisions ):
if codeChange[0] == 'standalone': # codeChange = ( changeType, customCodeLength, offset, originalCode, customCode, preProcessedCustomCode )
functionName = codeChange[2]
if not functionName in functionsDict:
functionsDict[ functionName ] = ( -1, codeChange[4], codeChange[5] )
# If the function is already in the dictionary, make sure any copies found are identical (because the first copy found will be used for all mods calling it)
elif functionsDict[ functionName ][2] != codeChange[5]:
msg( 'Warning! Differing versions of the standalone function, "' + functionName + '", have been detected! '
'Only the first variation found will be used. '
'\n\nIf these are meant to be different functions, they must have different names. If they are meant to be the same function, '
'they must have the same code. (Note that only one of the enabled mods requiring this function needs to define it.) ' )
genGlobals['allStandaloneFunctions'] = functionsDict
def parseCodeForStandalones( preProcessedCode, requiredFunctions, missingFunctions ):
""" Recursive helper function for getRequiredStandaloneFunctionNames(). Checks
one particular code change (injection/overwrite) for standalone functions. """
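# Example of the pre-processed input handled below (hypothetical function names):
# '...|S|sbs__bl <MyFunction>|S|...' yields the required function 'MyFunction', and
# '...|S|sym__<<AnotherFunction>>|S|...' yields 'AnotherFunction'. Each function found is then
# scanned recursively for further standalone function dependencies.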
if '|S|' in preProcessedCode:
for section in preProcessedCode.split( '|S|' ):
if section.startswith( 'sbs__' ) and '<' in section and '>' in section: # Special Branch Syntax; one name expected
newFunctionNames = ( section.split( '<' )[1].split( '>' )[0], ) # Second split prevents capturing comments following on the same line.
elif section.startswith( 'sym__' ): # Assume could have multiple names
newFunctionNames = []
for fragment in section.split( '<<' ):
if '>>' in fragment: # A symbol (function name) is in this string segment.
newFunctionNames.append( fragment.split( '>>' )[0] )
else: continue
for functionName in newFunctionNames:
if functionName in requiredFunctions: continue # This function has already been analyzed
requiredFunctions.add( functionName )
# Recursively check for more functions that this function may reference
if functionName in genGlobals['allStandaloneFunctions']:
parseCodeForStandalones( genGlobals['allStandaloneFunctions'][functionName][2], requiredFunctions, missingFunctions )
else:
missingFunctions.add( functionName )
return requiredFunctions, missingFunctions
def getRequiredStandaloneFunctionNames( mod ):
""" Gets the names of all standalone functions a particular mod requires.
Returns a list of these function names, as well as a list of any missing functions. """
functionNames = set()
missingFunctions = set()
# This loop will be over a list of tuples (code changes) for a specific game version.
for codeChange in getModCodeChanges( mod ):
if codeChange[0] != 'gecko': #todo allow gecko codes to have SFs
functionNames, missingFunctions = parseCodeForStandalones( codeChange[5], functionNames, missingFunctions ) # codeChange[5] is preProcessedCustomCode
return list( functionNames ), list( missingFunctions ) # functionNames will also include those that are missing
def checkGeckoInfrastructure():
""" While gecko.environmentSupported reveals whether MCM is properly configured to process/install Gecko codes (within the
settings file), this function checks whether a DOL has Gecko code parts installed within it, by checking for the Gecko
codehandler hook and the codelist. """
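# Codelist wrapper layout assumed by the checks below: the installed codelist begins with the 8-byte header
# 00D0C0DE 00D0C0DE and ends with the terminator F0000000 00000000; the hex between those two markers is
# returned as the installed codelist area.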
installed = False
codelistArea = ''
if gecko.environmentSupported:
# Get the hex at the codehandler hook location currently in the DOL
hexAtHookOffset = dol.data[gecko.hookOffset*2:gecko.hookOffset*2+8]
# Get the vanilla/default hex at that same location for this DOL
vanillaHexAtHookOffset = getVanillaHex( gecko.hookOffset )
if not vanillaHexAtHookOffset: # Unable to retrieve the hex
msg( 'Unable to confirm the installation of Gecko codes. To do so, you must place an original copy of the DOL here:\n'
'(Filename should be "[region] [version].dol", for example, "NTSC 1.02.dol")\n\n' + dolsFolder, 'Unable to find an original copy of the DOL' )
elif hexAtHookOffset != vanillaHexAtHookOffset: # If these are different, the gecko codehandler hook must be installed.
codelistRegionData = dol.data[gecko.codelistRegionStart*2:gecko.codelistRegionEnd*2].lower()
# Check for the codelist wrapper
if '00d0c0de00d0c0de' in codelistRegionData and 'f000000000000000' in codelistRegionData:
codelistArea = codelistRegionData.split('00d0c0de00d0c0de')[1].split('f000000000000000')[0]
installed = True
addToInstallationSummary( geckoInfrastructure=True )
return ( installed, codelistArea )
def customCodeInDOL( startingOffset, customCode, freeSpaceCodeArea, excludeLastCommand=False, startOffsetUnknown=False ):
""" Essentially tries to mismatch any of a code change's custom code with the custom code in the DOL. Besides simply
checking injection site code, this can check custom injection code, even if it includes unknown special branch syntaxes.
This is much more reliable than simply checking whether the hex at an injection site is vanilla or not because it's
possible that more than one mod could target the same location (so we have to see which mod the installed custom code
belongs to). If custom code is mostly or entirely composed of custom syntaxes, we'll give it the benefit of the doubt
and assume it's installed (since at this point there is no way to know what bytes a custom syntax may resolve into). """
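# Matching sketch for pre-processed code such as '3C60801A|S|sbs__bl <SomeFunction>|S|60000000' (hypothetical):
# the plain hex sections are compared byte-for-byte against the DOL data, while each special-syntax section
# simply advances the offset by 4 bytes, since the bytes it will assemble to are unknown at this point.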
matchOffset = startingOffset
offset = startingOffset
if excludeLastCommand: customCode = customCode[:-8] # Excludes the branch back on injection mods.
# Map each code section to the code in the DOL to see if they match up.
for section in customCode.lower().split( '|s|' ): # Need to use lowercase s instead of |S| here
if section == '': continue
# Ignore custom syntaxes
elif section.startswith( 'sbs__' ) or section.startswith( 'sym__' ):
offset += 4
continue
sectionLength = len( section ) / 2
if startOffsetUnknown: # Occurs with Gecko codes & standalone functions, since we have no branch to locate them.
# This is the first normal (non-special-branch) section for this code. Since the offset is unknown,
# use this section to find all possible locations/matches for this code within the region.
matches = findAll( freeSpaceCodeArea.lower(), section, charIncrement=2 ) # charIncrement set to 2 so we increment by byte rather than by nibble
matchOffset = -1
# Iterate over the possible locations/matches, and check if each may be the code we're looking for (note that starting offsets will be known in these checks)
for matchingOffset in matches:
subMatchOffset = customCodeInDOL( (matchingOffset/2) - offset, customCode, freeSpaceCodeArea ) # "- offset" accounts for potential preceding special branches
if subMatchOffset != -1: # The full code was found
matchOffset = subMatchOffset
break
break
else:
codeInDol = freeSpaceCodeArea[offset*2:(offset+sectionLength)*2].lower()
if section != codeInDol:
matchOffset = -1
break # Mismatch detected, meaning this is not the same (custom) code in the DOL.
else:
offset += sectionLength
return matchOffset
def checkForEnabledCodes( userPromptedForGeckoUsage=False ):
""" Checks the currently loaded DOL file for which mods are installed, and sets their states accordingly.
'userPromptedForGeckoUsage' will only come into play if there are Gecko codes detected as installed. """
loadRegionOverwriteOptions()
clearSummaryTab() # Clears the summary tab's lists of installed mods/SFs.
allEnabledCodeRegions = getCustomCodeRegions()
# Preliminary checks for Gecko codes
geckoInfrastructureInstalled, codelistArea = checkGeckoInfrastructure()
standaloneFunctionsInstalled = Set()
functionOnlyModules = [] # Remember some info on modules composed of only standalone functions
geckoCodesAllowed = overwriteOptions[ 'EnableGeckoCodes' ].get()
requiredDisabledRegions = []
# Primary Mod-Detection pass. Set the state (highlighting & notes) of each module based on whether its codes are found in the DOL.
for mod in genGlobals['allMods']:
# Cancel this scan if a new scan of the Mods Library is queued
if modsLibraryNotebook.stopToRescan:
break
# Skip unavailable non-gecko mods (gecko mods are a special case, to be re-evaluated)
elif mod.state == 'unavailable' and mod.type != 'gecko':
continue
# Disable mods that are not applicable to the currently loaded DOL
elif dol.revision not in mod.data:
mod.setState( 'unavailable' )
continue
# Determine if the mod is in the DOL, and set the state of the module respectively.
included = True
functionsOnly = True
functionsIncluded = []
summaryReport = [] # Used to track and report installation locations/offsets to the Summary tab
for changeType, customCodeLength, offsetString, originalCode, _, preProcessedCustomCode in getModCodeChanges( mod ):
if functionsOnly and not changeType == 'standalone': functionsOnly = False
# Convert the offset to a DOL Offset integer (even if it was a RAM Address)
if changeType != 'standalone' and changeType != 'gecko':
offset = normalizeDolOffset( offsetString )
# Validate the offset
if offset == -1:
msg( 'A problem was detected with the mod "' + mod.name + '"; an offset for one of its code changes (' + offsetString + ') could '
"not be parsed or processed. If you're sure it's written correctly, it appears to fall out of range of this game's DOL." )
included = False
break
# Validate the original game code, removing whitespace if necessary
elif not validHex( originalCode ):
# Get rid of whitespace and try it again
originalCode = ''.join( originalCode.split() )
if not validHex( originalCode ):
msg( 'A problem was detected with the mod "' + mod.name + '"; it appears that one of its static overwrites '
'or injection points contains invalid hex (or could not be assembled). It will be assumed that this mod is disabled.' )
included = False
break # Even though there is "if not included: break" at the end of this loop, this is still needed here to prevent checking the next if block
if changeType == 'static':
# Check whether the vanilla hex for this code change matches what's in the DOL
matchOffset = customCodeInDOL( offset, preProcessedCustomCode, dol.data )
if matchOffset == -1: included = False
else:
# Check whether this overwrite would land in an area reserved for custom code. If so, assume it should be disabled.
for codeRegion in allEnabledCodeRegions:
if offset >= codeRegion[0] and offset < codeRegion[1]:
included = False
break
else: # loop above didn't break; all checks out
summaryReport.append( ('Code overwrite', changeType, offset, customCodeLength) ) # changeName, changeType, dolOffset, customCodeLength
elif changeType == 'injection':
# Test the injection point against the original, vanilla game code.
injectionPointCode = dol.data[offset*2:offset*2+8]
commandByte = injectionPointCode[:2].lower()
if injectionPointCode.lower() == originalCode.lower():
included = False
elif not ( commandByte == '48' or commandByte == '49' or commandByte == '4a' or commandByte == '4b' ):
included = False # Not a branch added by MCM! Something else must have changed this location.
else: # There appears to be a branch at the injection site (and isn't original hex). Check to see if it leads to the expected custom code.
customCodeOffset = getBranchTargetDolOffset( offset, injectionPointCode )
inEnabledRegion, regionNameFoundIn = offsetInEnabledRegions( customCodeOffset )
if inEnabledRegion:
matchOffset = customCodeInDOL( customCodeOffset, preProcessedCustomCode, dol.data, excludeLastCommand=True ) #todo narrow search field to improve performance
# If there was a good match on the custom code, remember where this code change is for a summary on this mod's installation
if matchOffset == -1: included = False
else:
summaryReport.append( ('Branch', 'static', offset, 4) ) # changeName, changeType, dolOffset, customCodeLength
summaryReport.append( ('Injection code', changeType, customCodeOffset, customCodeLength) )
else:
included = False
print '\nPossible phantom mod;', mod.name, 'may have custom code installed to a disabled region: "' + regionNameFoundIn + '"'
print 'it was led to by an injection point hex of', injectionPointCode, 'at', hex(offset), 'which points to DOL offset', hex(customCodeOffset)
if regionNameFoundIn != '':
if regionNameFoundIn not in requiredDisabledRegions:
requiredDisabledRegions.append( regionNameFoundIn )
else:
print 'Custom code at', hex(offset), 'seems to be pointing to a region not defined for custom code!'
elif changeType == 'gecko':
if not gecko.environmentSupported: # These aren't available for this DOL
included = False
elif not geckoInfrastructureInstalled: # Not installed
included = False
else: # Check if the code is installed (present in the codelist area)
matchOffset = customCodeInDOL( 0, preProcessedCustomCode, codelistArea, startOffsetUnknown=True )
if matchOffset == -1: # Code not found in the DOL
included = False
else: # Code found to be installed!
# If using the Gecko regions is not enabled, ask the user if they'd like to allow Gecko codes.
if not geckoCodesAllowed and not userPromptedForGeckoUsage: # The second boolean here ensures this message only appears once throughout all of these loops.
userPromptedForGeckoUsage = True
# If this is Melee, add some details to the message
if dol.isMelee and ( gecko.codelistRegion == 'DebugModeRegion' or gecko.codehandlerRegion == 'DebugModeRegion'
or gecko.codelistRegion == 'Debug Mode Region' or gecko.codehandlerRegion == 'Debug Mode Region' ):
meleeDetails = ( "Mostly, this just means that you wouldn't be able to use the vanilla Debug Menu "
"(if you're not sure what that means, then you're probably not using the Debug Menu, and you can just click yes). " )
else: meleeDetails = ''
promptToUser = ( 'Gecko codes have been found to be installed, however the "Enable Gecko Codes" option is not selected.'
'\n\nEnabling Gecko codes means that the regions defined for Gecko codes, ' + gecko.codelistRegion + ' and ' + gecko.codehandlerRegion + ', will be reserved '
"(i.e. may be partially or fully overwritten) for custom code. " + meleeDetails + 'Regions that you have '
'enabled for use can be viewed and modified by clicking on the "Code-Space Options" button. '
'\n\nIf you do not enable Gecko codes, those that are already installed will be removed upon saving! Would you like to enable '
'these regions for overwrites in order to use Gecko codes?' )
geckoCodesAllowed = willUserAllowGecko( promptToUser, False, root )
if geckoCodesAllowed: # This option has been toggled! Re-run this function to ensure all Gecko mod states are properly set
print 'performing gecko codes re-scan'
checkForEnabledCodes( True )
return
if geckoCodesAllowed:
dolOffset = gecko.codelistRegionStart + 8 + matchOffset
summaryReport.append( ('Gecko code', changeType, dolOffset, customCodeLength) )
else:
included = False
elif changeType == 'standalone':
functionsIncluded.append( offsetString )
if not included:
break
# Prepare special processing for the unique case of this module being composed of ONLY standalone functions (at least for this dol revision)
if included and functionsOnly:
functionOnlyModules.append( (mod, functionsIncluded) )
continue
# Check that all standalone functions this mod requires are present.
elif included:
requiredStandaloneFunctions, missingFunctions = getRequiredStandaloneFunctionNames( mod )
if missingFunctions:
included = False
msg( 'These standalone functions required for "' + mod.name + '" could not be found in the Mods Library:\n\n' + grammarfyList(missingFunctions) )
else:
# First, check whether the required SFs can be found in the enabled custom code regions
for functionName in requiredStandaloneFunctions:
preProcessedFunctionCode = genGlobals['allStandaloneFunctions'][ functionName ][2]
for areaStart, areaEnd in allEnabledCodeRegions:
matchOffset = customCodeInDOL( 0, preProcessedFunctionCode, dol.data[areaStart*2:areaEnd*2], startOffsetUnknown=True )
if matchOffset != -1: # Function found
summaryReport.append( ('SF: ' + functionName, 'standalone', areaStart + matchOffset, getCustomCodeLength( preProcessedFunctionCode )) )
break
else: # The loop scanning through the free code regions above didn't break; SF was not found.
# Check whether the function is in a disabled region
found = False
for regionName, regions in dol.customCodeRegions.items():
if regionName in overwriteOptions and not overwriteOptions[regionName].get():
# Scan the regions for the offset
for regionStart, regionEnd in regions:
matchOffset = customCodeInDOL( 0, preProcessedFunctionCode, dol.data[regionStart*2:regionEnd*2], startOffsetUnknown=True )
if matchOffset != -1: # Function found (in a disabled region!)
print 'SF for', mod.name + ', "' + functionName + '", found in a disabled region.'
if not regionName in requiredDisabledRegions: requiredDisabledRegions.append( regionName )
found = True
break
if found: break
# Even if found in a disabled region, consider not installed for now. User will be prompted for a rescan if custom code is in disabled regions
included = False
break
if included:
mod.setState( 'enabled' )
standaloneFunctionsInstalled.update( requiredStandaloneFunctions ) # This is a set, so only new names are added.
addToInstallationSummary( mod.name, mod.type, summaryReport )
elif changeType == 'gecko' and not geckoCodesAllowed:
mod.setState( 'unavailable' )
elif settingsFile.alwaysEnableCrashReports and mod.name == "Enable OSReport Print on Crash":
# Queue this to be installed
if mod.state != 'pendingEnable':
mod.setState( 'pendingEnable' )
else:
mod.setState( 'disabled' )
# Finished checking for mods (end of allMods loop).
# Ask to enable regions that appear to have custom code
if requiredDisabledRegions:
if len( requiredDisabledRegions ) == 1:
enableDisabledRegionsMessage = ( "It looks like mods may be installed to the " + requiredDisabledRegions[0]
+ ", which is currently disabled.\n\nWould you like to enable it?" )
else:
enableDisabledRegionsMessage = ( "It looks like mods may be installed to the following custom code regions, which "
'are currently disabled:\n\n' + ', '.join(requiredDisabledRegions) + '\n\nWould you like to enable them?' )
# Prompt with the above message and wait for an answer
enableSuspectRegions = tkMessageBox.askyesno( 'Enable Custom Code Regions?', enableDisabledRegionsMessage )
if enableSuspectRegions:
# Enable the required regions and re-scan
for regionName in requiredDisabledRegions:
overwriteOptions[regionName].set( True )
# Save these region options to file (since this file is read before scanning for codes), and then re-scan
saveOptions()
# Re-scan the Mods Library if that's what this function call was a result of, or if not, just re-check for mods
if modsLibraryNotebook.isScanning:
modsLibraryNotebook.stopToRescan = True
else:
checkForEnabledCodes()
# Check modules that ONLY have standalone functions. Check if they have any functions that are installed
for mod, functionsThisModIncludes in functionOnlyModules:
for functionName in functionsThisModIncludes:
if functionName in standaloneFunctionsInstalled: # This module contains a used function!
mod.setState( 'enabled' ) # Already automatically added to the Standalone Functions table in the Summary Tab
break # only takes one to make it count
else: # loop didn't break; no functions for this mod used
mod.setState( 'disabled' )
# Make sure a new scan isn't queued before finalizing
if not modsLibraryNotebook.stopToRescan:
updateSummaryTabTotals()
# If this is SSBM, enable the Default Game Settings tab and update it with what is currently set in this DOL
if dol.isMelee:
if not dol.revision in settingsTableOffset:
msg( '"' + dol.revision + '" is an unrecognized SSBM revision.' )
return
else:
mainNotebook.tab( 2, state='normal' ) # The Default Game Settings tab
updateDefaultGameSettingsTab( 'fromDOL' )
else: # Default Game Settings can't be set
mainNotebook.tab( 2, state='disabled' ) # The Default Game Settings tab
mainNotebook.select( 0 )
checkForPendingChanges()
def checkForPendingChanges( changesArePending=False, playAudio=False ):
""" Checks for total code space usage (and updates that information on the GUI),
and makes sure that mods do not attempt to write into space (i.e. via static
overwrite or injection site branch) reserved for custom code. Also checks to
make sure that any standalone functions a mod requires can be found. """
if dol.data:
spaceUsed = 0 # In bytes
geckoSpaceUsed = 0 # In bytes; the 16-byte codelist wrapper is accounted for separately where needed below
requiredStandaloneFunctions = set([]) # Unordered, unique entries only
toolTipColor = colorBank['freeSpaceIndicatorGood']
geckoToolTipColor = colorBank['freeSpaceIndicatorGood']
customCodeRegions = getCustomCodeRegions()
# Scan through the mods to check for pending changes and get the total required space, not counting standalone functions.
for mod in genGlobals['allMods']:
if mod.state == 'pendingEnable' or mod.state == 'pendingDisable': changesArePending = True
# Total how much space will be required, and remember the standalone functions needed.
if mod.state == 'pendingEnable' or mod.state == 'enabled':
spaceUsedByThisMod = 0
geckoSpaceUsedByThisMod = 0
for codeChange in getModCodeChanges( mod ):
changeType, customCodeLength, offset, _, _, _ = codeChange
if changeType == 'gecko': # Shouldn't be possible to have these enabled or pendingEnable if the environment doesn't support them.
geckoSpaceUsedByThisMod += customCodeLength
elif changeType == 'static' or changeType == 'injection':
offsetInt = normalizeDolOffset( offset )
# Make sure this code change isn't attempting to write to an area reserved for custom code (injection code/gecko stuff)
for rangeStart, rangeEnd in customCodeRegions:
if offsetInt >= rangeStart and offsetInt < rangeEnd:
if changeType == 'static': typeOfChange = 'a static overwrite'
else: typeOfChange = 'an injection point'
cmsg( '\nThe mod "' + mod.name + '" includes ' + typeOfChange + ' that is made to write into '
"a region that is currently reserved for custom code.\n\nThe offending change's offset "
'is ' + hex(offsetInt) + ', \nwhich falls into the reserved range of ' + hex(rangeStart) + ', ' + hex(rangeEnd) + '.'
"\n\nIf you want to use this mod, you'll need to open the 'Code-Space Options' menu, disable use of this region, "
"and restore it to the game's vanilla code (via the 'Restore' button found there).", 'Overwrite Conflict Detected!' )
mod.setState( 'disabled' )
break
else: # Loop above didn't break; no conflicts
if changeType == 'injection':
spaceUsedByThisMod += customCodeLength
if mod.state == 'disabled': break # A conflict was detected
# If this mod's state has changed, an overwrite conflict was detected. So skip it.
if mod.state == 'disabled': continue
else:
spaceUsed += spaceUsedByThisMod
geckoSpaceUsed += geckoSpaceUsedByThisMod
standaloneFunctionsRequired, missingFunctions = getRequiredStandaloneFunctionNames( mod )
if missingFunctions:
msg( 'These standalone functions required for "' + mod.name + '" could not be found in the Mods Library:\n\n' + grammarfyList(missingFunctions) )
else:
requiredStandaloneFunctions.update( standaloneFunctionsRequired )
# If there are any standalone functions used, calculate the total space required by them.
if requiredStandaloneFunctions != set([]):
for mod in genGlobals['allMods']:
if mod.state != 'unavailable':
for codeChange in getModCodeChanges( mod ):
if codeChange[0] == 'standalone' and codeChange[2] in requiredStandaloneFunctions:
spaceUsed += codeChange[1]
requiredStandaloneFunctions.difference_update( [codeChange[2]] ) # Removes this function name from the set.
if requiredStandaloneFunctions == set([]): break # Stop searching if all functions have been assessed.
else: # This loop wasn't broken, meaning there are standalone functions that weren't found.
msg( 'These standalone functions appear to be missing from the library:\n\n' + '\n'.join(requiredStandaloneFunctions) )
# If gecko codes will be used, make sure there is enough space for them, and add the codehandler and codelist wrapper to the total used space
if geckoSpaceUsed > 0:
spaceTakenByCodehandler = gecko.codehandlerLength
if geckoSpaceUsed + 16 > gecko.spaceForGeckoCodelist:
geckoToolTipColor = colorBank['freeSpaceIndicatorBad']
msg( 'Warning! There is not enough space for all of the Gecko codes selected. These are currently assigned to go '
"into the " + gecko.codelistRegion + ", which provides " + hex( gecko.spaceForGeckoCodelist ) + " bytes of "
"free space. The codes you've selected (plus the list wrapper of 16 bytes) amounts to " + hex( geckoSpaceUsed + 16 ) + ' bytes.' )
else:
geckoSpaceUsed = 0
spaceTakenByCodehandler = 0
# Get the total space available and enabled in the DOL for custom code
maxNonGeckoCustomCodeSpace = 0
for codeArea in getCustomCodeRegions( codelistStartPosShift=geckoSpaceUsed, codehandlerStartPosShift=spaceTakenByCodehandler ):
maxNonGeckoCustomCodeSpace += codeArea[1] - codeArea[0]
# Update the standard free space indicator to reflect the current available DOL space.
freeSpaceIndicator['maximum'] = maxNonGeckoCustomCodeSpace
freeSpaceUsage.set( spaceUsed )
# Ensure there is enough space for the [non-Gecko] custom code for the selected mods
if spaceUsed > maxNonGeckoCustomCodeSpace:
toolTipColor = colorBank['freeSpaceIndicatorBad']
msg( 'Warning! There is not enough space for all of the codes selected. The DOL regions currently enabled for custom code '
'amount to ' + hex(maxNonGeckoCustomCodeSpace) + ' bytes. The codes currently selected require at least ' + hex(spaceUsed) + ' bytes. '
'Not all of these will be able to be saved to your game.' )
# Calculate % used
if maxNonGeckoCustomCodeSpace == 0: percentSpaceUsed = ''
else: percentSpaceUsed = ' (' + str( round( (float(spaceUsed) / maxNonGeckoCustomCodeSpace) * 100, 2 ) ) + '%)' # last number equates to number of decimal places to show
# Add hover messages to the storage fill bars, to show exactly how much space is used
freeSpaceIndicator.toolTip.configure( text=' :: Standard Codes Free Space -\n'
'Total Available: \t' + uHex(maxNonGeckoCustomCodeSpace) + ' bytes\n'
'Total Used: \t' + uHex(spaceUsed) + ' bytes' + percentSpaceUsed + '\n'
'Remaining: \t' + uHex(maxNonGeckoCustomCodeSpace - spaceUsed) + ' bytes', bg=toolTipColor, justify='left' )
# Update the Gecko free space indicator
if 'EnableGeckoCodes' in overwriteOptions and overwriteOptions[ 'EnableGeckoCodes' ].get():
freeGeckoSpaceUsage.set( geckoSpaceUsed ) # Excludes space required by the codelist wrapper (16 bytes).
if not geckoSpaceUsed > 0:
freeGeckoSpaceIndicator.toolTip.configure( text='No Gecko codes\nare installed', bg='#cccccc', justify='center' )
else:
# Calculate % used
percentGeckoSpaceUsed = str( round(( float(geckoSpaceUsed) / (gecko.spaceForGeckoCodelist - 16) ) * 100, 2) )
# Add hover messages to the storage fill bars, to show exactly how much space is used
freeGeckoSpaceIndicator.toolTip.configure( text=' :: Gecko Codes Free Space -\n'
'Total Available: \t' + uHex(gecko.spaceForGeckoCodelist - 16) + ' bytes\n'
'Total Used: \t' + uHex(geckoSpaceUsed) + ' bytes (' + percentGeckoSpaceUsed + '%)\n'
'Remaining: \t' + uHex(gecko.spaceForGeckoCodelist - 16 - geckoSpaceUsed) + ' bytes', bg=geckoToolTipColor, justify='left' )
else: # No Gecko codes; disable the meter.
freeGeckoSpaceUsage.set( 0 )
freeGeckoSpaceIndicator.toolTip.configure( text='Gecko codes\nare disabled', bg='#cccccc', justify='center' )
# If this is SSBM, check if there are any unsaved changes on the Default Game Settings tab
if dol.isMelee and dol.revision in settingsTableOffset:
for widgetSettingID in gameSettingsTable:
newValue = currentGameSettingsValues[widgetSettingID].get()
existingValue = getGameSettingValue( widgetSettingID, 'fromDOL' )
if newValue != existingValue:
changesArePending = True
break
# Enable/disable the buttons for saving changes
if changesArePending:
saveChangesBtn.config( state='normal' )
saveChangesBtn2.config( state='normal' )
else: # No changes to save; disable the save buttons
saveChangesBtn.config( state='disabled' )
saveChangesBtn2.config( state='disabled' )
if playAudio: playSound( 'menuChange' )
updateInstalledModsTabLabel()
def willUserAllowGecko( promptToUser, toggleModStates, parent ):
""" Prompts the user to see if they'd like to enable Gecko codes. The usage-status in
overwriteOptions (the dictionary describing whether or not custom code regions
should be used) is also updated, as well as the options window GUI if it is open.
If toggleModStates is True, mod states will be reevaluated based on the new gecko setting. """
# Check whether gecko codes are allowed, either by checking the option or by prompting the user
if promptToUser:
geckoRegionsEnabled = tkMessageBox.askyesno( '', promptToUser, parent=parent )
overwriteOptions[ 'EnableGeckoCodes' ].set( geckoRegionsEnabled )
else: # The promptToUser string is empty; no message, so default to what the option is already set as
geckoRegionsEnabled = overwriteOptions[ 'EnableGeckoCodes' ].get()
# Set the global region overwrite options. If the options window is open, its checkboxes will change on/off automatically
if geckoRegionsEnabled:
overwriteOptions[ gecko.codelistRegion ].set( True )
overwriteOptions[ gecko.codehandlerRegion ].set( True )
saveOptions()
# If the Code-Space Options window is open, select or deselect the required Gecko region checkboxes
if root.optionsWindow: # The region overwrite options window is currently open
# The checkboxes for Gecko code regions should be disabled and selected if gecko codes are allowed
if geckoRegionsEnabled: widgetsState = 'disabled'
else: widgetsState = 'normal'
# Change the overwriteOption and checkboxes states (these share BooleanVars), and restore button states
for checkbox in root.optionsWindow.checkboxes:
if checkbox.regionName == gecko.codelistRegion or checkbox.regionName == gecko.codehandlerRegion:
checkbox['state'] = widgetsState
checkbox.restoreBtn.config( state=widgetsState )
# Update the GUI for mod states and enabled status.
if toggleModStates:
if geckoRegionsEnabled and dol.data: # Installed mods may already have their states altered, so mod installation needs reevaluation.
checkForEnabledCodes()
else: # The user declined enabling Gecko codes. Disable all Gecko mod modules.
for mod in genGlobals['allMods']:
if mod.type == 'gecko': mod.setState( 'unavailable' )
checkForPendingChanges() # If an installed mod was disabled, re-enable the 'save' buttons so that the user can save changes.
return geckoRegionsEnabled
def updateDefaultGameSettingWidget( widgetControlID, currentValue, checkForChanges=True ):
""" Updates the appearance of an input/button widget used for default game settings.
Called from updateDefaultGameSettingsTab, the save function, and from the
'set to game setting defaults' functions. The checkForChanges variable is used
to moderate a few things in cases where this function is called multiple times in succession. """
# Play a sound effect.
if checkForChanges == True and (widgetControlID == 'stageToggleControl' or widgetControlID == 'itemToggleControl'): # or 'Rumble' in widgetControlID
# This condition only occurs by the stage/item selection window confirm() methods.
playSound( 'menuSelect' )
elif checkForChanges:
playSound( 'menuChange' )
# Nothing more to do if there's no file loaded
if not dol.data:
return
# Get the widget that this is updating
if widgetControlID == 'itemFrequencyDisplay':
widget = root.itemSelectionsWindow.window.winfo_children()[0].winfo_children()[-3].winfo_children()[1] # Window object -> itemFrame -> cellFrame -> OptionMenu
elif 'Rumble' in widgetControlID:
if not root.rumbleSelectionWindow: return
else: widget = root.rumbleSelectionWindow.buttons[widgetControlID]
else: widget = globals()[widgetControlID]
# Get the default value of the widget (within the DOL)
default = getGameSettingValue( widgetControlID[:-7] + 'Setting', 'fromDOL' )
# Color the widget, based on whether its values have been updated.
if str( currentValue ).lower() == default.lower():
if widget.winfo_class() == 'Menubutton': # The dropdown boxes
widget['bg'] = 'SystemButtonFace'
widget['activebackground'] = 'SystemButtonFace'
if widgetControlID == 'itemFrequencyDisplay': itemToggleControl.configure(style='TButton')
elif widget.winfo_class() == 'TButton':
widget.configure(style='TButton')
else: widget['bg'] = 'SystemWindow' # likely is a 'Spinbox'
else: # The setting is new/different
if widget.winfo_class() == 'Menubutton': # The dropdown boxes
widget['bg'] = '#aaffaa'
widget['activebackground'] = '#aaffaa'
if widgetControlID == 'itemFrequencyDisplay': itemToggleControl.configure(style='pendingSave.TButton')
elif widget.winfo_class() == 'TButton':
widget.configure(style='pendingSave.TButton')
else: widget['bg'] = '#aaffaa' # likely is a 'Spinbox'
# Update the rumble options window button
if 'Rumble' in widgetControlID and root.rumbleSelectionWindow:
for button in root.rumbleSelectionWindow.buttons.values():
if button['style'] == 'pendingSave.TButton':
rumbleToggleControl.configure(style='pendingSave.TButton')
break
else: rumbleToggleControl.configure(style='TButton')
if checkForChanges: checkForPendingChanges()
#=============================#
# ~ ~ Saving & File Writing ~ ~ #
#=============================#
def saveOptions():
""" Syncs the overwriteOptions (bools for whether or not each custom code region should be used) to
the "settings" object, and then saves these settings to file, i.e. the options.ini file. """
if not settings.has_section( 'Region Overwrite Settings' ):
settings.add_section( 'Region Overwrite Settings' )
# Update settings with the currently selected checkbox variables.
for regionName, boolvar in overwriteOptions.items():
settings.set( 'Region Overwrite Settings', regionName, str(boolvar.get()) )
# Save the above, and all other options currently saved to the settings object, to theOptionsFile.
with open( genGlobals['optionsFilePath'], 'w') as theOptionsFile:
settings.write( theOptionsFile )
def exportDOL():
""" Wrapper for the Export DOL button. """
if dol.data != '':
createNewDolFile( dol.data, isoFilepath=dol.path )
def createNewDolFile( externalDolData, isoFilepath ):
""" Prompts the user for a new place to save the given DOL data, and saves it to file.
Also updates the default search directory for folder choosing operations,
and the default file format to use for various operations. """
# Prompt for a place to save the file.
isoFilename = os.path.split( isoFilepath )[1]
savePath = tkFileDialog.asksaveasfilename(
title="Where would you like to save the DOL file?",
initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
initialfile='Start (from ' + isoFilename[:-4] + ')',
defaultextension='.dol',
filetypes=[ ("DOL files", ".dol"), ("All files", "*") ]
)
if not savePath: # User canceled the operation
return False
saveDirectory, dolFilename = os.path.split( savePath )
settings.set( 'General Settings', 'defaultSearchDirectory', saveDirectory.encode('utf-8').strip() )
settings.set( 'General Settings', 'defaultFileFormat', 'dol' )
saveOptions()
# Save the dol data to a new file.
try:
with open( savePath, 'wb') as newDol:
newDol.write( bytearray.fromhex(externalDolData) )
msg('Done! \n\nA new DOL file, "' + dolFilename + '", was successfully created.') # here:\n\n' + saveDirectory)
return True
except Exception as e:
msg( "There was a problem while creating the DOL file: " + str(e) )
return False
def askToBackupDOL( isoFilepath ):
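""" Asks the user whether they'd like to back up the DOL currently in the given disc before it's overwritten.
If so, the DOL is read out of the disc (using the DOL offset stored at 0x420 and the TOC offset) and passed
to createNewDolFile. Returns True if no backup was wanted or it was created successfully, or False otherwise. """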
createBackup = tkMessageBox.askquestion( 'Create back-up?',
"Would you like to create a backup of the codes (the DOL file) \nalready in this game?",
icon='warning' )
if createBackup == 'yes':
# Get the data for the DOL currently in the disc.
with open( isoFilepath, 'rb') as isoBinary:
# Get the DOL ISO file offset, length, and data.
isoBinary.seek( 0x0420 )
thisDolOffset = toInt( isoBinary.read(4) ) # Should be 0x1E800 for v1.02
tocOffset = toInt( isoBinary.read(4) )
dolLength = tocOffset - thisDolOffset # Should be 0x438600 for vanilla v1.02
isoBinary.seek( thisDolOffset )
thisDolData = isoBinary.read( dolLength ).encode("hex")
# Save the data collected above
return createNewDolFile( thisDolData, isoFilepath )
else: return True
def importIntoISO():
""" Wrapper for the "Import into ISO" button. Prompts the user for a disc to import the currently
loaded DOL file into, asks if they'd like to back-up the DOL that's currently loaded in that
disc, and then saves the currently loaded DOL file to that disc. """
# Do nothing if no DOL is loaded
if not dol.data:
return
# Prompt for a disc image to import the DOL into.
filepath = tkFileDialog.askopenfilename(
title="Choose a disc image file to open.",
initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
filetypes=[ ('Disc image files', '*.iso *.gcm'), ('all files', '*.*')] )
# Do nothing if the user canceled the above prompt
if not filepath:
return
# Save default search directory setting
settings.set( 'General Settings', 'defaultSearchDirectory', os.path.split(filepath)[0].encode('utf-8').strip() )
saveOptions()
normalizedPath = os.path.normpath( filepath ).replace('{', '').replace('}', '')
# Validate the normalized path
if not os.path.exists( normalizedPath ):
msg( 'There was a problem while attempting to import. Possibly due to the file being deleted or moved.' )
return
if askToBackupDOL( normalizedPath ):
try:
with open( normalizedPath, 'r+b') as isoBinary:
# Get the DOL ISO file offset and length for this disc
isoBinary.seek( 0x0420 )
dolOffset = toInt( isoBinary.read(4) )
tocOffset = toInt( isoBinary.read(4) )
dolLength = tocOffset - dolOffset
if len( dol.data )/2 <= dolLength:
# Save the DOL currently in memory to the target ISO.
isoBinary.seek( dolOffset )
isoBinary.write( bytearray.fromhex(dol.data) )
msg("Done! \n\nThe currently loaded DOL and any changes you've saved have \nbeen added to the selected game.")
else:
msg('Unable to import the DOL. \n\nThe DOL file is larger than the DOL space provided \nin the ISO!', 'Critical Error')
except:
msg("Unable to import the DOL. \n\nBe sure that the file is not being used by another \nprogram (like Dolphin :P).")
def saveAs():
""" Creates a new file (via dialog prompt to user), and then saves the currently
loaded DOL data to it, with the currently selected mods installed. """
originalFilename = os.path.basename( dol.path )
fileNameWithoutExt = originalFilename.rsplit( '.', 1 )[0]
newFilenameSuggestion = fileNameWithoutExt + ' - Copy.dol'
targetFile = tkFileDialog.asksaveasfilename(
title="Where would you like to save the DOL file?",
initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
initialfile=newFilenameSuggestion,
defaultextension='.dol',
filetypes=[ ("DOL files", ".dol"), ("All files", "*") ]
)
if targetFile == '': return # User canceled the operation
# Validate and normalize the path input
targetFile = os.path.normpath( targetFile )
targetDir = os.path.split( targetFile )[0].encode('utf-8').strip()
if not os.path.exists( targetDir ): # Failsafe
msg( '"{}" seems to be an invalid folder path!'.format(targetFile) )
return
# Remember current settings
settings.set( 'General Settings', 'defaultSearchDirectory', targetDir )
settings.set( 'General Settings', 'defaultFileFormat', 'dol' )
saveOptions()
# Redirect the path and file extension for the save operation
dol.path = targetFile
dol.type = 'dol'
# Create a new file and save the currently loaded dol data to it
open( targetFile, 'a' ).close() # Creates an empty file to write to
saveSuccessful = saveCodes()
# Reinitialize with this new file
if saveSuccessful:
readRecievedFile( targetFile, defaultProgramStatus='Changes Saved', checkForCodes=False )
def modifyDol( offsetOfChange, code, modName='' ):
""" This is used by the primary save function. It handles making changes (excluding some reversions)
to the DOL in order to track changes made to it, and notify the user of conflicts.
"Free space" regions for injection code are organized independently and not considered by this."""
newCodeLength = len( code ) / 2 # in bytes
injectionEnd = offsetOfChange + newCodeLength
conflictDetected = False
for regionStart, codeLength, modPurpose in genGlobals['modifiedRegions']:
regionEnd = regionStart + codeLength
if offsetOfChange < regionEnd and regionStart < injectionEnd: # The regions overlap by some amount.
conflictDetected = True
break
if conflictDetected:
if modName: # If no mod name was given (e.g. when reverting code for a mod already found to have a problem), fail silently
cmsg( '\nA conflict (writing overlap) was detected between these two changes:\n\n"' + \
modPurpose + '"\n\tOffset: ' + hex(regionStart) + ', Code End: ' + hex(regionEnd) + '\n\n"' + \
modName + '"\n\tOffset: ' + hex(offsetOfChange) + ', Code End: ' + hex(injectionEnd) + \
'\n\nThese cannot both be enabled. "' + modName + '" will not be enabled. "' + \
modPurpose + '" may need to be reinstalled.', 'Conflicting Changes Detected' )
else:
# No problems; save the code change to the DOL and remember this change
replaceHex( offsetOfChange, code )
genGlobals['modifiedRegions'].append( (offsetOfChange, newCodeLength, modName) )
return conflictDetected
def saveCodes(): # (i.e. the magic)
""" The primary save function, which scans through all mods for mods that should be enabled and
makes their code changes in the DOL's data. This is done in two passes; the first pass restores
vanilla code from disabled mods and collects Gecko codes, and the second pass modifies the DOL
for enabled mods."""
tic = time.clock()
# Validate the loaded DOL file (make sure one was loaded and its file path is still good)
if not dol.path or not dol.data:
msg( 'It appears that no file is loaded, good sir or madam.' )
return False
elif not os.path.exists( dol.path ):
msg( 'The given file could not be found. Possibly due to it being deleted or moved.' )
return False
genGlobals['modifiedRegions'] = [] # Used to track static overwrites and to watch for conflicts
if not onlyUpdateGameSettings.get():
# Check for conflicts among the code regions selected for use
allCodeRegions = getCustomCodeRegions()
if regionsOverlap( allCodeRegions ):
return False
# Notify the user of incompatibility between the crash printout code and the Aux Code Regions, if they're both enabled
if settingsFile.alwaysEnableCrashReports and overwriteOptions[ 'Aux Code Regions' ].get():
for mod in genGlobals['allMods']:
if mod.name == "Enable OSReport Print on Crash":
msg( 'The Aux Code Regions are currently enabled for custom code, however this area is required for the "Enable '
'OSReport Print on Crash" code to function, which is very useful for debugging crashes and is therefore '
'enabled by default. \n\nYou can easily resolve this by one of three ways: 1) disable use of the Aux Code '
'Regions (and restore that area to vanilla code), 2) Open the "settings.py" file in a text editor and set '
'the option "alwaysEnableCrashReports" to False, or 3) remove the "Enable OSReport Print on Crash" code '
"from your library (or comment it out so it's not picked up by MCM).", 'Aux Code Regions Conflict' )
return
# Update the GUI
clearSummaryTab() # Clears the summary tab's lists of installed mods/SFs.
programStatus.set( 'Gathering Preliminary Data...' )
programStatusLabel.update()
standaloneFunctionsUsed = [] # Tracks functions that actually make it into the DOL
spaceForGeckoCodelist = gecko.spaceForGeckoCodelist *2 # This is the max number of characters (nibbles) available in the region designated for the codelist (in settings.py)
geckoCodelistRegionFull = False
removingSomeGeckoCodes = False
geckoCodes = ''
totalModsToInstall = 0
geckoSummaryReport = [] # Like the other summaryReports, but with the mod's name and type prepended to each entry
# Preliminary code-saving pass; restore code from disabled mods, and collect Gecko codes
for mod in genGlobals['allMods']:
if mod.state == 'unavailable': continue
# Revert code for all disabled mods to normal vanilla Melee, to prevent possibilities of them conflicting with mods that will be enabled.
elif mod.state == 'disabled' or mod.state == 'pendingDisable':
# Process each code change tuple (each representing one change in the file) given for this mod for the current game version.
for changeType, customCodeLength, offsetString, originalCode, _, preProcessedCustomCode in getModCodeChanges( mod ):
if changeType == 'static' or changeType == 'injection':
dolOffset = normalizeDolOffset( offsetString )
originalHex = ''.join( originalCode.split() ) # Removes all line breaks & spaces. (Comments should already be removed.)
if validHex( originalHex ): replaceHex( dolOffset, originalHex )
else:
vanillaCode = getVanillaHex( dolOffset, byteCount=customCodeLength )
if not vanillaCode:
msg( 'Warning! Invalid hex was found in the original code for "' + mod.name + '", and no vanilla DOL was found in the Original DOLs folder! '
'Unable to refer to original code, which means that this mod could not be properly uninstalled.' )
else:
replaceHex( dolOffset, vanillaCode )
msg( 'Warning! Invalid hex was found in the original code for "' + mod.name + '". The original code from a vanilla DOL was used instead.')
elif changeType == 'gecko' and mod.state == 'pendingDisable':
removingSomeGeckoCodes = True # This state means they were previously enabled, which also means gecko.environmentSupported = True
# Make sure Gecko mod states are correct
if mod.state == 'pendingDisable':
if mod.type == 'gecko' and not overwriteOptions[ 'EnableGeckoCodes' ].get():
mod.setState( 'unavailable' )
else: mod.setState( 'disabled' )
# Collect the gecko codes into a string
elif mod.state == 'enabled' or mod.state == 'pendingEnable':
if mod.type == 'gecko' and gecko.environmentSupported:
if not geckoCodelistRegionFull:
for changeType, customCodeLength, _, _, _, preProcessedCustomCode in getModCodeChanges( mod ):
if changeType != 'gecko': continue
if preProcessedCustomCode != '' and validHex( preProcessedCustomCode ): # latter check will currently fail with custom syntaxes. #todo?
# Confirm that there is enough space for the Gecko codes selected.
if len( geckoCodes ) + ( customCodeLength * 2 ) + 32 <= spaceForGeckoCodelist: # 32 (i.e. 16 bytes; but counting in nibbles here) added for codelist wrapper.
geckoCodes += preProcessedCustomCode
mod.setState( 'enabled' )
dolOffset = gecko.codelistRegionStart + 8 + len( geckoCodes )/2
geckoSummaryReport.append( (mod.name, mod.type, 'Gecko code', 'gecko', dolOffset, customCodeLength) )
else:
geckoCodelistRegionFull = True
msg( "There's not enough free space for all of the Gecko codes you've selected; "
"there is currently a region of " + uHex(spaceForGeckoCodelist/2) + " bytes designated "
"for Gecko codes.", "The Region for Gecko Codes is Full" )
mod.setState( 'pendingEnable' )
break
else:
msg( 'There was an error while processing the code for "' + mod.name + '" (invalid hex or unable to process custom code).' )
break
totalModsToInstall += 1
# End of preliminary pass.
# Save the selected Gecko codes to the DOL, and determine the adjusted code regions to use for injection/standalone code.
if geckoCodes:
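# Wrap the codelist with the standard Gecko magic words: '00D0C0DE 00D0C0DE' marks the start of the
# codelist, and 'F0000000 00000000' terminates it.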
# Ensure that the codelist length will be a multiple of 4 bytes (so that injection code after it doesn't crash from bad branches).
wrappedGeckoCodes = '00D0C0DE00D0C0DE' + geckoCodes + 'F000000000000000'
finalGeckoCodelistLength = roundTo32( len(wrappedGeckoCodes)/2, base=4 ) # Rounds up to closest multiple of 4 bytes
paddingLength = finalGeckoCodelistLength - len(wrappedGeckoCodes)/2 # in bytes
padding = '00' * paddingLength
wrappedGeckoCodes += padding
# Need to get a new list of acceptable code regions; one that reserves space for the Gecko codelist/codehandler
allCodeRegions = getCustomCodeRegions( codelistStartPosShift=finalGeckoCodelistLength, codehandlerStartPosShift=gecko.codehandlerLength )
else: # No Gecko codes to be installed
wrappedGeckoCodes = ''
if removingSomeGeckoCodes: # Then the regions that were used for them should be restored.
if overwriteOptions[ 'EnableGeckoCodes' ].get():
restoreGeckoParts = True # If they've already enabled these regions, the following changes should be fine and we don't need to ask.
else:
restoreGeckoParts = tkMessageBox.askyesno( 'Gecko Parts Restoration', 'All Gecko codes have been removed. '
'Would you like to restore the regions used for the Gecko codehandler and codelist ({} and {}) to vanilla Melee?'.format(gecko.codehandlerRegion, gecko.codelistRegion) )
if restoreGeckoParts:
if not gecko.environmentSupported: # Failsafe; if removingSomeGeckoCodes, gecko.environmentSupported should be True
msg( 'The configuration for Gecko codes seems to be incorrect; unable to uninstall Gecko codes and restore the Gecko code regions.' )
else:
vanillaHexAtHookOffset = getVanillaHex( gecko.hookOffset )
vanillaCodelistRegion = getVanillaHex( gecko.codelistRegionStart, byteCount=gecko.spaceForGeckoCodelist )
vanillaCodehandlerRegion = getVanillaHex( gecko.codehandlerRegionStart, byteCount=gecko.spaceForGeckoCodehandler )
if not vanillaHexAtHookOffset or not vanillaCodelistRegion or not vanillaCodehandlerRegion:
msg( 'Unable to restore the original hex at the location of the codehandler hook, or the areas for the Gecko codelist and codehandler. '
'This is likely due to a missing original copy of the DOL, which should be here:\n\n' + dolsFolder + '\n\nThe filename should be "[region] [version].dol", '
'for example, "NTSC 1.02.dol". Some mods may have been uninstalled, however you will need to reselect and save the new [non-Gecko] codes that were to be installed.',
'Unable to find an original copy of the DOL' )
# Unexpected failsafe scenario. We'll be ignoring the areas occupied by most of the Gecko stuff (hook and codehandler); it will remain in-place.
# Inefficient, but what're ya gonna do. Should at least be functional.
wrappedGeckoCodes = '00D0C0DE00D0C0DEF000000000000000' # Empty codelist; still need this; maybe? #totest
allCodeRegions = getCustomCodeRegions( codelistStartPosShift=16, codehandlerStartPosShift=gecko.codehandlerLength )
addToInstallationSummary( geckoInfrastructure=True )
else:
# Remove the branch to the codehandler and return this point to the vanilla code instruction.
replaceHex( gecko.hookOffset, vanillaHexAtHookOffset)
# Restore the free space regions designated for the Gecko codelist and codehandler
replaceHex( gecko.codelistRegionStart, vanillaCodelistRegion )
replaceHex( gecko.codehandlerRegionStart, vanillaCodehandlerRegion )
# Zero-out the regions that will be used for custom code.
for regionStart, regionEnd in allCodeRegions:
regionLength = regionEnd - regionStart # in bytes
replaceHex( regionStart, '00' * regionLength )
# If this is Melee and the USB Screenshot regions are used for custom code, NOP the branches that normally reference those regions.
if dol.isMelee and ( ('ScreenshotRegions' in overwriteOptions and overwriteOptions['ScreenshotRegions'].get())
or ('Screenshot Regions' in overwriteOptions and overwriteOptions['Screenshot Regions'].get()) ):
screenshotRegionNopSites = { 'NTSC 1.03': (0x1a1b64, 0x1a1c50), 'NTSC 1.02': (0x1a1b64, 0x1a1c50), 'NTSC 1.01': (0x1a151c, 0x1a1608),
'NTSC 1.00': (0x1a0e1c, 0x1a0f08), 'PAL 1.00': (0x1a2668, 0x1a2754) }
problemWithNop = modifyDol( screenshotRegionNopSites[dol.revision][0], '60000000', 'Screenshot Region NOP' )
problemWithNop2 = modifyDol( screenshotRegionNopSites[dol.revision][1], '60000000', 'Screenshot Region NOP' )
if problemWithNop or problemWithNop2:
msg( 'One or more NOPs for the Screenshot Region could not be added, most likely due to a conflicting mod.' )
else:
nopSummaryReport = []
nopSummaryReport.append( ('Code overwrite', 'static', screenshotRegionNopSites[dol.revision][0], 4) )
nopSummaryReport.append( ('Code overwrite', 'static', screenshotRegionNopSites[dol.revision][1], 4) )
addToInstallationSummary( 'USB Screenshot Region NOP', 'static', nopSummaryReport, isMod=False ) # , iid='screenshotRegionNops'
# Install any Gecko codes that were collected
if geckoCodes: # gecko.environmentSupported must be True
# Replace the codehandler's codelist RAM address
codelistAddress = offsetInRAM( gecko.codelistRegionStart, dol.sectionInfo ) + 0x80000000
codelistAddrBytes = struct.pack( '>I', codelistAddress ) # Packing to bytes as a big-endian unsigned int (4 bytes)
codehandlerCodelistAddr = gecko.codehandler.find( b'\x3D\xE0' ) # Offset of the first instruction to load the codelist address
gecko.codehandler[codehandlerCodelistAddr+2:codehandlerCodelistAddr+4] = codelistAddrBytes[:2] # Update first two bytes
gecko.codehandler[codehandlerCodelistAddr+6:codehandlerCodelistAddr+8] = codelistAddrBytes[2:] # Update last two bytes
# Calculate branch distance from the hook to the destination of the Gecko codehandler's code start
geckoHookDistance = calcBranchDistance( gecko.hookOffset, gecko.codehandlerRegionStart )
# Look for the first instruction in the codehandler, to offset the hook distance, if needed
codehandlerStartOffset = gecko.codehandler.find( b'\x94\x21' )
if codehandlerStartOffset != -1:
geckoHookDistance += codehandlerStartOffset
# Add the Gecko codehandler hook
geckoHook = assembleBranch( 'b', geckoHookDistance )
modifyDol( gecko.hookOffset, geckoHook, 'Gecko Codehandler Hook' )
# Add the codehandler and codelist to the DOL
modifyDol( gecko.codelistRegionStart, wrappedGeckoCodes, 'Gecko codes list' )
modifyDol( gecko.codehandlerRegionStart, hexlify(gecko.codehandler), 'Gecko Codehandler' )
else:
geckoSummaryReport = [] # May have been added to. Clear it.
def allocateSpaceInDol( dolSpaceUsedDict, customCode, customCodeLength ): # The customCode input should be preProcessed
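""" Searches the allowed custom code regions (allCodeRegions) for the first one with enough unused space
for the given code, updates the given usage-tracking dictionary, and returns the DOL offset at which
the code should be placed (or -1 if no region has enough room). """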
customCodeOffset = -1
for i, ( areaStart, areaEnd ) in enumerate( allCodeRegions ):
spaceRemaining = areaEnd - areaStart - dolSpaceUsedDict['area' + str(i + 1) + 'used'] # value in bytes
if customCodeLength <= spaceRemaining:
customCodeOffset = areaStart + dolSpaceUsedDict['area' + str(i + 1) + 'used']
dolSpaceUsedDict['area' + str(i + 1) + 'used'] += customCodeLength # Updates the used area reference.
break
return customCodeOffset # In bytes
# Create a dictionary to keep track of space in the dol that's available for injection codes
dolSpaceUsed = {}
for i in range( len(allCodeRegions) ):
# Add a key/value pair to keep track of how much space is used up for each code range.
key = 'area' + str(i + 1) + 'used'
dolSpaceUsed[key] = 0
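# The resulting dictionary looks like { 'area1used': 0, 'area2used': 0, ... }, with one entry per free-space region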
# Primary code-saving pass.
standaloneFunctions = genGlobals['allStandaloneFunctions'] # Dictionary. Key='functionName', value=( functionOffset, functionCustomCode, functionPreProcessedCustomCode )
modInstallationAttempt = 0
noSpaceRemaining = False
for mod in genGlobals['allMods']:
if mod.state == 'unavailable' or mod.type == 'gecko': continue # Gecko codes have already been processed.
elif mod.state == 'enabled' or mod.state == 'pendingEnable':
modInstallationAttempt += 1
programStatus.set( 'Installing Mods (' + str( round( (float(modInstallationAttempt) / totalModsToInstall) * 100, 1 ) ) + '%)' )
programStatusLabel.update()
problemWithMod = False
dolSpaceUsedBackup = dolSpaceUsed.copy() # This copy is used to revert changes in case there is a problem with saving this mod.
newlyMappedStandaloneFunctions = [] # Tracked so that if this mod fails any part of installation, the standalone functions dictionary can be restored (installation offsets restored to -1).
summaryReport = []
# Allocate space for required standalone functions
if not noSpaceRemaining:
# SFs are not immediately added to the DOL because they too may reference unmapped functions.
requiredStandaloneFunctions, missingFunctions = getRequiredStandaloneFunctionNames( mod )
# Verify that all required standalone functions are available before attempting to allocate space for them.
if missingFunctions:
msg( mod.name + ' cannot be installed because the following standalone functions are missing:\n\n' + grammarfyList(missingFunctions) )
problemWithMod = True
else:
# Map any new required standalone functions to the DOL if they have not already been assigned space.
for functionName in requiredStandaloneFunctions:
functionOffset, functionCustomCode, functionPreProcessedCustomCode = standaloneFunctions[functionName]
if functionName not in standaloneFunctionsUsed: # Has not been added to the dol. Attempt to map it.
customCodeLength = getCustomCodeLength( functionPreProcessedCustomCode )
customCodeOffset = allocateSpaceInDol( dolSpaceUsed, functionPreProcessedCustomCode, customCodeLength )
if customCodeOffset == -1:
# No more space in the DOL. (Mods requiring custom code space up until this one will still be saved.)
noSpaceRemaining = True
problemWithMod = True
msg( "There's not enough free space for all of the codes you've selected. "
"\n\nYou might want to try again after selecting fewer Injection Mods and/or Gecko codes. "
"\n\nThe regions currently designated as free space can be configured and viewed via the 'Code-Space Options' "
'button, and the "settings.py" file.', "The DOL's regions for custom code are full" )
break
else:
# Storage location determined; update the SF dictionary with an offset for it
standaloneFunctions[functionName] = ( customCodeOffset, functionCustomCode, functionPreProcessedCustomCode )
newlyMappedStandaloneFunctions.append( functionName )
else: # This mod uses one or more functions that are already allocated to go into the DOL
sfLength = getCustomCodeLength( functionPreProcessedCustomCode )
summaryReport.append( ('SF: ' + functionName, 'standalone', functionOffset, sfLength) )
if newlyMappedStandaloneFunctions:
# Add this mod's SFs to the DOL
for functionName in newlyMappedStandaloneFunctions:
functionOffset, functionCustomCode, functionPreProcessedCustomCode = standaloneFunctions[functionName]
# Process the function code to remove comments/whitespace, assemble it into bytecode if necessary, and replace custom syntaxes
returnCode, finishedCode = customCodeProcessor.resolveCustomSyntaxes( functionOffset, functionCustomCode, functionPreProcessedCustomCode, mod.includePaths )
if returnCode != 0 and returnCode != 100:
specifics = 'An error occurred while processing this custom code:\n\n' + functionCustomCode + '\n\n'
cmsg( specifics + finishedCode, 'Error 01 Resolving Custom Syntaxes' )
elif finishedCode == '' or not validHex( finishedCode ):
problemWithMod = True
msg( 'There was an error while processing the standalone function "' + functionName + '" for "' + mod.name + '" (missing '
'standalone function code, or invalid hex after processing). This mod will not be installed.\n\n' + finishedCode )
else:
problemWithMod = modifyDol( functionOffset, finishedCode, mod.name + ' standalone function' )
if problemWithMod: break
else: summaryReport.append( ('SF: ' + functionName, 'standalone', functionOffset, len(finishedCode)/2) )
# Add this mod's code changes & custom code (non-SFs) to the dol.
if not problemWithMod:
for codeChange in getModCodeChanges( mod ):
changeType, customCodeLength, offsetString, originalCode, customCode, preProcessedCustomCode = codeChange
if ( changeType == 'standalone' or changeType == 'injection' ) and noSpaceRemaining:
problemWithMod = True
break
if changeType == 'static':
dolOffset = normalizeDolOffset( offsetString )
returnCode, finishedCode = customCodeProcessor.resolveCustomSyntaxes( dolOffset, customCode, preProcessedCustomCode, mod.includePaths )
if returnCode != 0 and returnCode != 100:
specifics = 'An error occurred while processing this custom code:\n\n' + customCode + '\n\n'
cmsg( specifics + finishedCode, 'Error 02 Resolving Custom Syntaxes' )
elif not finishedCode or not validHex( finishedCode ):
msg( 'There was an error while processing code for "' + mod.name + '".' )
problemWithMod = True
break
else:
problemWithMod = modifyDol( dolOffset, finishedCode, mod.name )
if problemWithMod: break
else: summaryReport.append( ('Code overwrite', changeType, dolOffset, customCodeLength) )
elif changeType == 'injection':
injectionSite = normalizeDolOffset( offsetString )
if injectionSite < 0x100 or injectionSite > dol.maxDolOffset:
problemWithMod = True
msg('The injection site, ' + offsetString + ', for "' + mod.name + '" is out of range of the DOL.\n\nThis code will be omitted from saving.')
break
else:
if preProcessedCustomCode == '':
msg( 'There was an error while processing an injection code for "' + mod.name + '".' )
problemWithMod = True
break
else:
# Find a place for the custom code.
customCodeOffset = allocateSpaceInDol( dolSpaceUsed, preProcessedCustomCode, customCodeLength )
if customCodeOffset == -1:
# No more space in the DOL. Injection codes up to this one (and all other changes) will still be saved.
noSpaceRemaining = True
usedSpaceString = ''
for i, regionRange in enumerate( allCodeRegions ):
( start, end ) = regionRange
if i != ( len(allCodeRegions) - 1 ): usedSpaceString = usedSpaceString + hex(start) + ' to ' + hex(end) + ', '
else: usedSpaceString = usedSpaceString + ' and ' + hex(start) + ' to ' + hex(end) + '.'
msg( "There's not enough free space for all of the codes you've selected. "
"\n\nYou might want to try again after selecting fewer Injection Mods. "
"\n\n - - - \n\nThe regions currently designated as "
"free space in the DOL are " + usedSpaceString, "The DOL's regions for custom code are full" )
break
else:
# Calculate the initial branch from the injection site
branch = assembleBranch( 'b', calcBranchDistance( injectionSite, customCodeOffset) )
# If the calculation above was successful, write the created branch into the dol file at the injection site
if branch == -1: problemWithMod = True
else: problemWithMod = modifyDol( injectionSite, branch, mod.name + ' injection site' )
if problemWithMod: break
else: summaryReport.append( ('Branch', 'static', injectionSite, 4) ) # changeName, changeType, dolOffset, customCodeLength
returnCode, preProcessedCustomCode = customCodeProcessor.resolveCustomSyntaxes( customCodeOffset, customCode, preProcessedCustomCode, mod.includePaths )
if returnCode != 0 and returnCode != 100:
specifics = 'An error occurred while processing this custom code:\n\n' + customCode + '\n\n'
cmsg( specifics + preProcessedCustomCode, 'Error 03 Resolving Custom Syntaxes' )
elif preProcessedCustomCode == '' or not validHex( preProcessedCustomCode ):
msg( 'There was an error while replacing custom branch syntaxes in an injection code for "' + mod.name + '".' )
problemWithMod = True
break
else:
# If the return code was 100, the last instruction was created by a custom branch syntax, which was deliberate and we don't want it replaced.
if returnCode == 0:
# Check if the last instruction in the custom code is a branch or zeros. If it is, replace it with a branch back to the injection site.
commandByte = preProcessedCustomCode[-8:][:-6].lower()
if commandByte == '48' or commandByte == '49' or commandByte == '4a' or commandByte == '4b' or commandByte == '00':
branchBack = assembleBranch( 'b', calcBranchDistance( (customCodeOffset + len(preProcessedCustomCode)/2 - 0x8), injectionSite) )
if branchBack == -1:
problemWithMod = True
break
else: # Success; replace the instruction with the branch created above
preProcessedCustomCode = preProcessedCustomCode[:-8] + branchBack
# Add the injection code to the DOL.
problemWithMod = modifyDol( customCodeOffset, preProcessedCustomCode, mod.name + ' injection code' )
if problemWithMod: break
else: summaryReport.append( ('Injection code', changeType, customCodeOffset, customCodeLength) )
if problemWithMod:
# Revert all changes associated with this mod to the game's vanilla code.
for codeChange in getModCodeChanges( mod ):
if codeChange[0] == 'static' or codeChange[0] == 'injection':
offset = normalizeDolOffset( codeChange[2] )
originalCode = codeChange[3]
modifyDol( offset, originalCode ) # Should silently fail if attempting to overwrite what was already changed by another mod (changes existing in modifiedRegions)
# Any extra code left in the 'free space' regions will just be overwritten by further mods or will otherwise not be used.
# Restore the dictionary used for tracking free space in the DOL to its state before making changes for this mod.
dolSpaceUsed = dolSpaceUsedBackup
# Restore the standalone functions dictionary to as it was before this mod (set offsets back to -1).
for functionName in newlyMappedStandaloneFunctions:
functionOffset, functionCustomCode, functionPreProcessedCustomCode = standaloneFunctions[functionName]
standaloneFunctions[functionName] = ( -1, functionCustomCode, functionPreProcessedCustomCode )
# Update the GUI to reflect changes.
mod.setState( 'disabled' )
else:
# Remember that the standalone functions used for this mod were added to the dol.
standaloneFunctionsUsed.extend( newlyMappedStandaloneFunctions )
# Update the GUI to reflect changes.
mod.setState( 'enabled' )
addToInstallationSummary( mod.name, mod.type, summaryReport )
# End of primary code-saving pass. Finish updating the Summary tab.
if geckoSummaryReport:
addToInstallationSummary( geckoInfrastructure=True )
for geckoReport in geckoSummaryReport:
summaryReport = [ geckoReport[2:] ]
addToInstallationSummary( geckoReport[0], geckoReport[1], summaryReport )
# Check for modules composed of only standalone functions; these won't be set as enabled by the above code
if standaloneFunctionsUsed:
for mod in genGlobals['allMods']:
if mod.state == 'unavailable': continue
# Check if this is a SF-only module
functionsOnly = True
functionNames = []
for codeChange in getModCodeChanges( mod ):
if not codeChange[0] == 'standalone':
functionsOnly = False
break
else:
functionNames.append( codeChange[2] )
if functionsOnly:
# Check if this module is used
for funcName in functionNames:
if funcName in standaloneFunctionsUsed:
print 'SF-only module', mod.name, 'is detected in save routine'
mod.setState( 'enabled' ) # Already automatically added to the Standalone Functions table in the Summary Tab
break # only takes one to make it count
else: # loop didn't break; no functions for this mod used
print 'SF-only module', mod.name, 'not detected in save routine'
mod.setState( 'disabled' )
toc = time.clock()
print '\nMod library save time:', toc-tic
# End of the 'not onlyUpdateGameSettings' block
updateSummaryTabTotals()
# If this is SSBM, check the Default Game Settings tab for changes to save. (function execution skips to here if onlyUpdateGameSettings=True)
if dol.isMelee and dol.revision in settingsTableOffset:
for widgetSettingID in gameSettingsTable:
selectedValue = currentGameSettingsValues[widgetSettingID].get()
valueInDOL = getGameSettingValue( widgetSettingID, 'fromDOL' )
if selectedValue != valueInDOL:
tableOffset = gameSettingsTable[widgetSettingID][0]
fileOffset = settingsTableOffset[dol.revision] + tableOffset
# If a game settings tuple has more than 3 entries, the extra entries are strings that must be mapped to the numeric value the game uses.
if widgetSettingID == 'damageRatioSetting': newHex = toHex( round(float(selectedValue) * 10, 1), 2 )
elif widgetSettingID == 'stageToggleSetting' or widgetSettingID == 'itemToggleSetting': newHex = selectedValue
elif widgetSettingID == 'itemFrequencySetting':
if selectedValue == 'None': newHex = 'FF'
else:
for i, item in enumerate( gameSettingsTable['itemFrequencySetting'][4:] ):
if item == selectedValue:
newHex = toHex( i, 2 )
break
elif len( gameSettingsTable[widgetSettingID] ) > 3: # For cases where a string is used, get the index of the current value.
for i, item in enumerate( gameSettingsTable[widgetSettingID][3:] ):
if item == selectedValue:
newHex = toHex( i, 2 )
break
else: newHex = toHex( selectedValue, 2 )
# Set the new values in the DOL and update the gui to show that this has been saved.
decameledName = convertCamelCase( widgetSettingID )
modifyDol( fileOffset, newHex, decameledName + ' (from the Default Game Settings tab)' )
widgetControlID = widgetSettingID[:-7]+'Control'
updateDefaultGameSettingWidget( widgetControlID, selectedValue, False )
# Update the stage and items selections windows (if they're open) with the new values.
if widgetSettingID == 'stageToggleSetting' and root.stageSelectionsWindow: root.stageSelectionsWindow.updateStates( selectedValue, True, False )
elif widgetSettingID == 'itemToggleSetting' and root.itemSelectionsWindow: root.itemSelectionsWindow.updateStates( selectedValue, True, False )
saveSuccessStatus = saveDolToFile()
return saveSuccessStatus
def formatAsGecko( mod, dolRevision, createForGCT ):
""" Formats a mod's code into Gecko code form. If this is for an INI file, human-readable mod-name/author headers and
whitespace are included. If this is for a GCT file, it'll just be pure hex data (though returned as a string). """
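# Gecko code types used below: 00 = 8-bit write, 02 = 16-bit write, 04 = 32-bit write,
# 06 = string write (arbitrary length), C2 = insert ASM (injection).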
# def resolveSfReferences( preProcessedCode ): #todo finish allowing SFs in Gecko codes
# # Check for special syntaxes; only one kind can be adapted to use by Gecko codes (references to SFs)
# if '|S|' not in preProcessedCustomCode: # Contains no special syntaxes
# return preProcessedCustomCode
# for section in preProcessedCustomCode.split( '|S|' ):
# if section.startswith( 'sym__' ): # Contains a function symbol; something like 'lis r3, (<<function>>+0x40)@h'
# resolvedCode = ''
# break
# elif section.startswith( 'sbs__' ): # Something of the form 'bl 0x80001234' or 'bl <function>'; replace the latter with the function code
# if '<' in section and section.endswith( '>' ): # The syntax references a standalone function
# targetFunctionName = section.split( '<' )[1].replace( '>', '' )
# preProcessedSfCode = genGlobals['allStandaloneFunctions'][targetFunctionName][2]
# resolvedSfCode = resolveSfReferences( preProcessedSfCode )
# if
# else: break # Must be a special branch syntax using a RAM address (can't be used since we don't know where this code will be)
# else: # Success; loop above did not break
containsSpecialSyntax = False
codeChanges = []
for changeType, customCodeLength, offset, _, _, preProcessedCustomCode in mod.data[dolRevision]:
# Check for special syntaxes; only one kind can be adapted for use by Gecko codes (references to SFs)
if '|S|' in preProcessedCustomCode:
containsSpecialSyntax = True
break
elif changeType != 'gecko':
ramAddress = normalizeRamAddress( offset, dolObj=originalDols[dolRevision] )
sRamAddress = toHex( ramAddress, 6 ) # Pads a hex string to 6 characters long (extra characters added to left side)
if changeType == 'standalone': # Not supported for Gecko codes
containsSpecialSyntax = True
break
elif changeType == 'static':
if createForGCT:
if customCodeLength == 1:
codeChanges.append( '00{}000000{}'.format(sRamAddress, preProcessedCustomCode) )
elif customCodeLength == 2:
codeChanges.append( '02{}0000{}'.format(sRamAddress, preProcessedCustomCode) )
elif customCodeLength == 4:
codeChanges.append( '04{}{}'.format(sRamAddress, preProcessedCustomCode) )
else:
sByteCount = toHex( customCodeLength, 8 ) # Pads a hex string to 8 characters long (extra characters added to left side)
codeChanges.append( '06{}{}{}'.format(sRamAddress, sByteCount, preProcessedCustomCode) )
else: # Creating a human-readable INI file
if customCodeLength == 1:
codeChanges.append( '00{} 000000{}'.format(sRamAddress, preProcessedCustomCode) )
elif customCodeLength == 2:
codeChanges.append( '02{} 0000{}'.format(sRamAddress, preProcessedCustomCode) )
elif customCodeLength == 4:
codeChanges.append( '04{} {}'.format(sRamAddress, preProcessedCustomCode) )
else:
sByteCount = toHex( customCodeLength, 8 ) # Pads a hex string to 8 characters long (extra characters added to left side)
beautifiedHex = customCodeProcessor.beautifyHex( preProcessedCustomCode )
codeChanges.append( '06{} {}\n{}'.format(sRamAddress, sByteCount, beautifiedHex) )
elif changeType == 'injection':
opCode = preProcessedCustomCode[-8:][:-6].lower() # Of the last instruction
if createForGCT:
# Check the last instruction; it may be a branch placeholder, which should be replaced
if opCode in ( '48', '49', '4a', '4b', '00' ):
preProcessedCustomCode = preProcessedCustomCode[:-8]
customCodeLength -= 4
# Determine the line count and the final bytes that need to be appended
quotient, remainder = divmod( customCodeLength, 8 ) # 8 represents the final bytes per line
sLineCount = toHex( quotient + 1, 8 )
if remainder == 0: # The remainder is how many bytes extra there will be after the 'quotient' number of lines above
preProcessedCustomCode += '6000000000000000'
else:
preProcessedCustomCode += '00000000'
codeChanges.append( 'C2{}{}{}'.format(sRamAddress, sLineCount, preProcessedCustomCode) )
else: # Creating a human-readable INI file
# Check the last instruction; it may be a branch placeholder, which should be replaced
if opCode in ( '48', '49', '4a', '4b', '00' ):
beautifiedHex = customCodeProcessor.beautifyHex( preProcessedCustomCode[:-8] )
customCodeLength -= 4
else:
beautifiedHex = customCodeProcessor.beautifyHex( preProcessedCustomCode )
# Determine the line count and the final bytes that need to be appended
quotient, remainder = divmod( customCodeLength, 8 ) # 8 represents the final bytes per line
sLineCount = toHex( quotient + 1, 8 )
if remainder == 0: # The remainder is how many bytes extra there will be after the 'quotient' number of lines above
beautifiedHex += '\n60000000 00000000'
else:
beautifiedHex += ' 00000000'
codeChanges.append( 'C2{} {}\n{}'.format(sRamAddress, sLineCount, beautifiedHex ) )
elif changeType == 'gecko': # Not much going to be needed here!
if createForGCT:
codeChanges.append( preProcessedCustomCode )
else: # Creating a human-readable INI file
codeChanges.append( customCodeProcessor.beautifyHex( preProcessedCustomCode ) )
if containsSpecialSyntax:
return ''
elif createForGCT:
return ''.join( codeChanges )
else:
return '${} [{}]\n{}'.format( mod.name, mod.auth, '\n'.join(codeChanges) )
def saveGctFile():
""" Simple wrapper for the 'Save GCT' button. Creates a Gecko Code Type file
using a tweak of the function used for creating INI files. """
saveIniFile( createForGCT=True )
def saveIniFile( createForGCT=False ):
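""" Prompts the user for a save location and creates an INI file (or GCT file, if createForGCT=True) from
the currently selected mods, formatting each one as a Gecko code. Mods that lack code changes for the
chosen DOL revision, or that use unsupported special syntax, are skipped and reported to the user. """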
# Check that there are any mods selected
for mod in genGlobals['allMods']:
if mod.state == 'enabled' or mod.state == 'pendingEnable': break
else: # The loop above didn't break, meaning there are none selected
msg( 'No mods are selected!' )
return
# Come up with a default file name for the GCT file
if dol.gameId: initialFilename = dol.gameId
else: initialFilename = 'Codes'
# Set the file type & description
if createForGCT:
fileExt = '.gct'
fileTypeDescription = "Gecko Code Type files"
else:
fileExt = '.ini'
fileTypeDescription = "Code Initialization files"
# Get a save filepath from the user
targetFile = tkFileDialog.asksaveasfilename(
title="Where would you like to save the {} file?".format( fileExt[1:].upper() ),
initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
initialfile=initialFilename,
defaultextension=fileExt,
filetypes=[ (fileTypeDescription, fileExt), ("All files", "*") ]
)
if targetFile == '': return # No filepath; user canceled
# Get the revision for this codeset
if dol.revision:
dolRevision = dol.revision
else: # Not yet known; prompt the user for it
revisionWindow = RevisionPromptWindow( 'Choose the region and game version that this codeset is for:', 'NTSC', '02' )
# Check the values gained from the user prompt (empty strings mean they closed or canceled the window)
if not revisionWindow.region or not revisionWindow.version: return
else:
dolRevision = revisionWindow.region + ' ' + revisionWindow.version
# Load the DOL for this revision (if one is not already loaded).
# This may be needed for formatting the code, in order to calculate RAM addresses
vanillaDol = loadVanillaDol( dolRevision )
if not vanillaDol: return
# Remember current settings
targetFileDir = os.path.split(targetFile)[0].encode('utf-8').strip()
settings.set( 'General Settings', 'defaultSearchDirectory', targetFileDir )
saveOptions()
# Get and format the individual mods
geckoFormattedMods = []
missingTargetRevision = []
containsSpecialSyntax = []
for mod in genGlobals['allMods']:
if mod.state == 'enabled' or mod.state == 'pendingEnable':
if dolRevision in mod.data:
geckoCodeString = formatAsGecko( mod, dolRevision, createForGCT )
if geckoCodeString == '':
containsSpecialSyntax.append( mod.name )
else:
geckoFormattedMods.append( geckoCodeString )
# Update the mod's status (appearance) so the user knows what was saved
mod.setState( 'enabled', 'Saved to ' + fileExt[1:].upper() )
else:
missingTargetRevision.append( mod.name )
# Save the text string to a GCT/INI file if any mods were able to be formatted
if geckoFormattedMods:
if createForGCT:
# Save the hex code string to the file as bytes
hexString = '00D0C0DE00D0C0DE' + ''.join( geckoFormattedMods ) + 'F000000000000000'
with open( targetFile, 'wb' ) as newFile:
newFile.write( bytearray.fromhex(hexString) )
else:
# Save as human-readable text
with open( targetFile, 'w' ) as newFile:
newFile.write( '\n\n'.join(geckoFormattedMods) )
programStatus.set( fileExt[1:].upper() + ' File Created' )
# Notify the user of any codes that could not be included
warningMessage = ''
if missingTargetRevision:
warningMessage = ( "The following mods could not be included, because they do not contain "
"code changes for the DOL revision you've selected:\n\n" + '\n'.join(missingTargetRevision) )
if containsSpecialSyntax:
warningMessage += ( "\n\nThe following mods could not be included, because they contain special syntax (such as Standalone Functions or "
"RAM symbols) which are not currently supported in " + fileExt[1:].upper() + " file creation:\n\n" + '\n'.join(containsSpecialSyntax) )
if warningMessage:
cmsg( warningMessage.lstrip() )
def saveDolToFile():
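""" Writes the currently loaded DOL data back to its source; either into the original disc image
(at the DOL's offset within the ISO/GCM), or over the stand-alone DOL file. Returns True if the
write succeeded, and False otherwise. """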
programStatus.set( '' )
operationSucceded = False
# Encode the file data from a hex string to a bytearray
try:
dol.writeSig()
newDolData = bytearray.fromhex( dol.data )
except:
msg( 'The new DOL data could not be encoded.', 'Data Encoding Error' ) # Likely due to an invalid hex character.
return operationSucceded
# Save the data to disc
if (dol.type == 'iso' or dol.type == 'gcm') and dol.offset != 0:
try:
with open( dol.path, 'r+b') as isoBinary:
isoBinary.seek( dol.offset )
isoBinary.write( newDolData )
operationSucceded = True
except: msg( "Unable to save. \n\nBe sure that the file is not being used by another \nprogram (like Dolphin :P)." )
# Save the data to an external dol file
elif dol.type == 'dol':
try:
with open( dol.path, 'wb') as newDol:
newDol.write( newDolData )
operationSucceded = True
except: msg( "Unable to save. \n\nBe sure that the file is not being used by another program." )
else: msg( "The filepath doesn't seem to be a DOL, ISO or GCM file, or the DOL's offset in the disc could not be determined.", 'Error.' )
# If the process above succeeded, perform one final check for pending changes (in this case, this will mostly just handle various GUI updates)
if operationSucceded:
playSound( 'menuSelect' )
programStatus.set( 'Changes Saved' )
checkForPendingChanges()
else:
programStatus.set( 'Changes Not Saved' )
return operationSucceded
def replaceHex( offset, newHex ):
""" Simple function to replace hex at a specific offset with new hex.
Inputs should be an int and a string, respectively. """
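# e.g. (hypothetical values) replaceHex( 0x100, '60000000' ) would overwrite the 4 bytes at DOL offset 0x100.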
offset = offset * 2 # Doubled to count by nibbles rather than bytes, since the data is just a string.
codeEndPoint = offset + len( newHex )
dol.data = dol.data[:offset] + newHex + dol.data[codeEndPoint:]
def saveCurrentWork( event ):
""" Global program save function for the CTRL-S hotkey. Determines what to save based on the currently selected tab;
if on the Mods Library tab or Game Settings tab, save all current enabled or pending enabled mods and settings to
the currently loaded DOL file, or else if on the Mod Construction tab, just save work on the currently selected mod. """
currentMainNotebookTabSelected = root.nametowidget( mainNotebook.select() )
if currentMainNotebookTabSelected == modsLibraryTab or currentMainNotebookTabSelected == settingsTab:
saveCodes() # Saves current mod selection to the disc or DOL
elif currentMainNotebookTabSelected == constructionTab and len( constructionNotebook.tabs() ) != 0:
# Get the mod constructor object for the currently selected mod, and save its changes to the Mods Library
modConstructor = root.nametowidget( constructionNotebook.select() ).winfo_children()[0]
modConstructor.saveModToLibrary()
def viewDolHex():
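""" Dumps the currently loaded DOL data to a temporary file and opens it in the user's configured hex editor
(prompting for the editor's path if one hasn't been set). The temp file is only a copy; edits made in the
hex editor are not saved back to the loaded DOL or disc. """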
# Check/ask for a specified hex editor to open the file in.
if not os.path.exists( settings.get( 'General Settings', 'hexEditorPath' ) ):
popupWindow = PopupEntryWindow( root, message='Please specify the full path to your hex editor. '
'(Specifying this path only needs to\nbe done once, and can be changed at any time in the settings.ini file.\nIf you have already set this, '
"the path seems to have broken.)\n\nNote that this feature only shows you a copy of the data;\nany changes made will not be saved to the file or disc."
'\n\nPro-tip: In Windows, if you hold Shift while right-clicking on a file, there appears a context menu \n'
"""option called "Copy as path". This will copy the file's full path into your clipboard. Or if it's\na shortcut, """
"""you can quickly get the full file path by right-clicking on the icon and going to Properties.""", title='Set hex editor path' )
hexEditorPath = popupWindow.entryText.replace('"', '')
if hexEditorPath != '':
# Update the path in the settings file and global variable.
settings.set( 'General Settings', 'hexEditorPath', hexEditorPath.encode('utf-8').strip() )
with open( genGlobals['optionsFilePath'], 'w') as theOptionsFile: settings.write( theOptionsFile ) # Updates a pre-existing settings file entry, or just creates a new file.
else:
hexEditorPath = settings.get( 'General Settings', 'hexEditorPath' )
if hexEditorPath != '':
if problemWithDol(): return
try:
filename = os.path.splitext( os.path.basename(dol.path) )[0]
tempFilePath = scriptHomeFolder + '\\bin\\tempFiles\\' + filename + '_temp.dol'
createFolders( os.path.split(tempFilePath)[0] )
# Save the current file data to a temporary file.
with open( tempFilePath, 'wb' ) as newFile:
newFile.write( bytearray.fromhex(dol.data) )
# Open the temp file in the user's editor of choice.
if os.path.exists( hexEditorPath ) and os.path.exists( tempFilePath ):
command = '"{}" "{}"'.format( hexEditorPath, tempFilePath )
subprocess.Popen( command, shell=False, stderr=subprocess.STDOUT, creationflags=0x08000000 ) # shell=True gives access to all shell features.
else:
msg( "Unable to find the specified hex editor program (or new DOL temporary file). You may want to double check the path saved in the options.ini file." )
except Exception as err:
msg( 'There was an unknown problem while creating the DOL temp file.' )
print err
#========================#
# ~ ~ Mods Library Tab ~ ~ #
#========================#
class LabelButton( Label ):
""" Basically a label that acts as a button, using an image and mouse click/hover events.
Used for the edit button and web links. """
def __init__( self, parent, imageName, callback, hovertext='' ):
# Get the images needed
self.nonHoverImage = imageBank.get( imageName + 'Gray' )
self.hoverImage = imageBank.get( imageName )
assert self.nonHoverImage, 'Unable to get the {}Gray image.'.format( imageName )
assert self.hoverImage, 'Unable to get the {} image.'.format( imageName )
# Initialize the label with one of the above images
Label.__init__( self, parent, image=self.nonHoverImage, borderwidth=0, highlightthickness=0, cursor='hand2' )
# Bind click and mouse hover events
self.bind( '<1>', callback )
self.bind( '<Enter>', self.darken )
self.bind( '<Leave>', self.lighten )
if hovertext:
ToolTip( self, hovertext, delay=700, wraplength=800, justify='center' )
def darken( self, event ): self['image'] = self.hoverImage
def lighten( self, event ): self['image'] = self.nonHoverImage
class ModModule( Frame ):
""" Serves as both a GUI element, and a container for all of the information on a given mod. """
def __init__( self, parent, modName, modDesc, modAuth, modData, modType, webLinks, *args, **kw ):
Frame.__init__( self, parent, *args, **kw )
self.name = modName
self.desc = modDesc
self.auth = modAuth
self.data = modData # A dictionary populated by lists of "codeChange" tuples
self.type = modType
self.state = 'disabled'
self.statusText = StringVar()
self.highlightFadeAnimationId = None # Used for the border highlight fade animation
self.webLinks = []
moduleWidth = 520 # Mostly just controls the wraplength of text areas.
# Set the mod "Length" string
if self.type == 'static':
lengthString = ''
else:
arbitraryGameVersions = []
for revision, codeChanges in modData.items():
if revision != 'ALL':
arbitraryGameVersions.extend( codeChanges )
break
if 'ALL' in modData: arbitraryGameVersions.extend( modData['ALL'] )
length = 0
for codeChange in arbitraryGameVersions:
if codeChange[0] != 'static': length += codeChange[1]
lengthString = ' Space' + unichr(160) + 'required:' + unichr(160) + uHex(length) # unichr(160) = no-break space
# Construct the GUI framework.
self.config( relief='groove', borderwidth=3, takefocus=True )
# Row 1: Title, author(s), type, and codelength.
row1 = Frame( self )
Label( row1, text=modName, font=("Times", 11, "bold"), wraplength=moduleWidth-140, anchor='n' ).pack( side='top', padx=(0,36), pady=2 ) # Right-side horizontal padding added for module type image
Label( row1, text=' - by ' + modAuth + lengthString, font=("Verdana", 8), wraplength=moduleWidth-160 ).pack( side='top', padx=(0,36) ) #Helvetica
row1.pack( side='top', fill='x', expand=1 )
# Row 2: Description.
row2 = Frame( self )
Label( row2, text=modDesc, wraplength=moduleWidth-110, padx=8, justify='left' ).pack( side='left', pady=0 )
row2.pack( side='top', fill='x', expand=1 )
# Row 3: Status text and buttons
row3 = Frame( self )
Label( row3, textvariable=self.statusText, wraplength=moduleWidth-90, padx=35, justify='left' ).pack( side='left' )
# Set a background image based on the mod type (indicator on the right-hand side of the mod)
typeIndicatorImage = imageBank.get( self.type + 'Indicator' )
if typeIndicatorImage:
bgImage = Label( self, image=typeIndicatorImage, borderwidth=0, highlightthickness=0 )
bgImage.place( relx=1, x=-10, rely=0.5, anchor='e' )
else:
print 'No image found for "' + self.type + 'Indicator' + '"!'
# Set up a left-click event to all current parts of this module (to toggle the code on/off), before adding any of the other clickable elements.
# All of these widgets are "tagged" to trigger a single event. Binding for the tag is then done in the scanModsLibrary function via 'root.bind_class'
for frame in self.winfo_children():
frame.bindtags( ('moduleClickTag',) + frame.bindtags() )
for label in frame.winfo_children():
label.bindtags( ('moduleClickTag',) + label.bindtags() )
# Add the edit button
LabelButton( row3, 'editButton', inspectMod, 'Edit or configure this mod' ).pack( side='right', padx=(5, 55), pady=6 )
# Validate web page links and create buttons for them
for origUrlString, comments in webLinks: # Items in this list are tuples of (urlString, comments)
urlObj = self.parseUrl( origUrlString )
if not urlObj: continue # A warning will have been given in the above method if this wasn't successfully parsed
self.webLinks.append( (urlObj, comments) )
# Build the button's hover text
domain = urlObj.netloc.split('.')[-2] # The netloc string will be e.g. "youtube.com" or "www.youtube.com"
url = urlObj.geturl()
hovertext = 'Go to the {}{} page...\n{}'.format( domain[0].upper(), domain[1:], url ) # Capitalizes first letter of domain
if comments:
hovertext += '\n\n' + comments.lstrip( ' #' )
# Add the button with its url attached
icon = LabelButton( row3, domain + 'Link', self.openWebPage, hovertext )
icon.url = url
icon.pack( side='right', padx=5, pady=6 )
row3.pack( side='top', fill='x', expand=1 )
def openWebPage( self, event ):
page = event.widget.url
webbrowser.open( page )
def parseUrl( self, origUrlString ):
""" Validates a given URL (string), partly based on a whitelist of allowed domains.
Returns a urlparse object if the url is valid, or None (Python default) if it isn't. """
try:
potentialLink = urlparse( origUrlString )
except Exception as err:
print 'Invalid link detected for "{}": {}'.format( self.name, err )
return
# Check the domain against the whitelist. netloc will be something like "youtube.com" or "www.youtube.com"
if potentialLink.scheme and potentialLink.netloc.split('.')[-2] in ( 'smashboards', 'github', 'youtube' ):
return potentialLink
elif not potentialLink.scheme:
print 'Invalid link detected for "{}" (no scheme): {}'.format( self.name, potentialLink )
else:
print 'Invalid link detected for "{}" (domain not allowed): {}'.format( self.name, potentialLink )
def setState( self, state, specialStatusText='' ):
""" Sets the state of the selected module, by adding a label to the module's Row 3 and
changing the background color of all associated widgets. """
stateColor = 'SystemButtonFace' # The default (disabled) colors.
textColor = '#000'
if state == 'pendingEnable':
stateColor = '#aaffaa'
self.statusText.set( 'Pending Save' )
elif state == 'pendingDisable':
stateColor = '#ee9999'
self.statusText.set( 'Pending Removal' )
elif state == 'enabled':
stateColor = '#77cc77'
self.statusText.set( '' )
elif state == 'unavailable':
stateColor = '#cccccc'
textColor = '#707070'
if self.type == 'gecko':
if not gecko.environmentSupported:
self.statusText.set( '(Gecko codes are unavailable)' )
elif 'EnableGeckoCodes' in overwriteOptions and not overwriteOptions[ 'EnableGeckoCodes' ].get():
self.statusText.set( '(Gecko codes are disabled)' )
else:
self.statusText.set( '' )
elif self.data == {}:
self.statusText.set( 'No code change data found!' )
else:
self.statusText.set( '(Unavailable for your DOL revision)' )
elif state == 'disabled':
self.statusText.set( '' )
else:
raise Exception( 'Invalid mod state given! "' + state + '"' )
if specialStatusText:
if state == 'unavailable':
print self.name, 'made unavailable;', specialStatusText
self.statusText.set( specialStatusText )
# Change the overall background color of the module (adjusting the background color of all associated frames and labels)
for i, frame in enumerate( self.winfo_children() ):
frame['bg'] = stateColor
for j, label in enumerate( frame.winfo_children() ):
label['bg'] = stateColor
if not (i == 2 and j == 0): # This will exclude the status label.
label['fg'] = textColor
self.state = state
def getCurrentModsLibraryTab():
""" Returns the currently selected tab in the Mods Library tab. """
if modsLibraryNotebook.tabs() == ():
return 'emptyNotebook'
else:
selectedTab = root.nametowidget( modsLibraryNotebook.select() ) # This will be the tab frame.
childWidget = selectedTab.winfo_children()[0]
# If the child widget is not a frame, it's a notebook, meaning this represents a directory, and contains more files/tabs within it.
while childWidget.winfo_class() != 'Frame':
if childWidget.tabs() == (): return 'emptyNotebook'
selectedTab = root.nametowidget( childWidget.select() )
childWidget = selectedTab.winfo_children()[0]
return selectedTab
def getModsLibraryTabs():
""" Returns a list of all tab widgets within the Mods Library tab (recusively includes sub-tabs).
Each Frame widget is parent to a Frame, which in turn contains a scrollingFrame (modsPanel), which may contain mods. """
modsTabs = []
def checkTabsInNotebook( notebook ): # Recursive function to check nested child notebooks
for tabName in notebook.tabs():
tabWidget = root.nametowidget( tabName ) # This will be the tab's frame widget
childWidget = tabWidget.winfo_children()[0]
if childWidget.winfo_class() == 'TNotebook': # If it's actually a tab full of mods (repping a file), the class will be "Frame"
# Check whether this notebook is empty.
if childWidget.tabs() == (): continue # Skip this tab
else: checkTabsInNotebook( childWidget )
else: # Found a Frame widget, potentially containing mods (could still be empty)
modsTabs.append( tabWidget )
checkTabsInNotebook( modsLibraryNotebook )
return modsTabs
def realignControlPanel():
""" Updates the alignment/position of the control panel (to the right of mod lists) and the global scroll target. """
rootFrame.resizeTimer = None
# Get the VerticalScrolledFrame of the currently selected tab.
currentTab = getCurrentModsLibraryTab()
if root.nametowidget( mainNotebook.select() ) == modsLibraryTab and currentTab != 'emptyNotebook':
frameForBorder = currentTab.winfo_children()[0]
scrollingFrame = frameForBorder.winfo_children()[0]
# Get the new coordinates for the control panel frame.
root.update() # Force the GUI to update in order to get correct new widget positions & sizes.
root_X_Offset = scrollingFrame.winfo_rootx() - modsLibraryTab.mainRow.winfo_rootx() + scrollingFrame.winfo_width() + 2
root_Y_Offset = scrollingFrame.winfo_rooty() - modsLibraryTab.mainRow.winfo_rooty()
controlPanel.place( x=root_X_Offset, y=root_Y_Offset, width=currentTab.winfo_width() * .35 - 2, height=scrollingFrame.winfo_height() )
else:
controlPanel.place_forget() # Removes the control panel from GUI, without deleting it
def onTabChange( event ):
realignControlPanel()
updateInstalledModsTabLabel()
def updateInstalledModsTabLabel():
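""" Updates the 'Enabled on this tab: x / y' counter label for the currently selected Mods Library tab,
counting mods whose state is 'enabled' or 'pendingEnable' out of all mods on that tab. """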
currentTab = getCurrentModsLibraryTab()
if root.nametowidget( mainNotebook.select() ) == modsLibraryTab and currentTab != 'emptyNotebook':
frameForBorder = currentTab.winfo_children()[0]
scrollingFrame = frameForBorder.winfo_children()[0] # i.e. modsPanel
enabledMods = 0
scrollingFrameChildren = scrollingFrame.interior.winfo_children()
for mod in scrollingFrameChildren:
if mod.state == 'enabled' or mod.state == 'pendingEnable': enabledMods += 1
installedModsTabLabel.set( 'Enabled on this tab: ' + str(enabledMods) + ' / ' + str(len( scrollingFrameChildren )) )
def selectModLibraryTab( targetTabWidget ):
""" Recursively selects all tabs within the Mods Library required to ensure the given target tab is visible. """
def selectTabInNotebook( notebook ): # Will recursively check child notebooks.
found = False
for tabName in notebook.tabs():
tabWidget = root.nametowidget( tabName ) # This will be the tab's frame widget.
# Check if this is the target tab, if not, check if the target tab is in a sub-tab of this tab
if tabWidget == targetTabWidget: found = True
else:
childWidget = tabWidget.winfo_children()[0]
if childWidget.winfo_class() == 'TNotebook': # If it's actually a tab full of mods (repping a file), the class will be "Frame".
# Check whether this notebook is empty. If not, scan it.
if childWidget.tabs() == (): continue # Skip this tab.
else: found = selectTabInNotebook( childWidget )
if found: # Select the current tab
notebook.select( tabWidget )
break
return found
return selectTabInNotebook( modsLibraryNotebook )
def findMod( nameFragment, matchOffset=0 ):
""" Gets the tab widget and index/position of a mod among the mods library tabs, by mod name.
nameFragment may be just a partial match. matchOffset causes the first n matches to be skipped;
positive values denote searching forwards in the library, while negative values search backwards.
Returns: tabWidget, targetMod, index """
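# Usage sketch (the mod name here is hypothetical): findMod( 'shield', matchOffset=1 ) skips the first mod
# whose name contains 'shield' and returns the tab, module widget, and index of the second such match.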
tabWidget = None
targetMod = None
index = -1
modsLibraryTabs = getModsLibraryTabs()
# If searching through the mod library in reverse, simply reverse the tab and mod lists, and normalize the match offset
if matchOffset < 0:
modsLibraryTabs.reverse()
reverseSearch = True
matchOffset = abs( matchOffset ) - 1 # Normalizing to a positive value, so the following loop works as normal
else:
reverseSearch = False
for tab in modsLibraryTabs:
childWidget = tab.winfo_children()[0]
scrollingFrame = childWidget.winfo_children()[0] # i.e. a modsPanel
mods = scrollingFrame.interior.winfo_children()
if reverseSearch: mods.reverse()
# Search for a mod name containing the given string
for i, mod in enumerate( mods ):
if nameFragment.lower() in mod.name.lower():
# Skip this match if we're looking ahead some number of matches
if matchOffset > 0:
matchOffset -= 1
continue
# Confirm this match
targetMod = mod
if not reverseSearch: index = i
else: index = len( mods ) - i # Counting iterations from the end
break
if index != -1:
tabWidget = tab
break
return tabWidget, targetMod, index
def prevModLibraryMatch():
searchBar = modsLibraryTab.winfo_children()[1] # Second child, since this was created/added second
searchBar.matchOffset -= 1
found = searchForMod( None )
# If not found, try wrapping around
if not found:
# Reset the matchOffset and try once more from the end
searchBar.matchOffset = -1
searchForMod( None )
def nextModLibraryMatch():
searchBar = modsLibraryTab.winfo_children()[1] # Second child, since this was created/added second
searchBar.matchOffset += 1
found = searchForMod( None )
# If not found, try wrapping around
if not found:
# Reset the matchOffset and try once more from the beginning
searchBar.matchOffset = 0
searchForMod( None )
def searchForMod( event ):
""" Used by the GUI's CTRL-F / mod seach box feature. Called on each key-up event from the text entry field,
as well as the Prev./Next buttons. """
searchBar = modsLibraryTab.winfo_children()[1]
nameFragment = searchBar.winfo_children()[1].get() # Gets text from the Entry widget
# Reset the match offset if this is a brand new string
if searchBar.lastFound not in nameFragment:
searchBar.matchOffset = 0
searchBar.lastFound = ''
# Seek out a matching mod, using the current search offset
found = goToMod( nameFragment, searchBar.matchOffset ) # Returns True/False for whether the mod was found or not
if found:
searchBar.lastFound = nameFragment
return found
def goToMod( modNameText, matchOffset=0 ):
""" Searches for a mod among the Mods Library tabs, and if found, switches tabs and scrolls to it.
A highlight animation is then played on the target mod to indicate it to the user.
modNameText may be a full or partial mod name to search for. If matchOffset >= 0, the
search function will start from the beginning of the library (leftmost tab, topmost mod).
If matchOffset < 0, the search will be reversed, starting from the end of the library. Larger
offsets (positive or negative) skip that many mod matches, for next/previous functionality. """
if not modNameText:
return False
tabWidget, targetMod, index = findMod( modNameText, matchOffset )
if not tabWidget: # No matching mod found
return False
# Switch to the tab containing the mod
selectModLibraryTab( tabWidget )
# Scroll to the target mod
modsPanel = tabWidget.winfo_children()[0].winfo_children()[0] # tab -> frameForBorder -> modsPanel (VerticalScrolledFrame)
modsPanel.update_idletasks()
yOffset = targetMod.winfo_y() # Relative to the mod's parent, the modsPanel's ".interior" frame
if index != 0: yOffset -= 90 # For aesthetics (centers the mod slightly)
relativeScrollbarOffset = yOffset / float( modsPanel.interior.winfo_height() )
modsPanel.canvas.yview_moveto( relativeScrollbarOffset ) # sliderYPos
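# (yview_moveto expects a fraction between 0 and 1 of the total scrollable height, hence dividing by the interior frame's height above.)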
# Highlight the mod
targetMod['background'] = 'yellow'
# Stop the fade animation if it's already in-progress
if targetMod.highlightFadeAnimationId:
targetMod.after_cancel( targetMod.highlightFadeAnimationId )
# Start a new fade animation on this mod
targetMod.highlightFadeAnimationId = targetMod.after( 1500, lambda mod=targetMod: updateHighlightAnimation(mod) )
return True
def updateHighlightAnimation1( modModule ): # Deprecate in favor of the polynomial function?
""" Fades a color into another color by reducing each of its RGB color channels by an amount proportional to its initial value.
Each channel is reduced by 1 / [steps] of its original starting value, thus all values will reach their respective target values at the same time.
i.e. Example starting color of 20, 40, 40 (RGB), with 5 steps.
Step size will then be 4, 8, 8 for the respective channels.
Step 0: Step 1: Step 2: etc...
R: 20 R: 16 R: 12
G: 40 G: 32 G: 24
B: 40 B: 32 B: 24
"""
# tic=time.clock()
steps = 5.0 # Each iteration shifts each color channel by 1/steps of its remaining distance to the target color (so the change decays exponentially)
# Convert the current color (a name or hex string) to an RGB tuple
encodedCurrentColor = modModule['background']
#print 'updating highlight for "' + modModule.name + '".', 'current color:', encodedCurrentColor
if not encodedCurrentColor.startswith( '#' ):
currentColor = name2rgb( encodedCurrentColor )
else:
currentColor = hex2rgb( encodedCurrentColor )
targetColor = name2rgb( 'SystemButtonFace' )
#print 'current color RGB:', currentColor, ' target color RGB:', targetColor
step_R = ( targetColor[0] - currentColor[0] ) / steps
step_G = ( targetColor[1] - currentColor[1] ) / steps
step_B = ( targetColor[2] - currentColor[2] ) / steps
#print 'step sizes:', step_R, step_G, step_B
# Check if all color channels are similar
colorsCloseInColor = True
newChannelValues = []
for i, colorStep in enumerate( (step_R, step_G, step_B) ):
#if (colorStep <= 0 and colorStep > -3) or (colorStep >= 0 and colorStep < 3):
#print 'iteration', i, ':', abs( targetColor[i] - currentColor[i] )
if abs( targetColor[i] - currentColor[i] ) < steps:
# These color channels are close
newChannelValues.append( targetColor[i] )
#continue
else:
colorsCloseInColor = False
newChannelValues.append( int(round( currentColor[i] + colorStep )) )
#break
if colorsCloseInColor:
modModule['background'] = 'SystemButtonFace'
modModule.highlightFadeAnimationId = None
else:
# One of the color channels didn't match above (it's too different between "from" and "to" colors)
# newR = int(round( currentColor[0] + step_R ))
# newG = int(round( currentColor[1] + step_G ))
# newB = int(round( currentColor[2] + step_B ))
#modModule['background'] = rgb2hex( (currentColor[0] + step_R, currentColor[1] + step_G, currentColor[2] + step_B) )
#modModule['background'] = rgb2hex( (newR, newG, newB) )
modModule['background'] = rgb2hex( newChannelValues )
modModule.highlightFadeAnimationId = modModule.after( 170, lambda mod=modModule: updateHighlightAnimation(mod) )
# toc=time.clock()
# print 'time to update color (method 1):', toc-tic
def updateHighlightAnimation( modModule, x=2 ):
""" Fades a color into another using a polynomial function. Each channel of the color is gradually changed
by a percentage of its initial value, so each color follow the same rate of change. """
# tic=time.clock()
initialColor = getattr( modModule, 'initColor', None )
if not initialColor:
# Convert the current color (a name or hex string) to an RGB tuple
encodedCurrentColor = modModule['background'] # May be a hex string or text color string
if encodedCurrentColor.startswith( '#' ):
initialColor = modModule.initColor = hex2rgb( encodedCurrentColor ) # It's a hex string like "#rrggbb"
else:
initialColor = modModule.initColor = name2rgb( encodedCurrentColor ) # A color name string like "blue"
targetColor = name2rgb( 'SystemButtonFace' )
# Determine the difference between the original color and the target color (i.e. the range of change), for each color channel
diff_R = targetColor[0] - initialColor[0]
diff_G = targetColor[1] - initialColor[1]
diff_B = targetColor[2] - initialColor[2]
#print 'diffs:', diff_R, diff_G, diff_B
# Determine the percentage/progress to apply to the range of color (this formula plots a nice curve that starts fast, but then slows exponentially)
percentChange = ( -0.0123*x**4 + 0.4865*x**3 - 7.3018*x**2 + 50.747*x - 43.45 ) / 100
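# For reference, this polynomial evaluates to roughly 0.33 at x=2 and climbs to roughly 1.0 (100%) by x=13,
# which is why the animation stops after 13 iterations below.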
#print 'percentage of diffs:', diff_R * percentChange, diff_G * percentChange, diff_B * percentChange
# Determine what to change each color channel to (its initial color plus a percentage of the range of change)
new_R = int(round( initialColor[0] + (diff_R * percentChange) ))
new_G = int(round( initialColor[1] + (diff_G * percentChange) ))
new_B = int(round( initialColor[2] + (diff_B * percentChange) ))
#print 'percent change:', percentChange, ' new color channel values:', ( new_R, new_G, new_B )
# End after 13 iterations (this is just before the peak of the polynomial plot above)
if x == 13:
modModule['background'] = 'SystemButtonFace'
modModule.highlightFadeAnimationId = None
else:
x += 1
modModule['background'] = rgb2hex( ( new_R, new_G, new_B ) )
modModule.highlightFadeAnimationId = modModule.after( 170, lambda mod=modModule, iteration=x: updateHighlightAnimation(mod, iteration) )
# toc=time.clock()
# print 'time to update color:', toc-tic
def enterSearchMode( event=None ):
""" Called by the CTRL-F keyboard shortcut; adds a search bar to the GUI so a user may search for mods by name. """
# This function may be called from tabs other than the Mod Library tab. So switch to it if we're not there.
currentTab = root.nametowidget( mainNotebook.select() )
if currentTab != modsLibraryTab: mainNotebook.select( modsLibraryTab )
modsLibraryTabRows = modsLibraryTab.winfo_children()
if len( modsLibraryTabRows ) == 1: # No search bar has been added yet.
paddingY = ( 20, 0 ) # First value = above bar, second value = below bar
searchBar = ttk.Frame( modsLibraryTab, padding=paddingY, height=0 )
searchBar.pack_propagate( False ) # So that the children will not determine/force the widget's size.
ttk.Label( searchBar, image=imageBank['searchIcon'] ).pack( side='left', padx=(11, 10) ) #, text='Search:'
entry = ttk.Entry( searchBar )
entry.pack( side='left', fill='x', expand=True, pady=(1,0) )
closeButton = ttk.Button( searchBar, text='X', width=2, command=lambda: exitSearchMode(None) )
closeButton.pack( side='right', padx=(37, 15), ipadx=3 )
ttk.Button( searchBar, text='Next', width=5, command=nextModLibraryMatch ).pack( side='right', padx=(4, 15), ipadx=3 )
ttk.Button( searchBar, text='Prev.', width=5, command=prevModLibraryMatch ).pack( side='right', padx=(15, 4), ipadx=3 )
searchBar.pack( side='top', fill='x' )
searchBar.matchOffset = 0
searchBar.lastFound = ''
searchBar.update_idletasks()
requestedHeight = closeButton.winfo_reqheight() + paddingY[0] + paddingY[1] # Asking the tallest widget in the bar
# Open the search bar with an animation
try:
totalSteps = 3
stepSize = requestedHeight / totalSteps
searchBarHeight = 0
while searchBarHeight < requestedHeight:
searchBarHeight += stepSize
searchBar.configure( height=searchBarHeight )
realignControlPanel()
except:
pass
else: # Seems there's already a search bar available
# Get the entry widget (iterates over the search bar's widgets)
potentialSearchBar = modsLibraryTabRows[1]
searchBar = potentialSearchBar
entry = None
if getattr( potentialSearchBar, 'matchOffset', None ) is not None:
for widget in potentialSearchBar.winfo_children():
if widget.winfo_class() == 'TEntry':
entry = widget
break
# Move keyboard focus to the entry widget, and bind the event handler
if entry: # Failsafe; should be found in all cases
entry.focus()
entry.bind( '<KeyRelease>', searchForMod )
def exitSearchMode( event ):
""" Triggered by pressing ESC or the 'X' button in the search bar, which then closes and removes the search bar. """
modsLibraryTabRows = modsLibraryTab.winfo_children()
if len( modsLibraryTabRows ) > 1: # The search bar is present
root.update_idletasks() # Make sure the dimensions that will be reported below are correct
searchBar = modsLibraryTabRows[1] # Children are in order that they were attached to parent.
searchBar.winfo_children()[1].unbind( '<KeyRelease>' ) # Unbound (even though the widget will be destroyed soon) to prevent further search events from queuing
searchBarHeight = searchBar.winfo_height()
# Close the search bar with an animation
try:
totalSteps = 3
stepSize = searchBarHeight / totalSteps
while searchBarHeight > 0:
searchBarHeight -= stepSize
searchBar.configure( height=searchBarHeight )
searchBar.update_idletasks()
realignControlPanel()
except: pass
searchBar.destroy()
realignControlPanel()
class ModsLibrarySelector( basicWindow ):
""" Presents a non-modal pop-up window where the user can select a directory to load mods from. """
def __init__( self, rootWindow ):
basicWindow.__init__( self, rootWindow, 'Mods Library Selection', offsets=(160, 100) )
pathsList = getModsFolderPath( getAll=True )
pathIndex = int( settings.get('General Settings', 'modsFolderIndex') )
if pathIndex >= len( pathsList ): pathIndex = 0 # Failsafe/default
self.pathIndexVar = IntVar( value=pathIndex )
self.initialLibraryPath = ''
# Add Radio buttons for each library path option
self.pathsFrame = ttk.Frame( self.window )
for i, path in enumerate( pathsList ):
self.addLibraryOption( i, path )
if i == pathIndex:
self.initialLibraryPath = path
self.pathsFrame.pack( padx=20, pady=(20, 10), expand=True, fill='x' )
ttk.Button( self.window, text='Add Another Library Path', command=self.addPath ).pack( pady=5, ipadx=10 )
buttonsFrame = ttk.Frame( self.window )
self.okButton = ttk.Button( buttonsFrame, text='Ok', command=self.submit )
self.okButton.pack( side='left', padx=10 )
ttk.Button( buttonsFrame, text='Cancel', command=self.cancel ).pack( side='left', padx=10 )
buttonsFrame.pack( pady=10 )
# Done creating the window. Pause execution of the calling function until this window is closed.
rootWindow.wait_window( self.window )
def addLibraryOption( self, buttonIndex, path ):
""" Adds a library option (radio button and label) to the GUI. """
path = os.path.normpath( path ).replace( '"', '' )
emptyWidget = Frame( self.window, relief='flat' ) # A simple workaround so the labelframe can have no text label and no label gap.
optionFrame = ttk.Labelframe( self.pathsFrame, labelwidget=emptyWidget, padding=(20, 4) )
# Disable the radiobutton if the path is invalid
if os.path.exists( path ): state = 'normal'
else: state = 'disabled'
radioBtn = ttk.Radiobutton( optionFrame, text=os.path.basename(path), variable=self.pathIndexVar, value=buttonIndex, state=state )
radioBtn.pack( side='left', padx=(0, 100) )
removeBtn = ttk.Button( optionFrame, text='-', width=3, command=lambda w=optionFrame: self.removePath(w), style='red.TButton' )
ToolTip( removeBtn, text='Remove library', delay=1000, bg='#ee9999' )
removeBtn.pack( side='right' )
optionFrame.path = path
optionFrame.pack( expand=True, fill='x' )
ToolTip( optionFrame, text=path, wraplength=600, delay=1000 )
def addPath( self ):
""" Prompts the user for a mod library directory, and adds it to the GUI. """
# Prompt for a directory to load for the Mods Library.
newSelection = tkFileDialog.askdirectory(
parent=self.window,
title=( 'Choose a folder from which to load your Mods Library.\n\n'
'All mods you intend to save should be in the same library.' ),
initialdir=settings.get( 'General Settings', 'defaultSearchDirectory' ),
mustexist=True )
if newSelection: # Could be an empty string if the user canceled the operation
# Make sure this path isn't already loaded
frameChildren = self.pathsFrame.winfo_children()
for option in frameChildren:
if option.path == os.path.normpath( newSelection ):
return
self.addLibraryOption( len(frameChildren), newSelection )
# Select this library
self.pathIndexVar.set( len(frameChildren) )
def removePath( self, optionFrameToRemove ):
""" Removes a library option from the GUI and updates the index values of the remaining radio buttons. """
selectedIndex = self.pathIndexVar.get()
passedButtonToRemove = False
# If this library button was selected, reset the current selection (default to the first library)
for optionFrame in self.pathsFrame.winfo_children():
radioBtn = optionFrame.winfo_children()[0]
btnIndex = radioBtn['value']
if optionFrame == optionFrameToRemove:
if btnIndex == selectedIndex:
self.pathIndexVar.set( 0 )
elif selectedIndex > btnIndex:
self.pathIndexVar.set( selectedIndex - 1 ) # Decrement the selected index by 1, since there is one less library
passedButtonToRemove = True
continue
# Update the radio button value for all buttons beyond the one being removed
if passedButtonToRemove:
radioBtn['value'] = btnIndex - 1
optionFrameToRemove.destroy() # This also destroys its children, including the radio button
def submit( self ):
# Validate the current selection
index = self.pathIndexVar.get()
if index >= len( self.pathsFrame.winfo_children() ):
msg( 'Invalid Mods Library Selection!' )
return
# Collect the paths and combine them into one string
libraryPaths = []
for option in self.pathsFrame.winfo_children():
libraryPaths.append( option.path )
pathsString = '"' + '","'.join( libraryPaths ) + '"'
# Save the new path(s) to the settings file
settings.set( 'General Settings', 'modsFolderPath', pathsString )
settings.set( 'General Settings', 'modsFolderIndex', str(index) )
saveOptions()
# Set the mod library selector button hover text, and close this window
currentLibraryPath = getModsFolderPath()
librarySelectionLabel.hoverText.set( 'Select Mods Library.\tCurrent library:\n' + currentLibraryPath )
self.window.destroy()
# Reload the Mods Library if a different one was selected
if currentLibraryPath != self.initialLibraryPath:
scanModsLibrary()
def cancel( self, event='' ):
self.window.destroy()
#============================#
# ~ ~ Mod Construction tab ~ ~ #
#============================#
def inspectMod( event, mod=None ):
""" Load a mod from the Mods Library tab into the Mod Construction tab. """
if not mod: # Was called by the button. Get the mod from the event object
mod = event.widget.master.master
# Select the Mod Construction tab and get the currently existing tabs
mainNotebook.select( constructionTab )
# Check if the selected mod already exists (and select that if it does)
for tab in constructionNotebook.tabs():
tabName = constructionNotebook.tab( tab, 'text' )
if tabName != mod.name: continue
tabWidget = root.nametowidget( tab )
existingModConstructor = tabWidget.winfo_children()[0]
if existingModConstructor.sourceFile == mod.sourceFile: # Make sure the library wasn't changed (and it's not just a mod by the same name)
constructionNotebook.select( tab )
break
else: # Loop above didn't break; mod not found
# Create a new tab for the Mod Construction tab, and create a new construction module within it.
newTab = ttk.Frame( constructionNotebook )
constructionNotebook.add( newTab, text=mod.name )
ModConstructor( newTab, mod ).pack( fill='both', expand=1 )
# Bring the new tab into view for the user.
constructionNotebook.select( newTab )
class ModConstructor( Frame ):
def __init__( self, parent, mod=None, *args, **kw ): # Prepare the GUI.
Frame.__init__( self, parent, *args, **kw )
self.saveStatus = StringVar()
self.saveStatus.set( '' )
self.dolVariations = []
self.undoableChanges = False # Flipped for changes that 'undo' doesn't work on. Only reverted by save operation.
self.gameVersionsNotebook = None
if mod: # This mod is being added (edited) from the Mods Library tab.
name = mod.name
auth = mod.auth
self.type = mod.type
modDescription = mod.desc
self.sourceFile = mod.sourceFile
self.fileIndex = mod.fileIndex
self.webLinks = mod.webLinks
self.includePaths = mod.includePaths
else: # This is a new mod.
name = ''
auth = ''
self.type = ''
modDescription = ''
self.sourceFile = ''
self.fileIndex = -1
self.webLinks = []
self.includePaths = [ os.path.join(getModsFolderPath(), '.include'), os.path.join(scriptHomeFolder, '.include') ]
# Top buttons row
self.buttonsFrame = Frame( self )
self.saveStatusLabel = Label( self.buttonsFrame, textvariable=self.saveStatus )
self.saveStatusLabel.pack( side='left', padx=12 )
ttk.Button( self.buttonsFrame, text='Close', command=self.closeMod, width=6 ).pack( side='right', padx=6 )
ttk.Button( self.buttonsFrame, text='Info', command=self.analyzeMod, width=6 ).pack( side='right', padx=6 )
ttk.Button( self.buttonsFrame, text='Import Gecko Code', command=self.importGeckoCode ).pack( side='right', padx=6, ipadx=6 )
self.buttonsFrame.pack( fill='x', expand=0, padx=20, pady=12, ipadx=6, anchor='ne' )
# Title and Author
row1 = Frame( self )
Label( row1, text='Title:' ).pack( side='left', padx=3 )
self.titleEntry = ttk.Entry( row1, width=56 )
self.titleEntry.insert( 0, name )
self.titleEntry.pack( side='left' )
self.initUndoHistory( self.titleEntry, name )
Label( row1, text='Author(s):' ).pack( side='left', padx=(22, 3) )
self.authorsEntry = ttk.Entry( row1, width=36 )
self.authorsEntry.insert( 0, auth )
self.authorsEntry.pack( side='left' )
self.initUndoHistory( self.authorsEntry, auth )
row1.pack( padx=20, pady=0, anchor='n' )
# Starting row 2, with Description
row2 = Frame( self )
descColumn = Frame( row2 ) # Extra frame so we can stack two items vertically in this grid cell in a unique way
Label( descColumn, text='\t\tDescription:' ).pack( anchor='w' )
self.descScrolledText = ScrolledText( descColumn, width=75, height=6, wrap='word', font='TkTextFont' )
self.descScrolledText.insert( 'end', modDescription )
self.descScrolledText.pack( fill='x', expand=True ) # todo: still not actually expanding. even removing width above doesn't help
self.initUndoHistory( self.descScrolledText, modDescription )
descColumn.grid( column=0, row=0 )
# Add the mod-change adder
lineAdders = ttk.Labelframe(row2, text=' Add a type of code change: ', padding=5)
ttk.Button( lineAdders, width=3, text='+', command=lambda: self.addCodeChangeModule( 'static' ) ).grid( row=0, column=0 )
Label( lineAdders, width=21, text='Static Overwrite' ).grid( row=0, column=1 )
ttk.Button( lineAdders, width=3, text='+', command=lambda: self.addCodeChangeModule( 'injection' ) ).grid( row=1, column=0 )
Label( lineAdders, width=21, text='Injection Mod' ).grid( row=1, column=1 )
ttk.Button(lineAdders, width=3, text='+', command=lambda: self.addCodeChangeModule( 'gecko' )).grid(row=2, column=0)
Label(lineAdders, width=21, text='Gecko/WiiRD Code').grid(row=2, column=1)
ttk.Button( lineAdders, width=3, text='+', command=lambda: self.addCodeChangeModule( 'standalone' ) ).grid( row=3, column=0 )
Label( lineAdders, width=21, text='Standalone Function' ).grid( row=3, column=1 )
lineAdders.grid( column=1, row=0 )
# Add the web links
if self.webLinks:
self.webLinksFrame = ttk.Labelframe( row2, text=' Web Links: ', padding=(0, 0, 0, 8) ) # padding = left, top, right, bottom
for urlObj, comments in self.webLinks:
self.addWebLink( urlObj, comments )
# Add the "Edit" button
# addBtn = ttk.Label( row2, text='Edit', foreground='#03f', cursor='hand2' )
# addBtn.bind( '<1>', lambda e, frame=self.webLinksFrame: WebLinksEditor(frame) )
# addBtn.place( anchor='s', relx=.937, rely=1.0, y=4 )
self.webLinksFrame.grid( column=2, row=0 )
row2.pack( fill='x', expand=True, padx=20, pady=(7, 0), anchor='n' )
# Configure the description/code-changes row, so it centers itself and expands properly on window-resize
row2.columnconfigure( 0, weight=3 )
row2.columnconfigure( (1, 2), weight=1 )
# Add the game version tabs and code changes to this module.
if mod:
for dolRevision in mod.data: # dolRevision should be a string like 'NTSC 1.00'
for codeChange in mod.data[dolRevision]:
changeType, customCodeLength, offset, originalCode, customCode, _ = codeChange
self.addCodeChangeModule( changeType, customCodeLength, offset, originalCode, customCode, dolRevision )
def initUndoHistory( self, widget, initialValue ):
""" Adds several attributes and event handlers to the given widget, for undo/redo history tracking. """
widget.undoStateTimer = None
# Create the first undo state for this widget
widget.undoStates = [initialValue]
widget.savedContents = initialValue
widget.undoStatesPosition = 0 # Index into the above list, for traversal of multiple undos/redos
# Provide this widget with event handlers for CTRL-Z, CTRL-Y
widget.bind( "<Control-z>", self.undo )
widget.bind( "<Control-y>", self.redo )
widget.bind( "<Control-Shift-y>", self.redo )
widget.bind( '<KeyRelease>', self.queueUndoStatesUpdate )
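# Undo-history flow: each key release calls queueUndoStatesUpdate, which (re)starts an 800 ms timer;
# when the timer fires, addUndoState snapshots the widget's contents onto widget.undoStates, and
# CTRL-Z / CTRL-Y walk widget.undoStatesPosition backward/forward through those snapshots.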
def initializeVersionNotebook( self ):
""" Creates the notebook used to house code changes, with tabs for each game revision the mod may apply to. """
def gotoWorkshop(): webbrowser.open( 'http://smashboards.com/forums/melee-workshop.271/' )
def shareButtonClicked():
modString = self.buildModString()
thisModName = constructionNotebook.tab( self.master, option='text' )
if modString != '': cmsg( '\n\n\t-==-\n\n' + modString, thisModName, 'left', (('Go to Melee Workshop', gotoWorkshop),) )
# Show the Share / Submit buttons
ttk.Button( self.buttonsFrame, text='Share', command=shareButtonClicked, width=7 ).pack( side='right', padx=6 )
ttk.Button( self.buttonsFrame, text='Save As', command=self.saveModToLibraryAs, width=8 ).pack( side='right', padx=6 )
ttk.Button( self.buttonsFrame, text='Save', command=self.saveModToLibrary, width=7 ).pack( side='right', padx=6 )
offsetView = settings.get( 'General Settings', 'offsetView' ).lstrip()[0].lower()
if offsetView.startswith( 'd' ): buttonText = 'Display RAM Addresses'
else: buttonText = 'Display DOL Offsets'
self.offsetViewBtn = ttk.Button( self.buttonsFrame, text=buttonText, command=self.switchOffsetDisplayType )
self.offsetViewBtn.pack( side='right', padx=6, ipadx=6 )
self.gameVersionsNotebook = ttk.Notebook( self )
self.gameVersionsNotebook.pack( fill='both', expand=1, anchor='n', padx=12, pady=6 )
# New hex field label
self.gameVersionsNotebook.newHexLabel = StringVar()
self.gameVersionsNotebook.newHexLabel.set( '' )
Label( self.gameVersionsNotebook, textvariable=self.gameVersionsNotebook.newHexLabel ).place( anchor='e', y=9, relx=.84 )
# Add the version adder tab.
versionChangerTab = Frame( self.gameVersionsNotebook )
self.gameVersionsNotebook.add( versionChangerTab, text=' + ' )
# Check what original DOLs are available (in the "Original DOLs" folder)
self.dolVariations = listValidOriginalDols()
self.dolVariations.append( 'ALL' )
if len( self.dolVariations ) == 1: # Only 'ALL' is present
Label( versionChangerTab, text='No DOLs were found in the "Original DOLs" folder.\nRead the "ReadMe.txt" file found there for more information.' ).pack( pady=15, anchor='center' )
else: # i.e. Some [appropriately named] dols were found in the dols folder
Label( versionChangerTab, text='Choose the game revision you would like to add changes for:\n(These are based on what you have in the "Original DOLs" folder.)' ).pack( pady=15, anchor='center' )
verChooser = StringVar()
verChooser.set( self.dolVariations[0] )
ttk.OptionMenu( versionChangerTab, verChooser, self.dolVariations[0], *self.dolVariations ).pack()
def addAnotherVersion():
tabName = 'For ' + verChooser.get()
if getTabByName( self.gameVersionsNotebook, tabName ) == -1: # Tab not found.
self.addGameVersionTab( verChooser.get(), True )
# Select the newly created tab.
self.gameVersionsNotebook.select( root.nametowidget(getTabByName( self.gameVersionsNotebook, tabName )) )
else: msg( 'A tab for that game revision already exists.' )
ttk.Button( versionChangerTab, text=' Add ', command=addAnotherVersion ).pack( pady=15 )
def addGameVersionTab( self, dolRevision, codeChangesListWillBeEmpty ):
# If this is the first code change, add the game version notebook.
if not self.gameVersionsNotebook: self.initializeVersionNotebook()
# Decide on a default revision if one is not set, and determine the game version tab name
if not dolRevision:
# This is an empty/new code change being added by the user. Determine a default tab (revision) to add it to.
if len( self.gameVersionsNotebook.tabs() ) == 1: # i.e. only the version adder tab (' + ') exists; no code changes have been added to this notebook yet
# Attempt to use the revision of the currently loaded DOL
if dol.revision in self.dolVariations:
gameVersionTabName = 'For ' + dol.revision
else:
# Attempt to use a default set in the settingsFile, or 'NTSC 1.02' if one is not set there.
defaultRev = getattr( settingsFile, 'defaultRevision', 'NTSC 1.02' ) # Last arg = default in case defaultRevision doesn't exist (old settings file)
if defaultRev in self.dolVariations:
gameVersionTabName = 'For ' + defaultRev
else: gameVersionTabName = 'For ' + self.dolVariations[0]
else: # A tab for code changes already exists. Check the name of the currently selected tab and add to that.
gameVersionTabName = self.gameVersionsNotebook.tab( self.gameVersionsNotebook.select(), "text" )
dolRevision = gameVersionTabName[4:] # Removes 'For '
else: # This code change is being populated automatically (opened from the Library tab for editing)
gameVersionTabName = 'For ' + dolRevision
# Add a new tab for this game version if not already present, and define its GUI parts to attach code change modules to.
versionTab = getTabByName( self.gameVersionsNotebook, gameVersionTabName )
if versionTab != -1: # Found an existing tab by that name. Add this code change to that tab
codeChangesListFrame = versionTab.winfo_children()[0]
else: # Create a new version tab, and add this code change to that
versionTab = Frame( self.gameVersionsNotebook )
indexJustBeforeLast = len( self.gameVersionsNotebook.tabs() ) - 1
self.gameVersionsNotebook.insert( indexJustBeforeLast, versionTab, text=gameVersionTabName )
# Attempt to move focus to the tab for the currently loaded DOL revision, or to this tab if that doesn't exist.
tabForCurrentlyLoadedDolRevision = 'For ' + dol.revision
if dol.revision and tabForCurrentlyLoadedDolRevision in getTabNames( self.gameVersionsNotebook ):
self.gameVersionsNotebook.select( getTabByName(self.gameVersionsNotebook, tabForCurrentlyLoadedDolRevision) )
else: self.gameVersionsNotebook.select( versionTab )
# Add the left-hand column for the code changes
codeChangesListFrame = VerticalScrolledFrame2( versionTab )
codeChangesListFrame.pack( side='left', fill='both', expand=0, padx=3, pady=4 )
# Add the right-hand column, the new hex field (shared for all code changes)
newHexFieldContainer = Frame( versionTab )
newHexFieldContainer['bg'] = 'orange'
self.attachEmptyNewHexField( versionTab, codeChangesListWillBeEmpty )
newHexFieldContainer.pack( side='left', fill='both', expand=1, padx=0 )
# Load a dol for this game version, for the offset conversion function to reference max offsets/addresses, and for dol section info
loadVanillaDol( dolRevision ) # Won't be repeatedly loaded; stored in memory once loaded for the first time
return dolRevision, versionTab
def attachEmptyNewHexField( self, versionTab, codeChangesListWillBeEmpty ):
codeChangesListFrame, newHexFieldContainer = versionTab.winfo_children()
self.clearNewHexFieldContainer( newHexFieldContainer )
newHexField = ScrolledText( newHexFieldContainer, relief='ridge' )
if codeChangesListWillBeEmpty:
newHexField.insert( 'end', '\n\tStart by selecting a\n\t change to add above ^.' )
else:
newHexField.insert( 'end', '\n\t<- You may select a code change\n\t on the left, or add another\n\t change from the list above ^.' )
newHexField.pack( fill='both', expand=1, padx=2, pady=1 )
# Add an event handler for the newHexField. When the user clicks on it, this will autoselect the first code change module if there is only one and it's unselected
def removeHelpText( event, codeChangesListFrame ):
#event.widget.unbind( '<1>' ) # Remove this event handler
codeChangeModules = codeChangesListFrame.interior.winfo_children()
# Check if there's only one code change module and it hasn't been selected
if len( codeChangeModules ) == 1 and codeChangeModules[0]['bg'] == 'SystemButtonFace':
innerFrame = codeChangeModules[0].winfo_children()[0]
innerFrame.event_generate( '<1>' ) # simulates a click event on the module in order to select it
newHexField.bind( '<1>', lambda e: removeHelpText( e, codeChangesListFrame ) )
def addCodeChangeModule( self, changeType, customCodeLength=0, offset='', originalHex='', newHex='', dolRevision='' ):
if not dolRevision: # This is a brand new (blank) code change being added.
self.undoableChanges = True
self.updateSaveStatus( True )
# Create a new notebook for game versions, and/or a tab for this specific revision, if needed.
dolRevision, versionTab = self.addGameVersionTab( dolRevision, False )
codeChangesListFrame, newHexFieldContainer = versionTab.winfo_children()
# Create the GUI's frame which will hold/show this code change
codeChangeModule = Frame( codeChangesListFrame.interior, relief='ridge', borderwidth=3 )
codeChangeModule.pack( fill='both', expand=1, padx=0, pady=0 )
codeChangeModule['bg'] = 'SystemButtonFace'
# Process the offset value, based on the type of code change and current File/RAM offset display mode
processedOffset = offset.replace('<', '').replace('>', '') # Angle bracket removal for function names.
if ( changeType == 'static' or changeType == 'injection' ) and processedOffset:
vanillaDol = loadVanillaDol( dolRevision, False ) # This will report any errors it has
if not validHex( processedOffset.replace( '0x', '' ) ):
msg( 'Warning! Invalid hex was detected in the offset value, "' + offset + '".' )
elif vanillaDol: # Applicable offset for further processing, and Original DOL reference successfully loaded
# Convert the offset value based on the current offset view mode (to display offsets as DOL Offsets or RAM Addresses)
offsetView = settings.get( 'General Settings', 'offsetView' ).lstrip()[0].lower()
if offsetView.startswith( 'd' ): # d for 'dolOffset'; indicates that the value should be shown as a DOL Offset
computedOffset = normalizeDolOffset( processedOffset, dolObj=vanillaDol, returnType='string' ) # Converts to a DOL Offset
else: # The value should be shown as a RAM address
computedOffset = normalizeRamAddress( processedOffset, dolObj=vanillaDol, returnType='string' )
# If there was an error processing the offset, preserve the original value
if computedOffset != -1: # Error output of normalization functions above should be an int regardless of returnType
processedOffset = computedOffset
# Add the passed arguments as properties for this code change [frame] object.
codeChangeModule.changeType = changeType
codeChangeModule.newHexLabelText = ''
codeChangeModule.customCodeLength = StringVar()
codeChangeModule.customCodeLength.set( '(' + uHex(customCodeLength) + ' bytes)' )
# Begin creating the inner part of the module.
innerFrame = Frame( codeChangeModule ) # Used to create a thicker, orange border.
innerFrame.pack( fill='both', expand=0, padx=2, pady=1 )
# Top row; Change type, custom code length, and remove button
topRow = Frame( innerFrame )
topRow.pack( fill='x', padx=6, pady=4 )
Label( topRow, text='Type:' ).pack( side='left' )
Label( topRow, text=presentableModType( changeType, changeType=True ), foreground='#03f' ).pack( side='left' )
ttk.Button( topRow, text='Remove', command=lambda: self.removeCodeChange(codeChangeModule) ).pack( side='right' )
updateBtn = ttk.Button( topRow, image=imageBank['updateArrow'], command=lambda: self.updateModule( codeChangesListFrame.master, codeChangeModule, userActivated=True ) )
updateBtn.pack( side='right', padx=12 )
ToolTip( updateBtn, 'Use this button to update the byte count of custom code, or, once an offset is given, use it to look up and set the original '
'hex value. For static overwrites, both an offset and custom code must be provided to get the original hex value '
'(so that it can be determined how many bytes to look up, since static overwrites can be any length).', delay=1000, wraplength=400, follow_mouse=1 )
Label( topRow, textvariable=codeChangeModule.customCodeLength ).pack( side='left', padx=5 )
# Bottom row; offset and orig hex / injection site / function name
bottomRow = Frame( innerFrame )
# The offset
if changeType == 'static' or changeType == 'injection':
Label( bottomRow, text='Offset:' ).pack( side='left', padx=7 )
codeChangeModule.offset = ttk.Entry( bottomRow, width=11 )
codeChangeModule.offset.insert( 0, processedOffset )
codeChangeModule.offset.pack( side='left', padx=7 )
self.initUndoHistory( codeChangeModule.offset, processedOffset )
codeChangeModule.offset.offsetEntry = True # Flag used in undo history feature
# The original hex, injection site, and function name fields (also labels for the shared new hex field)
if changeType == 'static':
Label( bottomRow, text='Original Hex:' ).pack( side='left', padx=7 )
codeChangeModule.originalHex = ttk.Entry( bottomRow, width=11 )
codeChangeModule.originalHex.insert( 0, originalHex )
codeChangeModule.originalHex.pack( side='left', padx=7, fill='x', expand=1 )
self.initUndoHistory( codeChangeModule.originalHex, originalHex )
codeChangeModule.newHexLabelText = 'New Hex:'
elif changeType == 'injection':
Label( bottomRow, text='Original Hex at\nInjection Site:' ).pack( side='left', padx=7 )
codeChangeModule.originalHex = ttk.Entry( bottomRow, width=11 )
codeChangeModule.originalHex.insert( 0, originalHex )
codeChangeModule.originalHex.pack( side='left', padx=7 )
self.initUndoHistory( codeChangeModule.originalHex, originalHex )
codeChangeModule.newHexLabelText = 'Injection Code:'
elif changeType == 'standalone':
Label( bottomRow, text='Function Name:' ).pack( side='left', padx=7 )
codeChangeModule.offset = ttk.Entry( bottomRow )
codeChangeModule.offset.insert( 0, processedOffset )
codeChangeModule.offset.pack( side='left', fill='x', expand=1, padx=7 )
self.initUndoHistory( codeChangeModule.offset, processedOffset )
codeChangeModule.newHexLabelText = 'Function Code:'
elif changeType == 'gecko':
codeChangeModule.newHexLabelText = 'Gecko Code:'
bottomRow.pack( fill='x', expand=1, pady=4 )
# Attach a newHex entry field (this will be attached to the GUI on the fly when a module is selected)
newHexValue = newHex.replace( '|S|', '' )
codeChangeModule.newHexField = ScrolledText( newHexFieldContainer, relief='ridge' )
codeChangeModule.newHexField.insert( 'end', newHexValue )
self.initUndoHistory( codeChangeModule.newHexField, newHexValue )
# Bind a left-click event to this module (for selecting it radio-button style).
innerFrame.bind( '<1>', self.codeChangeSelected )
for frame in innerFrame.winfo_children():
frame.bind( '<1>', self.codeChangeSelected )
for widget in frame.winfo_children():
if widget.winfo_class() != 'TButton' and widget.winfo_class() != 'TEntry': # Exclude binding from the remove button and input fields.
widget.bind( '<1>', self.codeChangeSelected )
def removeCodeChange( self, codeChangeModule ):
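""" Called by a code change module's 'Remove' button. Deletes the module, resets the shared new-hex
field if that module was the one selected, and removes the version tab (and then the whole notebook,
along with its Save/Share buttons) if nothing is left in them. """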
versionTab = root.nametowidget( self.gameVersionsNotebook.select() )
# Reset the newHex field if it is set for use by the currently selected module.
if codeChangeModule['bg'] == 'orange':
# Detach the previously selected module's newHex field
newHexFieldContainer = versionTab.winfo_children()[1]
self.clearNewHexFieldContainer( newHexFieldContainer )
self.attachEmptyNewHexField( versionTab, False )
# Delete the code change module, and update the save status
codeChangeModule.destroy()
self.undoableChanges = True
self.updateSaveStatus( True )
# If this is the last code change, remove this version tab from the notebook
codeChangesListFrame = codeChangeModule.master
if codeChangesListFrame.winfo_children() == []:
versionTab.destroy()
# If this is the last version tab, remove the Share and Save buttons, and the code changes container (notebook)
if len( self.gameVersionsNotebook.tabs() ) == 1:
self.gameVersionsNotebook.destroy()
self.gameVersionsNotebook = None
for widget in self.buttonsFrame.winfo_children()[-4:]: # Selects only the last (left-most) 4 buttons added
widget.destroy()
else:
self.gameVersionsNotebook.select( self.gameVersionsNotebook.tabs()[0] ) # Select the first tab.
def importGeckoCode( self ):
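""" Prompts the user to paste a Gecko/WiiRD code, parses it, and populates this mod's title, author(s),
description, and code change modules from the result. Only the 04, 06, and C2 opCodes are supported
(the standard Gecko codetypes for 32-bit writes, string writes, and ASM insertion, respectively). """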
# Prompt the user to enter the Gecko code
userMessage = "Copy and paste your Gecko code here.\nCurrently, only opCodes 04, 06, and C2 are supported."
entryWindow = PopupScrolledTextWindow( root, title='Gecko Codes Import', message=userMessage, width=55, height=22, button1Text='Import' )
if not entryWindow.entryText: return
# Get the revision for this code
if dol.revision:
dolRevision = dol.revision
else: # Prompt the user for it
revisionWindow = RevisionPromptWindow( labelMessage='Choose the region and game version that this code is for.', regionSuggestion='NTSC', versionSuggestion='02' )
# Check the values gained from the user prompt (empty strings mean they closed or canceled the window)
if not revisionWindow.region or not revisionWindow.version: return
else:
dolRevision = revisionWindow.region + ' ' + revisionWindow.version
# Parse the gecko code input and create code change modules for the changes
title, newAuthors, description, codeChanges = parseGeckoCode( dolRevision, entryWindow.entryText )
# Set the mod's title and description, if they have not already been set
if not self.getInput( self.titleEntry ):
self.titleEntry.insert( 0, title )
self.updateTabName()
if description:
if self.getInput( self.descScrolledText ): # If there is already some content, add a line break before the new description text
self.descScrolledText.insert( 'end', '\n' )
self.descScrolledText.insert( 'end', description )
# Add any authors not already added
currentAuthors = [ name.strip() for name in self.getInput( self.authorsEntry ).split(',') if name != '' ]
for name in newAuthors.split( ',' ):
if name.strip() not in currentAuthors:
currentAuthors.append( name.strip() )
self.authorsEntry.delete( 0, 'end' )
self.authorsEntry.insert( 'end', ', '.join(currentAuthors) )
# Add the new code change modules
for changeType, customCodeLength, offset, originalCode, customCode, _ in codeChanges:
self.addCodeChangeModule( changeType, customCodeLength, offset, originalCode, customCode, dolRevision )
# Mark that these changes have not been saved yet, and update the status display
self.undoableChanges = True
self.updateSaveStatus( True )
playSound( 'menuChange' )
def getCurrentlySelectedModule( self, versionTab ):
# Get the modules' parent frame widget for the currently selected version tab.
codeChangesListFrame = versionTab.winfo_children()[0].interior
# Loop over the child widgets to search for the currently selected code change module
for codeChangeModule in codeChangesListFrame.winfo_children():
if codeChangeModule['bg'] != 'SystemButtonFace': return codeChangeModule
else: return None
def clearNewHexFieldContainer( self, newHexFieldContainer ): # Ensures all newHex fields are detached from the GUI.
for widget in newHexFieldContainer.winfo_children():
if widget.winfo_manager(): widget.pack_forget()
def codeChangeSelected( self, event ):
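""" Radio-button-style selection handler for code change modules; deselects the previously selected
module, highlights the clicked one (orange), swaps its new-hex text field into the shared container
on the right, and moves keyboard focus to that field. """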
# Get the modules (parent frames) for the current/previously selected code change modules.
versionTab = root.nametowidget( self.gameVersionsNotebook.select() )
# Deselect any previously selected module
previouslySelectedModule = self.getCurrentlySelectedModule( versionTab )
if previouslySelectedModule: previouslySelectedModule['bg'] = 'SystemButtonFace'
# Get the frame widget of the code change module that was selected and change its border color.
codeChangeModule = event.widget
while not hasattr( codeChangeModule, 'changeType' ): codeChangeModule = codeChangeModule.master
codeChangeModule['bg'] = 'orange'
# Detach the previously selected module's newHex field
newHexFieldContainer = versionTab.winfo_children()[1]
self.clearNewHexFieldContainer( newHexFieldContainer )
# Attach the newHex field of the newly selected code change module (all newHex widgets share the same parent)
codeChangeModule.newHexField.pack( fill='both', expand=1, padx=2, pady=1 )
codeChangeModule.newHexField.focus_set() # Ensures that keypresses will go to the newly attached text field, not the old one
def updateTabName( self ):
# The timeout above is done, update the tab for this mod.
newTitle = self.titleEntry.get()
# Get the tab containing this mod; move up the GUI hierarchy until the notebook's tab (a TFrame in this case) is found.
tabFrame = self.titleEntry.master
while tabFrame.winfo_class() != 'TFrame': tabFrame = tabFrame.master
# Modify the tab's title
if newTitle.strip() == '': newTitle = 'New Mod'
if len( newTitle ) > 40: newTitle = newTitle[:40].rstrip() + '...'
constructionNotebook.tab( tabFrame, text=newTitle )
def updateModule( self, versionTab, codeChangeModule, userActivated=False ):
""" Updates the module's code length and original hex values. """
changeType = codeChangeModule.changeType
# Update the module's code length display
newHex = codeChangeModule.newHexField.get( '1.0', 'end' ).strip()
customCodeLength = getCustomCodeLength( newHex, preProcess=True, includePaths=self.includePaths )
codeChangeModule.customCodeLength.set( '(' + uHex(customCodeLength) + ' bytes)' )
# Make sure there is something to update; Gecko modules and SFs don't have an 'originalHex' or 'offset' field to update.
if changeType == 'gecko' or changeType == 'standalone':
return
offsetString = codeChangeModule.offset.get().replace( '0x', '' ).strip()
# Update the original hex value
codeChangeModule.originalHex.delete( 0, 'end' )
if ( changeType == 'static' or changeType == 'injection' ) and offsetString != '':
if not validHex( offsetString.replace( '0x', '' ) ): msg( 'Invalid offset hex.' )
else:
# Determine the length of data to get from the vanilla DOL.
byteCountToRead = 0
if changeType == 'injection': byteCountToRead = 4
elif changeType == 'static' and newHex != '': byteCountToRead = customCodeLength
if byteCountToRead > 0:
# Load a dol for reference if one is not already loaded (there may be no dol currently loaded)
revision = self.gameVersionsNotebook.tab( versionTab, "text" )[4:]
vanillaDol = loadVanillaDol( revision )
# If the given offset is a RAM address, convert it to a DOL offset
dolOffset = -1
if len( offsetString ) == 8 and offsetString.startswith('8'): # 0x has already been removed (always assumed to be a hex value)
offset = int( offsetString[1:], 16 )
if offset >= 0x3100:
if offset < vanillaDol.maxRamAddress: dolOffset = offsetInDOL( offset, vanillaDol.sectionInfo )
else: codeChangeModule.originalHex.insert( 0, 'Address is too big' )
else: codeChangeModule.originalHex.insert( 0, 'Address is too small' )
else:
offset = int( offsetString, 16 )
if offset >= 0x100:
if offset < vanillaDol.maxDolOffset: dolOffset = offset
else: codeChangeModule.originalHex.insert( 0, 'Offset is too big' )
else: codeChangeModule.originalHex.insert( 0, 'Offset is too small' )
# Get the original hex data, and update the GUI with the value.
if dolOffset != -1:
codeChangeModule.originalHex.insert( 0, vanillaDol.data[dolOffset*2:(dolOffset+byteCountToRead)*2].upper() )
def queueUndoStatesUpdate( self, event ):
widget = event.widget
# Ignore certain keys which won't result in content changes. todo: Could probably add some more keys to this
if event.keysym in ( 'Shift_L', 'Shift_R', 'Control_L', 'Control_R', 'Alt_L', 'Alt_R', 'Caps_Lock', 'Left', 'Up', 'Right', 'Down', 'Home', 'End', 'Num_Lock' ):
return
# Cancel any pending undo state; instead, wait until there is a sizable state/chunk of data to save
if widget.undoStateTimer: widget.after_cancel( widget.undoStateTimer )
# Start a new timer, to grab all changes within a certain time for one undo state
widget.undoStateTimer = widget.after( 800, lambda w=widget: self.addUndoState(w) )
def addUndoState( self, widget ):
""" This is responsible for adding new undo/redo states to the undoStates list.
If this is called and the widget's contents are the same as the current history state,
then non-editing keys were probably pressed, such as an arrow key or CTRL/SHIFT/etc, in
which case this method will just exit without creating a new state. """
if widget.undoStateTimer: # This method may have been called before this fired. Make sure it doesn't fire twice!
widget.after_cancel( widget.undoStateTimer )
widget.undoStateTimer = None
# Get what's currently in the input field
if widget.winfo_class() == 'Text': # Includes ScrolledText widgets
currentContents = widget.get( '1.0', 'end' ).strip().encode( 'utf-8' )
else: # Pulling from an Entry widget
currentContents = widget.get().strip().encode( 'utf-8' )
# Check if the widget's contents have changed since the last recorded undo state. If they haven't, there's nothing more to do here.
if currentContents == widget.undoStates[widget.undoStatesPosition]: # Comparing against the current history state
return
# Discard any [potential redo] history beyond the current position
widget.undoStates = widget.undoStates[:widget.undoStatesPosition + 1]
# Add the new current state to the undo history, and set the current history position to it
widget.undoStates.append( currentContents )
widget.undoStatesPosition = len( widget.undoStates ) - 1
# Limit the size of the undo list (commented out due to currently irreconcilable issues (index out of range) with savedContents/undoPosition) todo: fix?
# if len( widget.undoStates ) > 10:
# widget.undoStates = widget.undoStates[-10:] # Forgets the earliest states
# Check if this is a code modification offset (DOL Offset or RAM Address); Adds some special processing
if getattr( widget, 'offsetEntry', False ):
versionTab = root.nametowidget( self.gameVersionsNotebook.select() )
codeChangeModule = widget.master
while not hasattr( codeChangeModule, 'changeType' ): codeChangeModule = codeChangeModule.master
self.updateModule( versionTab, codeChangeModule )
# If this is the mod title, also update the name of this tab
elif widget == self.titleEntry: self.updateTabName()
# Update the save status
if currentContents != widget.savedContents:
self.updateSaveStatus( True )
else: # Can't be sure of changes, so perform a more thorough check
self.updateSaveStatus( self.changesArePending() )
def undo( self, event ):
widget = event.widget
# If changes are pending addition to the widget's undo history, process them immediately before proceeding
if widget.undoStateTimer: self.addUndoState( widget )
# Decrement the current position within the undo history
if widget.undoStatesPosition > 0:
widget.undoStatesPosition -= 1
self.restoreUndoState( widget )
return 'break' # Meant to prevent the keypresses that triggered this from propagating to other events
def redo( self, event ):
widget = event.widget
# If changes are pending addition to the widget's undo history, process them immediately before proceeding
if widget.undoStateTimer: self.addUndoState( widget )
# Increment the current position within the undo history
if widget.undoStatesPosition < len( widget.undoStates ) - 1:
widget.undoStatesPosition += 1
self.restoreUndoState( widget )
return 'break' # Meant to prevent the keypresses that triggered this from propagating to other events
def restoreUndoState( self, widget ):
newContents = widget.undoStates[widget.undoStatesPosition]
# Update the contents of the widget
if widget.winfo_class() == 'Text':
entryPoint = '1.0'
else: entryPoint = 0
widget.delete( entryPoint, 'end' )
widget.insert( 'end', newContents )
# If there's a difference between the current input and the saved state, there are certainly pending changes.
if newContents != widget.savedContents:
self.updateSaveStatus( True )
else: # Can't be sure of changes, so perform a more thorough check
self.updateSaveStatus( self.changesArePending() )
def getInput( self, widget ): # Gets a text or entry widget's current input while forcing undo history updates.
# If changes are pending addition to the widget's undo history, process them immediately before proceeding
if widget.undoStateTimer: self.addUndoState( widget )
if widget.winfo_class() == 'Text': # Includes ScrolledText widgets
return widget.get( '1.0', 'end' ).strip().encode( 'utf-8' )
else: # Pulling from an Entry widget
return widget.get().strip().encode( 'utf-8' )
def widgetHasUnsavedChanges( self, widget ):
currentContents = self.getInput( widget )
# Compare the current contents to what was last saved
if currentContents != widget.savedContents:
return True
else:
return False
def changesArePending( self ):
if self.undoableChanges: # This is a flag for changes that have been made which "undo" can't be used for
return True
# Check all current code change modules for changes
if self.gameVersionsNotebook:
for windowName in self.gameVersionsNotebook.tabs()[:-1]: # Ignores versionChangerTab.
versionTab = root.nametowidget( windowName )
codeChangesListFrame = versionTab.winfo_children()[0].interior
for codeChangeModule in codeChangesListFrame.winfo_children():
# Check the 'New Hex' (i.e. new asm/hex code) field
if self.widgetHasUnsavedChanges( codeChangeModule.newHexField ):
return True
# Get module label and Entry input field widgets
innerFrame = codeChangeModule.winfo_children()[0]
bottomFrameChildren = innerFrame.winfo_children()[1].winfo_children()
# Check widgets which have undo states for changes
for widget in bottomFrameChildren:
if getattr( widget, 'undoStates', False ) and self.widgetHasUnsavedChanges( widget ):
return True
# Check the title/author/description for changes
for widget in ( self.titleEntry, self.authorsEntry, self.descScrolledText ):
if self.widgetHasUnsavedChanges( widget ): return True
return False
def updateSaveStatus( self, changesPending, message='' ):
if changesPending:
if not message: message = 'Unsaved'
self.saveStatusLabel['foreground'] = '#a34343' # Shade of red
else:
self.saveStatusLabel['foreground'] = '#292' # Shade of green
self.saveStatus.set( message )
def buildModString( self ):
""" Builds a string to store/share this mod in MCM's usual code format. """
# Get the text input for the title and author(s)
title = self.getInput( self.titleEntry )
if title == '': title = 'This Mod Needs a Title!'
authors = self.getInput( self.authorsEntry )
if authors == '': authors = '??'
# Validate the text input to make sure it's only basic ASCII
for subject in ('title', 'authors'):
stringVariable = eval( subject ) # Basically turns the string into one of the variables created above
if not isinstance( stringVariable, str ):
typeDetected = str(type(stringVariable)).replace("<type '", '').replace("'>", '')
msg('The input needs to be ASCII, however ' + typeDetected + \
' was detected in the ' + subject + ' string.', 'Input Error')
return ''
# Add the description and any web links (basically parsed with descriptions, so they're included together)
description = '\n' + self.getInput( self.descScrolledText ).encode( 'ascii', 'ignore' ) # The encode filters out any non-ascii characters. todo: warn user?
webLinksLines = []
for urlObj, comments in self.webLinks: # Comments should still have the '#' character prepended
webLinksLines.append( '<{}>{}'.format(urlObj.geturl(), comments) )
if webLinksLines: description += '\n' + '\n'.join( webLinksLines )
if description == '\n': description = '' # Remove the line break if a description is not present.
modString = ['\n' + title + description + '\n[' + authors + ']']
codeChangesHeader = 'Revision ---- DOL Offset ---- Hex to Replace ---------- ASM Code -'
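		# Example of a short static overwrite line built below (hypothetical values):
		#   1.02 --------- 0x3F9E20 ----- 7C0802A6 -> 60000000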
addChangesHeader = False
if self.gameVersionsNotebook:
# Gather information from each game version tab
for windowName in self.gameVersionsNotebook.tabs()[:-1]: # Ignores versionChangerTab.
versionTab = root.nametowidget( windowName )
codeChangesListFrame = versionTab.winfo_children()[0].interior
revision = self.gameVersionsNotebook.tab( versionTab, option='text' )[4:]
addVersionHeader = True
# Iterate the codeChanges for this game version
for codeChangeModule in codeChangesListFrame.winfo_children():
changeType = codeChangeModule.changeType
# Saves the new hex field (if this module is selected), and updates the module's code length and original hex values
self.updateModule( versionTab, codeChangeModule )
# Get the newHex input.
newHex = self.getInput( codeChangeModule.newHexField )
if newHex.startswith('0x'): newHex = newHex[2:] # Don't want to replace all instances
if changeType == 'static' or changeType == 'injection':
addChangesHeader = True
# Get the offset and original (vanilla game code) hex inputs. Remove whitespace & hex identifiers.
offset = ''.join( self.getInput(codeChangeModule.offset).split() ).replace('0x', '')
originalHex = ''.join( self.getInput(codeChangeModule.originalHex).split() ).replace('0x', '')
# Check that inputs have been given, and then convert the ASM to hex if necessary.
if offset == '' or newHex == '':
msg( 'There are offset or new code values missing\nfor some ' + revision + ' changes.' )
self.gameVersionsNotebook.select( windowName )
return ''
elif originalHex == '' or not validHex( originalHex ):
msg( 'There are Original Hex values missing or invalid among the ' + revision + ' changes, and new values could not be determined. An offset may be incorrect.' )
return ''
# Create the beginning of the line (revision header, if needed, with dashes).
headerLength = 13
if addVersionHeader:
lineHeader = revision + ' ' + ('-' * ( headerLength - 1 - len(revision) )) # extra '- 1' for the space after revision
addVersionHeader = False
else: lineHeader = '-' * headerLength
# Build a string for the offset portion
numOfDashes = 8 - len( offset )
dashes = '-' * ( numOfDashes / 2 ) # if numOfDashes is 1 or less (including negatives), this will be an empty string
if numOfDashes % 2 == 0: # If the number of dashes left over is even (0 is even)
offsetString = dashes + ' ' + '0x' + offset + ' ' + dashes
else: # Add an extra dash at the end (the int division above rounds down)
offsetString = dashes + ' ' + '0x' + offset + ' ' + dashes + '-'
# Build a string for a standard (short) static overwrite
if changeType == 'static' and len(originalHex) <= 16 and newHex.splitlines()[0].split('#')[0] != '': # Last check ensures there's actually code, and not just comments/whitespace
modString.append( lineHeader + offsetString + '---- ' + originalHex + ' -> ' + newHex )
# Long static overwrite
elif changeType == 'static':
modString.append( lineHeader + offsetString + '----\n\n' + customCodeProcessor.beautifyHex(originalHex) + '\n\n -> \n\n' + newHex + '\n' )
# Injection mod
else:
modString.append( lineHeader + offsetString + '---- ' + originalHex + ' -> Branch\n\n' + newHex + '\n' )
elif changeType == 'gecko':
if addVersionHeader: modString.append( revision + '\n' + newHex + '\n' )
else: modString.append( newHex + '\n' )
elif changeType == 'standalone':
functionName = self.getInput( codeChangeModule.offset )
# Check that a function name was given, and then convert the ASM to hex if necessary.
if functionName == '':
msg( 'A standalone function among the ' + revision + ' changes is missing a name.' )
self.gameVersionsNotebook.select( windowName )
return ''
elif ' ' in functionName:
msg( 'Function names may not contain spaces. Please rename those for ' + revision + ' and try again.' )
self.gameVersionsNotebook.select( windowName )
return ''
# Add the name wrapper and version identifier
functionName = '<' + functionName + '> ' + revision
# Assemble the line string with the original and new hex codes.
modString.append( functionName + '\n' + newHex + '\n' )
if addChangesHeader:
modString.insert( 1, codeChangesHeader ) # Inserts right after the initial title/author/description string
return '\n'.join( modString )
def _getRequiredStandaloneFunctionNames( self ):
""" Gets the names of all standalone functions this mod requires. """
if not self.gameVersionsNotebook or not self.gameVersionsNotebook.tabs(): # Latter check is a failsafe; not expected
return [], []
functionNames = set()
missingFunctions = set()
# Iterate over each game revision
for windowName in self.gameVersionsNotebook.tabs()[:-1]: # Ignores versionChangerTab.
versionTab = root.nametowidget( windowName )
codeChangesListFrame = versionTab.winfo_children()[0].interior
# Iterate the codeChanges for this game version
for codeChangeModule in codeChangesListFrame.winfo_children():
# Get the code for this code change, and pre-process it
newHex = self.getInput( codeChangeModule.newHexField )
preProcessedCustomCode = customCodeProcessor.preAssembleRawCode( newHex, self.includePaths )[1]
functionNames, missingFunctions = parseCodeForStandalones( preProcessedCustomCode, functionNames, missingFunctions )
# If the current code change module is a standalone function, make sure it's not in the set of "missing" SFs
if codeChangeModule.changeType == 'standalone' and missingFunctions:
thisFunctionName = self.getInput( codeChangeModule.offset )
					missingFunctions.discard( thisFunctionName ) # discard() rather than remove(), in case this SF isn't actually among the missing ones
return list( functionNames ), list( missingFunctions ) # functionNames will also include those that are missing
def saveModToLibraryAs( self ):
""" Saves a mod to a new location. Wrapper for the saveModToLibrary method. """
# Remember the original values for save location (in case they need to be restored), and then clear them
originalSourceFile = self.sourceFile
originalFileIndex = self.fileIndex
originalMajorChanges = self.undoableChanges
# Clear the save location properties for this mod. This forces the save function to default to creating a new file
self.sourceFile = ''
self.fileIndex = -1
self.undoableChanges = True
# Attempt to save the mod
		saveSucceeded = self.saveModToLibrary()
# If the save failed, restore the previous save location & status
		if not saveSucceeded:
self.sourceFile = originalSourceFile
self.fileIndex = originalFileIndex
self.undoableChanges = originalMajorChanges
def saveModToLibrary( self ):
# Make sure there are changes to be saved
if not self.changesArePending():
self.updateSaveStatus( False, 'No Changes to be Saved' )
self.saveStatusLabel['foreground'] = '#333' # Shade of gray
return
# Confirm that this mod (if this is something from the Mod Library tab) is not currently installed
modInstalled = False
if self.sourceFile and self.fileIndex != -1:
for mod in genGlobals['allMods']:
# Compare by file and file position
if mod.fileIndex == self.fileIndex and mod.sourceFile == self.sourceFile:
# Double-check that this is the correct mod by name
if mod.name != self.getInput( self.titleEntry ):
userConfirmedEquivalency = tkMessageBox.askyesno( 'Confirm Mod Equivalency', 'Is this the same mod as "' + mod.name + '" in the Mods Library? ', parent=root )
if not userConfirmedEquivalency:
# Mod save location lost; inform the user and prepare to save this as a new mod.
msg( "The Mods Library reference to the mod being edited has been lost (likely due to a duplicate mod, or manual modification/reordering of "
'the Mods Library after opening this mod in the Mod Construction tab). Without verification of this reference, the original '
'save location of this mod cannot be certain. To prevent the overwrite of a different mod, this must be saved as a new mod.' )
self.saveModToLibraryAs()
return
# Equivalent mod found. Check whether it's installed in a game disc/DOL
if mod.state == 'enabled' or mod.state == 'pendingDisable':
modInstalled = True
break
if modInstalled:
msg( 'Mods must be uninstalled from your game before modifying them.' )
self.updateSaveStatus( True, 'Unable to Save' )
return False
modString = self.buildModString() # This will also immediately update any pending undo history changes
if not modString: # Failsafe, assuming the method above will report any possible errors
self.updateSaveStatus( True, 'Unable to Save' )
return False
saveSuccessful = False
userCanceled = False
# Prompt for a file to save to if no source file is defined. (Means this was newly created in the GUI, or this is a 'SaveAs' operation)
if self.sourceFile == '':
targetFile = tkFileDialog.askopenfilename(
title="Choose the file you'd like to save the mod to (it will be appended to the end).",
initialdir=getModsFolderPath(),
filetypes=[ ('Text files', '*.txt'), ('all files', '*.*') ]
)
if not targetFile:
userCanceled = True
else:
# Append this mod to the end of the target Mod Library text file (could be a new file, or an existing one).
try:
if os.path.exists( targetFile ):
# Set this mod's save location so that subsequent saves will automatically go to this same place, and check if a separator is needed.
self.sourceFile = targetFile
with open( targetFile, 'r') as modFile:
fileContents = modFile.read()
if fileContents:
self.fileIndex = len( fileContents.split( '-==-' ) )
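							# e.g. a file already holding 3 mods (2 '-==-' separators) splits into 3 parts, so this mod becomes index 3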
modString = '\n\n\n\t-==-\n\n' + modString # Prepends a separator to this mod.
else: self.fileIndex = 0
# Save the mod to the file.
with open( targetFile, 'a' ) as libraryFile:
libraryFile.write( modString )
saveSuccessful = True
except Exception as err:
print 'Unable to save the mod to the library file:'
print err
# Rebuild the include paths list, using this new file for one of the paths
modsFolderIncludePath = os.path.join( getModsFolderPath(), '.include' )
self.includePaths = [ os.path.dirname(targetFile), modsFolderIncludePath, os.path.join(scriptHomeFolder, '.include') ]
else: # A source file is already defined.
if self.fileIndex == -1: msg( "The index (file position) for this mod could not be determined. Try using 'Save As' to save this mod to the end of a file." )
else:
targetFile = self.sourceFile
# Make sure the target file can be found, then replace the mod within it with the new version.
if not os.path.exists( targetFile ):
msg( 'Unable to locate the Library file:\n\n' + targetFile )
else:
try:
# Pull the mods from their library file, and separate them.
with open( targetFile, 'r') as modFile:
mods = modFile.read().split( '-==-' )
# Replace the old mod, reformat the space in-between mods, and recombine the file's text.
mods[self.fileIndex] = modString
mods = [code.strip() for code in mods] # Removes the extra whitespace around mod strings.
completedFileText = '\n\n\n\t-==-\n\n\n'.join( mods )
with open( targetFile, 'w' ) as libraryFile:
libraryFile.write( completedFileText )
saveSuccessful = True
except: pass
if saveSuccessful:
# Iterate over all code change modules to update their save state history (saved contents updated to match current undo history index)
for windowName in self.gameVersionsNotebook.tabs()[:-1]: # Ignores versionChangerTab.
versionTab = root.nametowidget( windowName )
codeChangesListFrame = versionTab.winfo_children()[0].interior
# Iterate the codeChanges for this game version
for codeChangeModule in codeChangesListFrame.winfo_children():
if getattr( codeChangeModule.newHexField, 'undoStates', None ): # May return an empty list, in which case the contents haven't been modified
# Update the 'New Hex' (i.e. new asm/hex code) field
codeChangeModule.newHexField.savedContents = codeChangeModule.newHexField.get( '1.0', 'end' ).strip().encode( 'utf-8' )
# Get module label and Entry input field widgets
innerFrame = codeChangeModule.winfo_children()[0]
bottomFrameChildren = innerFrame.winfo_children()[1].winfo_children()
# Update those widgets (which have undo states) with changes
for widget in bottomFrameChildren:
if getattr( widget, 'undoStates', False ):
widget.savedContents = widget.get().strip().encode( 'utf-8' )
# Update the saved contents for the standard input fields
for widget in ( self.titleEntry, self.authorsEntry, self.descScrolledText ):
if getattr( widget, 'undoStates', None ): # May return an empty list, in which case the contents haven't been modified
if widget.winfo_class() == 'Text': # Includes ScrolledText widgets
widget.savedContents = widget.get( '1.0', 'end' ).strip().encode( 'utf-8' )
else: # Pulling from an Entry widget
widget.savedContents = widget.get().strip().encode( 'utf-8' )
# Update the flag used for tracking other undoable changes
self.undoableChanges = False
# Give an audio cue and update the GUI
playSound( 'menuSelect' )
self.updateSaveStatus( False, 'Saved To Library' )
# Reload the library to get the new or updated codes.
scanModsLibrary( playAudio=False )
else:
if userCanceled:
self.updateSaveStatus( True, 'Operation Canceled' )
else:
self.updateSaveStatus( True, 'Unable to Save' )
return saveSuccessful
def analyzeMod( self ):
""" Collects information on this mod, and shows it to the user in a pop-up text window. """ # todo: switch to joining list of strings for efficiency
# Assemble the header text
analysisText = 'Info for "' + self.getInput( self.titleEntry ) + '"'
analysisText += '\nProgram Classification: ' + self.type
if os.path.isdir( self.sourceFile ):
analysisText += '\nSource Folder: ' + self.sourceFile
elif os.path.exists( self.sourceFile ):
analysisText += '\nSource File: ' + self.sourceFile
analysisText += '\nPosition in file: ' + str( self.fileIndex )
else:
analysisText += '\nSource: Unknown! (The source path could not be found)'
availability = []
totalCodeChanges = 0
changeTypeTotals = {}
# Check for all code changes (absolute total, and totals per type).
if self.gameVersionsNotebook:
for windowName in self.gameVersionsNotebook.tabs()[:-1]: # Skips tab used for adding revisions
versionTab = root.nametowidget( windowName ) # Gets the actual widget for this tab
availability.append( self.gameVersionsNotebook.tab( versionTab, option='text' )[4:] )
codeChangesListFrame = versionTab.winfo_children()[0].interior
codeChangeModules = codeChangesListFrame.winfo_children()
totalCodeChanges += len( codeChangeModules )
# Count the number of changes for each change type, and get the standalone functions required
for codeChange in codeChangeModules:
if codeChange.changeType not in changeTypeTotals: changeTypeTotals[codeChange.changeType] = 1
else: changeTypeTotals[codeChange.changeType] += 1
# Construct strings for what code change types are present, and their counts
analysisText += '\nCode changes available for ' + grammarfyList( availability ) + '\n\nCode Changes (across all game versions):'
for changeType in changeTypeTotals:
analysisText += '\n - ' + presentableModType( changeType ) + 's: ' + str( changeTypeTotals[changeType] )
# Check for required SFs
requiredStandaloneFunctions, missingFunctions = self._getRequiredStandaloneFunctionNames()
if not requiredStandaloneFunctions: analysisText += '\n\nNo Standalone Functions required.'
else:
analysisText += '\n\nRequired Standalone Functions:\n' + '\n'.join( requiredStandaloneFunctions )
if missingFunctions:
analysisText += '\n\nThese functions are required, but are not packaged with this mod:\n' + '\n'.join( missingFunctions )
analysisText += '\n\n\tInclude Paths:\n' + '\n'.join( self.includePaths )
# Present the analysis to the user in a new window
cmsg( analysisText, 'Info for "' + self.getInput( self.titleEntry ) + '"', 'left' )
def switchOffsetDisplayType( self ):
""" Goes through each of the code changes, and swaps between displaying offsets as DOL offsets or RAM addresses.
These are still tracked as strings, so they will be saved in the chosen form in the library files as well. """
# Toggle the saved variable and the button text (and normalize the string since this is exposed to users in the options.ini file)
offsetView = settings.get( 'General Settings', 'offsetView' ).lstrip().lower()
if offsetView.startswith( 'd' ):
offsetView = 'ramAddress'
buttonText = 'Display DOL Offsets'
else:
offsetView = 'dolOffset'
buttonText = 'Display RAM Addresses'
self.offsetViewBtn['text'] = buttonText
# Iterate over the tabs for each game version
for tabWindowName in self.gameVersionsNotebook.tabs()[:-1]: # Skips tab for adding revisions
versionTab = root.nametowidget( tabWindowName )
revision = self.gameVersionsNotebook.tab( versionTab, option='text' )[4:]
codeChangesListFrame = versionTab.winfo_children()[0].interior
codeChangeModules = codeChangesListFrame.winfo_children()
# Iterate over the code change modules for this game version
for i, module in enumerate( codeChangeModules, start=1 ):
if module.changeType == 'static' or module.changeType == 'injection':
# Get the current value
origOffset = module.offset.get().strip().replace( '0x', '' )
# Validate the input
if not validHex( origOffset ):
if len( self.gameVersionsNotebook.tabs() ) > 2: # Be specific with the revision
msg( 'Invalid hex detected for the offset of code change {} of {}: "{}".'.format(i, revision, module.offset.get().strip()), 'Invalid Offset Characters' )
else: # Only one revision to speak of
msg( 'Invalid hex detected for the offset of code change {}: "{}".'.format(i, module.offset.get().strip()), 'Invalid Offset Characters' )
continue
# Convert the value
if offsetView == 'dolOffset':
newOffset = normalizeDolOffset( origOffset, dolObj=originalDols[revision], returnType='string' )
else: newOffset = normalizeRamAddress( origOffset, dolObj=originalDols[revision], returnType='string' )
					# Validate the converted output value
if newOffset == -1: continue # A warning would have been given to the user from the above normalization function
# Display the value
module.offset.delete( 0, 'end' )
module.offset.insert( 0, newOffset )
# Remember the current display option
settings.set( 'General Settings', 'offsetView', offsetView )
saveOptions()
def closeMod( self ):
		# If there are unsaved changes, prompt whether the user really wants to close.
if self.saveStatusLabel['foreground'] == '#a34343':
sureToClose = tkMessageBox.askyesno( 'Unsaved Changes', "It looks like this mod may have some changes that haven't been saved to your library. Are you sure you want to close it?" )
if not sureToClose: return
self.master.destroy()
def addWebLink( self, urlObj, comments, modChanged=True ):
url = urlObj.geturl()
domain = urlObj.netloc.split( '.' )[-2] # i.e. 'smashboards' or 'github'
destinationImage = imageBank[ domain + 'Link' ]
# Add an image for this link
imageLabel = ttk.Label( self.webLinksFrame, image=destinationImage )
imageLabel.urlObj = urlObj
imageLabel.comments = comments
imageLabel.pack()
# Add hover tooltips
hovertext = 'The {}{} page...\n{}'.format( domain[0].upper(), domain[1:], url )
if comments: hovertext += '\n\n' + comments
ToolTip( imageLabel, hovertext, delay=700, wraplength=800, justify='center' )
# if modChanged: # If false, this is being called during initialization
# self.undoableChanges = True
class WebLinksEditor( basicWindow ):
""" Tool window to add/remove web links in the Mod Construction tab. """
def __init__( self, webLinksFrame ):
basicWindow.__init__( self, root, 'Web Links Editor', offsets=(160, 100), resizable=True, topMost=False )
Label( self.window, text=('Web links are useful sources of information, or links to places of discussion.'
'\nCurrent valid destinations are SmashBoards, GitHub, and YouTube.'), wraplength=480 ).grid( columnspan=3, column=0, row=0, padx=40, pady=10 )
# Iterate over the widgets in the 'Web Links' frame in the other window, to create new widgets here based on them
row = 1
for label in webLinksFrame.winfo_children():
# Get info from this label widget
url = label.urlObj.geturl()
domain = label.urlObj.netloc.split( '.' )[-2] # i.e. 'smashboards' or 'github'
destinationImage = imageBank[ domain + 'Link' ]
# Can't clone the label, so make a new one
imageLabel = ttk.Label( self.window, image=destinationImage )
imageLabel.grid( column=0, row=row )
# Add a text field entry for the URL
urlEntry = ttk.Entry( self.window, width=70 )
urlEntry.insert( 0, url )
urlEntry.grid( column=1, row=row, sticky='ew' )
# Add the comment below this, if there is one, and the button to add/edit them
if label.comments:
# Add the add/edit comment button
commentsBtn = Button( self.window, text='Edit Comment', anchor='center' )
commentsBtn.grid( column=2, row=row, padx=14, pady=5 )
# Add the comment
commentLabel = ttk.Label( self.window, text=label.comments.lstrip(' #') )
commentLabel.grid( column=1, row=row+1, sticky='new' )
# Add the remove button
removeBtn = Button( self.window, text='-', width=2 )
ToolTip( removeBtn, 'Remove', delay=700 )
removeBtn.grid( column=3, row=row, padx=(0, 14) )
row += 2
else:
# Add the add/edit comment button
commentsBtn = Button( self.window, text='Add Comment', anchor='center' )
commentsBtn.grid( column=2, row=row, padx=14, pady=5 )
# Add the remove button
removeBtn = Button( self.window, text='-', width=2 )
ToolTip( removeBtn, 'Remove', delay=700 )
removeBtn.grid( column=3, row=row, padx=(0, 14) )
row += 1
# Add the 'Add' and 'OK / Cancel' buttons
buttonsFrame = ttk.Frame( self.window )
ttk.Button( buttonsFrame, text='OK' ).pack( side='left', padx=(0, 15) )
ttk.Button( buttonsFrame, text='Cancel', command=self.close ).pack( side='right' )
buttonsFrame.grid( columnspan=4, column=0, row=row, pady=14 )
addBtn = Button( self.window, text='Add Link', width=12 )
addBtn.grid( columnspan=2, column=2, row=row )
# Allow the grid to resize
self.window.columnconfigure( 0, weight=1, minsize=46 )
self.window.columnconfigure( 1, weight=3 )
#self.window.columnconfigure( (2, 3), weight=1 )
self.window.columnconfigure( 2, weight=1, minsize=120 )
self.window.columnconfigure( 3, weight=1, minsize=35 )
self.window.rowconfigure( 'all', weight=1 )
class AsmToHexConverter( basicWindow ):
""" Tool window to convert assembly to hex and vice-verca. """
def __init__( self ):
basicWindow.__init__( self, root, 'ASM <-> HEX Converter', offsets=(160, 100), resizable=True, topMost=False )
self.window.minsize( width=480, height=350 )
Label( self.window, text=('This assembles PowerPC assembly code into raw hex,\nor disassembles raw hex into PowerPC assembly.'
"\n\nNote that this functionality is also built into the entry fields for new code in the 'Add New Mod to Library' interface. "
'So you can use your assembly source code in those fields and it will automatically be converted to hex during installation. '
'\nComments preceded with "#" will be ignored.'), wraplength=480 ).grid( column=0, row=0, padx=40 )
# Create the header row
headersRow = Frame( self.window )
Label( headersRow, text='ASM' ).grid( row=0, column=0, sticky='w' )
self.lengthString = StringVar()
self.lengthString.set( '' )
Label( headersRow, textvariable=self.lengthString ).grid( row=0, column=1 )
Label( headersRow, text='HEX' ).grid( row=0, column=2, sticky='e' )
headersRow.grid( column=0, row=1, padx=40, pady=(7, 0), sticky='ew' )
# Configure the header row, so it expands properly on window-resize
headersRow.columnconfigure( 'all', weight=1 )
# Create the text entry fields and center conversion buttons
entryFieldsRow = Frame( self.window )
self.sourceCodeEntry = ScrolledText( entryFieldsRow, width=30, height=20 )
self.sourceCodeEntry.grid( rowspan=2, column=0, row=0, padx=5, pady=7, sticky='news' )
ttk.Button( entryFieldsRow, text='->', command=self.asmToHexCode ).grid( column=1, row=0, pady=20, sticky='s' )
ttk.Button( entryFieldsRow, text='<-', command=self.hexCodeToAsm ).grid( column=1, row=1, pady=20, sticky='n' )
self.hexCodeEntry = ScrolledText( entryFieldsRow, width=30, height=20 )
self.hexCodeEntry.grid( rowspan=2, column=2, row=0, padx=5, pady=7, sticky='news' )
entryFieldsRow.grid( column=0, row=2, sticky='nsew' )
# Configure the above columns, so that they expand proportionally upon window resizing
entryFieldsRow.columnconfigure( 0, weight=6 )
entryFieldsRow.columnconfigure( 1, weight=1 ) # Giving much less weight to this row, since it's just the buttons
entryFieldsRow.columnconfigure( 2, weight=6 )
entryFieldsRow.rowconfigure( 'all', weight=1 )
# Determine the include paths to be used here, and add a button at the bottom of the window to display them
self.detectContext()
ttk.Button( self.window, text='View Include Paths', command=self.viewIncludePaths ).grid( column=0, row=3, pady=(2, 6), ipadx=20 )
# Add the assembly time display
#self.assemblyTime = StringVar()
#Label( self.window, textvariable=self.assemblyTime ).grid( column=0, row=3, sticky='w', padx=(7, 0) )
self.assemblyTimeDisplay = Tkinter.Entry( self.window, width=25, borderwidth=0 )
self.assemblyTimeDisplay.configure( state="readonly" )
self.assemblyTimeDisplay.grid( column=0, row=3, sticky='w', padx=(7, 0) )
# Configure this window's expansion as a whole, so that only the text entry row can expand when the window is resized
self.window.columnconfigure( 0, weight=1 )
self.window.rowconfigure( 0, weight=0 )
self.window.rowconfigure( 1, weight=0 )
self.window.rowconfigure( 2, weight=1 )
self.window.rowconfigure( 3, weight=0 )
def updateAssemblyDisplay( self, textInput ):
self.assemblyTimeDisplay.configure( state="normal" )
self.assemblyTimeDisplay.delete( 0, 'end' )
self.assemblyTimeDisplay.insert( 0, textInput )
self.assemblyTimeDisplay.configure( state="readonly" )
def asmToHexCode( self ):
# Clear the current hex code field, and assembly time display
self.hexCodeEntry.delete( '1.0', 'end' )
#self.assemblyTimeDisplay.delete( 0, 'end' )
self.updateAssemblyDisplay( '' )
# Get the ASM to convert
asmCode = self.sourceCodeEntry.get( '1.0', 'end' )
# Assemble the code (this will also handle showing any warnings/errors to the user)
tic = time.clock()
returnCode, hexCode = customCodeProcessor.preAssembleRawCode( asmCode, self.includePaths, discardWhitespace=False )
toc = time.clock()
if returnCode != 0:
self.lengthString.set( 'Length: ' )
#self.assemblyTime.set( '' )
return
hexCode = hexCode.replace( '|S|', '' ) # Removes special branch syntax separators
# Insert the new hex code
self.hexCodeEntry.insert( 'end', hexCode )
# Update the code length display
codeLength = getCustomCodeLength( hexCode, preProcess=True, includePaths=self.includePaths ) # requires pre-processing to remove whitespace
self.lengthString.set( 'Length: ' + uHex(codeLength) )
# Update the assembly time display with appropriate units
assemblyTime = round( toc - tic, 9 )
if assemblyTime > 1:
units = 's' # In seconds
else:
assemblyTime = assemblyTime * 1000
if assemblyTime > 1:
units = 'ms' # In milliseconds
else:
assemblyTime = assemblyTime * 1000
units = 'us' # In microseconds
#self.assemblyTime.set( 'Assembly Time: {} us'.format(assemblyTime) ) # In microseconds
#self.assemblyTimeDisplay.insert( 0, 'Assembly Time: {} {}'.format(assemblyTime, units) )
self.updateAssemblyDisplay( 'Assembly Time: {} {}'.format(assemblyTime, units) )
def hexCodeToAsm( self ):
# Delete the current assembly code, and clear the assembly time label
self.sourceCodeEntry.delete( '1.0', 'end' )
#self.assemblyTime.set( '' )
#self.assemblyTimeDisplay.delete( 0, 'end' )
self.updateAssemblyDisplay( '' )
# Get the HEX code to disassemble
hexCode = self.hexCodeEntry.get( '1.0', 'end' )
# Disassemble the code into assembly
returnCode, asmCode = customCodeProcessor.preDisassembleRawCode( hexCode, discardWhitespace=False )
if returnCode != 0:
self.lengthString.set( 'Length: ' )
return
# Replace the current assembly code
self.sourceCodeEntry.insert( 'end', asmCode )
# Update the code length display
codeLength = getCustomCodeLength( hexCode, preProcess=True, includePaths=self.includePaths )
self.lengthString.set( 'Length: ' + uHex(codeLength) )
def detectContext( self ):
""" This window should use the same .include context for whatever mod it was opened alongside;
i.e. whatever tab is selected in the Mod Construction tab. If an associated mod is not found,
fall back on the default import directories. """
self.includePaths = []
self.contextModName = ''
# Check if we're even within the Mod Construction interface
currentMainTabSelected = root.nametowidget( mainNotebook.select() )
if currentMainTabSelected != constructionTab: pass
# Check if there are any mods currently loaded in the Mod Construction interface
elif not constructionNotebook.tabs(): pass
else:
# Get the mod constructor object for the currently selected mod
modConstructor = root.nametowidget( constructionNotebook.select() ).winfo_children()[0]
# Check that this is a mod that's saved somewhere in the library, and get the mod name
if modConstructor.sourceFile:
self.includePaths = modConstructor.includePaths
self.contextModName = modConstructor.getInput( modConstructor.titleEntry ) # Safer than using titleEntry.get() because of undo states
if not self.includePaths:
self.includePaths = [ os.path.join( getModsFolderPath(), '.include' ), os.path.join(scriptHomeFolder, '.include') ]
def viewIncludePaths( self ):
# Build the message to show the user
cwd = os.getcwd() + ' <- Current Working Directory'
if self.contextModName:
contextMessage = '\n\nAssembly context (for ".include" file imports) has the following priority:\n\n{}\n\n [Based on "{}"]'.format( '\n'.join([cwd]+self.includePaths), self.contextModName )
else:
contextMessage = '\n\nAssembly context (for ".include" file imports) has the following priority:\n\n{}\n\n [Default paths]'.format( '\n'.join([cwd]+self.includePaths) )
cmsg( contextMessage, 'Include Paths', 'left' )
#=================================#
# ~ ~ Default Game Settings tab ~ ~ #
#=================================#
def openStageSelectionWindow():
if root.stageSelectionsWindow: root.stageSelectionsWindow.window.deiconify()
else: root.stageSelectionsWindow = stageSelectWindow()
playSound( 'menuChange' )
class stageSelectWindow( object ):
def __init__( self ):
# Create the window, and set the title and framing
self.window = Toplevel()
self.window.title( " Random Stage Selection" )
self.window.attributes( '-toolwindow', 1 ) # Makes window framing small, like a toolbox/widget.
# Calculate the spawning position of the window
rootDistanceFromScreenLeft, rootDistanceFromScreenTop = getWindowGeometry( root )[2:]
self.window.geometry( '+' + str(rootDistanceFromScreenLeft + 140) + '+' + str(rootDistanceFromScreenTop + 100) )
# These stages are listed in the bit order that the game uses to read whether they are enabled for random select. (29 total)
stageList = [ 'Kongo Jungle N64', "Yoshi's Island N64", 'Dream Land N64', 'Final Destination', 'Battlefield', 'Flat Zone',
'Mushroom Kingdom II', 'Fourside', 'Big Blue', 'Poké Floats', 'Venom', 'Green Greens', "Yoshi's Island", 'Brinstar Depths',
'Temple', 'Jungle Japes', 'Rainbow Cruise', 'Mushroom Kingdom', 'Icicle Mountain', 'Onett', 'Mute City', 'Pokemon Stadium',
'Corneria', 'Fountain of Dreams', "Yoshi's Story", 'Brinstar', 'Great Bay', 'Kongo Jungle', "Peach's Castle" ]
# Read the stage data for what is currently set in the DOL
bitString = bin( int(currentGameSettingsValues['stageToggleSetting'].get(), 16) )[2:].zfill(32)
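		# The setting is a 32-bit value; only its lower 29 bits map to stages, so the first 3 bits of the string are skipped below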
# Create a 6x5 grid and populate it with the stages above.
stageFrame = Frame( self.window, padx=7, pady=7, borderwidth=3, relief='groove' )
column = 0
for i, stage in enumerate( stageList ):
row = ( i / 6 ) + 1
newWidget = Label( stageFrame, text=stage, width=18, height=2 )
newWidget.grid( row=row, column=column, padx=2, pady=3 )
newWidget.state = 'disabled'
newWidget.revertTo = ''
if bitString[i+3] == '1': self.setState( 'enabled', newWidget )
newWidget.bind("<Button-1>", self.stageClicked )
column = column + 1
if column == 6: column = 0
# Add the confirmation buttons.
Button( stageFrame, text='Select All', command=lambda: self.updateStates( 'FFFFFFFF', False, True ) ).grid( row=6, column=0, padx=2, pady=3 )
Button( stageFrame, text='Deselect All', command=lambda: self.updateStates( '00000000', False, True ) ).grid( row=6, column=1, padx=2, pady=3 )
Button( stageFrame, text='Set to Tourney Legal', command=lambda: self.updateStates( 'E70000B0', False, True ) ).grid( row=6, column=2, padx=2, pady=3 )
ttk.Button( stageFrame, text='Cancel', command=self.cancel ).grid( row=6, column=4, padx=2, pady=3 )
ttk.Button( stageFrame, text='OK', command=self.confirm ).grid( row=6, column=5, padx=2, pady=3 )
self.window.protocol( 'WM_DELETE_WINDOW', self.cancel ) # Overrides the 'X' close button.
stageFrame.pack( padx=4, pady=4 )
def updateStates( self, hexString, readingFromDOL, playAudio ): # Sets all stage label/buttons on the Stage Selection Window
bitString = bin( int( hexString, 16) )[2:].zfill(32)[3:] # Omits first 3 bits.
mainFrameChildren = self.window.winfo_children()[0].winfo_children()
if self.window.state() == 'normal': windowOpen = True
else: windowOpen = False
for i, bit in enumerate( bitString ):
widget = mainFrameChildren[i]
if readingFromDOL: # This input is what's actually read from the DOL; so this will be a 'hard' set (no pending status)
if bit == '1': self.setState( 'enabled', widget )
else: self.setState( 'disabled', widget )
widget.revertTo = ''
else: # The input is from some button selection; these settings will be pending a save operation (or reverting a previous selection).
if windowOpen: # Since this update is not a reflection of what's in the DOL, we want it to be cancelable.
widget.revertTo = widget.state # This is used for the cancel and 'X' close buttons to revert changes.
else: widget.revertTo = '' # The window is closed. This should be cleared so that opening and then closing/canceling doesn't undo this update.
if bit == '1':
if widget.state == 'pendingDisable': self.setState( 'enabled', widget )
elif widget.state == 'disabled': self.setState( 'pendingEnable', widget )
else:
if widget.state == 'pendingEnable': self.setState( 'disabled', widget )
elif widget.state == 'enabled': self.setState( 'pendingDisable', widget )
if playAudio: playSound( 'menuChange' )
def setState( self, state, widget ): # Sets a single specific stage label/button on the window
if state == 'pendingEnable':
stateColor = '#aaffaa' # light green
elif state == 'pendingDisable':
stateColor = '#ee9999' # light red
elif state == 'enabled':
stateColor = '#77cc77' # solid green
else: # i.e. 'disabled'
stateColor = 'SystemButtonFace' # The default (disabled) color for label backgrounds
widget['bg'] = stateColor
widget.state = state
def stageClicked( self, event ):
widget = event.widget
widget.revertTo = widget.state # This is used for the cancel and 'X' close buttons to revert changes.
if widget.state != 'unavailable':
if widget.state == 'pendingEnable': state = 'disabled'
elif widget.state == 'pendingDisable': state = 'enabled'
elif widget.state == 'enabled': state = 'pendingDisable'
elif widget.state == 'disabled': state = 'pendingEnable'
else: state = 'disabled' # Failsafe reset.
self.setState( state, widget )
checkForPendingChanges()
playSound( 'menuChange' )
def cancel( self ): # Undo any state changes made to the label widgets, and then close the window.
# Iterate over each label, revert any changes it had, reset its revertTo property, and close the window.
for widget in self.window.winfo_children()[0].winfo_children()[:29]:
if widget.revertTo != '':
self.setState( widget.revertTo, widget )
widget.revertTo = ''
playSound( 'menuBack' )
self.window.withdraw()
def confirm( self ):
# Rebuild the bit string and convert it to hex.
stageBitStr = '111'
		for stage in self.window.winfo_children()[0].winfo_children()[:29]: # Iterate through the first 29 widgets, all of which are stage Labels.
if stage.state == 'pendingEnable' or stage.state == 'enabled': stageBitStr += '1'
else: stageBitStr += '0'
stage.revertTo = ''
# Convert to int with a base of 2, then convert to hex, padded with 0s to 8 characters (albeit the value should never need padding).
hexString = toHex( int(stageBitStr, 2), 8 )
currentGameSettingsValues['stageToggleSetting'].set( hexString )
updateDefaultGameSettingWidget( 'stageToggleControl', hexString ) # Updates the appearance of the button for opening this window
self.window.withdraw()
def openItemSelectionsWindow():
if root.itemSelectionsWindow: root.itemSelectionsWindow.window.deiconify()
else: root.itemSelectionsWindow = itemSelectWindow()
playSound( 'menuChange' )
class itemSelectWindow( object ):
def __init__( self ):
# Create the window, and set the title and framing
self.window = Toplevel()
self.window.title(" Item Switch")
self.window.attributes('-toolwindow', 1) # Makes window framing small, like a toolbox/widget.
# Calculate the spawning position of the window
rootDistanceFromScreenLeft, rootDistanceFromScreenTop = getWindowGeometry( root )[2:]
self.window.geometry( '+' + str(rootDistanceFromScreenLeft + 340) + '+' + str(rootDistanceFromScreenTop + 270) )
# Items listed in the bit order that the game uses to read whether they are enabled for random select. (31 total)
# The 4th bit (position 3 if counting from 0) is actually nothing? It has no effect on the Item Switch Screen.
# With first bit on, the previous 4 bytes turn to FFFFFFFF?
itemList = ['Poison Mushroom', 'Warp Star', 'Barrel Cannon', '???', # Confirmed
'Beam Sword', 'Starman', 'Screw Attack', 'Super Scope', # Confirmed
'Star Rod', 'Bunny Hood', 'Red Shell', 'Parasol',
'Cloaking Device', 'Maxim Tomato', 'Motion-Sensor Bomb', 'Metal Box',
'Poke ball', "Lip's Stick", 'Ray Gun', 'Party Ball',
'Super Mushroom', 'Heart Container', 'Fan', 'Hammer',
'Green Shell', 'Freezie', 'Food', 'Flipper',
'Fire Flower', 'Mr. Saturn', 'Home-Run Bat', 'Bob-omb'] # Last 3 confirmed
# Read the data for what is currently selected.
itemBitStr = bin( int(currentGameSettingsValues['itemToggleSetting'].get(), 16) )[2:].zfill( 32 )
# Create a 6x5 grid and populate it with the items above.
itemFrame = Frame(self.window, padx=7, pady=7, borderwidth=3, relief='groove')
column = 0
for i, item in enumerate(itemList):
row = ( i / 6 ) + 1
newWidget = Label(itemFrame, text=item, width=18, height=2)
newWidget.grid(row=row, column=column, padx=2, pady=3)
newWidget.state = 'disabled'
newWidget.revertTo = ''
if itemBitStr[i] == '1': self.setState( 'enabled', newWidget )
newWidget.bind( "<Button-1>", self.itemClicked )
column = column + 1
if column == 6: column = 0
# Add the confirmation buttons.
Button( itemFrame, text='Select All', command=lambda: self.updateStates( 'FFFFFFFF', False, True ) ).grid( row=7, column=0, padx=2, pady=3 )
Button( itemFrame, text='Deselect All', command=lambda: self.updateStates( '00000000', False, True ) ).grid( row=7, column=1, padx=2, pady=3 )
cellFrame = Frame( itemFrame )
Label( cellFrame, text='Item Frequency: ' ).pack( side='left' )
itemFrequencyMimic.set( currentGameSettingsValues['itemFrequencySetting'].get() )
itemFrequencyDisplay = OptionMenu(cellFrame, itemFrequencyMimic, 'None', 'Very Low', 'Low', 'Medium', 'High', 'Very High', 'Extremely High',
command=lambda(value): self.reflectItemFrequencyChanges( itemFrequencyDisplay, value ))
cellFrame.grid( row=7, column=2, columnspan=2 )
itemFrequencyDisplay.revertTo = ''
itemFrequencyDisplay.pack( side='left' )
ttk.Button( itemFrame, text='Cancel', command=self.cancel ).grid( row=7, column=4, padx=2, pady=3 )
ttk.Button( itemFrame, text='OK', command=self.confirm ).grid( row=7, column=5, padx=2, pady=3 )
self.window.protocol( 'WM_DELETE_WINDOW', self.cancel ) # Overrides the 'X' close button.
itemFrame.pack( padx=4, pady=4 )
	def updateStates( self, hexString, readingFromDOL, playAudio ): # Sets all item labels/buttons on the Item Switch window
bitString = bin( int( hexString, 16) )[2:].zfill(32)
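		# Unlike the stage toggles, all 32 bit positions are used here, matching the 32 item labels created above (including the '???' placeholder)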
mainFrameChildren = self.window.winfo_children()[0].winfo_children()
if self.window.state() == 'normal': windowOpen = True
else: windowOpen = False
for i, bit in enumerate( bitString ):
widget = mainFrameChildren[i]
if readingFromDOL: # This input is what's actually read from the DOL; so this will be a 'hard' set (no pending status)
if bit == '1': self.setState( 'enabled', widget )
else: self.setState( 'disabled', widget )
widget.revertTo = ''
else: # The input is from some button selection; these settings will be pending a save operation (or reverting a previous selection).
if windowOpen: # Since this update is not a reflection of what's in the DOL, we want it to be cancelable.
widget.revertTo = widget.state # This is used for the cancel and 'X' close buttons to revert changes.
else: widget.revertTo = '' # The window is closed. This should be cleared so that opening and then closing/canceling doesn't undo this update.
if bit == '1':
if widget.state == 'pendingDisable': self.setState( 'enabled', widget )
elif widget.state == 'disabled': self.setState( 'pendingEnable', widget )
else:
if widget.state == 'pendingEnable': self.setState( 'disabled', widget )
elif widget.state == 'enabled': self.setState( 'pendingDisable', widget )
if playAudio: playSound( 'menuChange' )
	def setState( self, state, widget ): # Sets a single specific item label/button on the window
if state == 'pendingEnable':
stateColor = '#aaffaa' # light green
elif state == 'pendingDisable':
stateColor = '#ee9999' # light red
elif state == 'enabled':
stateColor = '#77cc77' # solid green
else: # i.e. 'disabled'
stateColor = 'SystemButtonFace' # The default (disabled) color for label backgrounds
widget['bg'] = stateColor
widget.state = state
def itemClicked( self, event ):
widget = event.widget
widget.revertTo = widget.state # This is used for the cancel and 'X' close buttons to revert changes.
if widget.state != 'unavailable':
if widget.state == 'pendingEnable': state = 'disabled'
elif widget.state == 'pendingDisable': state = 'enabled'
elif widget.state == 'enabled': state = 'pendingDisable'
elif widget.state == 'disabled': state = 'pendingEnable'
else: state = 'disabled' # Failsafe reset.
self.setState( state, widget )
checkForPendingChanges()
playSound( 'menuChange' )
def cancel( self ): # Undo any state changes made to the label widgets, and then close the window.
# Iterate over each label, revert any changes it had, reset its revertTo property, and close the window.
for widget in self.window.winfo_children()[0].winfo_children()[:32]:
if widget.revertTo != '':
self.setState( widget.revertTo, widget )
widget.revertTo = ''
playSound( 'menuBack' )
self.window.withdraw()
def confirm( self ):
# Rebuild the bit string and convert it to hex.
itemBitStr = ''
		for item in self.window.winfo_children()[0].winfo_children()[:32]: # Iterate through the first 32 widgets, all of which are item Labels.
if item.state == 'pendingEnable' or item.state == 'enabled': itemBitStr += '1'
else: itemBitStr += '0'
item.revertTo = ''
# Convert to int using a base of 2, then convert to hex, padded with 0s to 8 characters (albeit the value should never need padding).
newValue = toHex( int(itemBitStr,2), 8 )
currentGameSettingsValues['itemToggleSetting'].set( newValue )
currentGameSettingsValues['itemFrequencySetting'].set( itemFrequencyMimic.get() )
updateDefaultGameSettingWidget( 'itemToggleControl', newValue )
self.window.withdraw()
def reflectItemFrequencyChanges( self, itemFrequencyDisplay, value ):
itemFrequencyDisplay.revertTo = currentGameSettingsValues['itemFrequencySetting'].get()
updateDefaultGameSettingWidget( 'itemFrequencyDisplay', value )
class rumbleSelectWindow( object ):
def __init__( self ):
if root.rumbleSelectionWindow: root.rumbleSelectionWindow.deiconify()
else:
rumbleSelectionWindow = Toplevel()
rumbleSelectionWindow.title( " Rumble Selection" )
rumbleSelectionWindow.attributes( '-toolwindow', 1 ) # Makes window framing small, like a toolbox/widget.
root.rumbleSelectionWindow = rumbleSelectionWindow
rumbleSelectionWindow.buttons = {} # Used by the updateDefaultGameSettingWidget function
# Calculate the spawning position of the window
rootDistanceFromScreenLeft, rootDistanceFromScreenTop = getWindowGeometry( root )[2:]
rumbleSelectionWindow.geometry( '+' + str(rootDistanceFromScreenLeft + 240) + '+' + str(rootDistanceFromScreenTop + 170) )
for player in ( '1', '2', '3', '4' ):
# Create the On/Off toggle button
button = ttk.Button( rumbleSelectionWindow, textvariable=currentGameSettingsValues['p'+player+'RumbleSetting'], command=lambda p=player: self.toggleRumble(p) )
rumbleSelectionWindow.buttons['p'+player+'RumbleControl'] = button
button.player = player
button.grid( column=player, row=0, padx=3, pady=8 )
# Add a P1/P2/etc image below the button
try:
ttk.Label( rumbleSelectionWindow, image=imageBank['p'+player+'Indicator'] ).grid( column=player, row=1, padx=7, pady=(0, 8) )
except:
print 'Unable to load the p'+player+'Indicator image.'
pass
rumbleSelectionWindow.protocol( 'WM_DELETE_WINDOW', self.close ) # Overrides the 'X' close button.
playSound( 'menuChange' )
def toggleRumble( self, player ):
# Get the respective StringVar
var = currentGameSettingsValues['p'+player+'RumbleSetting']
if var.get() == 'Off': var.set( 'On' )
else: var.set( 'Off' )
print 'updating to', var.get()
updateDefaultGameSettingWidget( 'p'+player+'RumbleControl', var.get() )
def close( self ):
playSound( 'menuBack' )
root.rumbleSelectionWindow.withdraw()
def onUpdateDefaultGameSettingsOnlyToggle():
# Disable/Enable the Mods Library tab
if onlyUpdateGameSettings.get(): mainNotebook.tab( 0, state='disabled' )
else:
mainNotebook.tab( 0, state='normal' )
root.update() # Force an update to the GUI, to reflect that the checkbox has been pressed before starting the library scan
scanModsLibrary( playAudio=False )
playSound( 'menuChange' )
# Save this setting
settings.set( 'General Settings', 'onlyUpdateGameSettings', str(onlyUpdateGameSettings.get()) )
saveOptions()
#========================================#
# ~ ~ Tools Tab and other calculations ~ ~ #
#========================================#
# Avert yer eyes! todo: write better functions
def floatToHex( input ): return '0x' + hex( struct.unpack('<I', struct.pack('<f', Decimal(input)))[0] )[2:].upper()
def doubleToHex( input ): return '0x' + hex( struct.unpack('<Q', struct.pack('<d', Decimal(input)))[0] ).replace('L', '')[2:].upper()
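# For reference (hypothetical usage): floatToHex( '1.0' ) should yield '0x3F800000', the IEEE 754 single-precision encoding of 1.0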
def convertDec( event, textvariable ):
decimalFieldInput = ''.join( textvariable.get().split() ) # Removes whitespace
if not isNaN( decimalFieldInput ):
if float( decimalFieldInput ).is_integer():
hexNum.set( '0x' + hex(int(Decimal(decimalFieldInput)))[2:].upper() )
else: hexNum.set( 'n/a (not an integer)' )
floatNum.set( floatToHex( decimalFieldInput ).replace('L', '') )
doubleNum.set( doubleToHex( decimalFieldInput ) )
else:
hexNum.set('')
floatNum.set('')
doubleNum.set('')
def convertHex( event, textvariable ):
hexFieldInput = ''.join( textvariable.get().replace('0x', '').split() ) # Removes whitespace
if validHex( hexFieldInput ):
intFieldInput = int( hexFieldInput, 16 )
decimalNum.set( intFieldInput )
floatNum.set( floatToHex(intFieldInput) )
doubleNum.set( doubleToHex(intFieldInput) )
else:
decimalNum.set('')
floatNum.set('')
doubleNum.set('')
def convertFloat( event, textvariable ):
input = ''.join( textvariable.get().replace('0x', '').split() ) # Removes whitespace
if validHex( input ) and len( input ) == 8:
decFloat = struct.unpack('>f', input.decode('hex'))[0]
decimalNum.set( decFloat )
if float(decFloat).is_integer(): hexNum.set( '0x' + hex(int(Decimal(decFloat)))[2:].upper() )
else: hexNum.set( 'n/a (not an integer)' )
doubleNum.set( doubleToHex( decFloat ) )
else:
decimalNum.set('')
hexNum.set('')
doubleNum.set('')
def convertDouble( event, textvariable ):
input = ''.join( textvariable.get().replace('0x', '').split() ) # Removes whitespace
if validHex( input ) and len( input ) == 16:
decFloat = struct.unpack('>d', input.decode('hex'))[0]
decimalNum.set( decFloat )
if float(decFloat).is_integer(): hexNum.set( '0x' + hex(int(Decimal(decFloat)))[2:].upper() )
else: hexNum.set( 'n/a (not an integer)' )
floatNum.set( floatToHex( decFloat ).replace('L', '') )
else:
decimalNum.set('')
hexNum.set('')
floatNum.set('')
def offsetInRAM( dolOffset, sectionInfo ): # todo: write into dolInitializer method
""" Converts the given DOL offset to the equivalent location in RAM once the DOL file is loaded. """
ramAddress = -1
# Determine which section the DOL offset is in, and then get that section's starting offsets in both the dol and RAM.
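	# (Each sectionInfo entry is assumed to be a (dolOffset, ramAddress, size) tuple, per its usage below.)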
for section in sectionInfo.values():
if dolOffset >= section[0] and dolOffset < (section[0] + section[2]):
sectionOffset = dolOffset - section[0] # Get the offset from the start of the DOL section.
ramAddress = section[1] + sectionOffset # Add the section offset to the RAM's start point for that section.
break
return ramAddress
def offsetInDOL( ramOffset, sectionInfo ): # todo: write into dolInitializer method
""" Converts the given integer RAM address (location in memory) to the equivalent DOL file integer offset.
ramOffset should already be relative to the base address (-0x80000000). """
dolOffset = -1
# Determine which section the address belongs in, and then get that section's starting offsets.
for section in sectionInfo.values():
if ramOffset >= section[1] and ramOffset < (section[1] + section[2]):
sectionOffset = ramOffset - section[1] # Get the offset from the start of the section.
			dolOffset = section[0] + sectionOffset # Add the section offset to the DOL's start point for that section.
break
return dolOffset
def normalizeDolOffset( offsetString, dolObj=None, returnType='int' ):
""" Converts a hex offset string to an int, and converts it to a DOL offset if it's a RAM address.
dolObj is an instance of the 'dol' class, representing a DOL file and its extrapolated properties. """
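	# e.g. an input of '80003100' (8 digits beginning with 8) is treated as a RAM address, while '3100' is treated as a DOL offset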
# Use the default/global DOL if one is not specified.
if not dolObj: dolObj = dol
offsetString = offsetString.replace( '0x', '' ).strip()
problemDetails = ''
dolOffset = -1
if len( offsetString ) == 8 and offsetString.startswith( '8' ): # Looks like it's a RAM address; convert it to a DOL offset int
offset = int( offsetString[1:], 16 )
if offset >= 0x3100:
if offset < dolObj.maxRamAddress:
dolOffset = offsetInDOL( offset, dolObj.sectionInfo )
# Check that the offset was found (it's possible the address is between text/data sections)
if dolOffset == -1:
problemDetails = ', because the RAM address does not have an equivalent location in the DOL.'
else: problemDetails = ', because the RAM address is too big.'
else: problemDetails = ', because the RAM address is too small.'
if returnType == 'string' and not problemDetails:
dolOffset = uHex( dolOffset )
else:
if returnType == 'string': # Already in the desired format; no need for conversion
dolOffset = '0x' + offsetString.upper()
else:
offset = int( offsetString, 16 )
if offset >= 0x100:
if offset < dolObj.maxDolOffset: dolOffset = offset
else: problemDetails = ', because the DOL offset is too big.'
else: problemDetails = ', because the DOL offset is too small.'
if problemDetails:
msg( 'Problem detected while processing the offset 0x' + offsetString + '; it could not be converted to a DOL offset' + problemDetails )
return dolOffset
def normalizeRamAddress( offsetString, dolObj=None, returnType='int' ):
""" Converts a hex offset string to an int, and converts it to a RAM address if it's a DOL offset.
dolObj is an instance of the 'dol' class, representing a DOL file and its extrapolated properties. """
# Use the default/global DOL if one is not specified.
if not dolObj: dolObj = dol
offsetString = offsetString.replace( '0x', '' ).strip()
problemDetails = ''
ramAddress = -1
if len( offsetString ) == 8 and offsetString.startswith( '8' ):
if returnType == 'string': # Already in the desired format; no need for conversion
ramAddress = '0x' + offsetString.upper()
else:
offset = int( offsetString[1:], 16 )
if offset >= 0x3100:
if offset < dolObj.maxRamAddress: ramAddress = offset
else: problemDetails = ', because the RAM address is too big.'
else: problemDetails = ', because the RAM address is too small.'
else: # Looks like it's a DOL offset; convert it to a RAM address int
offset = int( offsetString, 16 )
if offset >= 0x100:
if offset < dolObj.maxDolOffset: ramAddress = offsetInRAM( offset, dolObj.sectionInfo )
else: problemDetails = ', because the DOL offset is too big.'
else: problemDetails = ', because the DOL offset is too small.'
if returnType == 'string' and not problemDetails:
ramAddress = '0x8' + toHex( ramAddress, 7 )
if problemDetails:
		msg( 'Problem detected while processing the offset, 0x' + offsetString + '; it could not be converted to a RAM address' + problemDetails )
return ramAddress
def calcBranchDistance( fromDOL, toDOL ):
start = offsetInRAM( fromDOL, dol.sectionInfo )
end = offsetInRAM( toDOL, dol.sectionInfo )
if start == -1:
msg( 'Invalid input for branch calculation: "from" value (' + hex(fromDOL) + ') is out of range.' )
return -1
elif end == -1:
msg( 'Invalid input for branch calculation: "to" value (' + hex(toDOL) + ') is out of range.' ) #.\n\nTarget DOL Offset: ' + hex(toDOL) )
return -1
else:
return end - start
def assembleBranch( branchInstruction, branchDistance ):
branchInstruction = branchInstruction.lower().strip() # Normalize to lower-case without whitespace
useAssembler = False
# Determine whether the branch instruction is known (and set required flags) or if it needs to be sent to pyiiasmh for evaluation.
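	# In the PowerPC branch opcode, the lowest two bits are AA (absolute addressing, value 2) and LK (store link register, value 1),
	# which is why 1, 2, or 3 is added to the branch distance for the instruction variants below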
if branchInstruction == 'b': pass
elif branchInstruction == 'ba': # Interpret the address as absolute
branchDistance += 2
elif branchInstruction == 'bl': # Store the link register
branchDistance += 1
elif branchInstruction == 'bal' or branchInstruction == 'bla': # Interpret the address as absolute and store the link register
branchDistance += 3
	else: useAssembler = True # Last resort, since this will take much longer
if useAssembler:
fullInstruction = branchInstruction + ' ' + str( branchDistance ) + '\n' # newLine char prevents an assembly error message.
branch, errors = customCodeProcessor.assemble( fullInstruction )
if errors or len( branch ) != 8:
return '48000000' # Failsafe; so that dol data cannot be corrupted with non-hex data.
else:
# Return the hex for a hard (unconditional) branch (b/ba/bl/bla).
if branchDistance >= 0: # Determine if the branch is going forward or backward in RAM.
branch = '48' + toHex( branchDistance, 6 ) # Converts to hex and then pads to 6 characters
else:
branch = '4B' + toHex( (0x1000000 + branchDistance), 6 )
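			# Adding 0x1000000 to the negative distance yields its 24-bit two's-complement form; the '4B' prefix supplies the opcode and sign bits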
return branch
def getBranchDistance( branchHex ):
opCode = branchHex[:2].lower()
def round_down( num, divisor ): # Rounds down to the closest multiple of the second argument.
return num - ( num % divisor )
branchDistance = round_down( int(branchHex[2:], 16), 4 ) # This will exclude the ba & bl flags.
if opCode == '4b': branchDistance = -( 0x1000000 - branchDistance )
return branchDistance
def getBranchTargetDolOffset( branchOffset, branchHex ):
""" Calculates a target DOL offset for a given branch. """
branchDistance = getBranchDistance( branchHex )
ramOffset = offsetInRAM( branchOffset, dol.sectionInfo )
return offsetInDOL( ramOffset + branchDistance, dol.sectionInfo )
def convertDolOffset( event, showLowerLimitErrors=False ):
if event.keysym == 'Return':
showLowerLimitErrors = True # The user is probably trying to trigger/force an update
# Filter the input; remove all whitespace and the '0x' hex indicator
userInput = ''.join( event.widget.get().replace( '0x', '' ).split() )
if not userInput: return
# Clear all entry fields, except for the one currently being used for input. And get the DOL revision that this is for
revision = ''
for widget in offsetConverter.winfo_children():
if widget != event.widget and widget.winfo_class() == 'TEntry': widget.delete( 0, 'end' )
elif widget == event.widget:
revision = lastWidget['text'] # You may see a linting error here, but it's fine
lastWidget = widget
ramToDolNotes.set( '' )
if not revision: # failsafe; shouldn't be possible
msg( 'Unable to determine a revision for this offset conversion.' )
return
# Prelim validation
if not validHex( userInput ):
ramToDolNotes.set( 'Invalid hex' )
return
# Load the DOL for this revision (if one is not already loaded), so that its section info can be used to determine a respective RAM Address
vanillaDol = loadVanillaDol( revision )
# Convert the given hex string to a decimal integer, then convert it to a RAM Address and update the RAM Address text field.
dolOffset = int( userInput, 16 )
if dolOffset < 0x100: # Space in the DOL prior to this is the file's header, which is not loaded into RAM
if showLowerLimitErrors: ramToDolNotes.set( 'Input too small' )
elif dolOffset >= vanillaDol.maxDolOffset:
ramToDolNotes.set( 'Input too big' )
else:
ramAddress = offsetInRAM( dolOffset, vanillaDol.sectionInfo )
if ramAddress == -1: ramToDolNotes.set( 'Not Found' ) #offsetConverter.winfo_children()[1].insert( 0, 'Not found' )
else: offsetConverter.winfo_children()[1].insert( 0, '0x8' + toHex( ramAddress, 7 ) )
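# Note (derived from the bounds used above and in convertRamOffset, assuming a vanilla Melee DOL): the 0x100-byte DOL header
# is not loaded, so the first convertible DOL offset, 0x100, corresponds to the first loaded RAM address, 0x80003100.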
def convertRamOffset( event, showLowerLimitErrors=False ):
if event.keysym == 'Return': showLowerLimitErrors = True # User probably trying to trigger/force an update
# Remove all whitespace and the '0x' hex indicator
userInput = ''.join( event.widget.get().replace( '0x', '' ).split() )
if not userInput: return
# Clear all entry fields, except for the one currently being used for input
for widget in offsetConverter.winfo_children():
if widget != event.widget and widget.winfo_class() == 'TEntry': widget.delete( 0, 'end' )
ramToDolNotes.set( '' )
# Prelim validation
if not validHex( userInput ):
ramToDolNotes.set( 'Invalid hex' )
return
# Convert the given hex string to a decimal integer
if len( userInput ) == 8 and userInput.startswith( '8' ): userInput = userInput[1:]
ramAddress = int( userInput, 16 )
# More validation; make sure the input is not too small
if ramAddress < 0x3100: # Addresses below 0x80003100 precede where the DOL's first section is loaded into RAM
if showLowerLimitErrors: ramToDolNotes.set( 'Input too small' )
return
# Iterate over the DOL revisions shown, and update their offset text fields
label = None
revision = ''
for widget in offsetConverter.winfo_children()[3:]:
if widget.winfo_class() == 'TLabel':
label = widget # Remembers the previous (label) widget when moving on to the next (entry) widget
else: # Should be a TEntry
revision = label['text']
if not revision: # Failsafe
widget.insert( 0, 'Revision N/A' )
continue
# Load the DOL for this revision (if one is not already loaded), so that its section info can be used to determine a respective DOL offset
vanillaDol = loadVanillaDol( revision )
if not vanillaDol: # Failsafe
widget.insert( 0, 'vDOL N/A' )
continue
elif ramAddress >= vanillaDol.maxRamAddress:
widget.insert( 0, 'Input too big' )
else:
dolOffset = offsetInDOL( ramAddress, vanillaDol.sectionInfo )
if dolOffset == -1: widget.insert( 0, 'Not Found' ) #offsetConverter.winfo_children()[1].insert( 0, 'Not found' )
else: widget.insert( 0, uHex( dolOffset ) )
def convertCodeOffset( event ):
""" This takes the offset of some code in the DOL, and gets the offset for the same code
in the other DOL revisions. However, this function mostly handles I/O to the GUI,
while the convertOffsetToVersion() function below this performs the offset search. """
# Remove all whitespace and the '0x' hex indicator
userInput = ''.join( event.widget.get().replace( '0x', '' ).split() )
if not userInput: return
lsd = userInput[-1].lower() # Least Significant Digit
def setAllOutputs( text ): # Not counting the widget being used as input as an output
for widget in codeOffsetConverter.winfo_children():
if widget.winfo_class() == 'TEntry' and widget != event.widget:
widget.delete( 0, 'end' )
widget.insert( 0, text )
# Input validation; display 'Invalid hex' for the outputs
if not validHex( userInput ):
setAllOutputs( 'Invalid hex' )
return
# Check the Least Significant Digit for 4-byte alignment
elif not (lsd == '0' or lsd == '4' or lsd == '8' or lsd == 'c'):
msg( 'The offset must be a multiple of 4 bytes (i.e. ends with 0, 4, 8, or C).', 'Offset out of bounds' )
setAllOutputs( '' ) # Blank the outputs
return
else:
setAllOutputs( 'Searching....' ) # Temporary display
root.update() # Allows the GUI to update before continuing
# Determine whether the input offset is a RAM address
if len( userInput ) == 8 and userInput.startswith( '8' ): ramAddressGiven = True
else: ramAddressGiven = False
# Load the DOL for the input's revision (if one is not already loaded), to check its section info in convertOffsetToVersion
vanillaDol = loadVanillaDol( event.widget.revision )
if not vanillaDol: # Failsafe; this dol should always exist if this revision entry field was created
setAllOutputs( 'Error' )
return
# Iterate over the entry widgets
for widget in codeOffsetConverter.winfo_children():
if widget.winfo_class() == 'TEntry' and widget != event.widget: # Ignores the current/input entry
revision = widget.revision
widget.delete( 0, 'end' )
# Load the DOL for this output's revision (if one is not already loaded), to check its section info and min/max offsets
vanillaDol = loadVanillaDol( revision )
if not vanillaDol: # Failsafe
widget.insert( 0, 'vDOL N/A' )
continue
# Convert the value to an integer DOL offset, even if it's a ram address
offset = normalizeDolOffset( userInput, dolObj=vanillaDol )
# Make sure the offset is not out of range of the DOL's sections
if offset < 0x100:
widget.insert( 0, 'Input too small' )
continue
elif offset >= vanillaDol.maxDolOffset:
widget.insert( 0, 'Input too big' )
continue
# Perform the search for equivalent code between the input and output revisions
matchingOffset = convertOffsetToVersion( offset, event.widget.revision, revision )
# Output the result
if matchingOffset == -1: widget.insert( 0, 'Not Found' )
elif ramAddressGiven: # Convert back to a RAM address
ramAddress = offsetInRAM( matchingOffset, vanillaDol.sectionInfo )
widget.insert( 0, '0x8' + toHex( ramAddress, 7 ) )
else: widget.insert( 0, uHex(matchingOffset) )
root.update() # Allows the GUI to update before continuing
def convertOffsetToVersion( offset, sourceVer, targetVer ):
""" This function matches code at a given offset to code in another [target] DOL, to find the offset of the code in the target.
This is done by creating a map of opcodes before and after the target offset, which is grown on each search iteration
until only one match is found between the source and target DOL sections.
It's assumed that the target code/offset will be in the same section for all game versions. """
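# Rough illustration of the test-point growth (an assumption of how the loop below behaves, not measured output):
# with quickSearch enabled the map grows symmetrically around the target instruction, e.g. [X] -> [A, X, B] -> [A2, A, X, B, B2],
# whereas without it only one point is added per iteration, alternating between the leading and trailing side.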
# Vanilla DOLs for the revisions being compared should already be loaded into memory.
sourceSectionInfo = originalDols[sourceVer].sectionInfo
targetSectionInfo = originalDols[targetVer].sectionInfo
# Get DOL data for the source DOL section
sourceDolSection = ''
sectionOffset = 0
for dolOffsetStart, _, sectionSize in sourceSectionInfo.values():
if offset >= dolOffsetStart and offset < (dolOffsetStart + sectionSize):
sectionOffset = offset - dolOffsetStart # The offset between the start of the section and the given input offset
sourceDolSection = originalDols[sourceVer].data[dolOffsetStart*2:(dolOffsetStart+sectionSize)*2]
break
# Get DOL data for the target DOL section
targetDolSection = ''
for dolOffsetStart, _, sectionSize in targetSectionInfo.values():
if offset >= dolOffsetStart and offset < (dolOffsetStart + sectionSize):
targetDolSection = originalDols[targetVer].data[dolOffsetStart*2:(dolOffsetStart+sectionSize)*2]
break
def matchCodeSample( testPoints, dolSection, samples ):
matchCount = 0
endOfMatch = 0
for nib in range( 0, len(dolSection), 8 ):
for i, point in enumerate(testPoints):
if dolSection[nib:nib+2] != point:
break
else:
if i+1 == len(testPoints): # This would be the last iteration, meaning a full match was found.
endOfMatch = nib
matchCount = matchCount + 1
else: nib = nib + 8
if not settingsFile.quickSearch: samples = samples/2
return ( matchCount, dolOffsetStart + endOfMatch/2 - samples*4 ) # Second return value is the file offset for the last match.
# Using the data section from the source file, find a unique code pattern centered at the given offset.
samples = 0
sourceMatchCount = 0
targetMatchCount = 0
testPoints = [ sourceDolSection[sectionOffset*2:sectionOffset*2+2] ] # This will be a map of test points, composed of command bytes.
while sourceMatchCount != 1 or targetMatchCount != 1:
samples = samples + 1
if settingsFile.quickSearch: # Add a command byte test point both before and after the target command
leadingPoint = sectionOffset*2 - samples*8
trailingPoint = sectionOffset*2 + samples*8
if leadingPoint >= 0: testPoints.insert( 0, sourceDolSection[leadingPoint:leadingPoint+2] )
testPoints.append( sourceDolSection[trailingPoint:trailingPoint+2] )
else: # For each match iteration (attempt at finding a unique code match), alternate between adding just one test point to the map at the front or back
if isEven( samples ):
trailingPoint = sectionOffset*2 + (samples - samples/2)*8 # Add a test point to the end of the map
testPoints.append( sourceDolSection[trailingPoint:trailingPoint+2] )
else:
leadingPoint = sectionOffset*2 - (samples - samples/2)*8 # Add a test point to the beginning of the map
if leadingPoint >= 0: testPoints.insert( 0, sourceDolSection[leadingPoint:leadingPoint+2] )
( sourceMatchCount, _ ) = matchCodeSample( testPoints, sourceDolSection, samples )
( targetMatchCount, targetOffset ) = matchCodeSample( testPoints, targetDolSection, samples )
# msg('found ' + str(testPoints) + ', \n' + str(sourceMatchCount) + ' times in the source, and ' + str(targetMatchCount) + \
# ' times in the target.\n\nFound in section ' + dolSection + ' at ' + hex(offset) + ', with a sample extension of ' + str(samples))
if sourceMatchCount == 0 or targetMatchCount == 0: break # or samples > 200: break # Early exit if no match is found, plus a failsafe.
if sourceMatchCount != 1 or targetMatchCount != 1:
offset = -1
else:
offset = targetOffset
# msg('offset: ' + hex(offset) + ', sourceVer: ' + sourceVer + ', targetVer: ' + targetVer + '\n\n' +
# 'found ' + str(testPoints) + ', \n' + str(sourceMatchCount) + ' times in the source, and ' + str(targetMatchCount) + \
# ' times in the target.\n\nFound in section ' + dolSection + ' at 0x' + hex(offset) + ', with a sample extension of ' + str(samples))
return offset
def text2hexConv( event ):
if event.widget == text2hex: # This is the ASCII text input field; need to convert the given text string to hex
hex2text.delete( '1.0', 'end' ) # Delete current contents of the hex input field
textInput = text2hex.get('1.0', 'end')[:-1]
try:
hex2text.insert( '1.0', textInput.encode('hex') )
except: pass
else:
hexInput = ''.join( hex2text.get( '1.0', 'end' ).split() ) # Removes whitespace
if validHex( hexInput ):
# Only process the string if the number of characters is even (but don't erase the resulting string otherwise).
if isEven( len(hexInput) ):
text2hex.delete( '1.0', 'end' )
text2hex.insert( '1.0', hexInput.decode('hex') )
else:
text2hex.delete( '1.0', 'end' )
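# Quick reference for the conversions above (standard Python 2 behavior, not specific to this program):
#   'Hi'.encode('hex') == '4869'   and   '4869'.decode('hex') == 'Hi'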
def menuText2hexConv( event ):
if event.widget == menuText2hex: # This is the text input field; need to convert the given text string to hex
hex2menuText.delete( '1.0', 'end' ) # Deletes the entire current contents
# Encode!
hexCodes = []
for character in unicode( menuText2hex.get( '1.0', 'end' )[:-1] ):
uniChar = character
for key, value in settingsFile.menuTextDictionary.items():
if value.decode( 'utf-8' ) == uniChar:
hexCodes.append( key )
break
else: hexCodes.append( '__' + u'\ufffd' + '_' ) # basically: '__?_' using the "unknown" unicode character
hex2menuText.insert( '1.0', ''.join(hexCodes) )
else: # This is the hex input field; need to generate text
hexInput = ''.join( hex2menuText.get( '1.0', 'end' ).split() ).lower() # Removes all whitespace
if validHex( hexInput ):
# Only process the string if the number of characters is even (but don't erase the resulting string otherwise).
if isEven( len(hexInput) ):
menuText2hex.delete( '1.0', 'end' ) # Deletes the entire current contents
# Decode!
decodedCharacters = []
position = 0
currentByte = hexInput[:2]
while currentByte:
nextByte = hexInput[position+2:position+4]
if currentByte in settingsFile.menuTextDictionary:
decodedCharacters.append( settingsFile.menuTextDictionary[currentByte] )
position += 2
elif nextByte and currentByte + nextByte in settingsFile.menuTextDictionary:
decodedCharacters.append( settingsFile.menuTextDictionary[currentByte + nextByte] )
position += 4
else:
decodedCharacters.append( u'\ufffd' )
position += 2
currentByte = hexInput[position:position+2]
try:
menuText2hex.insert( '1.0', ''.join(decodedCharacters) )
except: pass
else:
menuText2hex.delete( '1.0', 'end' )
class CommandProcessor( object ):
""" This is an assembler/disassembler to translate between assembly and bytecode (hex/binary machine code).
Uses the PowerPC Embedded Application Binary Interface (EABI) binary utilities. """
# Build file paths to the binaries
assemblerPath = scriptHomeFolder + '\\bin\\eabi\\powerpc-eabi-as.exe'
linkerPath = scriptHomeFolder + '\\bin\\eabi\\powerpc-eabi-ld.exe'
objcopyPath = scriptHomeFolder + '\\bin\\eabi\\powerpc-eabi-objcopy.exe'
disassemblerPath = scriptHomeFolder + '\\bin\\eabi\\vdappc.exe'
tempBinFile = ''
def __init__( self ):
# Construct paths for temporary files
if not self.tempBinFile:
tempFilesFolder = scriptHomeFolder + '\\bin\\tempFiles\\'
self.tempBinFile = tempFilesFolder + 'code.bin' # Temp file for decompiling
if not os.path.exists( tempFilesFolder ): createFolders( tempFilesFolder)
# Validate the EABI file paths
for path in ( self.assemblerPath, self.linkerPath, self.objcopyPath, self.disassemblerPath ):
if not os.path.exists( path ):
print 'Missing PowerPC-EABI binaries!'
break
@staticmethod
def beautifyHex( rawHex ):
code = []
for block in xrange( 0, len(rawHex), 8 ):
# Check whether this is the first or second block (set of 4 bytes or 8 nibbles)
if block % 16 == 0: # Checks if evenly divisible by 16, meaning first block
code.append( rawHex[block:block+8] + ' ' )
else:
code.append( rawHex[block:block+8] + '\n' )
return ''.join( code ).rstrip()
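# For example (hand-checked against the loop above): beautifyHex( '3C60801E60634310386000024E800020' )
# -> '3C60801E 60634310\n38600002 4E800020' (4-byte blocks, two per line).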
def assemble( self, asmCode, beautify=False, includePaths=None, suppressWarnings=False ): # IPC interface to EABI-AS
args = [
self.assemblerPath, # Path to the assembler binary
"-mgekko", # Generate code for PowerPC Gekko (alternative to '-a32', '-mbig')
"-mregnames", # Allows symbolic names for registers
'-al', # Options for outputting assembled hex and other info to stdout
'--listing-cont-lines', '100000', # Sets the maximum number of continuation lines allowable in stdout (basically want this unlimited)
#'--statistics', # Prints additional assembly stats within the errors message; todo: will require some extra post processing
]
if suppressWarnings:
args.append( '--no-warn' )
# Set include directories, if requested
if includePaths:
for path in includePaths:
args.extend( ('-I', path) ) # Need a -I for each path to add
# Set object file output to 'nul', to prevent creation of the usual "a.out" elf object file
args.extend( ('-o', 'nul') )
# Pass the assembly code to the assembler using stdin.
assemblyProcess = subprocess.Popen( args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, # Redirect input & output pipes
creationflags=0x08000000 )
output, errors = assemblyProcess.communicate( input=asmCode + '\n' ) # Extra ending linebreak prevents a warning from assembler
if errors:
# Post-process the error message by removing the first line (which just says 'Assembler messages:') and redundant input form notices
errorLines = []
for line in errors.splitlines()[1:]:
if line.startswith( '{standard input}:' ):
errorLines.append( line.split( '}:', 1 )[1] )
continue
# Condense the file path and rebuild the rest of the string as it was
lineParts = line.split( ': ', 2 ) # Splits on the first 2 occurrences only
fileName, lineNumber = lineParts[0].rsplit( ':', 1 )
errorLines.append( '{}:{}: {}'.format(os.path.basename(fileName), lineNumber, ': '.join(lineParts[1:])) )
errors = '\n'.join( errorLines )
if suppressWarnings:
return ( '', errors )
else:
cmsg( errors, 'Assembler Warnings' )
return self.parseAssemblerOutput( output, beautify=beautify )
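# A minimal usage sketch (the instance name matches usage elsewhere in this file; the exact output formatting is an assumption):
#   code, errors = customCodeProcessor.assemble( 'li r3, 2' )
#   # 'li r3, 2' encodes to 0x38600002, so 'code' should contain '38600002' if no errors occurred.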
def disassemble( self, hexCode, whitespaceNeedsRemoving=False ):
if whitespaceNeedsRemoving:
hexCode = ''.join( hexCode.split() )
# Create a temp file to send to the vdappc executable (doesn't appear to accept stdin)
try:
with open( self.tempBinFile, 'wb' ) as binFile:
binFile.write( bytearray.fromhex(hexCode) )
except IOError as e: # Couldn't create the file
msg( 'Unable to create "' + self.tempBinFile + '" temp file for decompiling!', 'Error' )
return ( '', e )
except ValueError as e: # Couldn't convert the hex to a bytearray
return ( '', e )
# Give the temp file to vdappc and get its output
process = subprocess.Popen( [self.disassemblerPath, self.tempBinFile, "0"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, creationflags=0x08000000 ) # creationflags suppresses cmd GUI rendering
output, errors = process.communicate()
if errors:
print 'Errors detected during disassembly:'
print errors
return ( '', errors )
return self.parseDisassemblerOutput( output )
def parseAssemblerOutput( self, cmdOutput, beautify=False ):
#tic = time.time()
errors = ''
code = []
for line in cmdOutput.splitlines()[1:]: # Excludes first header line ('GAS Listing [filename] [page _]')
if not line: continue # Ignores empty lines
elif 'GAS LISTING' in line and 'page' in line: continue # Ignores page headers
elif line.startswith( '****' ): continue # Ignores warning lines
lineMinusAsm = line.split( '\t' )[0] # ASM commands are separated by a tab.
lineParts = lineMinusAsm.split() # Will usually be [ lineNumber, codeOffset, hexCode ]
linePartsCount = len( lineParts )
if not lineParts[0].isdigit(): # Assuming there must be at least one lineParts item at this point, considering 'if not line: continue'
print 'Error parsing assembler output on this line:'
print line, '\n'
code = []
errors = 'Problem detected while parsing assembly process output:\n' + cmdOutput
break
elif linePartsCount == 1: continue # This must be just a label
else:
hexCode = lineParts[-1]
if not beautify:
code.append( hexCode )
else: # Add line breaks and spaces for readability
if code:
lastBlockLength = len( code[-1] )
while lastBlockLength != 9 and hexCode != '': # Last part isn't a full 4 bytes; add to that
code[-1] += hexCode[:9 - lastBlockLength]
hexCode = hexCode[9 - lastBlockLength:] # Add the remaining to the next block (unless the last still isn't filled)
lastBlockLength = len( code[-1] )
if hexCode:
if len( code ) % 2 == 0: # An even number of blocks have been added (0 is even)
code.append( '\n' + hexCode )
else: code.append( ' ' + hexCode )
# ( ''.join( code ).lstrip(), errors )
# toc = time.time()
# print 'asm output parsing time:', toc - tic
return ( ''.join(code).lstrip(), errors ) # Removes first line break if present
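# Illustration of the expected listing format (approximate; the exact GAS column spacing may differ):
# a line such as '   1 0000 38600002 \tli r3, 2' yields lineParts = ['1', '0000', '38600002'] once the ASM portion is stripped.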
def parseBranchHex( self, hexCode ):
""" Gets the branch operand (branch distance), and normalizes it. Avoids weird results from EABI.
Essentially, this does two things: strip out the link and absolute flag bits,
and normalize the output value, e.g. -0x40 instead of 0xffffffc0. """
# Mask out bits 26-32 (opcode), bit 25 (sign bit) and bits 1 & 2 (branch link and absolute value flags)
intValue = int( hexCode, 16 )
branchDistance = intValue & 0b11111111111111111111111100
# Check the sign bit 0x2000000, i.e. 0b10000000000000000000000000
if intValue & 0x2000000:
# Sign bit is set; this is a negative number.
return hex( -( 0x4000000 - branchDistance ) )
else:
return hex( branchDistance )
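# For example (hand-worked from the masking above): parseBranchHex( '4BFFFFC1' ) -> '-0x40',
# and parseBranchHex( '48000021' ) -> '0x20' (the link/absolute flag bits are stripped in both cases).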
def parseDisassemblerOutput( self, cmdOutput ):
code = []
errors = ''
for line in cmdOutput.splitlines():
if not line:
print 'Found an empty line during disassembly; this may indicate a problem.'
continue # Ignores empty lines
lineParts = line.split() # Expected to be [ codeOffset, hexCode, instruction, *[operands] ]
if len( lineParts ) < 3:
errors = 'Problem detected while parsing disassembly process output:\n' + cmdOutput
break
codeOffset, hexCode, instruction = lineParts[:3]
operands = lineParts[3:]
if not validHex( codeOffset.replace(':', '') ): # Error
errors = 'Problem detected while parsing disassembly process output:\n' + cmdOutput
break
elif operands and '0x' in operands[0]: # Apply some fixes
if instruction == '.word' and len( hexCode ) > 4: # Convert to .long if this is more than 4 characters (2 bytes)
lineParts[2] = '.long'
elif instruction in ( 'b', 'bl', 'ba', 'bla' ):
lineParts[3] = self.parseBranchHex( hexCode )
code.append( ' '.join(lineParts[2:]) ) # Grabs everything from index 2 onward (all assembly command parts)
if errors:
code = []
print errors
return ( '\n'.join(code), errors )
def preAssembleRawCode( self, codeLinesList, includePaths=None, discardWhitespace=True, suppressWarnings=False ):
""" This method takes assembly or hex code, filters out custom MCM syntaxes and comments, and assembles the code
using the PowerPC EABI if it was assembly. Once that is done, the MCM syntaxes are re-added back into the code,
which will be replaced (compiled to hex) later. If the option to include whitespace is enabled, then the resulting
code will be formatted with spaces after every 4 bytes and line breaks after every 8 bytes (like a Gecko code).
The 'includePaths' option specifies a list of [full/absolute] directory paths for .include imports.
Return codes from this method are:
0: Success
1: Compilation placeholder or branch marker detected in original code
2: Error during assembly
3: Include file(s) could not be found
"""
# Define placeholders for special syntaxes
compilationPlaceholder = 'stfdu f21,-16642(r13)' # Equivalent of 'deadbefe' (doesn't actually matter what this is, but must be in ASM in case of conversion)
branchMarker = 'DEADBEFE'
needsConversion = False
allSpecialSyntaxes = True
filteredLines = []
specialBranches = []
if type( codeLinesList ) != list:
codeLinesList = codeLinesList.splitlines()
# Filter out special syntaxes and remove comments
for rawLine in codeLinesList:
# Start off by filtering out comments
codeLine = rawLine.split( '#' )[0].strip()
if compilationPlaceholder in codeLine or branchMarker in codeLine:
# This should be a very rare problem, so I'm not going to bother with suppressing this
msg( 'There was an error while assembling this code (compilation placeholder detected):\n\n' + '\n'.join(codeLinesList), 'Assembly Error 01' )
return ( 1, '' )
elif isSpecialBranchSyntax( codeLine ): # e.g. "bl 0x80001234" or "bl <testFunction>"
# Store the original command.
if discardWhitespace: specialBranches.append( '|S|sbs__' + codeLine + '|S|' ) # Add parts for internal processing
else: specialBranches.append( codeLine ) # Keep the finished string human-readable
# Add a placeholder for compilation (important for other branch calculations). It will be replaced with the original command after the code is assembled to hex.
filteredLines.append( compilationPlaceholder )
elif containsPointerSymbol( codeLine ): # Identifies symbols in the form of <<functionName>>
# Store the original command.
if discardWhitespace: specialBranches.append( '|S|sym__' + codeLine + '|S|' ) # Add parts for internal processing
else: specialBranches.append( codeLine ) # Keep the finished string human-readable
# Add a placeholder for compilation (important for other branch calculations). It will be replaced with the original command after the code is assembled to hex.
filteredLines.append( compilationPlaceholder )
else:
# Whether it's hex or not, re-add the line to filteredLines.
filteredLines.append( codeLine )
allSpecialSyntaxes = False
# Check whether this line indicates that this code requires conversion.
if not needsConversion and codeLine != '' and not validHex( codeLine.replace(' ', '') ):
needsConversion = True
if allSpecialSyntaxes: # No real processing needed; it will be done when resolving these syntaxes
if discardWhitespace:
return ( 0, ''.join(specialBranches) )
else:
return ( 0, '\n'.join(specialBranches) )
filteredCode = '\n'.join( filteredLines ) # Joins the filtered lines with linebreaks.
# If this is ASM, convert it to hex.
if needsConversion:
conversionOutput, errors = self.assemble( filteredCode, beautify=True, includePaths=includePaths, suppressWarnings=suppressWarnings )
if errors:
# If suppressWarnings is True, there shouldn't be warnings in the error text; but there may still be actual errors reported
if not suppressWarnings:
cmsg( errors, 'Assembly Error 02' )
# Parse the error message for missing include files
missingIncludeFile = '' #todo: change GUI to be able to show a list of multiple missing include files?
for line in errors.splitlines():
splitLine = line.split( "Error: can't open" )
if len( splitLine ) == 2 and line.endswith( "No such file or directory" ):
missingIncludeFile = splitLine[1].split( 'for reading:' )[0].strip()
break
if missingIncludeFile:
return ( 3, missingIncludeFile )
else:
return ( 2, '' )
else:
newCode = conversionOutput
else:
newCode = filteredCode.replace( 'stfdu f21,-16642(r13)', 'DEADBEFE' )
# If any special commands were filtered out, add them back in.
if newCode != '' and specialBranches != []:
# The code should be in hex at this point, with whitespace
commandArray = newCode.split() # Split by whitespace
commandLineArray = []
specialBranchIndex = 0
if discardWhitespace:
for command in commandArray:
# Add the previously saved special command(s).
if command == branchMarker:
commandLineArray.append( specialBranches[specialBranchIndex] )
specialBranchIndex += 1
# Add just this command to this line.
else: commandLineArray.append( command )
newCode = ''.join( commandLineArray )
else: # Add some extra formatting for the user.
skip = False
i = 1
for command in commandArray:
if skip:
skip = False
i += 1
continue
# Add the previously saved special command(s).
if command == branchMarker: # This line was a special syntax
commandLineArray.append( specialBranches[specialBranchIndex] )
specialBranchIndex += 1
# Add this command and the next on the same line if neither is a special syntax.
elif i < len( commandArray ) and commandArray[i] != 'DEADBEFE':
commandLineArray.append( command + ' ' + commandArray[i] )
skip = True
# Add just this command to this line.
else: commandLineArray.append( command )
i += 1
newCode = '\n'.join( commandLineArray )
elif discardWhitespace:
newCode = ''.join( newCode.split() )
return ( 0, newCode.strip() )
def preDisassembleRawCode( self, codeLinesList, discardWhitespace=True ):
# Define placeholders for special syntaxes
compilationPlaceholder = 'DEADBEFE'
branchMarker = 'stfdu f21,-16642(r13)'
if type( codeLinesList ) == str:
codeLinesList = codeLinesList.splitlines()
# Filter out the special branch syntax, and remove comments.
needsConversion = False
allSpecialSyntaxes = True
filteredLines = []
specialBranches = []
for rawLine in codeLinesList:
# Remove comments and skip empty lines
codeLine = rawLine.split( '#' )[0].strip()
if codeLine == '': continue
elif compilationPlaceholder in codeLine or branchMarker in codeLine:
msg( 'There was an error while disassembling this code (compilation placeholder detected):\n\n' + '\n'.join(codeLinesList), 'Disassembly Error 01' )
return ( 1, '' )
if isSpecialBranchSyntax( codeLine ): # e.g. "bl 0x80001234" or "bl <testFunction>"
# Store the original command.
specialBranches.append( codeLine )
# Add a placeholder for compilation (important for other branch calculations). It will be replaced with the original command after the code is assembled to hex.
filteredLines.append( compilationPlaceholder )
elif containsPointerSymbol( codeLine ): # Identifies symbols in the form of <<functionName>>
# Store the original command.
specialBranches.append( codeLine )
# Add a placeholder for compilation (important for other branch calculations). It will be replaced with the original command after the code is assembled to hex.
filteredLines.append( compilationPlaceholder )
else:
# Whether it's hex or not, re-add the line to filteredLines.
filteredLines.append( codeLine )
allSpecialSyntaxes = False
# Check whether this line indicates that this code requires conversion.
if not needsConversion and validHex( codeLine.replace(' ', '') ):
needsConversion = True
if allSpecialSyntaxes: # No real processing needed; it will be done when resolving these syntaxes
if discardWhitespace:
return ( 0, ''.join(specialBranches) )
else:
return ( 0, '\n'.join(specialBranches) )
filteredCode = '\n'.join( filteredLines ) # Joins the lines with linebreaks.
# If this is hex, convert it to ASM.
if needsConversion:
conversionOutput, errors = self.disassemble( filteredCode, whitespaceNeedsRemoving=True )
if errors:
cmsg( errors, 'Disassembly Error 02' )
return ( 2, '' )
else:
newCode = conversionOutput
else:
newCode = filteredCode.replace( 'DEADBEFE', 'stfdu f21,-16642(r13)' )
# If any special commands were filtered out, add them back in.
if newCode != '' and specialBranches != []:
# The code is in assembly, with commands separated by line
commandArray = newCode.splitlines()
commandLineArray = []
specialBranchIndex = 0
if discardWhitespace:
for command in commandArray:
# Add the previously saved special command(s).
if command == branchMarker:
commandLineArray.append( specialBranches[specialBranchIndex] )
specialBranchIndex += 1
# Add just this command to this line.
else: commandLineArray.append( command )
newCode = ''.join( commandLineArray )
else: # Add some extra formatting for the user.
# Replace special syntax placeholders with the previously saved special command(s)
for command in commandArray:
if command == branchMarker: # This line was a special syntax
commandLineArray.append( specialBranches[specialBranchIndex] )
specialBranchIndex += 1
# Add just this command to this line.
else: commandLineArray.append( command )
newCode = '\n'.join( commandLineArray )
elif discardWhitespace:
newCode = ''.join( newCode.split() )
# Replace a few choice ASM commands with an alternate syntax
#newCode = newCode.replace( 'lwz r0,0(r1)', 'lwz r0,0(sp)' ).replace( 'lwz r0,0(r2)', 'lwz r0,0(rtoc)' )
return ( 0, newCode.strip() )
@staticmethod
def resolveCustomSyntaxes( thisFunctionStartingOffset, rawCustomCode, preProcessedCustomCode, includePaths=None, rawCodeIsAssembly='unknown' ):
""" Replaces any custom branch syntaxes that don't exist in the assembler with standard 'b_ [intDistance]' branches,
and replaces function symbols with literal RAM addresses, of where that function will end up residing in memory.
This process may require two passes. The first is always needed, in order to determine all addresses and syntax resolutions.
The second may be needed for final assembly because some lines with custom syntaxes might need to reference other parts of
the whole source code (raw custom code), such as for macros or label branch calculations. """
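# Illustrative inputs this method is meant to resolve (hypothetical values, based on the syntaxes referenced below):
#   'bl 0x80001234'            -> becomes a standard 'bl [intDistance]' once the code's final DOL/RAM location is known
#   'lis r3, <<myFunction>>@h' -> the <<myFunction>> symbol is replaced with that function's eventual RAM address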
debugging = False
if '|S|' not in preProcessedCustomCode: # Contains no special syntaxes; just return it
return ( 0, preProcessedCustomCode )
if debugging:
print '\nresolving custom syntaxes for code stored at', hex(thisFunctionStartingOffset)
standaloneFunctions = genGlobals['allStandaloneFunctions'] # Making it local for less look-ups
customCodeSections = preProcessedCustomCode.split( '|S|' )
rawCustomCodeLines = rawCustomCode.splitlines()
if rawCodeIsAssembly == 'unknown':
rawCodeIsAssembly = codeIsAssembly( rawCustomCodeLines )
requiresAssembly = False
resolvedAsmCodeLines = []
errorDetails = ''
byteOffset = 0
returnCode = 0
# Resolve individual syntaxes to finished assembly and/or hex
for i, section in enumerate( customCodeSections ):
if section.startswith( 'sbs__' ): # Something of the form 'bl 0x80001234' or 'bl <function>'; build a branch from this
section = section[5:] # Removes the 'sbs__' identifier
if debugging:
print 'recognized special branch syntax at function offset', hex( byteOffset ) + ':', section
if '+' in section:
section, offset = section.split( '+' ) # Whitespace around the + should be fine for int()
if offset.lstrip().startswith( '0x' ):
branchAdjustment = int( offset, 16 )
else: branchAdjustment = int( offset )
else: branchAdjustment = 0
branchInstruction, targetDescriptor = section.split()[:2] # Get up to two parts max
if isStandaloneFunctionHeader( targetDescriptor ): # The syntax references a standalone function (comments should already be filtered out).
targetFunctionName = targetDescriptor[1:-1] # Removes the </> characters
targetFunctionOffset = standaloneFunctions[targetFunctionName][0]
branchDistance = calcBranchDistance( thisFunctionStartingOffset + byteOffset, targetFunctionOffset )
if branchDistance == -1: # Fatal error; end the loop
errorDetails = 'Unable to calculate SF branching distance, from {} to {}.'.format( hex(thisFunctionStartingOffset + byteOffset), hex(targetFunctionOffset) )
break
else: # Must be a special branch syntax using a RAM address
startingRamOffset = offsetInRAM( thisFunctionStartingOffset + byteOffset, dol.sectionInfo )
if startingRamOffset == -1: # Fatal error; end the loop
errorDetails = 'Unable to determine starting RAM offset, from DOL offset {}.'.format( hex(thisFunctionStartingOffset + byteOffset) )
break
branchDistance = int( targetDescriptor, 16 ) - 0x80000000 - startingRamOffset
branchDistance += branchAdjustment
# Remember in case reassembly is later determined to be required
resolvedAsmCodeLines.append( '{} {}'.format(branchInstruction, branchDistance) )
# Replace this line with hex for the finished branch
if not requiresAssembly: # The preProcessed customCode won't be used if reassembly is required; so don't bother replacing those lines
customCodeSections[i] = assembleBranch( branchInstruction, branchDistance ) # Assembles these arguments into a finished hex string
# Check if this was the last section
if i + 1 == len( customCodeSections ):
returnCode = 100
byteOffset += 4
elif section.startswith( 'sym__' ): # Contains a function symbol; something like 'lis r3, (<<function>>+0x40)@h'; change the symbol to an address
section = section[5:]
if debugging:
print 'resolving symbol names in:', section
erroredFunctions = set()
# Determine the RAM addresses for the symbols, and replace them in the line
for name in containsPointerSymbol( section ):
# Get the dol offset and ultimate RAM address of the target function
targetFunctionOffset = standaloneFunctions[name][0]
ramAddress = offsetInRAM( targetFunctionOffset, dol.sectionInfo )
if ramAddress == -1: # Fatal error; probably an invalid function offset was given, pointing to an area outside of the DOL
erroredFunctions.add( name )
continue
ramAddress += 0x80000000 # 0x80000000 is the base address for GC/Wii games
address = "0x{0:0{1}X}".format( ramAddress, 8 ) # e.g. int 0x800004D2 -> '0x800004D2' (string)
section = section.replace( '<<' + name + '>>', address )
if erroredFunctions:
errorDetails = 'Unable to calculate RAM addresses for the following function symbols:\n\n' + '\n'.join( erroredFunctions )
break
if debugging:
print ' resolved to:', section
requiresAssembly = True
resolvedAsmCodeLines.append( section )
# Check if this was the last section
if i + 1 == len( customCodeSections ):
returnCode = 100
byteOffset += 4
else: # This code should already be pre-processed hex (assembled, with whitespace removed)
byteOffset += len( section ) / 2
if errorDetails: return ( 1, errorDetails )
# Assemble the final code using the full source (raw) code
if requiresAssembly and rawCodeIsAssembly:
if debugging:
print 'reassembling resolved code from source (asm) code'
# Using the original, raw code, remove comments, replace the custom syntaxes, and assemble it into hex
rawAssembly = []
for line in rawCustomCodeLines:
# Start off by filtering out comments and empty lines.
codeLine = line.split( '#' )[0].strip()
#if not codeLine: continue
if isSpecialBranchSyntax( codeLine ) or containsPointerSymbol( codeLine ): # Replace with resolved code lines
rawAssembly.append( resolvedAsmCodeLines.pop(0) )
else: rawAssembly.append( codeLine )
customCode, errors = customCodeProcessor.assemble( '\n'.join(rawAssembly), includePaths=includePaths, suppressWarnings=True )
if errors:
return ( 2, 'Unable to assemble source code with custom syntaxes.\n\n' + errors )
elif requiresAssembly: # Yet the raw code is in hex form
if debugging:
print 'assembling custom syntaxes separately from assembled hex'
# Assemble the resolved lines in one group (doing it this way instead of independently in the customCodeSections loop for less IPC overhead)
assembledResolvedCode, errors = customCodeProcessor.assemble( '\n'.join(resolvedAsmCodeLines), beautify=True, suppressWarnings=True )
resolvedHexCodeLines = assembledResolvedCode.split()
newCustomCodeSections = preProcessedCustomCode.split( '|S|' ) # Need to re-split this, since customCodeSections may have been modified by now
if errors:
return ( 3, 'Unable to assemble custom syntaxes.\n\n' + errors )
else:
# Add the resolved, assembled custom syntaxes back into the full custom code string
for i, section in enumerate( newCustomCodeSections ):
if section.startswith( 'sbs__' ) or section.startswith( 'sym__' ):
newCustomCodeSections[i] = resolvedHexCodeLines.pop( 0 )
if resolvedHexCodeLines == []: break
customCode = ''.join( newCustomCodeSections )
else: # Recombine the code lines back into one string. Special Branch Syntaxes have been assembled to hex
if debugging:
print 'resolved custom code using the preProcessedCustomCode lines'
customCode = ''.join( customCodeSections )
return ( returnCode, customCode )
def openModsLibrary():
""" Wrapper for the two "Open Mods Library Folder" buttons.
Wraps the 'openFolder' function, using the current Mods Library path. """
openFolder( getModsFolderPath() )
def openLibraryFile():
""" Called solely by the "Open this File" button on the Mods Library control panel.
Opens the text file for the tab currently in view on the Mods Library tab. """
currentlySelectedTab = getCurrentModsLibraryTab()
if currentlySelectedTab == 'emptyNotebook': pass # Failsafe; shouldn't be able to happen.
else:
frameForBorder = currentlySelectedTab.winfo_children()[0]
modsPanelInterior = frameForBorder.winfo_children()[0].interior # frameForBorder -> modsPanel.interior
firstMod = modsPanelInterior.winfo_children()[0] # Checking the first mod of the mods panel (should all have same source file)
# Open the file (or folder, if this is an AMFS)
webbrowser.open( firstMod.sourceFile )
#==========================================#
# ~ ~ Summary Tab Population and Sorting ~ ~ #
#==========================================#
def clearSummaryTab():
# Empty the treeviews
for item in modsSummaryTree.get_children(): modsSummaryTree.delete( item )
for item in standalonesSummaryTree.get_children(): standalonesSummaryTree.delete( item )
# Reset the totals labels
totalModsInstalledLabel.set( 'Mods Installed:' )
requiredStandaloneFunctionsLabel.set( 'Required Standalone Functions for the Installed Mods:' )
def addToInstallationSummary( modName='', modType='', summaryReport=None, geckoInfrastructure=False, isMod=True ):
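""" Adds one mod (or the Gecko codehandler/hook) to the Installation Summary treeviews.
summaryReport is assumed to be a list of ( changeName, changeType, dolOffset, customCodeLength ) tuples,
as unpacked below; e.g. [ ('Branch', 'injection', 0x3F0000, 4) ] with illustrative values. """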
if not summaryReport: summaryReport = []
childIndent = '\t- '
if geckoInfrastructure: # Add entries just for the codehandler and hook
if not modsSummaryTree.exists( 'geckoEnvironInfo' ):
modsSummaryTree.insert( '', 'end', iid='geckoEnvironInfo', text='Gecko Codehandler and Hook',
values=('GC', '', uHex(gecko.codehandlerLength + 4), uHex(gecko.codehandlerLength)), tags=('mainEntry', 'notMod') )
modsSummaryTree.insert( 'geckoEnvironInfo', 'end', text=childIndent + 'Codehandler Hook',
values=('GC', uHex(gecko.hookOffset), '0x4', '0') )
modsSummaryTree.insert( 'geckoEnvironInfo', 'end', text=childIndent + 'Gecko Codehandler',
values=('GC', uHex(gecko.codehandlerRegionStart), uHex(gecko.codehandlerLength), uHex(gecko.codehandlerLength)) )
return
def formatLocationString( dolOffset ): # dolOffset is an int in this case
# Create a string for the location, converting to a RAM address if necessary
if modsSummaryTree.heading( 'location', 'text' ) == 'DOL Offset': return uHex( dolOffset )
else:
ramAddress = offsetInRAM( dolOffset, dol.sectionInfo )
return '0x8' + toHex( ramAddress, 7 )
modTypes = { 'static': 'SO', 'injection': 'IM', 'standalone': 'SF', 'gecko': 'GC' }
totalBytesChanged = 0
totalFreeSpaceUsed = 0
# Add this mod to the summary tree
if len( summaryReport ) == 1: # Create just one main entry for this mod (no children)
changeName, changeType, dolOffset, customCodeLength = summaryReport[0]
location = formatLocationString( dolOffset )
if changeType == 'static': freeSpaceUsed = '0'
else: freeSpaceUsed = uHex( customCodeLength )
modsSummaryTree.insert( '', 'end', text=modName, tags=('mainEntry'), values=(modTypes[changeType], location, uHex(customCodeLength), freeSpaceUsed) )
else:
if isMod: tags = ( 'mainEntry' )
else: tags = ( 'mainEntry', 'notMod' ) # First tag is for styling, second tag prevents it from being counted among total mods installed
parentModEntry = modsSummaryTree.insert( '', 'end', text=modName, tags=tags )
# Add each change for the parent mod entry above, as children
for changeName, changeType, dolOffset, customCodeLength in summaryReport:
totalBytesChanged += customCodeLength
location = formatLocationString( dolOffset )
# Check how much free space is used by this code change
if changeType == 'static' or ( changeType == 'injection' and changeName == 'Branch' ): freeSpaceUsed = '0'
else: freeSpaceUsed = uHex( customCodeLength )
modsSummaryTree.insert( parentModEntry, 'end', text=childIndent + changeName, values=(modTypes[changeType], location, uHex(customCodeLength), freeSpaceUsed) )
if changeType != 'static': totalFreeSpaceUsed += customCodeLength
if changeType == 'injection': totalBytesChanged += 4 # For the branch
elif changeType == 'standalone':
functionName = changeName.split(':')[1].lstrip()
# Add this function to the Standalone Functions summary tree, if it doesn't already exist
if not standalonesSummaryTree.exists( functionName ):
standalonesSummaryTree.insert( '', 'end', iid=functionName, text=functionName, tags=('mainEntry') ) # values=(refCount, location)
standalonesSummaryTree.insert( functionName, 'end', text='\tMods using this function:' )
standalonesSummaryTree.insert( functionName, 'end', text='\t '+modName, tags=('modName') ) #, values=('', location)
# Update the reference count of the function
standalonesSummaryTree.item( functionName, values=(len( standalonesSummaryTree.get_children(functionName) ) - 1, location) )
modsSummaryTree.item( parentModEntry, values=(modTypes[modType], '', uHex(totalBytesChanged), uHex(totalFreeSpaceUsed) ) )
def updateSummaryTabTotals():
# Store a snapshot of the items in the treeviews, to help with sorting later. To be filled with ( iid, childIids, childNames )
modsSummaryTree.originalSortOrder = []
total1byteOverwrites = 0
total2byteOverwrites = 0
total4byteOverwrites = 0
totalNbyteOverwrites = 0
totalInjections = 0
injectionsOverhead = 0
staticGeckoOverhead = 0
totalFreeSpaceUsed = 0
grandTotalStandalonesSpaceUsed = 0
sfItems = []
modsSummaryTreeChildren = modsSummaryTree.get_children()
# Calculate total bytes changed and space used by SAFs by iterating over the items in the mods summary tree
for item in modsSummaryTreeChildren:
changeType, _, _, freeSpaceUsed = modsSummaryTree.item( item, 'values' ) # Full tuple is ( changeType, location, bytesChanged, freeSpaceUsed )
totalFreeSpaceUsed += int( freeSpaceUsed, 16 )
childIids = modsSummaryTree.get_children( item )
if changeType == 'SF':
# Iterate over the changes for this mod, looking for standalone functions
for child in childIids:
childChangeType, _, _, childFreeSpaceUsed = modsSummaryTree.item( child, 'values' )
if childChangeType == 'SF' and modsSummaryTree.item( child, 'text' ) not in sfItems:
grandTotalStandalonesSpaceUsed += int( childFreeSpaceUsed, 16 )
sfItems.append( modsSummaryTree.item( child, 'text' ) )
# Store this mod and its children to remember their order (and original child names)
childNames = []
for child in childIids:
childText = modsSummaryTree.item( child, 'text' )
childNames.append( childText )
if childText == '\t- Branch': continue # Injection branch
# Track a few metrics
childChangeType, _, bytesChanged, _ = modsSummaryTree.item( child, 'values' )
bytesChanged = int( bytesChanged, 16 )
if childChangeType == 'SO':
if bytesChanged == 1:
total1byteOverwrites += 1
staticGeckoOverhead += 8
elif bytesChanged == 2:
total2byteOverwrites += 1
staticGeckoOverhead += 8
elif bytesChanged == 4:
total4byteOverwrites += 1
staticGeckoOverhead += 8
else:
totalNbyteOverwrites += 1
staticGeckoOverhead += 8 + bytesChanged
if childChangeType == 'IM':
totalInjections += 1
if bytesChanged % 8 == 0: injectionsOverhead += 8
else: # Code not an even multiple of 8 bytes; would require an extra nop if in Gecko code form
injectionsOverhead += 0xC
modsSummaryTree.originalSortOrder.append( ( item, childIids, childNames ) )
# Count the number of mods installed, excluding extra items in the summary tree that aren't actually mods
totalModsInstalled = len( modsSummaryTreeChildren ) - len( modsSummaryTree.tag_has( 'notMod' ) )
#print 'non mods found:', len( modsSummaryTree.tag_has( 'notMod' ) ), ':', modsSummaryTree.tag_has( 'notMod' )
if injectionsOverhead > 0 or staticGeckoOverhead > 0:
totalCodelistOverhead = 16 + injectionsOverhead + staticGeckoOverhead # +16 for the codelist wrapper
else:
totalCodelistOverhead = 0
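# Worked example of the tally above (illustrative): two injections that are even multiples of 8 bytes (8 bytes of
# overhead each) plus one 4-byte static overwrite (8 bytes) would give 16 + 16 + 8 = 0x28 bytes of codelist overhead.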
print ''
print 'Total mods installed:', totalModsInstalled
print 'Total injections:', totalInjections
print ' overhead:', uHex( injectionsOverhead )
print 'Total static overwrites:', total1byteOverwrites + total2byteOverwrites + total4byteOverwrites + totalNbyteOverwrites
print ' 1 byte:', total1byteOverwrites
print ' 2 byte:', total2byteOverwrites
print ' 4 byte:', total4byteOverwrites
print ' >4 byte:', totalNbyteOverwrites
print ' overhead:', uHex( staticGeckoOverhead )
print 'Total Gecko codelist overhead:', uHex( totalCodelistOverhead ), ' (' + humansize(totalCodelistOverhead) + ')'
combinedGeckoSpace = gecko.codehandlerLength + totalCodelistOverhead
print ' + Codehandler size (' + uHex(gecko.codehandlerLength) + ') =', uHex( combinedGeckoSpace ), ' (' + humansize(combinedGeckoSpace) + ')'
print ''
# Update the mod & SF total labels
totalModsInstalledLabel.set( 'Mods Installed (' + str(totalModsInstalled) + ' total; ' + uHex(totalFreeSpaceUsed) + ' bytes used):' )
requiredStandaloneFunctionsLabel.set( 'Required Standalone Functions for the Installed Mods'
' (' + str(len(sfItems)) + ' total; ' + uHex(grandTotalStandalonesSpaceUsed) + ' bytes used):' )
# Reorder the mods if they should be sorted by offset
if settings.getboolean( 'General Settings', 'sortSummaryByOffset' ):
sortModsSummaryByOffset( True )
def updateSummaryTreeSelection( event, itemToSelect='' ):
""" Updates which treeview item(s) currently have the 'selected' tag.
If no iid was given, this uses the item that was last clicked on. """
treeview = event.widget
# Check which mods are currently selected, and remove the "selected" tag from them.
for iid in treeview.tag_has( 'selected' ):
currentTags = list( treeview.item( iid, 'tags' ) )
currentTags.remove( 'selected' )
treeview.item( iid, tags=currentTags )
# Add the 'selected' tag to the currently selected item
if not itemToSelect: itemToSelect = treeview.identify_row( event.y )
treeview.item( itemToSelect, tags=list(treeview.item( itemToSelect, 'tags' )) + ['selected'] )
def onModSummaryTreeRightClick( event ):
""" If this was the DOL Offset ("location") column header, toggle all values between DOL Offset or RAM Address.
Or if this click was on anything else, it was probably on a mod in the tree, so show a context menu. """
if modsSummaryTree.identify_region( event.x, event.y ) == 'heading':
if modsSummaryTree.identify_column( event.x ) == '#2': # The Offset column header was clicked on.
toggleSummaryLocationDisplayType()
else: # A mod was probably clicked on
modsSummaryContextMenu.show( event )
def onStandalonesSummaryTreeRightClick( event ):
# Check what item has been right-clicked on
selectedItem = standalonesSummaryTree.identify_row( event.y ) # Empty string if no treeview item is under the mouse
if selectedItem:
# Bring up the context menu. Include 'View in...' options if a line describing a mod was clicked on.
thisItemIsAModName = standalonesSummaryTree.tag_has( 'modName', selectedItem )
standalonesSummaryContextMenu = summaryContextMenu( root, forStandalones=True, enableSearchFeatures=thisItemIsAModName )
standalonesSummaryContextMenu.show( event, selectedItem=selectedItem )
def toggleSummaryLocationDisplayType():
# Toggle the values for this column between DOL Offsets and RAM Addresses
if modsSummaryTree.heading( 'location', 'text' ) == 'DOL Offset':
def conversionFunction( offsetString ): return normalizeRamAddress( offsetString, returnType='string' )
modsSummaryTree.heading( '#2', text='RAM Address' )
settings.set( 'General Settings', 'summaryOffsetView', 'ramAddress' )
else:
def conversionFunction( offsetString ): return normalizeDolOffset( offsetString, returnType='string' )
modsSummaryTree.heading( '#2', text='DOL Offset' )
settings.set( 'General Settings', 'summaryOffsetView', 'dolOffset' )
for modIid in modsSummaryTree.get_children():
changeType, location, bytesChanged, freeSpaceUsed = modsSummaryTree.item( modIid, 'values' )
if location != '':
newValues = ( changeType, conversionFunction(location), bytesChanged, freeSpaceUsed )
modsSummaryTree.item( modIid, values=newValues )
for childIid in modsSummaryTree.get_children( modIid ):
changeType, location, bytesChanged, freeSpaceUsed = modsSummaryTree.item( childIid, 'values' )
newValues = ( changeType, conversionFunction(location), bytesChanged, freeSpaceUsed )
modsSummaryTree.item( childIid, values=newValues )
saveOptions()
class summaryContextMenu( Menu ):
""" Primarily, the benefit of using a class and creating instances for the context menus like this is that
we can easily pass values to the menu functions using self, usually avoiding lambdas or helper functions. """
def __init__( self, parent, forStandalones=False, enableSearchFeatures=True ):
if forStandalones:
Menu.__init__( self, parent, tearoff=False )
else: # For all other mods (static/injection/gecko) in the main treeview
Menu.__init__( self, parent, tearoff=False, postcommand=self.setCommandLabels ) # Keyboard selection
self.add_command( command=toggleSummaryLocationSortMethod ) # Will get its text from setCommandLabels() # V
self.add_command( command=toggleSummaryLocationDisplayType ) # Same text change trick as above
self.add_separator()
if enableSearchFeatures:
self.add_command( label='View in Mods Library Tab', command=self.showInModLibrary ) # ?
self.add_command( label='View in Mod Construction Tab', command=self.showInModConstruction ) # ?
self.add_command( label='Copy Offset to Clipboard', underline=5, command=self.copyOffsetToClipboard ) # O
self.add_separator()
self.add_command( label='Expand All', underline=0, command=self.expandAll ) # E
self.add_command( label='Collapse All', underline=0, command=self.collapseAll ) # C
def setCommandLabels( self ):
# Set text for the Sort Method option
if settings.getboolean( 'General Settings', 'sortSummaryByOffset' ): # Keyboard selection
self.entryconfig( 0, label='Simple View', underline=7 ) # V
else:
self.entryconfig( 0, label='Advanced View (Sort Changes by Offset)', underline=9 ) # V
# Set text for the location Display Type option (DOL Offset vs. RAM Address)
if settings.get( 'General Settings', 'summaryOffsetView' ).lstrip()[0].lower().startswith( 'd' ): # 'd' for dolOffset
self.entryconfig( 1, label='Show RAM Addresses' )
else:
self.entryconfig( 1, label='Show DOL Offsets' )
def show( self, event, selectedItem=None ):
# Remember the coordinates and the target treeview widget
self.x = event.x
self.y = event.y
self.treeview = event.widget
# Move focus to the item under the mouse and assume that's the target item
if selectedItem:
self.selectedItem = selectedItem
else:
self.selectedItem = self.treeview.identify_row( self.y )
updateSummaryTreeSelection( event, self.selectedItem )
self.treeview.focus( self.selectedItem )
# Display the context menu at the current mouse coords
self.post( event.x_root, event.y_root )
def showInModLibrary( self ):
if not self.selectedItem:
msg( 'No item is selected.' )
return
parent = self.treeview.parent( self.selectedItem ) # Returns an empty string if it doesn't have a parent
tags = self.treeview.item( self.selectedItem, 'tags' )
if 'notMod' in tags: # These won't exist in the Mods Library
msg( 'This item represents changes that are not\npart of a mod in the Mods Library tab.' )
return
elif self.treeview != standalonesSummaryTree and not 'mainEntry' in tags and parent != '':
modItem = parent
else:
modItem = self.selectedItem
modName = self.getOrigName( modItem )
# Check if this item exists in the mods library, and go to it if it does
for mod in genGlobals['allMods']:
if mod.name == modName:
mainNotebook.select( modsLibraryTab )
goToMod( modName )
break
else: msg( 'This item does not exist in the Mods Library tab!' )
def showInModConstruction( self ):
if not self.selectedItem:
msg( 'No item is selected.' )
return
parent = self.treeview.parent( self.selectedItem ) # Returns an empty string if it doesn't have a parent
tags = self.treeview.item( self.selectedItem, 'tags' )
if 'notMod' in tags: # These won't exist in the Mods Library
msg( 'This item represents changes that are not\npart of a mod in the Mods Library tab.' )
return
elif self.treeview != standalonesSummaryTree and not 'mainEntry' in tags and parent != '':
modItem = parent
else:
modItem = self.selectedItem
modName = self.getOrigName( modItem )
# Iterate over the mods in the mods library tab to get the mod's info, and send that to the mod construction tab.
for mod in genGlobals['allMods']:
if mod.name == modName:
inspectMod( None, mod=mod )
break
else: msg( 'Unable to find this mod! The Mods Library may have been changed.' )
def getOrigName( self, treeviewItem ):
modName = self.treeview.item( treeviewItem, 'text' )
if self.treeview == modsSummaryTree and modName.startswith( 'Part of "'): # For cases of sorted entries (Advanced View, with separated code changes).
changeType = self.treeview.item( treeviewItem, 'values' )[0]
if changeType == 'SF' and modName.endswith( ')' ):
modName = modName.rsplit( ' (', 1 )[0][9:-1] # Removes the function name portion
elif modName.endswith( '"' ):
modName = modName[9:-1]
return modName.strip()
def copyOffsetToClipboard( self ):
if not self.selectedItem: msg( 'No mod is selected.' )
else:
# Item values on the main list are ( changeType, location, bytesChanged, freeSpaceUsed )
# On the SF list they are ( referenceCount, location ), or empty if not the main parent item
values = self.treeview.item( self.selectedItem, 'values' )
if len( values ) > 1: copyToClipboard( values[1].strip() )
def expandAll( self ):
for item in self.treeview.get_children(): self.treeview.item( item, open=True )
def collapseAll( self ):
for item in self.treeview.get_children(): self.treeview.item( item, open=False )
def sortModsSummaryByOffset( sortByOffset, treeview=None ):
if not treeview:
treeview = modsSummaryTree # might be using this function again a little differently
if sortByOffset:
# Unpack individual child code changes that mods may have, creating a list of all mod code changes
allChanges = [] # Will be filled with ( location, iid )
uniqueLocations = Set()
for iid in treeview.get_children():
children = treeview.get_children( iid )
if len( children ) == 0:
location = int( treeview.item( iid, 'values' )[1], 16 )
if location not in uniqueLocations: # Prevents cases of multiple standalone function references
allChanges.append( ( location, iid ) )
uniqueLocations.add( location )
else:
treeview.detach( iid ) # Removes this item and its descendants from the treeview (doesn't delete them)
# Add this mod's children/code changes to the list
parentName = treeview.item( iid, 'text' )
for childIid in children:
changeType, locationHex, _, _ = treeview.item( childIid, 'values' ) # The full tuple is ( changeType, locationHex, bytesChanged, freeSpaceUsed )
location = int( locationHex, 16 )
if location not in uniqueLocations: # Prevents cases of multiple standalone function references
allChanges.append( ( location, childIid ) )
uniqueLocations.add( location )
# These items will also need their names changed to understand what they belong to, since they'll no longer have a parent
if changeType == 'SF':
sfName = treeview.item( childIid, 'text' ).split( ':' )[1].strip()
treeview.item( childIid, text='Part of "{}" ({})'.format(parentName, sfName) )
else:
treeview.item( childIid, text='Part of "{}"'.format(parentName) )
# Perform the sort on the list and then the treeview items
allChanges.sort()
for index, ( location, iid ) in enumerate( allChanges ): treeview.move( iid, '', index )
treeview.heading( '#0', text='Mod Name / Component Owner' )
settings.set( 'General Settings', 'sortSummaryByOffset', 'True' )
else: # The items have previously been sorted. Restore them to their original order
for index, (modIid, childIids, childNames) in enumerate( treeview.originalSortOrder ):
treeview.move( modIid, '', index )
for index2, childIid in enumerate( childIids ):
treeview.move( childIid, modIid, index2)
treeview.item( childIid, text=childNames[index2] ) # These were renamed during the first sort; need to restore them
treeview.heading( '#0', text='Mod Name' )
settings.set( 'General Settings', 'sortSummaryByOffset', 'False' )
saveOptions()
def showInstalledModsList():
if not problemWithDol():
modsList = listInstalledMods()
cmsg( '\n'.join(modsList), 'Installed Mods ({} total)'.format(len(modsList)), 'left' )
def listInstalledMods():
modNames = []
# Iterate over all items (even those that may be hidden due to sorting)
# for ( modIid, childIids, childNames ) in modsSummaryTree.originalSortOrder:
# tags = modsSummaryTree.item( modIid, 'tags' )
# #print 'item tags', tags
# if 'mainEntry' in tags and not 'notMod' in tags:
# modName = modsSummaryTree.item( modIid, 'text' )
# modNames.append( modName )
for mod in genGlobals['allMods']:
if mod.state == 'enabled': modNames.append( mod.name )
return modNames
def installModsList():
if problemWithDol(): return
entryWindow = PopupScrolledTextWindow( root, title='Mods List Entry', message="You may enter a list of mods (separated by line breaks) that you'd "
"like to install to your " + dol.type.upper() + " here:", height=20, button1Text='Install' )
if not entryWindow.entryText: return
loadRegionOverwriteOptions()
# Parse the given list for mod names (excluding empty lines and line start/end whitespace), and turn it into a set
modsToInstall = []
for line in entryWindow.entryText.strip().splitlines():
if line.strip() != '': modsToInstall.append( line.strip() )
modsToInstall = Set( modsToInstall )
# Check if there are any issues with installing Gecko codes
geckoCodesToSkip = []
if not gecko.environmentSupported or not overwriteOptions[ 'EnableGeckoCodes' ].get():
# Gecko codes are not currently enabled. Check if any Gecko codes are actually included in the list
for mod in genGlobals['allMods']:
if mod.type == 'gecko' and mod.name in modsToInstall: geckoCodesToSkip.append( mod.name )
if geckoCodesToSkip:
# Ask to cancel this operation or proceed without Gecko codes
if not gecko.environmentSupported:
proceed = tkMessageBox.askyesno( 'Proceed without Gecko codes?', 'Something about your current configuration does not allow for Gecko codes '
'(some of which are included in the install list).\n\nWould you like to proceed without those Gecko codes anyway?' )
if not proceed: return
# Offer to enable Gecko codes
elif not overwriteOptions[ 'EnableGeckoCodes' ].get():
# If this is for Melee, add some details to the message
if dol.isMelee and ( gecko.codelistRegion == 'DebugModeRegion' or gecko.codehandlerRegion == 'DebugModeRegion'
or gecko.codelistRegion == 'Debug Mode Region' or gecko.codehandlerRegion == 'Debug Mode Region' ):
meleeDetails = ( "Mostly, this just means that you wouldn't be able to use the vanilla Debug Menu (if you're not "
"sure what that means, then you're probably not using the Debug Menu, and you can just click yes). " )
else: meleeDetails = ''
promptToUser = ('The "Enable Gecko Codes" option is not selected, however Gecko codes are included in the install list.'
'\n\nEnabling Gecko codes means that the regions defined for Gecko codes, ' + gecko.codelistRegion + ' and ' + gecko.codehandlerRegion + ', '
"will be reserved (i.e. may be partially or fully overwritten) for custom code. " + meleeDetails + 'Regions that you have '
'enabled for use can be viewed and modified by clicking on the "Code-Space Options" button on the Mods Library tab. '
'\n\nWould you like to enable these regions for overwrites in order to use Gecko codes?')
geckoRegionsEnabled = willUserAllowGecko( promptToUser, True, root )
if geckoRegionsEnabled: geckoCodesToSkip = []
modsToInstall.difference_update( geckoCodesToSkip ) # Updates in-place
# Remove any mods not found in the library, but also remember them for later
modsNotFound = modsToInstall.difference( genGlobals['allModNames'] )
modsToInstall.difference_update( modsNotFound )
# Iterate all mods in the library. Disable them if they're not in the list above, and enable them if they are
for mod in genGlobals['allMods']:
if mod.state == 'unavailable': continue
elif mod.name in modsToInstall: mod.setState( 'pendingEnable' )
elif mod.state == 'enabled' or mod.state == 'pendingEnable': mod.setState( 'pendingDisable' )
else: mod.setState( 'disabled' )
# Check on and validate the selected mods and attempt to install them
checkForPendingChanges() # Also updates GUI, such as the fill meters
saveSuccessful = saveCodes()
if saveSuccessful:
# Check for any other mods that may not have been installed
modsNotInstalled = modsToInstall.difference( listInstalledMods() )
# Report out a summary of the operation
if modsNotFound or modsNotInstalled or geckoCodesToSkip:
summaryMessage = ''
if modsNotFound: summaryMessage += '\nThe following mods could not be installed\nbecause they could not be found in the library:\n\n' + '\n'.join( modsNotFound ) + '\n'
if geckoCodesToSkip: summaryMessage += '\nThe following mods could not be installed\nbecause Gecko codes are disabled:\n\n' + '\n'.join( geckoCodesToSkip ) + '\n'
if modsNotInstalled: summaryMessage += '\nThe following mods could not be installed\n(likely because there is not enough free space enabled for use):\n\n' + '\n'.join( modsNotInstalled )
cmsg( summaryMessage )
else: msg( 'All mods installed!' )
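# Note on the set math above: modsToInstall is presumably an instance of the (Python 2) sets.Set class,
# whose named methods accept any iterable. A minimal sketch with hypothetical mod names:
#	requested = Set( ['Mod A', 'Mod B'] )
#	missing = requested.difference( ['Mod A'] )   # -> Set(['Mod B']); returns a new set
#	requested.difference_update( missing )        # Removes 'Mod B' from 'requested' in-place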
#=======================================#
# ~ ~ General GUI Modules & Functions ~ ~ #
#=======================================#
class VerticalScrolledFrame( Frame ):
""" This is a widget used in the Mods Library tab for scrolling an entire region.
Use the 'interior' attribute to place widgets inside the scrollable area.
The outer widget is essentially just a Frame, which can be attached using
pack/place/grid geometry managers as normal. """
def __init__( self, parent, *args, **kw ):
Frame.__init__( self, parent, *args, **kw )
self.scrollFlag = True # Used by the scroll wheel handler
# create a canvas object and a vertical scrollbar for scrolling it
self.vscrollbar = Scrollbar( self, orient='vertical' )
self.vscrollbar.pack( fill='y', side='right', expand=False )
self.canvas = Canvas( self, bd=0, highlightthickness=0, yscrollcommand=self.vscrollbar.set, width=430 )
self.canvas.pack( side='left', fill='both', expand=True )
self.vscrollbar.config( command=self.canvas.yview )
# create a frame inside the canvas which will be scrolled with it
self.interior = Frame( self.canvas, relief='ridge' )
self.interior_id = self.canvas.create_window( 0, 0, window=self.interior, anchor='nw' )
# track changes to the canvas and frame width and sync them,
# also updating the scrollbar
self.interior.bind( '<Configure>', self._configure_interior )
self.canvas.bind( '<Configure>', self._configure_canvas )
def _configure_interior( self, event ):
# Check if a scrollbar is necessary, and add/remove it as needed.
if self.interior.winfo_height() > self.canvas.winfo_height():
self.vscrollbar.pack( fill='y', side='right', expand=False )
self.scrollFlag = True
else:
self.vscrollbar.pack_forget()
self.scrollFlag = False # Disable scrolling until it's really needed again
# update the scrollbars to match the size of the inner frame
size = ( self.interior.winfo_reqwidth(), self.interior.winfo_reqheight() )
self.canvas.config( scrollregion="0 0 %s %s" % size )
# if self.interior.winfo_reqwidth() != self.canvas.winfo_width():
# # update the canvas's width to fit the inner frame
# self.canvas.config(width=self.interior.winfo_reqwidth())
def _configure_canvas( self, event ):
if self.interior.winfo_reqwidth() != self.canvas.winfo_width():
# update the inner frame's width to fill the canvas
self.canvas.itemconfigure( self.interior_id, width=self.canvas.winfo_width() )
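# Example usage (illustrative; the parent widget is hypothetical):
#	scrolledRegion = VerticalScrolledFrame( someParentFrame )
#	scrolledRegion.pack( fill='both', expand=True )
#	ttk.Label( scrolledRegion.interior, text='Widgets go in .interior, not the outer frame' ).pack()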
class VerticalScrolledFrame2( Frame ):
""" This is a widget used in the Mods Library tab for scrolling an entire region.
This has a few subtle differences from the original variant: the
scrollbar is placed on the left rather than the right, and the width is not set.
Use the 'interior' attribute to place widgets inside the scrollable area.
The outer widget is essentially just a Frame, which can be attached using
pack/place/grid geometry managers as normal. """
def __init__( self, parent, *args, **kw ):
Frame.__init__( self, parent, *args, **kw )
self.scrollFlag = True # Used by the scroll wheel handler
# create a canvas object and a vertical scrollbar for scrolling it
self.vscrollbar = Scrollbar( self, orient='vertical' )
self.vscrollbar.pack( fill='y', side='left', expand=False )
self.canvas = Canvas( self, bd=0, highlightthickness=0, yscrollcommand=self.vscrollbar.set )
self.canvas.pack( side='right', fill='both', expand=True )
self.vscrollbar.config( command=self.canvas.yview )
# create a frame inside the canvas which will be scrolled with it
self.interior = Frame( self.canvas, relief='ridge' )
self.interior_id = self.canvas.create_window( 0, 0, window=self.interior, anchor='nw' )
# track changes to the canvas and frame width and sync them,
# also updating the scrollbar
self.interior.bind( '<Configure>', self._configure_interior )
self.canvas.bind( '<Configure>', self._configure_canvas )
def _configure_interior( self, event ):
# Check if a scrollbar is necessary, and add/remove it as needed.
if self.interior.winfo_height() > self.canvas.winfo_height():
self.vscrollbar.pack( fill='y', side='left', expand=False )
self.scrollFlag = True
else:
self.vscrollbar.pack_forget()
self.scrollFlag = False # Disable scrolling until it's really needed again
# update the scrollbar to match the size of the inner frame
size = ( self.interior.winfo_reqwidth(), self.interior.winfo_reqheight() )
self.canvas.config( scrollregion="0 0 %s %s" % size )
def _configure_canvas( self, event ):
if self.interior.winfo_reqwidth() != self.canvas.winfo_width():
# update the inner frame's width to fill the canvas
self.canvas.itemconfigure( self.interior_id, width=self.canvas.winfo_width() )
def presentableModType( modType, changeType=False ):
""" Converts a modType string to something presentable to a user.
'changeType=True' indicates that modType deals with a specific code change for a mod,
rather than the whole mod's classification. """
if modType == 'static': modTypeTitle = 'Static Overwrite'
elif modType == 'injection':
if changeType:
modTypeTitle = 'Injection'
else: modTypeTitle = 'Injection Mod'
elif modType == 'standalone':
modTypeTitle = 'Standalone Function'
elif modType == 'gecko': modTypeTitle = 'Gecko Code'
else: modTypeTitle = modType
return modTypeTitle
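# For example, presentableModType( 'injection' ) returns 'Injection Mod', while
# presentableModType( 'injection', changeType=True ) returns just 'Injection'.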
def cmsg( *args ):
""" Expects arguments for message, title, alignment, and buttons. Only the first is required. """
if len(args) == 4: CopyableMsg( root, message=args[0], title=args[1], alignment=args[2], buttons=args[3] )
elif len(args) == 3: CopyableMsg( root, message=args[0], title=args[1], alignment=args[2] )
elif len(args) == 2: CopyableMsg( root, message=args[0], title=args[1] )
else: CopyableMsg( root, message=args[0] )
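# Examples of the supported call forms (message only, or message plus title and alignment):
#	cmsg( 'Operation complete.' )
#	cmsg( 'Line one\nLine two', 'Results', 'left' )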
def createNewDolMod():
# Create a tab for a new mod in the Mod Construction interface.
newTab = ttk.Frame( constructionNotebook )
constructionNotebook.add( newTab, text='New Mod' )
# Bring the new tab into view for the user.
constructionNotebook.select( newTab )
ModConstructor( newTab, None ).pack( fill='both', expand=1 )
def getTabNames( notebook ):
""" Returns a dictionary of 'key=mod name, value=tab index' """
return { notebook.tab( tab, 'text' ): i for i, tab in enumerate( notebook.tabs() ) }
def getTabByName( notebook, tabName ):
for windowName in notebook.tabs():
targetTab = root.nametowidget( windowName ) # tabName == tab's text != windowName
if notebook.tab( targetTab, option='text' ) == tabName: return targetTab
else: return -1
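# Example (hypothetical tab name): getTabNames( constructionNotebook ) might return { 'New Mod': 0 },
# while getTabByName( constructionNotebook, 'New Mod' ) returns that tab's frame widget, or -1 if no tab matches.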
def getCustomCodeRegions( searchDisabledRegions=False, specificRegion='', codelistStartPosShift=0, codehandlerStartPosShift=0 ):
""" This gets the regions defined for custom code use (regions permitted for overwrites) in settings.py. Returned as a list of tuples of the
form (regionStart, regionEnd). The start position shifts (space reservations for the Gecko codelist/codehandler) should be counted in bytes. """
codeRegions = []
for regionName, regions in dol.customCodeRegions.items():
# Check if this dol region should be included by its BooleanVar option value (or if that's overridden, which is the first check)
if searchDisabledRegions or ( regionName in overwriteOptions and overwriteOptions[regionName].get() ):
# Get all regions if specificRegion is not defined, or get only that region (or that group of regions)
if not specificRegion or regionName == specificRegion:
# Offset the start of (thus excluding) areas that will be partially used by the Gecko codelist or codehandler
if codelistStartPosShift != 0 and gecko.environmentSupported and regionName == gecko.codelistRegion:
codelistRegionStart, codelistRegionEnd = regions[0]
codelistRegionStart += codelistStartPosShift
if codelistRegionEnd - codelistRegionStart > 0: # This excludes the first area if it was sufficiently shrunk
codeRegions.append( (codelistRegionStart, codelistRegionEnd) )
codeRegions.extend( regions[1:] ) # If there happen to be any more regions that have been added for this.
elif codehandlerStartPosShift != 0 and gecko.environmentSupported and regionName == gecko.codehandlerRegion:
codehandlerRegionStart, codehandlerRegionEnd = regions[0]
codehandlerRegionStart += codehandlerStartPosShift
if codehandlerRegionEnd - codehandlerRegionStart > 0: # This excludes the first area if it was sufficiently shrunk
codeRegions.append( (codehandlerRegionStart, codehandlerRegionEnd) )
codeRegions.extend( regions[1:] ) # If there happen to be any more regions that have been added for this.
else:
codeRegions.extend( regions )
# If only looking for a specific region, there's no need to iterate further.
if specificRegion: break
return codeRegions
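# A minimal usage sketch (the region name and values are illustrative; actual regions come from settings.py):
#	for regionStart, regionEnd in getCustomCodeRegions( specificRegion='DebugModeRegion' ):
#		print hex( regionStart ), '-', hex( regionEnd ), '|', regionEnd - regionStart, 'bytes'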
class RevisionPromptWindow( basicWindow ):
""" Prompts the user to select a DOL region and version (together, these are the dol 'revision'). """
def __init__( self, labelMessage, regionSuggestion='', versionSuggestion='' ):
basicWindow.__init__( self, root, 'Select DOL Revision' )
regionOptions = [ 'NTSC', 'PAL' ]
if regionSuggestion not in regionOptions:
regionOptions.append( regionSuggestion )
ttk.Label( self.window, wraplength=240, text=labelMessage ).pack( padx=20, pady=12 )
# Create variables for the region/version details
self.regionChoice = StringVar()
self.versionChoice = StringVar()
self.regionChoice.set( regionSuggestion )
self.versionChoice.set( '1.' + versionSuggestion )
self.region = self.version = ''
# Display the input widgets
inputWrapper = ttk.Frame( self.window )
OptionMenu( inputWrapper, self.regionChoice, *regionOptions ).pack( side='left', padx=8 )
Spinbox( inputWrapper, textvariable=self.versionChoice, from_=1.0, to=1.99, increment=.01, width=4, format='%1.2f' ).pack( side='left', padx=8 )
inputWrapper.pack( pady=(0,12) )
# OK / Cancel buttons
buttonsWrapper = ttk.Frame( self.window )
ttk.Button( buttonsWrapper, text='OK', width=16, command=self.confirm ).pack( side='left', padx=8 )
ttk.Button( buttonsWrapper, text='Cancel', width=16, command=self.close ).pack( side='left', padx=8 )
buttonsWrapper.pack( pady=(0,12) )
# Force focus away from the parent window and wait until the new window is closed to continue.
self.window.grab_set()
root.wait_window( self.window )
# Define button functions
def confirm( self ):
self.region = self.regionChoice.get()
self.version = self.versionChoice.get()
self.window.destroy()
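# Example usage (blocks until the prompt is closed; the suggestions shown are illustrative):
#	prompt = RevisionPromptWindow( 'Select the revision of this DOL:', regionSuggestion='NTSC', versionSuggestion='02' )
#	if prompt.region and prompt.version: revision = prompt.region + ' ' + prompt.version   # e.g. 'NTSC 1.02'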
class ShowOptionsWindow( object ):
""" Creates a modeless (non-modal) message window that allows the user to toggle
which regions are allowed to be overwritten by custom code. """
def __init__( self ):
if not dol.data:
msg( 'A game file (ISO) or DOL must be loaded to view these options.' )
elif root.optionsWindow == None:
# Define the window.
self.window = Toplevel()
self.window.title( 'Code-Space Options' )
self.window.attributes( '-toolwindow', 1 ) # Makes window framing small, like a toolbox/widget.
self.window.resizable( width=False, height=False )
# Calculate the spawning position of the new window
rootDistanceFromScreenLeft, rootDistanceFromScreenTop = getWindowGeometry( root )[2:]
self.window.geometry( '+' + str(rootDistanceFromScreenLeft + 180) + '+' + str(rootDistanceFromScreenTop + 180) )
self.window.protocol( 'WM_DELETE_WINDOW', self.close ) # Overrides the 'X' close button.
self.createContents()
else: # The window must already exist. Make sure it's not minimized, and bring it to the foreground
root.optionsWindow.window.deiconify()
root.optionsWindow.window.lift()
def createContents( self ):
root.optionsWindow = self
mainFrame = ttk.Frame( self.window, padding='15 0 15 0' ) # padding order: left, top, right, bottom
Label( mainFrame, text='These are the regions that will be reserved (i.e. may be partially or fully overwritten) for injecting custom code. '
'It is safest to uninstall all mods that may be installed to a region before disabling it. '
'For more information on these regions, or to add your own, see the "settings.py" file.', wraplength=550 ).grid( columnspan=4, pady=12 )
# Create the rows for each region option to be displayed.
row = 1
padx = 5
pady = 3
self.checkboxes = []
for overwriteOptionName, boolVar in overwriteOptions.items():
if overwriteOptionName == 'EnableGeckoCodes': continue
elif overwriteOptionName not in dol.customCodeRegions: continue # An option is loaded for a region that isn't available for the loaded DOL (todo: these loops should probably be reversed in their nesting)
# The checkbox
checkbox = ttk.Checkbutton( mainFrame, variable=boolVar, command=lambda regionName=overwriteOptionName: self.checkBoxClicked(regionName) )
# Title
Label( mainFrame, text=overwriteOptionName ).grid( row=row, column=1, padx=padx, pady=pady )
# Check the space available with this region
totalRegionSpace = 0
tooltipText = []
for i, region in enumerate( dol.customCodeRegions[overwriteOptionName], start=1 ):
spaceAvailable = region[1] - region[0]
totalRegionSpace += spaceAvailable
tooltipText.append( 'Area ' + str(i) + ': ' + uHex(region[0]) + ' - ' + uHex(region[1]) + ' | ' + uHex(spaceAvailable) + ' bytes' )
# Create the label and tooltip for displaying the total region space and details
regionSizeLabel = Label( mainFrame, text=uHex(totalRegionSpace) + ' Bytes', foreground='#777', font="-slant italic" )
regionSizeLabel.grid( row=row, column=2, padx=padx, pady=pady )
ToolTip( regionSizeLabel, delay=300, text='\n'.join(tooltipText), location='e', bg='#c5e1eb', follow_mouse=False, wraplength=1000 )
# Restore button
restoreBtn = ttk.Button( mainFrame, text='Restore', command=lambda regionName=overwriteOptionName: self.restoreRegions(regionName) )
restoreBtn.grid( row=row, column=3, padx=padx, pady=pady )
# Disable regions which are reserved for Gecko codes
if overwriteOptions[ 'EnableGeckoCodes' ].get() and ( overwriteOptionName == gecko.codelistRegion or overwriteOptionName == gecko.codehandlerRegion ):
checkbox['state'] = 'disabled'
restoreBtn['state'] = 'disabled'
checkbox.bind( '<1>', self.checkboxDisabledMessage )
# Attach some info for later use
checkbox.space = totalRegionSpace
checkbox.regionName = overwriteOptionName
checkbox.restoreBtn = restoreBtn
checkbox.grid( row=row, column=0, padx=padx, pady=pady )
self.checkboxes.append( checkbox )
row += 1
# Add the checkbox to enable Gecko codes
if gecko.environmentSupported:
self.enableGeckoChkBox = ttk.Checkbutton( mainFrame, text='Enable Gecko Codes', variable=overwriteOptions['EnableGeckoCodes'], command=self.toggleGeckoEngagement )
else:
self.enableGeckoChkBox = ttk.Checkbutton( mainFrame, text='Enable Gecko Codes', variable=overwriteOptions['EnableGeckoCodes'], command=self.toggleGeckoEngagement, state='disabled' )
self.enableGeckoChkBox.grid( columnspan=4, pady=12 )
# Add the total space label and the Details button to the bottom of the window
lastRow = Frame( mainFrame )
self.totalSpaceLabel = StringVar()
self.calculateSpaceAvailable()
Label( lastRow, textvariable=self.totalSpaceLabel ).pack( side='left', padx=11 )
detailsLabel = Label( lastRow, text='Details', foreground='#03f', cursor='hand2' )
detailsLabel.pack( side='left', padx=11 )
detailsLabel.bind( '<1>', self.extraDetails )
lastRow.grid( columnspan=4, pady=12 )
mainFrame.pack()
def checkBoxClicked( self, regionName ):
# The program's checkbox boolVars (in overwriteOptions) have already been updated by the checkbox. However, the variables in the "settings"
# object still need to be updated. saveOptions does this, and also saves the settings to the options file.
saveOptions()
self.calculateSpaceAvailable()
checkForPendingChanges( changesArePending=True )
playSound( 'menuChange' )
def toggleGeckoEngagement( self ):
""" Called by the 'Enable Gecko Codes' checkbox when it changes state. When enabling, and either
of the gecko regions are not selected for use, prompt with a usage warning. """
if overwriteOptions['EnableGeckoCodes'].get() and ( not overwriteOptions[ gecko.codelistRegion ].get() or not overwriteOptions[ gecko.codehandlerRegion ].get() ) :
if dol.isMelee and ( gecko.codelistRegion == 'DebugModeRegion' or gecko.codehandlerRegion == 'DebugModeRegion'
or gecko.codelistRegion == 'Debug Mode Region' or gecko.codehandlerRegion == 'Debug Mode Region' ):
meleeDetails = ( " Mostly, this just means that you won't be able to use the vanilla Debug Menu (if you're not "
"sure what that means, then you're probably not using the Debug Menu, and you can just click yes)." )
else: meleeDetails = ''
promptToUser = ( 'Enabling Gecko codes means that the regions assigned for Gecko codes, ' + gecko.codelistRegion + ' and ' + gecko.codehandlerRegion + ', '
"will be reserved (i.e. may be partially or fully overwritten) for custom code." + meleeDetails + \
'\n\nWould you like to enable these regions for overwrites in order to use Gecko codes?' )
else: promptToUser = '' # Just going to use the following function to set some options & states (no prompt to the user)
self.calculateSpaceAvailable()
willUserAllowGecko( promptToUser, True, self.window ) # This will also check for pending changes, or for enabled codes (if gecko codes are allowed)
playSound( 'menuChange' )
def restoreRegions( self, regionName ):
""" Called by the regions' "Restore" button. Restores custom code or zeroed out space for the chosen space back to vanilla code. """
if dol.isMelee and regionName.replace( ' ', '' ) == 'ScreenshotRegions':
noteOnScreenshotNops = 'The nops required to enable these regions will also be reverted.'
else: noteOnScreenshotNops = ''
restoreApproved = tkMessageBox.askyesno( 'Restoration Confirmation', 'This action will overwrite the region(s) defined by "' + regionName + '" with the '
"game's default/vanilla hex, and deselect the region(s) so that they will not be used for custom code. Any custom code previously saved here "
"will be moved to the next available region upon saving. " + noteOnScreenshotNops + "\n\nAre you sure you want to do this?", parent=self.window )
if not restoreApproved:
return
# Update the option for whether or not this region should be used (and save this to file)
overwriteOptions[ regionName ].set( False )
saveOptions()
# Restore nops if the regions being restored are the "screenshot" regions
if dol.isMelee and regionName.replace( ' ', '' ) == 'ScreenshotRegions':
# Commands at the points below were replaced with a nop to use this region. Restore them to vanilla
screenshotRegionNopSites = { 'NTSC 1.03': (0x1a1b64, 0x1a1c50), 'NTSC 1.02': (0x1a1b64, 0x1a1c50), 'NTSC 1.01': (0x1a151c, 0x1a1608),
'NTSC 1.00': (0x1a0e1c, 0x1a0f08), 'PAL 1.00': (0x1a2668, 0x1a2754) }
nopSites = screenshotRegionNopSites[ dol.revision ]
nop0Hex = getVanillaHex( nopSites[0] ) # , revision=revisionList[0]
nop1Hex = getVanillaHex( nopSites[1] )
if not nop0Hex or not nop1Hex:
msg( 'Unable to uninstall the ScreenshotRegion nops (at ' + hex(nopSites[0]) + ' and ' + hex(nopSites[1]) + '). This was likely because an original DOL for this game revision '
'was not found in the original DOLs folder. In order to restore regions and look up vanilla code, you must place an original copy of the DOL here:'
'\n\n' + dolsFolder + '\n\nThe filename should be "[region] [version].dol", for example, "NTSC 1.02.dol"', 'Unable to Uninstall ScreenshotRegion nops', self.window )
else:
replaceHex( nopSites[0], nop0Hex )
replaceHex( nopSites[1], nop1Hex )
# Restore each area in this region
regionsUnrestored = []
for regionStart, regionEnd in dol.customCodeRegions[ regionName ]:
vanillaHex = getVanillaHex( regionStart, regionEnd - regionStart )
if not vanillaHex:
regionsUnrestored.append( uHex(regionStart) + '-' + uHex(regionEnd) )
else:
replaceHex( regionStart, vanillaHex )
# Notify the user of any regions that could not be restored for some reason
if regionsUnrestored:
if dol.revision in originalDols:
msg( "The regions below could not be restored. This is likely due to them being out of range of the DOL "
"(i.e. pointing to an area beyond the end of the file).\n\n" + '\n'.join(regionsUnrestored), 'Unable to restore regions', self.window )
else: # An original DOL for this revision doesn't appear to be available.
msg( 'Some regions could not be restored. This was likely because an original DOL for this game revision '
'was not found in the original DOLs folder. In order to restore regions and look up vanilla code, you must place an original '
'copy of the DOL here:\n\n' + dolsFolder + '\n\nThe filename should be "[region] [version].dol", for example, "NTSC 1.02.dol". '
'The regions below could not be restored:\n\n' + '\n'.join(regionsUnrestored), 'Unable to Restore Regions', self.window )
else:
programStatus.set( 'Regions Successfully Restored' )
playSound( 'menuSelect' )
# Indicate to the program that changes have happened, so that the user can use the 'save' buttons.
checkForPendingChanges( changesArePending=True )
def checkboxDisabledMessage( self, event ):
""" This will be executed on click events to the checkboxes for the regions assigned to the Gecko codelist and codehandler, since they will be disabled. """
if overwriteOptions[ 'EnableGeckoCodes' ].get():
msg( "You currently have Gecko codes enabled, which require use of this region. "
"You must uncheck the 'Enable Gecko codes' checkbox if you want to unselect this region.", "Can't let you do that, Star Fox!", self.window )
def calculateSpaceAvailable( self ):
space = 0
for checkbox in self.checkboxes:
if overwriteOptions[checkbox.regionName].get(): space += checkbox.space
self.totalSpaceLabel.set( 'Total Space Available: ' + uHex(space) + ' Bytes (' + humansize(space) + ')' )
def extraDetails( self, event ):
msg( 'Each of the region options displayed here may be a single contiguous area in the DOL, or a collection of several areas (see settings.py for '
'the exact definitions of each region). Regions assigned for the Gecko codehandler and codelist may be changed in the settings.py file under Gecko Configuration. '
'However, if one of these is a group of areas, only the first contiguous area among the group will be used for the codehandler or codelist.'
"""\n\nIf Gecko codes are used, you may notice that the "Total Space Available" shown here will be higher than what's reported by the Codes Free Space indicators in the """
'main program window. That is because the free space indicators do not count space that will be assigned for the Gecko codehandler (' + uHex(gecko.codehandlerLength) + ' bytes), '
'the codelist wrapper (0x10 bytes), or the codelist.', '', self.window )
def close( self ):
root.optionsWindow = None
self.window.destroy()
def onWindowResize( event ):
""" Fires when the window is resized (including maximize/restore functionality).
This is primarily used to update the position of the Mods Library's control panel. """
# Reset the timer if it's already running
if rootFrame.resizeTimer:
rootFrame.after_cancel( rootFrame.resizeTimer )
timeout = 0
# Toggle the flag describing whether the program is maximized
if root.state() == 'zoomed':
root.maximized = True
elif root.maximized:
# In this case, the root state is not zoomed (but the flag says it is), meaning the window must have just now been unmaximized
root.maximized = False
timeout = 100 # Provide a slight delay before panel realignment if the program window is being unmaximized by dragging
# This prevents a timing issue with root.update() in the alignment function, which would cause it to undo the unmaximize
rootFrame.resizeTimer = rootFrame.after( timeout, realignControlPanel )
def onMouseWheelScroll( event ):
""" Checks the widget under the mouse when a scroll event occurs, and then looks through the GUI geometry
for parent widgets that may have scroll wheel support (indicated by the "scrollFlag" bool attribute). """
# Cross-platform resources on scrolling:
# - http://stackoverflow.com/questions/17355902/python-tkinter-binding-mousewheel-to-scrollbar
# - https://www.daniweb.com/programming/software-development/code/217059/using-the-mouse-wheel-with-tkinter-python
# Get the widget currently under the mouse
widget = root.winfo_containing( event.x_root, event.y_root )
# Traverse upwards through the parent widgets, looking for a scrollable widget
while widget:
if getattr( widget, 'scrollFlag', False ): # For a VerticalScrolledFrame widget
widget = widget.canvas
break
elif widget.winfo_class() == 'Text': break
widget = widget.master
# If the above loop doesn't break (no scrollable found), widget will reach the top level item and become None.
if widget:
widget.yview_scroll( -1*(event.delta/30), "units" )
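# (The binding for this handler is defined elsewhere; presumably something along the lines of
# root.bind_all( '<MouseWheel>', onMouseWheelScroll ), so one handler covers the whole GUI.)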
def selectAll( event ): # Adds bindings for normal CTRL-A functionality.
if event.widget.winfo_class() == 'Text': event.widget.tag_add( 'sel', '1.0', 'end' )
elif event.widget.winfo_class() == 'TEntry': event.widget.selection_range( 0, 'end' )
def playSoundHelper( soundFilePath ):
""" Helper (thread-target) function for playSound(). Runs in a separate
thread to prevent audio playback from blocking main execution. """
try:
# Instantiate PyAudio (1)
p = pyaudio.PyAudio()
wf = wave.open( soundFilePath, 'rb' )
# Open an audio data stream (2)
stream = p.open( format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True )
# Continuously read/write data from the file to the stream until there is no data left (3)
data = wf.readframes( 1024 )
while len( data ) > 0:
stream.write( data )
data = wf.readframes( 1024 )
# Stop the stream (4)
stream.stop_stream()
stream.close()
# Close PyAudio (5)
p.terminate()
except Exception as err:
soundFileName = os.path.basename( soundFilePath )
print 'Unable to play "{}" sound.'.format( soundFileName )
print err
def playSound( soundFileName ):
if not audioInitialized: return
elif soundFileName not in soundBank:
msg( 'The "{}" sound file could not be found in the "{}\\sfx" folder.'.format(soundFileName, scriptHomeFolder) )
return
# Play the audio clip in a separate thread so that it's non-blocking
# (PyAudio does have a "non-blocking" playback mode already, but that
# ends up blocking too, since the code would still have to wait for playback to finish.)
audioThread = Thread( target=playSoundHelper, args=(soundBank[soundFileName],) )
audioThread.start()
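# Example: playSound( 'menuChange' ) looks up the 'menuChange' entry in soundBank and streams that .wav
# file in 1024-frame chunks on a worker thread, so playback never blocks the GUI.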
# Function definitions complete
#===========#
# ~ ~ GUI ~ ~ #
#===========#
if __name__ == '__main__':
# Parse command line input
import argparse
cmdParser = argparse.ArgumentParser()
cmdParser.add_argument( 'inputFile', nargs='?', help='an optional file path can be provided via command line or by drag-and-dropping onto the program icon' )
cmdParser.add_argument( '-d', '--debugMode', action='store_true', help='creates a "Debug Log" text file for outputting various program debug and error messages' )
cmdArgs = cmdParser.parse_args()
root = Tk() # Instantiation of GUI program loop.
root.withdraw() # Keep the GUI minimized until it is fully generated
loadImageBank() # Must be loaded after root is initialized
## Program icon and title display.
root.tk.call( 'wm', 'iconphoto', root._w, imageBank['appIcon'] )
root.title( " Melee Code Manager - v" + programVersion )
root.resizable( width=True, height=True )
root.geometry( '880x680' )
root.minsize( width=640, height=620 )
dnd = TkDND( root )
rootFrame = ttk.Frame( root )
rootFrame.resizeTimer = None
root.stageSelectionsWindow = None # Used for later storage of the window.
root.itemSelectionsWindow = None
root.optionsWindow = None
root.rumbleSelectionWindow = None
root.maximized = False
colorBank = {
'freeSpaceIndicatorGood': '#c1ffb6',
'freeSpaceIndicatorBad': '#ffcccc'
#'altFontColor': '#d1cede' # A shade of silver; useful for high-contrast system themes
}
# Load the program's options and images.
onlyUpdateGameSettings = BooleanVar()
onlyUpdateGameSettings.set( False ) # Should be updated when the default settings are loaded (loadGeneralOptions())
loadGeneralOptions()
customCodeProcessor = CommandProcessor() # Must be initialized after getting the general settings, so the base include paths are set correctly
# Set the program's default font size and color
ttkStyle = ttk.Style()
globalFontSize = int( settingsFile.globalFontSize )
for font in tkFont.names():
tkFont.nametofont( font ).configure( size=globalFontSize )
# Set the program's default font color (todo: standardize label usage so this can be used. will still need other widget modifications for high contrast)
# if settings.has_option( 'General Settings', 'altFontColor' ):
# globalFontColor = settings.get( 'General Settings', 'altFontColor' )
# # Validate the user color
# try:
# root.winfo_rgb( globalFontColor ) # Returns an RGB tuple if successful
# except:
# msg( 'The alternate color, "' + globalFontColor + '", is not a valid color. The string should be written as #RRGGBB, '
# 'or a basic color such as, "blue", "teal", "orange", etc. The default font color will be used instead.' )
# globalFontColor = '#071240'
# # Set the new color for everything
# ttkStyle.configure( '.', font="TkDefaultFont", foreground=globalFontColor )
# Set some other global custom widget styling
ttkStyle.map( "TNotebook.Tab", foreground=[("selected", '#03f')], padding=[('selected', 2)] )
ttkStyle.configure( "TNotebook.Tab", foreground='#071240', padding=1 )
ttkStyle.configure( 'pendingSave.TButton', background='#aaffaa' )
ttkStyle.configure( 'red.TButton', background='#ee9999', bordercolor='#ee9999', foreground='red' )
# Root Row 1 | File Input
rootRow1 = ttk.Frame( rootFrame, padding="12 12 12 12" )
openedFilePath = StringVar()
ttk.Label( rootRow1, text="ISO / DOL:" ).pack( side='left' )
fileEntry = ttk.Entry( rootRow1, textvariable=openedFilePath, font='TkTextFont', state='disabled' )
fileEntry.pack( side='left', fill='x', expand=1, padx=12 )
fileEntry.bind( '<Return>', openFileByField )
mainOpenBtn = ttk.Button( rootRow1, text="Open", command=openFileByButton, width=14, state='disabled' )
mainOpenBtn.pack()
rootRow1.pack( fill='x', side='top' )
# Root Row 2 | Begin tabbed interface.
mainNotebook = ttk.Notebook( rootFrame )
# Tab 1 | Mods Library
modsLibraryTab = ttk.Frame( mainNotebook, takefocus=True )
mainNotebook.add( modsLibraryTab, text=' Mods Library ', padding="0 0 0 0" ) # Padding: L, T, R, B
modsLibraryTab.mainRow = ttk.Frame( modsLibraryTab )
freeSpaceUsage = IntVar()
freeSpaceUsage.set( 0 )
freeSpaceIndicator = ttk.Progressbar( modsLibraryTab.mainRow, orient='vertical', mode='determinate', variable=freeSpaceUsage, maximum=100 )
freeSpaceIndicator.pack( anchor='w', side='left', fill='y', padx=12, pady=9 )
freeSpaceIndicator.toolTip = ToolTip( freeSpaceIndicator, delay=600, text='Standard Codes Free Space', location='n', bg=colorBank['freeSpaceIndicatorGood'], follow_mouse=True, wraplength=600 )
freeGeckoSpaceUsage = IntVar()
freeGeckoSpaceUsage.set( 0 )
freeGeckoSpaceIndicator = ttk.Progressbar( modsLibraryTab.mainRow, orient='vertical', mode='determinate', variable=freeGeckoSpaceUsage, maximum=100 )
freeGeckoSpaceIndicator.pack( anchor='w', side='left', fill='y', padx=(0, 12), pady=9 )
freeGeckoSpaceIndicator.toolTip = ToolTip( freeGeckoSpaceIndicator, delay=600, text='Gecko Codes Free Space', location='n', bg=colorBank['freeSpaceIndicatorGood'], follow_mouse=True, wraplength=600 )
modsLibraryNotebook = ttk.Notebook( modsLibraryTab.mainRow ) # Code module tabs will be children of this
modsLibraryNotebook.pack( fill='both', expand=1, pady=7 )
modsLibraryNotebook.bind( '<<NotebookTabChanged>>', onTabChange )
modsLibraryNotebook.isScanning = False
modsLibraryNotebook.stopToRescan = False
modsLibraryTab.mainRow.pack( side='bottom', fill='both', expand=True )
# - Control Panel -
controlPanel = ttk.Frame( modsLibraryTab.mainRow, padding="20 8 20 20" ) # Padding: L, T, R, B
programStatus = StringVar()
programStatus.set( '' )
Label( controlPanel, textvariable=programStatus, fg='#2a2').pack( pady=5 )
ttk.Button( controlPanel, text='Open this File', command=openLibraryFile ).pack( pady=4, padx=6, ipadx=8 )
ttk.Button( controlPanel, text='Open Mods Library Folder', command=openModsLibrary ).pack( pady=4, padx=6, ipadx=8 )
ttk.Separator( controlPanel, orient='horizontal' ).pack( fill='x', padx=24, pady=7 )
saveButtonsContainer = ttk.Frame( controlPanel, padding="0 0 0 0" )
saveChangesBtn = ttk.Button( saveButtonsContainer, text='Save', command=saveCodes, state='disabled', width=12 )
saveChangesBtn.pack( side='left', padx=6 )
saveChangesAsBtn = ttk.Button( saveButtonsContainer, text='Save As...', command=saveAs, state='disabled', width=12 )
saveChangesAsBtn.pack( side='left', padx=6 )
saveButtonsContainer.pack( pady=4 )
createFileContainer = ttk.Frame( controlPanel, padding="0 0 0 0" )
ttk.Button( createFileContainer, text='Create INI', command=saveIniFile ).pack( side='left', padx=6 )
ttk.Button( createFileContainer, text='Create GCT', command=saveGctFile ).pack( side='left', padx=6 )
createFileContainer.pack( pady=4 )
ttk.Separator( controlPanel, orient='horizontal' ).pack( fill='x', padx=24, pady=7 )
restoreDolBtn = ttk.Button( controlPanel, text='Restore Original DOL', state='disabled', command=restoreOriginalDol, width=23 )
restoreDolBtn.pack( pady=4 )
importFileBtn = ttk.Button( controlPanel, text='Import into ISO', state='disabled', command=importIntoISO, width=23 )
importFileBtn.pack( pady=4 )
exportFileBtn = ttk.Button( controlPanel, text='Export DOL', state='disabled', command=exportDOL, width=23 )
exportFileBtn.pack( pady=4 )
ttk.Separator( controlPanel, orient='horizontal' ).pack( fill='x', padx=24, pady=7 )
selectBtnsContainer = ttk.Frame( controlPanel, padding="0 0 0 0" )
selectBtnsContainer.selectAllBtn = ttk.Button( selectBtnsContainer, text='Select All', width=12 )
selectBtnsContainer.deselectAllBtn = ttk.Button( selectBtnsContainer, text='Deselect All', width=12 )
selectBtnsContainer.selectAllBtn.pack( side='left', padx=6, pady=0 )
selectBtnsContainer.deselectAllBtn.pack( side='left', padx=6, pady=0 )
selectBtnsContainer.selectAllBtn.bind( '<Button-1>', selectAllMods )
selectBtnsContainer.selectAllBtn.bind( '<Shift-Button-1>', selectWholeLibrary )
selectBtnsContainer.deselectAllBtn.bind( '<Button-1>', deselectAllMods )
selectBtnsContainer.deselectAllBtn.bind( '<Shift-Button-1>', deselectWholeLibrary )
ToolTip( selectBtnsContainer.selectAllBtn, delay=600, justify='center', text='Shift-Click to select\nwhole library' )
ToolTip( selectBtnsContainer.deselectAllBtn, delay=600, justify='center', text='Shift-Click to deselect\nwhole library' )
selectBtnsContainer.pack( pady=4 )
ttk.Button( controlPanel, text=' Rescan for Mods ', command=scanModsLibrary ).pack( pady=4 )
showRegionOptionsBtn = ttk.Button( controlPanel, text=' Code-Space Options ', state='disabled', command=ShowOptionsWindow )
showRegionOptionsBtn.pack( side='bottom' )
installedModsTabLabel = StringVar()
installedModsTabLabel.set( '' )
ttk.Label( controlPanel, textvariable=installedModsTabLabel ).pack( side='bottom', pady=(0, 12) )
# Mod Library selection button
librarySelectionLabel = Label( modsLibraryTab.mainRow, image=imageBank['books'], cursor='hand2' )
librarySelectionLabel.hoverText = StringVar()
librarySelectionLabel.hoverText.set( 'Select Mods Library.\tCurrent library:\n' + getModsFolderPath() )
librarySelectionLabel.bind( '<1>', lambda event: ModsLibrarySelector(root) )
ToolTip( librarySelectionLabel, delay=600, justify='center', location='w', textvariable=librarySelectionLabel.hoverText, wraplength=600 )
# - Control Panel end -
# Tab 2 | Mod Construction (Viewing / Editing)
constructionTab = ttk.Frame( mainNotebook, takefocus=True )
mainNotebook.add( constructionTab, text=' Mod Construction ' )
constructionTab.row1 = Frame( constructionTab )
ttk.Button( constructionTab.row1, text='Add New Mod to Library', command=createNewDolMod ).pack( side='left', ipadx=10, padx=20 )
ttk.Button( constructionTab.row1, text='Open Mods Library Folder', command=openModsLibrary ).pack( side='left', ipadx=10, padx=20 )
ttk.Button( constructionTab.row1, text='ASM <-> HEX Converter', command=lambda: AsmToHexConverter() ).pack( side='left', ipadx=10, padx=20 )
ttk.Button( constructionTab.row1, text='Mod Search', command=enterSearchMode ).pack( side='left', ipadx=10, padx=20 )
constructionTab.row1.pack( padx=16, pady=7 )
constructionNotebook = ttk.Notebook( constructionTab )
constructionNotebook.pack( fill='both', expand=1 )
try:
ttk.Label( constructionNotebook, image=imageBank['Bowser2'], background='white' ).place( relx=0.5, rely=0.5, anchor='center' )
except: pass
# Tab 3 | Default Game Settings
settingsTab = ttk.Frame( mainNotebook, takefocus=True )
mainNotebook.add( settingsTab, text=' Default Game Settings ' )
def initializeCurrentGameSettingsValues():
for widgetSettingID in gameSettingsTable:
currentGameSettingsValues[widgetSettingID] = StringVar()
if widgetSettingID == 'stageToggleSetting' or widgetSettingID == 'itemToggleSetting': currentGameSettingsValues[widgetSettingID].set( 'FFFFFFFF' )
else: currentGameSettingsValues[widgetSettingID].set( '- -' )
currentGameSettingsValues = {}
initializeCurrentGameSettingsValues()
tab2Row1 = ttk.Frame( settingsTab, padding="17 25 20 0" ) # Padding: L, T, R, B
settingsGroup1 = ttk.Labelframe( tab2Row1, text=' Custom Rules ', labelanchor='n', padding=5 )
Label( settingsGroup1, text='Game Mode:' ).grid( row=0, column=0 )
gameModeControl = OptionMenu(settingsGroup1, currentGameSettingsValues['gameModeSetting'], 'Time', 'Stock', 'Coin', 'Bonus',
command=lambda(value): updateDefaultGameSettingWidget( 'gameModeControl', value ))
gameModeControl.grid(row=0, column=1)
Label(settingsGroup1, text='Time Limit:').grid(row=1, column=0)
gameTimeControl = Spinbox(settingsGroup1, from_=0, to=99, wrap=True, width=3, textvariable=currentGameSettingsValues['gameTimeSetting'],
command=lambda: updateDefaultGameSettingWidget( 'gameTimeControl', currentGameSettingsValues['gameTimeSetting'].get() ))
gameTimeControl.grid(row=1, column=1)
Label(settingsGroup1, text='Stock Count:').grid(row=2, column=0)
stockCountControl = Spinbox(settingsGroup1, from_=0, to=99, wrap=True, width=3, textvariable=currentGameSettingsValues['stockCountSetting'],
command=lambda: updateDefaultGameSettingWidget( 'stockCountControl', currentGameSettingsValues['stockCountSetting'].get() ))
stockCountControl.grid(row=2, column=1)
Label(settingsGroup1, text='Handicap:').grid(row=3, column=0)
handicapControl = OptionMenu(settingsGroup1, currentGameSettingsValues['handicapSetting'], 'Off', 'Auto', 'On',
command=lambda(value): updateDefaultGameSettingWidget( 'handicapControl', value ))
handicapControl.grid(row=3, column=1)
Label(settingsGroup1, text='Damage Ratio:').grid(row=4, column=0)
damageRatioControl = Spinbox(settingsGroup1, from_=.5, to=2, increment=.1, wrap=True, width=3, textvariable=currentGameSettingsValues['damageRatioSetting'],
command=lambda: updateDefaultGameSettingWidget( 'damageRatioControl', currentGameSettingsValues['damageRatioSetting'].get() ))
damageRatioControl.grid(row=4, column=1)
Label(settingsGroup1, text='Stage Selection:').grid(row=5, column=0)
stageSelectionControl = OptionMenu(settingsGroup1, currentGameSettingsValues['stageSelectionSetting'], 'On', 'Random', 'Ordered', 'Turns', 'Loser',
command=lambda(value): updateDefaultGameSettingWidget( 'stageSelectionControl', value ))
stageSelectionControl.grid(row=5, column=1)
settingsGroup1.grid_columnconfigure(1, minsize=100)
for widget in settingsGroup1.winfo_children():
widget.grid_configure(padx=5, pady=5)
settingsGroup1.grid( column=0, row=0 )
# End of Group 1 / Start of Group 2
settingsGroup2 = Frame(tab2Row1)
additionalRules = ttk.Labelframe(settingsGroup2, text=' Additional Rules ', labelanchor='n', padding=5)
Label(additionalRules, text='Stock Time Limit:').grid(row=0, column=0)
stockTimeControl = Spinbox(additionalRules, from_=0, to=99, wrap=True, width=3, textvariable=currentGameSettingsValues['stockTimeSetting'],
command=lambda: updateDefaultGameSettingWidget( 'stockTimeControl', currentGameSettingsValues['stockTimeSetting'].get() ))
stockTimeControl.grid(row=0, column=1)
Label(additionalRules, text='Friendly Fire:').grid(row=1, column=0)
friendlyFireControl = OptionMenu(additionalRules, currentGameSettingsValues['friendlyFireSetting'], 'Off', 'On',
command=lambda(value): updateDefaultGameSettingWidget( 'friendlyFireControl', value ))
friendlyFireControl.grid(row=1, column=1)
Label(additionalRules, text='Pause:').grid(row=2, column=0)
pauseControl = OptionMenu(additionalRules, currentGameSettingsValues['pauseSetting'], 'Off', 'On',
command=lambda(value): updateDefaultGameSettingWidget( 'pauseControl', value ))
pauseControl.grid(row=2, column=1)
Label(additionalRules, text='Score Display:').grid(row=3, column=0)
scoreDisplayControl = OptionMenu(additionalRules, currentGameSettingsValues['scoreDisplaySetting'], 'Off', 'On',
command=lambda(value): updateDefaultGameSettingWidget( 'scoreDisplayControl', value ))
scoreDisplayControl.grid(row=3, column=1)
Label(additionalRules, text='Self-Destructs:').grid(row=4, column=0)
selfDestructsControl = OptionMenu(additionalRules, currentGameSettingsValues['selfDestructsSetting'], '0', '-1', '-2',
command=lambda(value): updateDefaultGameSettingWidget( 'selfDestructsControl', value ))
selfDestructsControl.grid(row=4, column=1)
additionalRules.grid_columnconfigure(1, minsize=80)
for widget in additionalRules.winfo_children():
widget.grid_configure(padx=5, pady=5)
additionalRules.pack()
settingsGroup2.grid( column=1, row=0, pady=15 )
# End of Group 2 / Start of Group 3
tab2Row1.buttonsGroup = Frame(tab2Row1)
programStatusLabel = Label( tab2Row1.buttonsGroup, textvariable=programStatus, fg='#2a2' )
programStatusLabel.pack( pady=8 )
ttk.Button( tab2Row1.buttonsGroup, text=' Set to vMelee \nGame Defaults', command=updateMeleeToVanillaGameSettings, width=20 ).pack( pady=7 )
ttk.Button( tab2Row1.buttonsGroup, text=' Set to standard \nTournament Defaults', command=updateMeleeToTournamentGameSettings, width=20 ).pack()
ttk.Separator( tab2Row1.buttonsGroup, orient='horizontal' ).pack( fill='x', padx=0, pady=14 )
saveButtonsContainer2 = ttk.Frame( tab2Row1.buttonsGroup, padding="0 0 0 0" )
saveChangesBtn2 = ttk.Button( saveButtonsContainer2, text='Save', command=saveCodes, state='disabled', width=12 )
saveChangesBtn2.pack( side='left', padx=12 )
saveChangesAsBtn2 = ttk.Button( saveButtonsContainer2, text='Save As...', command=saveAs, state='disabled', width=12 )
saveChangesAsBtn2.pack( side='left', padx=12 )
saveButtonsContainer2.pack()
ttk.Checkbutton( tab2Row1.buttonsGroup, onvalue=True, offvalue=False, variable=onlyUpdateGameSettings,
text=' Update Default\n Game Settings Only', command=onUpdateDefaultGameSettingsOnlyToggle ).pack( pady=15 )
tab2Row1.buttonsGroup.grid( column=2, row=0 )
tab2Row1.columnconfigure( 'all', weight=1 )
tab2Row1.pack( anchor='n', fill='x' )
# The widget below exists so that this setting may be controlled/processed like all the others. It is then "mimicked" in the item selections window.
itemFrequencyMimic = StringVar()
itemFrequencyMimic.set( '- -' )
itemFrequencyControl = OptionMenu( tab2Row1, currentGameSettingsValues['itemFrequencySetting'],
'None', 'Very Low', 'Low', 'Medium', 'High', 'Very High', 'Very Very High', 'Extremely High' )
# End of Tab 2, Row 1 / Start of Row 2
tab2Row2 = Frame(settingsTab)
stageToggleControl = ttk.Button( tab2Row2, text='Random Stage Selection', command=openStageSelectionWindow, width=30 )
stageToggleControl.pack( side='left', padx=18, pady=0 )
itemToggleControl = ttk.Button( tab2Row2, text='Item Switch', command=openItemSelectionsWindow, width=30 )
itemToggleControl.pack( side='left', padx=18, pady=0 )
# gameSettingsDefaults = {} # For a more concise solution, code for the other settings could eventually be switched to use this, or combined with the gameSettingsTable
rumbleToggleControl = ttk.Button( tab2Row2, text='Rumble Settings', command=rumbleSelectWindow, width=30 )
rumbleToggleControl.pack( side='left', padx=18, pady=0 )
tab2Row2.pack(anchor='n', pady=12)
# End of Tab 2, Row 2 / Start of Row 3
try: # Place the bobomb images
tab2Row3 = Frame(settingsTab)
Label( tab2Row3, image=imageBank['bobombsSitting'] ).place( relx=.333, y=50, anchor='n' )
Label( tab2Row3, image=imageBank['bobombWalking'] ).place( relx=.667, y=35, anchor='n' )
tab2Row3.pack( fill='both', expand=True )
except Exception as err:
print 'Wait! Where da bob-ombs!?'
print err
# Tab 4 | Summary
summaryTab = ttk.Frame( mainNotebook, padding="15 0 15 12", takefocus=True )
mainNotebook.add( summaryTab, text=' Summary ' )
summaryTabFirstRow = ttk.Frame( summaryTab )
totalModsInLibraryLabel = StringVar()
totalModsInstalledLabel = StringVar()
totalModsInLibraryLabel.set( 'Total Mods in Library: 0' )
totalModsInstalledLabel.set( 'Mods Installed:' )
Label( summaryTabFirstRow, textvariable=totalModsInLibraryLabel ).grid( column=0, row=0, sticky='w' )
Label( summaryTabFirstRow, textvariable=totalModsInstalledLabel ).grid( column=0, row=1, sticky='w' )
summaryTabFirstRow.rightColumnFrame = ttk.Frame( summaryTabFirstRow )
ttk.Button( summaryTabFirstRow.rightColumnFrame, text='Create Installed Mods List', command=showInstalledModsList, state='disabled' ).pack( side='left', ipadx=12, padx=5 )
ttk.Button( summaryTabFirstRow.rightColumnFrame, text='Install Mods List', command=installModsList, state='disabled' ).pack( side='left', ipadx=12, padx=5 )
ttk.Button( summaryTabFirstRow.rightColumnFrame, text='View DOL Hex', command=viewDolHex, state='disabled' ).pack( side='left', ipadx=12, padx=5 ) #.pack( side='right', , padx=75 )
summaryTabFirstRow.rightColumnFrame.grid( column=1, row=0, rowspan=2, sticky='e' )
summaryTabFirstRow.pack( fill='x', padx=65, pady=9 )
summaryTabFirstRow.columnconfigure( 0, weight=1 )
summaryTabFirstRow.columnconfigure( 1, weight=1 )
# Create the Mods Summary table
modsSummaryRow = ttk.Frame( summaryTab, padding='10 0 10 0' )
modsSummaryScroller = Scrollbar( modsSummaryRow )
modsSummaryTree = ttk.Treeview( modsSummaryRow, columns=('type', 'location', 'bytesChanged', 'spaceUsed'), selectmode='none',
yscrollcommand=modsSummaryScroller.set ) # First icon column, #0, included by default
if settings.getboolean( 'General Settings', 'sortSummaryByOffset' ):
modsSummaryTree.heading( '#0', anchor='center', text='Mod Name / Component Owner' )
else: modsSummaryTree.heading( '#0', anchor='center', text='Mod Name' )
modsSummaryTree.column( '#0', anchor='center', minwidth=280, stretch=1 )
modsSummaryTree.heading( 'type', anchor='center', text='Type' )
modsSummaryTree.column( 'type', anchor='center', minwidth=60, stretch=0, width=60 )
def toggleSummaryLocationSortMethod():
sortModsSummaryByOffset( not settings.getboolean( 'General Settings', 'sortSummaryByOffset' ) )
# Determine the starting text for the location (DOL/RAM Offset) column
if settings.get( 'General Settings', 'summaryOffsetView' ).lstrip()[0].lower().startswith( 'd' ): # 'd' for dolOffset
modsSummaryTree.heading( 'location', anchor='center', text='DOL Offset', command=toggleSummaryLocationSortMethod )
else: modsSummaryTree.heading( 'location', anchor='center', text='RAM Address', command=toggleSummaryLocationSortMethod )
modsSummaryTree.column( 'location', anchor='center', minwidth=110, stretch=0, width=110 )
modsSummaryTree.heading( 'bytesChanged', anchor='center', text='Bytes Changed' )
modsSummaryTree.column( 'bytesChanged', anchor='center', minwidth=110, stretch=0, width=110 )
modsSummaryTree.heading( 'spaceUsed', anchor='center', text='Free Space Used' )
modsSummaryTree.column( 'spaceUsed', anchor='center', minwidth=120, stretch=0, width=110 )
modsSummaryTree.pack( side='left', fill='both', expand=True )
modsSummaryScroller.config( command=modsSummaryTree.yview )
modsSummaryScroller.pack( side='left', fill='y' )
modsSummaryRow.pack( fill='both', expand=True )
modsSummaryTree.originalSortOrder = []
requiredStandaloneFunctionsLabel = StringVar()
requiredStandaloneFunctionsLabel.set( 'Required Standalone Functions for the Installed Mods:' )
Label( summaryTab, textvariable=requiredStandaloneFunctionsLabel ).pack( padx=135, pady=(7, 7), anchor='w' )
standalonesSummaryRow = ttk.Frame( summaryTab, padding='80 0 80 0' )
standalonesSummaryScroller = Scrollbar( standalonesSummaryRow )
standalonesSummaryTree = ttk.Treeview( standalonesSummaryRow, selectmode='none', columns=('refCount', 'location'), yscrollcommand=standalonesSummaryScroller.set )
standalonesSummaryTree.heading( '#0', anchor='center', text='Function Name' )
standalonesSummaryTree.column( '#0', anchor='center', minwidth=290, stretch=1 )
standalonesSummaryTree.heading( 'refCount', anchor='center', text='Reference Count' )
standalonesSummaryTree.column( 'refCount', anchor='center', minwidth=110, stretch=1 )
standalonesSummaryTree.heading( 'location', anchor='center', text='Installation location (DOL Offset)' )
standalonesSummaryTree.column( 'location', anchor='center', minwidth=220, stretch=1 )
standalonesSummaryTree.pack( side='left', fill='both', expand=True )
standalonesSummaryScroller.config( command=standalonesSummaryTree.yview )
standalonesSummaryScroller.pack( side='left', fill='y' )
standalonesSummaryRow.pack( fill='x', expand=True )
modsSummaryContextMenu = summaryContextMenu( root )
# Define some colors and an event handler for the above treeviews
modsSummaryTree.tag_configure( 'selected', background='#b9c3ff' ) # more blue; takes priority over lower config
modsSummaryTree.tag_configure( 'mainEntry', background='#d9e3ff' ) # light blue
modsSummaryTree.bind( '<1>', updateSummaryTreeSelection )
modsSummaryTree.bind( '<3>', onModSummaryTreeRightClick )
standalonesSummaryTree.tag_configure( 'selected', background='#b9c3ff' ) # more blue; takes priority over lower config
standalonesSummaryTree.tag_configure( 'mainEntry', background='#d9e3ff' ) # light blue
standalonesSummaryTree.bind( '<1>', updateSummaryTreeSelection )
standalonesSummaryTree.bind( '<3>', onStandalonesSummaryTreeRightClick )
# Tab 5 | Tools
toolsTab = ttk.Frame( mainNotebook, padding="15 9 15 9", takefocus=True )
mainNotebook.add( toolsTab, text=' Tools ' )
toolsTab.row1 = Frame( toolsTab )
numberConverter = ttk.Labelframe(toolsTab.row1, text=' Number Conversion ', labelanchor='n', padding="15 12 15 10")
Label(numberConverter, text='Decimal:').grid(row=0,column=0)
decimalNum = StringVar()
ttk.Entry(numberConverter, width=19, textvariable=decimalNum, font='TkTextFont').grid(row=0,column=1)
Label(numberConverter, text='Hex (unsigned):').grid(row=1,column=0)
hexNum = StringVar()
ttk.Entry(numberConverter, width=19, textvariable=hexNum, font='TkTextFont').grid(row=1,column=1)
Label(numberConverter, text='32-bit float:\n(signed)').grid(row=2,column=0)
floatNum = StringVar()
ttk.Entry(numberConverter, width=19, textvariable=floatNum, font='TkTextFont').grid(row=2,column=1)
Label(numberConverter, text='64-bit float:\n(signed double)').grid(row=3,column=0)
doubleNum = StringVar()
ttk.Entry(numberConverter, width=19, textvariable=doubleNum, font='TkTextFont').grid(row=3,column=1)
numberConverterChildren = numberConverter.winfo_children()
numberConverterChildren[1].bind("<KeyRelease>", lambda event: convertDec( event, decimalNum ))
numberConverterChildren[3].bind("<KeyRelease>", lambda event: convertHex( event, hexNum ))
numberConverterChildren[5].bind("<KeyRelease>", lambda event: convertFloat( event, floatNum ))
numberConverterChildren[7].bind("<KeyRelease>", lambda event: convertDouble( event, doubleNum ))
numberConverter.grid(row=0, column=0, sticky='e' )
offsetConverter = ttk.Labelframe( toolsTab.row1, text=' RAM Address Conversion ', labelanchor='n', padding="15 12 15 0" ) # Padding: L, T, R, B
def populateRamAddressConverter():
ttk.Label( offsetConverter, text='RAM Address:' ).grid( row=0, column=0 )
entry = ttk.Entry( offsetConverter, width=12, font='TkTextFont' )
entry.grid( row=0, column=1 )
ttk.Label( offsetConverter, text='DOL Offsets: ' ).grid( row=1, column=0, columnspan=2 )
genGlobals['originalDolRevisions'] = listValidOriginalDols()
if len( genGlobals['originalDolRevisions'] ) < 6: pady = 1
else: pady = 0
ramToDolNotesLabel = ttk.Label( offsetConverter, textvariable=ramToDolNotes, justify='center' )
# Add an entry field for conversion input for each DOL found in the Original DOLs folder
if genGlobals['originalDolRevisions']:
entry.bind( "<KeyRelease>", convertRamOffset )
for i, dolRevision in enumerate( genGlobals['originalDolRevisions'] ):
ttk.Label( offsetConverter, text=dolRevision ).grid( row=i+2, column=0 )
entry = ttk.Entry( offsetConverter, width=13, font='TkTextFont' )
entry.grid( row=i+2, column=1, pady=pady )
entry.bind( "<KeyRelease>", convertDolOffset )
ramToDolNotesLabel.grid( row=len(genGlobals['originalDolRevisions'])+3, column=0, columnspan=2 )
else:
entry['state'] = 'disabled'
ramToDolNotes.set( 'Unavailable; no\noriginal DOLs found.' )
ramToDolNotesLabel.grid( row=3, column=0, columnspan=2, pady=(8, 14) )
ramToDolNotes = StringVar()
populateRamAddressConverter()
offsetConverter.grid( row=0, column=1 )
codeOffsetConverter = ttk.Labelframe( toolsTab.row1, text=' Code Offset Conversion ', labelanchor='n', padding=15 )
def populateCodeOffsetConverter():
if len( genGlobals['originalDolRevisions'] ) < 6: pady = 1
else: pady = 0
# Add an entry field for conversion input for each DOL found in the Original DOLs folder
if genGlobals['originalDolRevisions']:
for i, dolRevision in enumerate( genGlobals['originalDolRevisions'] ):
ttk.Label( codeOffsetConverter, text=dolRevision ).grid( row=i, column=0 )
entry = ttk.Entry( codeOffsetConverter, width=13, font='TkTextFont' )
entry.grid( row=i, column=1, pady=pady )
entry.revision = dolRevision
entry.bind( "<Return>", convertCodeOffset )
Label( codeOffsetConverter, text="(Press 'Enter' to search)" ).grid( row=i+1, column=0, columnspan=2 )
else:
Label( codeOffsetConverter, text='Unavailable; no\noriginal DOLs found.' ).grid( row=0, column=0, columnspan=2 )
populateCodeOffsetConverter()
codeOffsetConverter.grid( row=0, column=2, sticky='w' )
toolsTab.row1.columnconfigure( 0, weight=1 )
toolsTab.row1.columnconfigure( 1, weight=1 )
toolsTab.row1.columnconfigure( 2, weight=1 )
toolsTab.row1.rowconfigure( 0, weight=1 )
toolsTab.row1.pack( fill='x', pady=(15, 0) )
toolsTab.row3 = Frame( toolsTab )
Label( toolsTab.row3, text='\t\tASCII Text to Hex:', foreground='#03f' ).pack( anchor='nw', pady=(0, 3) )
text2hex = ScrolledText( toolsTab.row3, height=3, borderwidth=2, relief='groove' )
text2hex.pack( fill='both', expand=True, padx=3, pady=0 )
hex2text = ScrolledText( toolsTab.row3, height=3, borderwidth=2, relief='groove' )
hex2text.pack( fill='both', expand=True, padx=3, pady=5 )
text2hex.bind( '<KeyRelease>', text2hexConv )
hex2text.bind( '<KeyRelease>', text2hexConv )
Label( toolsTab.row3, text="\t\tMenu Text to Hex:", foreground='#03f' ).pack( anchor='nw', pady=3 )
menuText2hex = ScrolledText( toolsTab.row3, height=3, borderwidth=2, relief='groove' )
menuText2hex.pack( fill='both', expand=True, padx=3, pady=0 )
hex2menuText = ScrolledText( toolsTab.row3, height=3, borderwidth=2, relief='groove' )
hex2menuText.pack( fill='both', expand=True, padx=3, pady=5 )
menuText2hex.bind( '<KeyRelease>', menuText2hexConv )
hex2menuText.bind( '<KeyRelease>', menuText2hexConv )
toolsTab.row3.pack( fill='both', expand=1, padx=6 )
# End of tabbed interface.
mainNotebook.pack( fill='both', expand=1 )
mainNotebook.bind( '<<NotebookTabChanged>>', onTabChange )
rootFrame.pack( fill='both', expand=1 )
def dndHandler( event ):
root.deiconify() # Brings the main program window to the front (application z-order).
readRecievedFile( event.data )
playSound( 'menuChange' )
dnd.bindtarget( rootFrame, dndHandler, 'text/uri-list' )
rootFrame.bind( "<Configure>", onWindowResize )
discVersion = StringVar()
discVersion.set( '' )
Label( mainNotebook, textvariable=discVersion ).place( anchor='center', x=610, y=14 )
dolVersion = StringVar()
dolVersion.set( 'Nothing Loaded' )
Label( mainNotebook, textvariable=dolVersion ).place( anchor='center', x=694, y=14 )
# GUI Rendering complete. Initialize program bindings and variables.
root.deiconify()
# Scrollwheel and 'CTRL-A' support.
root.unbind_class( 'Text', '<MouseWheel>' ) # The global onMouseWheelScroll below will handle this too. Prevents problems when scrolling on top of other widgets
root.bind_all( "<MouseWheel>", onMouseWheelScroll )
root.bind_all( "<Control-s>", saveCurrentWork )
root.bind( "<Control-f>", enterSearchMode )
root.bind( "<Escape>", exitSearchMode )
root.bind_class( "Text", "<Control-a>", selectAll )
root.bind_class( "TEntry", "<Control-a>", selectAll )
# Initialize the dol and gecko information containers (essentially containers for globals and specific file-reading functions)
dol = dolInitializer()
gecko = geckoInitializer()
originalDols = {} # Container for the original DOL files, which are used for code references
# Set up logging output (comment this out if compiling while preserving the console)
if sys.argv[0][-4:] == '.exe': # If this code has been compiled....
if cmdArgs.debugMode:
sys.stdout = sys.stderr = open( 'Debug Log.txt', 'a' )
else:
# Nullify stdout and stderr, to prevent any odd errors
class NullWriter( object ):
def write( self, value ): pass
sys.stdout = sys.stderr = NullWriter()
# Initialize the program's audio
audioInitialized = False
try:
for fileNameWithExt in os.listdir( scriptHomeFolder + '\\sfx' ): # listdir gets folders too, but there aren't any expected here.
if fileNameWithExt.endswith( '.wav' ):
fileName = os.path.splitext( fileNameWithExt )[0] # No file extension
soundBank[fileName] = scriptHomeFolder + '\\sfx\\' + fileNameWithExt
audioInitialized = True
except Exception as e:
print 'Problem initializing audio; audio drivers might not be installed or no sound device is available.'
print str(e)
# Either disable the Mods Library tab or populate the program with the available mods.
if onlyUpdateGameSettings.get():
mainNotebook.tab( 0, state='disabled' )
else:
scanModsLibrary( playAudio=False )
# Process any files drag-and-dropped onto the program icon (or provided via command line)
if cmdArgs.inputFile:
readRecievedFile( sys.argv[-1] )
# Enable the GUI's file input
fileEntry['state'] = 'normal'
mainOpenBtn['state'] = 'normal'
# Start the GUI's mainloop
fileEntry.focus()
playSound( 'menuChange' )
root.mainloop()
|
main.py
|
# -*- coding:utf-8 -*-
"""
Xi Gua video Million Heroes
"""
import logging.handlers
import multiprocessing
import os
import threading
import time
from argparse import ArgumentParser
from datetime import datetime
from functools import partial
from multiprocessing import Event, Pipe, Queue
from config import api_key, enable_chrome, use_monitor, image_compress_level, crop_areas
from config import api_version
from config import app_id
from config import app_key
from config import app_secret
from config import data_directory
from config import prefer
from core.android import save_screen, check_screenshot, get_adb_tool, analyze_current_screen_text
from core.check_words import parse_false
from core.chrome_search import run_browser
from core.crawler.baiduzhidao import baidu_count_daemon
from core.crawler.crawl import jieba_initialize, crawler_daemon
from core.ocr.baiduocr import get_text_from_image as bai_get_text
from core.ocr.spaceocr import get_text_from_image as ocrspace_get_text
from utils import stdout_template
from utils.backup import save_question_answers_to_file
from utils.process_stdout import ProcessStdout
logger = logging.getLogger("assistant")
handler = logging.handlers.WatchedFileHandler("assistant.log")
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
## jieba init
jieba_initialize()
if prefer[0] == "baidu":
get_text_from_image = partial(bai_get_text,
app_id=app_id,
app_key=app_key,
app_secret=app_secret,
api_version=api_version,
timeout=5)
elif prefer[0] == "ocrspace":
get_text_from_image = partial(ocrspace_get_text, api_key=api_key)
def parse_args():
parser = ArgumentParser(description="Million Hero Assistant")
parser.add_argument(
"-t", "--timeout",
type=int,
default=5,
help="default http request timeout"
)
return parser.parse_args()
def parse_question_and_answer(text_list):
question = ""
start = 0
for i, keyword in enumerate(text_list):
question += keyword
if "?" in keyword:
start = i + 1
break
real_question = question.split(".")[-1]
for char, repl in [("以下", ""), ("下列", "")]:
real_question = real_question.replace(char, repl, 1)
question, true_flag = parse_false(real_question)
return true_flag, real_question, question, text_list[start:]
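# Worked example (hypothetical OCR output): with
#   text_list = ['1.以下哪项是水果?', '苹果', '汽车', '轮船']
# the loop stops at the line containing '?', so real_question becomes '哪项是水果?'
# ("which of these is a fruit?") once the leading '1.' label and the '以下' prefix are
# stripped, and the returned options are ['苹果', '汽车', '轮船'].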
def pre_process_question(keyword):
"""
strip extraneous characters and the trailing '?'
:param keyword: raw question text
:return:
"""
now = datetime.today()
for char, repl in [("“", ""), ("”", ""), ("?", ""), ("《", ""), ("》", ""), ("我国", "中国"),
("今天", "{0}年{1}月{2}日".format(now.year, now.month, now.day)),
("今年", "{0}年".format(now.year)),
("这个月", "{0}年{1}月".format(now.year, now.month))]:
keyword = keyword.replace(char, repl)
keyword = keyword.split(r".")[-1]
keywords = keyword.split(" ")
keyword = "".join([e.strip("\r\n") for e in keywords if e])
return keyword
def prompt_message():
global game_type
print("""
请选择答题节目:
1. 百万英雄
2. 冲顶大会
3. 芝士超人
4. UC答题
5. 自适应
""")
game_type = input("输入节目序号: ")
if game_type == "1":
game_type = '百万英雄'
elif game_type == "2":
game_type = '冲顶大会'
elif game_type == "3":
game_type = "芝士超人"
elif game_type == "4":
game_type = "UC答题"
elif game_type == "5":
game_type = "自适应"
else:
game_type = '自适应'
def main():
args = parse_args()
timeout = args.timeout
adb_bin = get_adb_tool()
if use_monitor:
os.system("{0} connect 127.0.0.1:62001".format(adb_bin))
check_screenshot(filename="screenshot.png", directory=data_directory)
std_pipe = ProcessStdout()
## spawn the Baidu result-count worker process
baidu_queue = Queue(5)
baidu_search_job = multiprocessing.Process(target=baidu_count_daemon,
args=(baidu_queue, std_pipe.queue, timeout))
baidu_search_job.daemon = True
baidu_search_job.start()
## spawn the knowledge crawler process
knowledge_queue = Queue(5)
knowledge_craw_job = multiprocessing.Process(target=crawler_daemon,
args=(knowledge_queue, std_pipe.queue))
knowledge_craw_job.daemon = True
knowledge_craw_job.start()
## output threading
output_job = threading.Thread(target=std_pipe.run_forever)
output_job.daemon = True
output_job.start()
if enable_chrome:
closer = Event()
noticer = Event()
noticer.clear()
reader, writer = Pipe()
browser_daemon = multiprocessing.Process(
target=run_browser, args=(closer, noticer, reader,))
browser_daemon.daemon = True
browser_daemon.start()
def __inner_job():
start = time.time()
image_binary = analyze_current_screen_text(
directory=data_directory,
compress_level=image_compress_level[0],
crop_area=crop_areas[game_type]
)
if not image_binary:
print("do not detect question and answers")
return
keywords = get_text_from_image(
image_data=image_binary,
timeout=timeout
)
if not keywords:
print("text not recognize")
return
true_flag, real_question, question, answers = parse_question_and_answer(keywords)
### parse the answers (strip leading labels such as 'A:' and '1.')
answers = map(lambda a: a.rsplit(":")[-1], answers)
answers = list(map(lambda a: a.rsplit(".")[-1], answers))
std_pipe.write(stdout_template.QUESTION_TPL.format(real_question, "\n".join(answers)))
# notify the Baidu and crawler workers
baidu_queue.put((
question, answers, true_flag
))
knowledge_queue.put(question)
if enable_chrome:
writer.send(question)
noticer.set()
end = time.time()
std_pipe.write(stdout_template.TIME_CONSUME_TPL.format(end - start))
save_screen(directory=data_directory)
save_question_answers_to_file(real_question, answers, directory=data_directory)
prompt_message()
while True:
enter = input("按Enter键开始,切换游戏请输入s,按ESC键退出...\n")
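# (Prompt above: "Press Enter to start, type 's' to switch shows, press ESC to exit".)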
if enter == chr(27):
break
if enter == 's':
prompt_message()
try:
__inner_job()
except Exception as e:
logger.error(str(e), exc_info=True)
print("欢迎下次使用")
if enable_chrome:
reader.close()
writer.close()
closer.set()
time.sleep(3)
if __name__ == "__main__":
multiprocessing.freeze_support()
main()
|
pose_camera.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
from functools import partial
import re
import time
import os
import threading
import copy
from azure.iot.device import IoTHubDeviceClient
from colorama import Fore
from colorama import Style
import numpy as np
from PIL import Image
import svgwrite
import gstreamer
from pose_engine import PoseEngine
CONNECTION_STRING = "HostName=ProjectEagleIoTHubsnqdqexxbvhve.azure-devices.net;DeviceId=JetsonPoC;SharedAccessKey=wmDlJybyGEwkEpjCKCBQjg4StBAgZWBEQFbuqMnWASU="
EDGES = (
('nose', 'left eye'),
('nose', 'right eye'),
('nose', 'left ear'),
('nose', 'right ear'),
('left ear', 'left eye'),
('right ear', 'right eye'),
('left eye', 'right eye'),
('left shoulder', 'right shoulder'),
('left shoulder', 'left elbow'),
('left shoulder', 'left hip'),
('right shoulder', 'right elbow'),
('right shoulder', 'right hip'),
('left elbow', 'left wrist'),
('right elbow', 'right wrist'),
('left hip', 'right hip'),
('left hip', 'left knee'),
('right hip', 'right knee'),
('left knee', 'left ankle'),
('right knee', 'right ankle'),
)
def iothub_client_init():
# Create an IoT Hub Client
client = IoTHubDeviceClient.create_from_connection_string(CONNECTION_STRING)
return client
def iothub_client_send_telemetry(message):
try:
client = iothub_client_init()
client.send_message(message)
except Exception as error:
print("IoT Hub error:", error)
def shadow_text(dwg, x, y, text, font_size=16):
dwg.add(dwg.text(text, insert=(x + 1, y + 1), fill='black',
font_size=font_size, style='font-family:sans-serif'))
dwg.add(dwg.text(text, insert=(x, y), fill='white',
font_size=font_size, style='font-family:sans-serif'))
def draw_pose(dwg, pose, src_size, inference_box, color='yellow', threshold=0.2):
box_x, box_y, box_w, box_h = inference_box
scale_x, scale_y = src_size[0] / box_w, src_size[1] / box_h
xys = {}
for label, keypoint in pose.keypoints.items():
if keypoint.score < threshold: continue
# Offset and scale to source coordinate space.
kp_y = int((keypoint.yx[0] - box_y) * scale_y)
kp_x = int((keypoint.yx[1] - box_x) * scale_x)
xys[label] = (kp_x, kp_y)
dwg.add(dwg.circle(center=(int(kp_x), int(kp_y)), r=5,
fill='cyan', fill_opacity=keypoint.score, stroke=color))
for a, b in EDGES:
if a not in xys or b not in xys: continue
ax, ay = xys[a]
bx, by = xys[b]
dwg.add(dwg.line(start=(ax, ay), end=(bx, by), stroke=color, stroke_width=2))
def avg_fps_counter(window_size):
window = collections.deque(maxlen=window_size)
prev = time.monotonic()
yield 0.0 # First fps value.
while True:
curr = time.monotonic()
window.append(curr - prev)
prev = curr
yield len(window) / sum(window)
def run(inf_callback, render_callback):
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--mirror', help='flip video horizontally', action='store_true')
parser.add_argument('--model', help='.tflite model path.', required=False)
parser.add_argument('--res', help='Resolution', default='640x480',
choices=['480x360', '640x480', '1280x720'])
parser.add_argument('--videosrc', help='Which video source to use', default='/dev/video0')
parser.add_argument('--h264', help='Use video/x-h264 input', action='store_true')
parser.add_argument('--jpeg', help='Use image/jpeg input', action='store_true')
args = parser.parse_args()
default_model = 'models/mobilenet/posenet_mobilenet_v1_075_%d_%d_quant_decoder_edgetpu.tflite'
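# The two %d placeholders are the PoseNet model's input height and width; each display
# resolution below is paired with the pretrained input size closest to it.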
if args.res == '480x360':
src_size = (640, 480)
appsink_size = (480, 360)
model = args.model or default_model % (353, 481)
elif args.res == '640x480':
src_size = (640, 480)
appsink_size = (640, 480)
model = args.model or default_model % (481, 641)
elif args.res == '1280x720':
src_size = (1280, 720)
appsink_size = (1280, 720)
model = args.model or default_model % (721, 1281)
print('Loading model: ', model)
engine = PoseEngine(model)
input_shape = engine.get_input_tensor_shape()
inference_size = (input_shape[2], input_shape[1])
gstreamer.run_pipeline(partial(inf_callback, engine), partial(render_callback, engine),
src_size, inference_size,
mirror=args.mirror,
videosrc=args.videosrc,
h264=args.h264,
jpeg=args.jpeg
)
def main():
n = 0
sum_process_time = 0
sum_inference_time = 0
ctr = 0
fps_counter = avg_fps_counter(30)
previous_pose = None
def run_inference(engine, input_tensor):
return engine.run_inference(input_tensor)
def render_overlay(engine, output, src_size, inference_box):
nonlocal n, sum_process_time, sum_inference_time, fps_counter, previous_pose
svg_canvas = svgwrite.Drawing('', size=src_size)
start_time = time.monotonic()
outputs, inference_time = engine.ParseOutput(output)
end_time = time.monotonic()
n += 1
sum_process_time += 1000 * (end_time - start_time)
sum_inference_time += inference_time
avg_inference_time = sum_inference_time / n
text_line = 'PoseNet: %.1fms (%.2f fps) TrueFPS: %.2f Nposes %d' % (
avg_inference_time, 1000 / avg_inference_time, next(fps_counter), len(outputs)
)
shadow_text(svg_canvas, 10, 20, text_line)
for pose in outputs:
if pose.score < 0.4: continue
message = "{\"score\":" + str(pose.score) + ","
for label, keypoint in pose.keypoints.items():
message += "\"" + label.replace(" ", "-") + "-x\": " + str(keypoint.yx[1]) + ",\"" + label.replace(" ", "-") + "-y\": " + str(keypoint.yx[0]) + ",\"" + label.replace(" ", "-") + "-score\": " + str(keypoint.score) + ","
if previous_pose is not None and (previous_pose.keypoints["nose"].yx[0] - pose.keypoints["nose"].yx[0]) < -20:
message += "\"fall\": 1"
print(f"{Fore.RED}FALL{Style.RESET_ALL}")
else:
message += "\"fall\": 0"
message += "}"
if previous_pose is not None:
# print(f'Previous nose: {str(previous_pose.keypoints["nose"].yx[0])}')
print(f'Difference: {str(previous_pose.keypoints["nose"].yx[0] - pose.keypoints["nose"].yx[0])}')
try:
# Send the telemetry to IoT Hub. The threaded variant below is left commented out, so
# this call currently blocks the pose-estimation loop until the message is sent.
iothub_client_send_telemetry(message)
#hubThread = threading.Thread(target=iothub_client_send_telemetry, args=(message,))
#hubThread.start()
except Exception as error:
print("IoT Hub error:", error)
print(message)
draw_pose(svg_canvas, pose, src_size, inference_box)
previous_pose = copy.deepcopy(pose)
time.sleep(.25)
return (svg_canvas.tostring(), False)
run(run_inference, render_overlay)
if __name__ == '__main__':
main()
|
Bullet.py
|
from tkinter import Label
import threading
class Bullet(Label):
def __init__(self, x, y, space):
self.space = space
self.bullet_timer = 0.01
self.bullet_indicator = "'"
self.damage = -100
Label.__init__(self, text=self.bullet_indicator)
self.pack()
self._x = x
self._y = y
self._observers = []
def start(self):
process = threading.Thread(target=self.place_bullet)
process.start()
def place_bullet(self):
if self._y > 0:
self.set_y(-1)
self.place(x=self._x, y=self._y)
process = threading.Timer(self.bullet_timer, self.place_bullet, [])
process.start()
else:
self.set_y(self.space.height)
self.place(x=self._x, y=self._y)
process = threading.Timer(self.bullet_timer, self.place_bullet, [])
process.start()
def get_y(self):
return self._y
def set_y(self, value):
self._y += value
for callback in self._observers:
callback(thing=self)
y = property(get_y, set_y)
def bind_to(self, callback):
self._observers.append(callback)
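# Observer pattern: whoever creates a Bullet can register callbacks via bind_to();
# set_y() then calls each callback with thing=<this bullet> every time the bullet
# moves, which lets the surrounding game (presumably the 'space' object) check for
# collisions without polling.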
def hit(self):
self.destroy()
|
agent.py
|
def goAirportAgent(parent=None, communication_line=None):
import os
import sys
import signal
import ctypes
import multiprocessing
from time import sleep
from threading import Thread
from time import sleep
from random import random
import speech_recognition as sr
import responder
import airportAgent_functions as aaf
r = sr.Recognizer()
# Set up session state defaults
_i = 0
driver = None
_worked = None
currentUserTicket = None
_asking_for_flight = False
_asking_for_lost = False
_asking_for_hotel = False
_asking_for_taxi = False
_hold_number = False
_hold_destination = False
_monitor_stop = 0
if communication_line is None:
communication_line = random()
with open("docs/settings.txt", "r") as f:
settings = [i.rstrip() for i in f.readlines()]
reset_after_being_idle_for = int(settings[7].split(" = ")[-1])
input_device_index = settings[9].split(" = ")[-1]
output_device_index = settings[10].split(" = ")[-1]
offline_text_to_speech = int(settings[0].split(" = ")[-1].rstrip())
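# Assumption: docs/settings.txt holds 'key = value' lines and the indices used above are
# positional -- line 0 is the offline text-to-speech flag, line 7 the idle-reset timeout
# in seconds, and lines 9/10 the input/output audio device indexes.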
aaf.clearer()
print("Please wait...")
def reset():
'''Resets the current session's variables and activities'''
nonlocal currentUserTicket, _asking_for_flight, _asking_for_lost, _asking_for_hotel, _asking_for_taxi, _hold_number, _hold_destination
aaf.cache_clearer()
currentUserTicket = None
_asking_for_flight = False
_asking_for_lost = False
_asking_for_hotel = False
_asking_for_taxi = False
_hold_number = False
_hold_destination = False
# If it's a ctypes object, treat it differently
if "sharedctypes" in str(type(communication_line)):
communication_line.value = "/go_red".encode()
aaf.kill_chrome(driver)
def resetter():
while not _monitor_stop:
sleep(3)
# Reset variables if no user talked to the agent for X seconds
if (responder.last_activity + reset_after_being_idle_for) > aaf.current_seconds():
pass
else:
reset()
# Real-time monitoring of whether the agent is idle.
Thread(target=resetter).start()
while 1:
try:
# Set listening port
# if 'auto' .. try all indexes till you find one working.
# if already set to an index, then use that.
with sr.Microphone(device_index=(_i if input_device_index.lower() == "auto" else int(input_device_index))) as source:
print("Input Device Index:", (str(_i) + " (auto)" if input_device_index.lower() == "auto" else int(input_device_index)))
print("Output Device Index:", output_device_index)
_worked = _i # If no error was raised at device_index=_i,
# then that index is a working voice input source
aaf.clearer() # Clear the screen
print("Listening...")
# if agent launched without animations
if not communication_line:
aaf.cache_clearer()
while 1: # Keep listening
# Filter noise
r.adjust_for_ambient_noise(source)
# Listen to the port (the source)
audio = r.listen(source)
try:
# Send the audio and hold what Google's Speech-to-Text returns
text = r.recognize_google(audio)
# Respond or do an action
refresh_vars = responder.responder(text,
communication_line,
(aaf.say2 if offline_text_to_speech else aaf.say1),
aaf.clearer,
currentUserTicket,
_asking_for_flight,
_asking_for_lost,
_asking_for_hotel,
_asking_for_taxi,
_hold_number,
_hold_destination
)
# Refresh variables
currentUserTicket = refresh_vars[0]
_asking_for_flight = refresh_vars[1]
_asking_for_lost = refresh_vars[2]
_asking_for_hotel = refresh_vars[3]
_asking_for_taxi = refresh_vars[4]
_hold_number = refresh_vars[5]
_hold_destination = refresh_vars[6]
driver = refresh_vars[7]
# Reset if Idle for more than X seconds
# Exit from the listening loop if the session ended
except SystemExit:
# Let resetter know that execution stopped
_monitor_stop = 1
# clear current session's activity
aaf.cache_clearer()
# exit chrome if currently working
if driver:
aaf.kill_chrome(driver)
# Remove voice outputs
output_file = 'output_' + str(id(communication_line))
if os.path.exists(os.path.join(os.getcwd(), output_file + ".wav")):
os.remove(os.path.join(os.getcwd(), output_file + ".wav"))
if os.path.exists(os.path.join(os.getcwd(), output_file + ".mp3")):
os.remove(os.path.join(os.getcwd(), output_file + ".mp3"))
# kill parent (Animations; If initialized from there)
if parent:
os.kill(parent, signal.SIGTERM)
# kill self
os.kill(os.getpid(), signal.SIGTERM)
# Handle the error if voice was not recognized
except sr.UnknownValueError:
print("Sorry I didn't hear that. Can you repeat that?")
except Exception as e:
print(e)
sleep(5)
# Inform the user if the device at index of '_i' was not found
except AssertionError:
print(f"Device at device_index={_i} was not found, trying another"
" one.")
sleep(3)
# Check if the input source is being used by another device
except OSError as e:
if e.errno == -9998:
aaf.clearer()
print(f"device_index at {_i} is being used by another program"
" or not available. Trying another one")
sleep(2)
else:
print(e)
sleep(2)
# If no input device found at index of '_i', then try another one
if _worked is None and input_device_index.lower() == "auto":
_i += 1
# If it wasn't auto and we reached this point, then the above while
# loop already finished executing; therefore, break.
else:
break
if __name__ == "__main__":
goAirportAgent()
|
hnsentiment.py
|
#!/usr/bin/python3
import asyncio
import concurrent.futures
import requests
import json
import threading
from nltk.sentiment.vader import SentimentIntensityAnalyzer
HN_TOP_STORIES_URL = "https://hacker-news.firebaseio.com/v0/topstories.json"
HN_ITEM_QUERY_BASE_URL = "https://hacker-news.firebaseio.com/v0/item/"
stories = {}
comments = {}
def build_comments(comment_id_list, story_id, count_id):
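# Depth-first walk of a story's comment tree: every Hacker News item may carry a 'kids'
# list of child comment IDs, which are fetched recursively. count_id is > 0 only for the
# top-level call, so a single progress line is printed once a story's whole tree is done;
# recursive calls pass -1 to stay quiet.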
for comment_id in comment_id_list:
comment = json.loads(requests.get(HN_ITEM_QUERY_BASE_URL + str(comment_id) + ".json").text)
comments[story_id].append(comment)
if("kids" in comment):
build_comments(comment["kids"], story_id, -1)
if count_id > 0:
print(str(count_id) + " done")
async def build_stories():
top_story_ids = json.loads(requests.get(HN_TOP_STORIES_URL).text)
futures = []
count = 0
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
loop = asyncio.get_event_loop()
for story_id in top_story_ids:
count += 1
story = json.loads(requests.get(HN_ITEM_QUERY_BASE_URL + str(story_id) + ".json").text)
print(story)
stories[story_id] = story
comments[story_id] = []
print("Building Comments for Story ID " + str(story_id) + " (" + str(count) + " of " + str(len(top_story_ids)) + ")")
if("kids" in story):
#thr = threading.Thread(target=build_comments, args=(story["kids"], story_id,))
futures.append(loop.run_in_executor(executor, build_comments, story["kids"], story_id, count))
if count == 5: break  # for debugging, only look at the first 5 stories
print("Waiting for requests to complete")
await asyncio.gather(*futures)
def add_sentiment_to_comments():
sia = SentimentIntensityAnalyzer()
for story_comment_list in comments.values():
for comment in story_comment_list:
if "text" in comment:
comment["sentiment"] = sia.polarity_scores(comment["text"])
print(comment)  # the VADER sentiment scores are now attached to the comment
def add_sentiment_to_stories():
for story_id in stories:
sentiments_of_story = [comment["sentiment"]["compound"] for comment in comments[story_id] if "sentiment" in comment]
if not sentiments_of_story:
continue  # no scored comments for this story; skip to avoid dividing by zero
stories[story_id]["sentiment"] = sum(sentiments_of_story) / float(len(sentiments_of_story))
print(stories[story_id]["sentiment"])
if __name__ == '__main__':
print("Retrieving all comments through Hacker News API")
print("-----------------------------------------------")
loop = asyncio.get_event_loop()
loop.run_until_complete(build_stories())
loop.close()
print("-----------------------------------------------")
print("Retrieving Sentiment for Comments")
print("---------------------------------")
add_sentiment_to_comments()
print("Computing Sentiment for Stories")
print("-------------------------------")
add_sentiment_to_stories()
|
test_nanny.py
|
import asyncio
import gc
import logging
import multiprocessing as mp
import os
import random
import sys
from contextlib import suppress
import pytest
from tlz import first, valmap
from tornado.ioloop import IOLoop
import dask
from distributed import Client, Nanny, Scheduler, Worker, rpc, wait, worker
from distributed.core import CommClosedError, Status
from distributed.diagnostics import SchedulerPlugin
from distributed.metrics import time
from distributed.protocol.pickle import dumps
from distributed.utils import TimeoutError, parse_ports, tmpfile
from distributed.utils_test import captured_logger, gen_cluster, gen_test, inc
# FIXME why does this leave behind unclosed Comm objects?
@gen_cluster(nthreads=[], allow_unclosed=True)
async def test_nanny(s):
async with Nanny(s.address, nthreads=2, loop=s.loop) as n:
async with rpc(n.address) as nn:
assert n.is_alive()
[ws] = s.workers.values()
assert ws.nthreads == 2
assert ws.nanny == n.address
await nn.kill()
assert not n.is_alive()
start = time()
while n.worker_address in s.workers:
assert time() < start + 1
await asyncio.sleep(0.01)
await nn.kill()
assert not n.is_alive()
assert n.worker_address not in s.workers
await nn.instantiate()
assert n.is_alive()
[ws] = s.workers.values()
assert ws.nthreads == 2
assert ws.nanny == n.address
await nn.terminate()
assert not n.is_alive()
@gen_cluster(nthreads=[])
async def test_many_kills(s):
n = await Nanny(s.address, nthreads=2, loop=s.loop)
assert n.is_alive()
await asyncio.gather(*(n.kill() for _ in range(5)))
await asyncio.gather(*(n.kill() for _ in range(5)))
await n.close()
@gen_cluster(Worker=Nanny)
async def test_str(s, a, b):
assert a.worker_address in str(a)
assert a.worker_address in repr(a)
assert str(a.nthreads) in str(a)
assert str(a.nthreads) in repr(a)
@gen_cluster(nthreads=[], client=True)
async def test_nanny_process_failure(c, s):
n = await Nanny(s.address, nthreads=2, loop=s.loop)
first_dir = n.worker_dir
assert os.path.exists(first_dir)
original_address = n.worker_address
ww = rpc(n.worker_address)
await ww.update_data(data=valmap(dumps, {"x": 1, "y": 2}))
pid = n.pid
assert pid is not None
with suppress(CommClosedError):
await c.run(os._exit, 0, workers=[n.worker_address])
start = time()
while n.pid == pid: # wait while process dies and comes back
await asyncio.sleep(0.01)
assert time() - start < 5
start = time()
await asyncio.sleep(1)
while not n.is_alive(): # wait while process comes back
await asyncio.sleep(0.01)
assert time() - start < 5
# assert n.worker_address != original_address # most likely
start = time()
while n.worker_address not in s.nthreads or n.worker_dir is None:
await asyncio.sleep(0.01)
assert time() - start < 5
second_dir = n.worker_dir
await n.close()
assert not os.path.exists(second_dir)
assert not os.path.exists(first_dir)
assert first_dir != n.worker_dir
await ww.close_rpc()
s.stop()
@gen_cluster(nthreads=[])
async def test_run(s):
pytest.importorskip("psutil")
n = await Nanny(s.address, nthreads=2, loop=s.loop)
with rpc(n.address) as nn:
response = await nn.run(function=dumps(lambda: 1))
assert response["status"] == "OK"
assert response["result"] == 1
await n.close()
@pytest.mark.slow
@gen_cluster(config={"distributed.comm.timeouts.connect": "1s"})
async def test_no_hang_when_scheduler_closes(s, a, b):
# https://github.com/dask/distributed/issues/2880
with captured_logger("tornado.application", logging.ERROR) as logger:
await s.close()
await asyncio.sleep(1.2)
assert a.status == Status.closed
assert b.status == Status.closed
out = logger.getvalue()
assert "Timed out trying to connect" not in out
@pytest.mark.slow
@gen_cluster(
Worker=Nanny, nthreads=[("127.0.0.1", 1)], worker_kwargs={"reconnect": False}
)
async def test_close_on_disconnect(s, w):
await s.close()
start = time()
while w.status != Status.closed:
await asyncio.sleep(0.05)
assert time() < start + 9
class Something(Worker):
# a subclass of Worker which is not Worker
pass
@gen_cluster(client=True, Worker=Nanny)
async def test_nanny_worker_class(c, s, w1, w2):
out = await c._run(lambda dask_worker=None: str(dask_worker.__class__))
assert "Worker" in list(out.values())[0]
assert w1.Worker is Worker
@gen_cluster(client=True, Worker=Nanny, worker_kwargs={"worker_class": Something})
async def test_nanny_alt_worker_class(c, s, w1, w2):
out = await c._run(lambda dask_worker=None: str(dask_worker.__class__))
assert "Something" in list(out.values())[0]
assert w1.Worker is Something
@pytest.mark.slow
@gen_cluster(client=False, nthreads=[])
async def test_nanny_death_timeout(s):
await s.close()
w = Nanny(s.address, death_timeout=1)
with pytest.raises(TimeoutError):
await w
assert w.status == Status.closed
@gen_cluster(client=True, Worker=Nanny)
async def test_random_seed(c, s, a, b):
async def check_func(func):
x = c.submit(func, 0, 2 ** 31, pure=False, workers=a.worker_address)
y = c.submit(func, 0, 2 ** 31, pure=False, workers=b.worker_address)
assert x.key != y.key
x = await x
y = await y
assert x != y
await check_func(lambda a, b: random.randint(a, b))
np = pytest.importorskip("numpy")
await check_func(lambda a, b: np.random.randint(a, b))
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="num_fds not supported on windows"
)
@gen_cluster(client=False, nthreads=[])
async def test_num_fds(s):
psutil = pytest.importorskip("psutil")
proc = psutil.Process()
# Warm up
w = await Nanny(s.address)
await w.close()
del w
gc.collect()
before = proc.num_fds()
for i in range(3):
w = await Nanny(s.address)
await asyncio.sleep(0.1)
await w.close()
start = time()
while proc.num_fds() > before:
print("fds:", before, proc.num_fds())
await asyncio.sleep(0.1)
assert time() < start + 10
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster(client=True, nthreads=[])
async def test_worker_uses_same_host_as_nanny(c, s):
for host in ["tcp://0.0.0.0", "tcp://127.0.0.2"]:
n = await Nanny(s.address, host=host)
def func(dask_worker):
return dask_worker.listener.listen_address
result = await c.run(func)
assert host in first(result.values())
await n.close()
@gen_test()
async def test_scheduler_file():
with tmpfile() as fn:
s = await Scheduler(scheduler_file=fn, port=8008)
w = await Nanny(scheduler_file=fn)
assert set(s.workers) == {w.worker_address}
await w.close()
s.stop()
@gen_cluster(client=True, Worker=Nanny, nthreads=[("127.0.0.1", 2)])
async def test_nanny_timeout(c, s, a):
x = await c.scatter(123)
with captured_logger(
logging.getLogger("distributed.nanny"), level=logging.ERROR
) as logger:
response = await a.restart(timeout=0.1)
out = logger.getvalue()
assert "timed out" in out.lower()
start = time()
while x.status != "cancelled":
await asyncio.sleep(0.1)
assert time() < start + 7
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
client=True,
Worker=Nanny,
worker_kwargs={"memory_limit": 1e8},
timeout=20,
clean_kwargs={"threads": False},
)
async def test_nanny_terminate(c, s, a):
from time import sleep
def leak():
L = []
while True:
L.append(b"0" * 5000000)
sleep(0.01)
proc = a.process.pid
with captured_logger(logging.getLogger("distributed.nanny")) as logger:
future = c.submit(leak)
start = time()
while a.process.pid == proc:
await asyncio.sleep(0.1)
assert time() < start + 10
out = logger.getvalue()
assert "restart" in out.lower()
assert "memory" in out.lower()
@gen_cluster(
nthreads=[("127.0.0.1", 1)] * 8,
client=True,
Worker=Worker,
clean_kwargs={"threads": False},
)
async def test_throttle_outgoing_connections(c, s, a, *workers):
# Put a bunch of small data on worker a
await c.run(lambda: logging.getLogger("distributed.worker").setLevel(logging.DEBUG))
remote_data = c.map(
lambda x: b"0" * 10000, range(10), pure=False, workers=[a.address]
)
await wait(remote_data)
def pause(dask_worker):
# Patch paused and memory_monitor on the one worker
# This is very fragile, since a refactor of memory_monitor to
# remove _memory_monitoring will break this test.
dask_worker._memory_monitoring = True
dask_worker.paused = True
dask_worker.outgoing_current_count = 2
await c.run(pause, workers=[a.address])
requests = [
await a.get_data(await w.rpc.connect(w.address), keys=[f.key], who=w.address)
for w in workers
for f in remote_data
]
await wait(requests)
wlogs = await c.get_worker_logs(workers=[a.address])
wlogs = "\n".join(x[1] for x in wlogs[a.address])
assert "throttling" in wlogs.lower()
@gen_cluster(nthreads=[], client=True)
async def test_avoid_memory_monitor_if_zero_limit(c, s):
nanny = await Nanny(s.address, loop=s.loop, memory_limit=0)
typ = await c.run(lambda dask_worker: type(dask_worker.data))
assert typ == {nanny.worker_address: dict}
pcs = await c.run(lambda dask_worker: list(dask_worker.periodic_callbacks))
assert "memory" not in pcs
assert "memory" not in nanny.periodic_callbacks
future = c.submit(inc, 1)
assert await future == 2
await asyncio.sleep(0.02)
await c.submit(inc, 2) # worker doesn't pause
await nanny.close()
@gen_cluster(nthreads=[], client=True)
async def test_scheduler_address_config(c, s):
with dask.config.set({"scheduler-address": s.address}):
nanny = await Nanny(loop=s.loop)
assert nanny.scheduler.address == s.address
start = time()
while not s.workers:
await asyncio.sleep(0.1)
assert time() < start + 10
await nanny.close()
@pytest.mark.slow
@gen_test()
async def test_wait_for_scheduler():
with captured_logger("distributed") as log:
w = Nanny("127.0.0.1:44737")
IOLoop.current().add_callback(w.start)
await asyncio.sleep(6)
await w.close()
log = log.getvalue()
assert "error" not in log.lower(), log
assert "restart" not in log.lower(), log
@gen_cluster(nthreads=[], client=True)
async def test_environment_variable(c, s):
a = Nanny(s.address, loop=s.loop, memory_limit=0, env={"FOO": "123"})
b = Nanny(s.address, loop=s.loop, memory_limit=0, env={"FOO": "456"})
await asyncio.gather(a, b)
results = await c.run(lambda: os.environ["FOO"])
assert results == {a.worker_address: "123", b.worker_address: "456"}
await asyncio.gather(a.close(), b.close())
@gen_cluster(nthreads=[], client=True)
async def test_data_types(c, s):
w = await Nanny(s.address, data=dict)
r = await c.run(lambda dask_worker: type(dask_worker.data))
assert r[w.worker_address] == dict
await w.close()
@gen_cluster(nthreads=[])
async def test_local_directory(s):
with tmpfile() as fn:
with dask.config.set(temporary_directory=fn):
w = await Nanny(s.address)
assert w.local_directory.startswith(fn)
assert "dask-worker-space" in w.local_directory
assert w.process.worker_dir.count("dask-worker-space") == 1
await w.close()
def _noop(x):
"""Define here because closures aren't pickleable."""
pass
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
client=True,
Worker=Nanny,
config={"distributed.worker.daemon": False},
)
async def test_mp_process_worker_no_daemon(c, s, a):
def multiprocessing_worker():
p = mp.Process(target=_noop, args=(None,))
p.start()
p.join()
await c.submit(multiprocessing_worker)
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
client=True,
Worker=Nanny,
config={"distributed.worker.daemon": False},
)
async def test_mp_pool_worker_no_daemon(c, s, a):
def pool_worker(world_size):
with mp.Pool(processes=world_size) as p:
p.map(_noop, range(world_size))
await c.submit(pool_worker, 4)
@pytest.mark.asyncio
async def test_nanny_closes_cleanly(cleanup):
async with Scheduler() as s:
n = await Nanny(s.address)
assert n.process.pid
proc = n.process.process
await n.close()
assert not n.process
assert not proc.is_alive()
assert proc.exitcode == 0
@pytest.mark.slow
@pytest.mark.asyncio
async def test_lifetime(cleanup):
counter = 0
event = asyncio.Event()
class Plugin(SchedulerPlugin):
def add_worker(self, **kwargs):
pass
def remove_worker(self, **kwargs):
nonlocal counter
counter += 1
if counter == 2: # wait twice, then trigger closing event
event.set()
async with Scheduler() as s:
s.add_plugin(Plugin())
async with Nanny(s.address) as a:
async with Nanny(s.address, lifetime="500 ms", lifetime_restart=True) as b:
await event.wait()
@pytest.mark.asyncio
async def test_nanny_closes_cleanly_2(cleanup):
async with Scheduler() as s:
async with Nanny(s.address) as n:
async with Client(s.address, asynchronous=True) as client:
with client.rpc(n.worker_address) as w:
IOLoop.current().add_callback(w.terminate)
start = time()
while n.status != Status.closed:
await asyncio.sleep(0.01)
assert time() < start + 5
assert n.status == Status.closed
@pytest.mark.asyncio
async def test_config(cleanup):
async with Scheduler() as s:
async with Nanny(s.address, config={"foo": "bar"}) as n:
async with Client(s.address, asynchronous=True) as client:
config = await client.run(dask.config.get, "foo")
assert config[n.worker_address] == "bar"
@pytest.mark.asyncio
async def test_nanny_port_range(cleanup):
async with Scheduler() as s:
async with Client(s.address, asynchronous=True) as client:
nanny_port = "9867:9868"
worker_port = "9869:9870"
async with Nanny(s.address, port=nanny_port, worker_port=worker_port) as n1:
assert n1.port == 9867 # Selects first port in range
async with Nanny(
s.address, port=nanny_port, worker_port=worker_port
) as n2:
assert n2.port == 9868 # Selects next port in range
with pytest.raises(
ValueError, match="Could not start Nanny"
): # No more ports left
async with Nanny(
s.address, port=nanny_port, worker_port=worker_port
):
pass
# Ensure Worker ports are in worker_port range
def get_worker_port(dask_worker):
return dask_worker.port
worker_ports = await client.run(get_worker_port)
assert list(worker_ports.values()) == parse_ports(worker_port)
class KeyboardInterruptWorker(worker.Worker):
"""A Worker that raises KeyboardInterrupt almost immediately"""
async def heartbeat(self):
def raise_err():
raise KeyboardInterrupt()
self.loop.add_callback(raise_err)
@pytest.mark.parametrize("protocol", ["tcp", "ucx"])
@pytest.mark.asyncio
async def test_nanny_closed_by_keyboard_interrupt(cleanup, protocol):
if protocol == "ucx": # Skip if UCX isn't available
pytest.importorskip("ucp")
async with Scheduler(protocol=protocol) as s:
async with Nanny(
s.address, nthreads=1, worker_class=KeyboardInterruptWorker
) as n:
n.auto_restart = False
await n.process.stopped.wait()
# Check that the scheduler has been notified about the closed worker
assert len(s.workers) == 0
class StartException(Exception):
pass
class BrokenWorker(worker.Worker):
async def start(self):
raise StartException("broken")
@pytest.mark.asyncio
async def test_worker_start_exception(cleanup):
# make sure this raises the right Exception:
with pytest.raises(StartException):
async with Nanny("tcp://localhost:1", worker_class=BrokenWorker) as n:
await n.start()
@pytest.mark.asyncio
async def test_failure_during_worker_initialization(cleanup):
with captured_logger(logger="distributed.nanny", level=logging.WARNING) as logs:
async with Scheduler() as s:
with pytest.raises(Exception):
async with Nanny(s.address, foo="bar") as n:
await n
assert "Restarting worker" not in logs.getvalue()
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
from unittest import mock
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")
SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem")
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
WRONGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh512.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_DER_to_PEM(self):
with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if "LibreSSL" in s:
self.assertTrue(s.startswith("LibreSSL {:d}.{:d}".format(major, minor)),
(s, t))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with ssl.wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=CERTFILE, keyfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=WRONGCERT, keyfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# a wildcard in the first fragment and IDNA A-labels in subsequent fragments
# are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
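# get_default_verify_paths() returns a DefaultVerifyPaths named tuple with
# six fields: cafile, capath, openssl_cafile_env, openssl_cafile,
# openssl_capath_env and openssl_capath.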
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
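# enum_certificates() yields (cert_bytes, encoding, trust) triples; collect
# the trust OIDs so we can check that the serverAuth EKU shows up in at
# least one store.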
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
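# Probe a range of NIDs: unknown ones raise ValueError, known ones must
# expose consistently typed attributes.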
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
self.assertRaises(TypeError, ssl.SSLContext)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# OP_ALL | OP_NO_SSLv2 is the default value
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2,
ctx.options)
ctx.options |= ssl.OP_NO_SSLv3
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
ctx.options)
ctx.options = 0
self.assertEqual(0, ctx.options)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value by OpenSSL
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
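# A callable object (or one of its bound methods) also works as a
# password callback.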
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
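# cadata accepts PEM text as well as DER bytes; loading the same
# certificate twice must not inflate the x509_ca counter.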
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but SVN_PYTHON_ORG_ROOT_CERT is a CA cert
ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(SVN_PYTHON_ORG_ROOT_CERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
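# SSL_CERT_FILE adds CERTFILE (an end-entity cert, not a CA) on top of the
# Windows system stores, so exactly one extra x509 entry is expected.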
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
getattr(ssl, "OP_SINGLE_DH_USE", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertFalse(ctx.check_hostname)
# Requires CERT_REQUIRED or CERT_OPTIONAL
with self.assertRaises(ValueError):
ctx.check_hostname = True
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen(5)
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class NetworkedTests(unittest.TestCase):
def test_connect(self):
with support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE)
try:
s.connect(("svn.python.org", 443))
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# this should fail because we have no verification certs
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, ("svn.python.org", 443))
s.close()
# this should succeed because we specify the root cert
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
try:
s.connect(("svn.python.org", 443))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
with support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
try:
self.assertEqual(0, s.connect_ex(("svn.python.org", 443)))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
with support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.setblocking(False)
rc = s.connect_ex(('svn.python.org', 443))
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.settimeout(0.0000001)
rc = s.connect_ex(('svn.python.org', 443))
if rc == 0:
self.skipTest("svn.python.org responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
finally:
s.close()
def test_connect_ex_error(self):
with support.transient_internet("svn.python.org"):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
try:
rc = s.connect_ex(("svn.python.org", 444))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK))
finally:
s.close()
def test_connect_with_context(self):
with support.transient_internet("svn.python.org"):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
try:
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# Same with a server hostname
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="svn.python.org")
s.connect(("svn.python.org", 443))
s.close()
# This should fail because we have no verification certs
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, ("svn.python.org", 443))
s.close()
# This should succeed because we specify the root cert
ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
with support.transient_internet("svn.python.org"):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_cadata(self):
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
with support.transient_internet("svn.python.org"):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(("svn.python.org", 443))
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(("svn.python.org", 443))
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
with support.transient_internet("svn.python.org"):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
ss.connect(("svn.python.org", 443))
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
with support.transient_internet("svn.python.org"):
s = socket.socket(socket.AF_INET)
s.connect(("svn.python.org", 443))
s.setblocking(False)
s = ssl.wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
s.close()
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
def _test_get_server_certificate(host, port, cert=None):
with support.transient_internet(host):
pem = ssl.get_server_certificate((host, port),
ssl.PROTOCOL_SSLv23)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
try:
pem = ssl.get_server_certificate((host, port),
ssl.PROTOCOL_SSLv23,
ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail: CERTFILE is not the CA that signed the server's certificate
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
pem = ssl.get_server_certificate((host, port),
ssl.PROTOCOL_SSLv23,
ca_certs=cert)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
_test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT)
if support.IPV6_ENABLED:
_test_get_server_certificate('ipv6.google.com', 443)
def test_ciphers(self):
remote = ("svn.python.org", 443)
with support.transient_internet(remote[0]):
with ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(remote)
with ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(remote)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = ssl.wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(remote)
def test_algorithms(self):
# Issue #8484: all algorithms should be available when verifying a
# certificate.
# SHA256 was added in OpenSSL 0.9.8
if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
# sha256.tbs-internet.com needs SNI to use the correct certificate
if not ssl.HAS_SNI:
self.skipTest("SNI needed for this test")
# https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
with support.transient_internet("sha256.tbs-internet.com"):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(sha256_cert)
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="sha256.tbs-internet.com")
try:
s.connect(remote)
if support.verbose:
sys.stdout.write("\nCipher with %r is %r\n" %
(remote, s.cipher()))
sys.stdout.write("Certificate is:\n%s\n" %
pprint.pformat(s.getpeercert()))
finally:
s.close()
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
with support.transient_internet("svn.python.org"):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect(("svn.python.org", 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
with support.transient_internet("svn.python.org"):
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s) as ss:
ss.connect(("svn.python.org", 443))
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
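# Wrap the accepted plain socket server-side and record which NPN
# protocol (if any) the client negotiated.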
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_protocols.append(self.sslconn.selected_npn_protocol())
except (ssl.SSLError, ConnectionResetError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
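# Minimal line protocol: an empty read or 'over' ends the connection,
# 'STARTTLS' and 'ENDTLS' switch TLS on and off, 'CB tls-unique' returns
# the channel binding data, and anything else is echoed back lowercased.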
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLSv1)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_protocols = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen(5)
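# The short accept() timeout set above keeps the loop responsive so
# stop() takes effect promptly.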
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = ssl.wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
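# Drain any bytes already buffered inside the SSL object before the
# event loop goes back to select() on the raw socket.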
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def bad_cert_test(certfile):
"""
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with the given client certificate fails.
"""
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False,
connectionchatty=False)
with server:
try:
with socket.socket() as sock:
s = ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
except ssl.SSLError as x:
if support.verbose:
sys.stdout.write("\nSSLError is %s\n" % x.args[1])
except OSError as x:
if support.verbose:
sys.stdout.write("\nOSError is %s\n" % x.args[1])
except OSError as x:
if x.errno != errno.ENOENT:
raise
if support.verbose:
sys.stdout.write("\OSError is %s\n" % str(x))
else:
raise AssertionError("Use of invalid cert should have failed!")
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_npn_protocol': s.selected_npn_protocol()
})
s.close()
stats['server_npn_protocols'] = server.selected_protocols
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_SSLv23:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
try:
server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket(),
do_handshake_on_connect=False)
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't
# done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
s.close()
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="localhost") as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(ssl.CertificateError,
"hostname 'invalid' doesn't match 'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
context.wrap_socket(s)
def test_empty_cert(self):
"""Connecting with an empty cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"nullcert.pem"))
def test_malformed_cert(self):
"""Connecting with a badly formatted certificate (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badcert.pem"))
def test_nonexisting_cert(self):
"""Connecting with a non-existing cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem"))
def test_malformed_key(self):
"""Connecting with a badly formatted key (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badkey.pem"))
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen(5)
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = ssl.wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, True,
client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, True)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, True)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, True,
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, True)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using a SocketServer to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
indata = "TEST MESSAGE of mixed case\n"
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, whether to expect success, *args)
send_methods = [
('send', s.send, True, []),
('sendto', s.sendto, False, ["some.address"]),
('sendall', s.sendall, True, []),
]
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for meth_name, send_meth, expect_success, args in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
send_meth(indata, *args)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen(5)
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
ssl.wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = ssl.wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen(5)
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.recv(1)
t = threading.Thread(target=serve)
t.start()
# Client wait until server setup and perform a connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_main(verbose=False):
if support.verbose:
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [ContextTests, BasicSocketTests, SSLErrorTests]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info:
tests.append(ThreadedTests)
try:
support.run_unittest(*tests)
finally:
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
dqn_train.py
|
"""
Copyright (c) College of Mechatronics and Control Engineering, Shenzhen University.
All rights reserved.
Description :
    DQN algorithm used for controlling the steering so that a vehicle keeps its lane.
Author:Team Li
"""
import tensorflow as tf
import cv2, math, sys, random, threading
from keep_lane.basic_net.dqn_utils import action_value_net
import RL.rl_utils as rl_tools
try:
sys.path.append('F:\my_project\driving-desicion-in-carla\dist/carla-0.9.4-py3.7-win-amd64.egg')
import carla
except:
raise ImportError('Please check your carla file')
from carla_utils.world_ops import *
from carla_utils.sensor_ops import *
tf.app.flags.DEFINE_string(
'checkpoint_dir', '../checkpoint',
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'train_dir', '../checkpoint',
'Directory where checkpoints are written to.')
tf.app.flags.DEFINE_integer(
'batch_size', 20, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
    'total_epoches', 2000, 'The total number of training epochs.')
tf.app.flags.DEFINE_integer(
    'max_interations', 1000, 'The maximum number of iterations in each epoch.')
tf.app.flags.DEFINE_float('learning_rate', 1e-3, 'Initial learning rate.')
tf.app.flags.DEFINE_integer(
    'log_every_n_steps', 20,
    'The frequency, in steps, with which logs are printed.')
tf.app.flags.DEFINE_integer(
    'f_save_step', 15000,
    'The frequency, in steps, with which model checkpoints are saved.')
tf.app.flags.DEFINE_integer(
'n_egopilots', 5, 'the number of egopilots')
tf.app.flags.DEFINE_integer(
'img_height', 416,
'raw image height captured from carla')
tf.app.flags.DEFINE_integer(
'img_width', 626,
'raw image width captured from carla')
tf.app.flags.DEFINE_integer(
'net_img_height', 300,
'image height of network input')
tf.app.flags.DEFINE_integer(
    'net_img_width', 300,
    'image width of network input')
tf.app.flags.DEFINE_integer(
'n_action', 21,
'total discrete action in steer')
tf.app.flags.DEFINE_integer(
    'e_desent_max_step', 100000,
    'Number of steps over which the exploration rate decays linearly.')
tf.app.flags.DEFINE_float(
    'e_min_val', 0.01,
    'Minimum exploration rate reached at the end of the linear decay.')
tf.app.flags.DEFINE_integer(
    'target_update_f', 1000,
    'Frequency, in steps, at which the target network is synchronised with the online network.')
FLAGS = tf.app.flags.FLAGS
## carla config ##
semantic_camera_config = {'data_type': 'sensor.camera.semantic_segmentation', 'image_size_x': FLAGS.img_width,
'image_size_y': FLAGS.img_height, 'fov': 110, 'sensor_tick': 0.02,
'transform': carla.Transform(carla.Location(x=0.5, z=1.6)),
'attach_to':None}
bgr_camera_config = {'data_type': 'sensor.camera.rgb', 'image_size_x': FLAGS.img_width,
'image_size_y': FLAGS.img_height, 'fov': 110, 'sensor_tick': 0.02,
'transform': carla.Transform(carla.Location(x=0.5, z=1.6)),
'attach_to':None}
collision_sensor_config = {'data_type': 'sensor.other.collision','attach_to': None}
invasion_sensor_config = {'data_type': 'sensor.other.lane_detector', 'attach_to': None}
obstacle_sensor_config = {'data_type': 'sensor.other.obstacle', 'sensor_tick': 0.02,
'distance': 3, 'attach_to': None}
cornet_init_points = [12, 42, 63, 64, 70, 71, 72, 73, 86, 87, 94,
95, 105, 106, 107, 108, 109, 119, 120, 121, 122]
def gaussian_r(val, mu=30., sigma=10.):
    """Gaussian-shaped reward centred at mu.
    Args:
        val: measured value (e.g. velocity in km/h, or the steer command)
        mu: value that earns the maximum reward of 1.0
        sigma: width of the bell curve
    Return:
        a reward in (0, 1]
    """
    # if vel > 80:
    #     return 5.
    r = math.exp(-((val - mu) ** 2) / (2 * sigma ** 2))
    return r
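# Illustrative values only: with the defaults mu=30 and sigma=10,
# gaussian_r(30) == 1.0, gaussian_r(20) ≈ 0.61 and gaussian_r(0) ≈ 0.01,
# so the reward falls off smoothly the further val is from mu.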
def e_greedy(step, action_index):
r = random.uniform(0., 1.)
if step <= FLAGS.e_desent_max_step:
e = 1. - step*(1-FLAGS.e_min_val)/FLAGS.e_desent_max_step
if r <= e:
action_index = random.randint(0, FLAGS.n_action - 1)
return action_index
else:
return action_index
else:
if r <= 0.1:
action_index = random.randint(0, FLAGS.n_action - 1)
return action_index
else:
return action_index
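# A minimal sketch (illustrative only, never called by this script) of the
# exploration schedule implemented by e_greedy() above; the default arguments
# simply mirror the e_desent_max_step and e_min_val flags defined earlier.
def _epsilon_schedule_sketch(step, decay_steps=100000, e_min=0.01):
    """Return the probability of taking a random action at a given step."""
    if step <= decay_steps:
        # linear decay from 1.0 down to e_min over decay_steps
        return 1. - step * (1 - e_min) / decay_steps
    # after the decay window, e_greedy keeps a fixed 10% exploration rate
    return 0.1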
def action_index_2_steer(action_index):
""" change the action index to steer val
Args:
action_index: an int between [0, n_action-1]
Return:
a steer val in [-1, 1]
"""
steer = action_index * 2 / float(FLAGS.n_action - 1) - 1. ## range is [-1, 1]
return steer
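# Illustrative mapping only: with the default n_action = 21 the discrete actions
# cover the steer range [-1, 1] in steps of 0.1, e.g. action_index_2_steer(0) == -1.0,
# action_index_2_steer(10) == 0.0 and action_index_2_steer(20) == 1.0.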
def single_execuate(target, args):
    """Execute the target function in a separate thread.
    Args:
        target: a callable
        args: argument tuple passed to target
    """
threading.Thread(target=target, args=args).start()
def check_whether_respawn_actors(world, vehicles):
"""check whether to respawn the static acotors in a frequency"""
while True:
if carla_actors_static(vehicles, bigger_than=0.75):
respawn_static_actors(world, vehicles)
time.sleep(5)
def target_thread(sess, online_begin_signal):
"""a thread for target nets in DDPG"""
begin = True
avg_r = 0.
while True:
## get current state
imgs = []
for camera_sensor, lane_invasion in zip(cameras, lane_invasions):
img = camera_sensor.get()
            img = img[int(FLAGS.img_height*1.8//5):, :, :]  ## crop the ROI
img = cv2.resize(img, dsize=(FLAGS.net_img_height, FLAGS.net_img_width))
# cv2.imshow('test', img)
imgs.append(img)
lane_invasion.clear()
current_img_state = np.array(imgs)
current_img_state = current_img_state*2./255. - 1.
## get current action and control the egopilots
current_action, current_step = sess.run([max_action_index_online, global_step], feed_dict={online_img_state: current_img_state})
## control the egopilots ##
i = 0
for egopilot, c_a in zip(egopilots, current_action):
## e-greedy
current_action_index = e_greedy(current_step, c_a)
current_action[i] = current_action_index
steer = action_index_2_steer(current_action_index)
throttle = 0.5
brake = 0.
ego_v = egopilot.get_velocity()
ego_v = math.sqrt(ego_v.x ** 2 + ego_v.y ** 2 + ego_v.z ** 2)
if ego_v > 8. and throttle > 0.5:
throttle = 0.5 ## avoid velocity too big
## apply control
egopilot.apply_control(carla.VehicleControl(throttle=throttle, steer=steer, brake=brake))
i += 1
# cv2.waitKey(1000)
time.sleep(0.75) ## sleep for a while, let the action control the egopilots to next state
## reward calculation
r_s = np.zeros(shape=(len(egopilots))) ## init is 0 reward
## about the velocity and steer
for i, egopilot in enumerate(egopilots):
v = egopilot.get_velocity()
v = math.sqrt(v.x ** 2 + v.y ** 2 + v.z ** 2)
#
# if v <= 6:
# r_s[i] += v**2/6.
# elif v <= 8:
# r_s[i] += 3 * (8 - v)
# else:
# r_s[i] -= 2 * (v - 8) ** 2
#
# if egopilot.get_control().steer > 0.1 :
# r_s[i] = 0
## make steer small as possible
if v >= 0.1: ##m/s
r_s[i] += (10*(gaussian_r(egopilot.get_control().steer, mu=0., sigma=0.3)) - 5)
else:
r_s[i] = 0.
## about the collision and lane invasion
end = np.zeros(len(egopilots)).astype(np.float32)
i = 0
for egopilot, lane_invasion, obj_collision in zip(egopilots, lane_invasions, obj_collisions):
on_collision = obj_collision.get()
on_invasion = lane_invasion.get()
# if on_collision:
# r_s[i] -= 30
# end[i] = 1.
if on_invasion:
r_s[i] -= 30
end[i] = 1.
i += 1
# print('a_r:', r_s)
## get next state
imgs = []
for camera_sensor, egopilot in zip(cameras, egopilots):
img = camera_sensor.get()
            img = img[int(FLAGS.img_height*1.8//5):, :, :]  ## crop the ROI
img = cv2.resize(img, dsize=(FLAGS.net_img_height, FLAGS.net_img_width))
imgs.append(img)
next_img_state = np.array(imgs)
next_img_state = next_img_state * 2. / 255. - 1.
## put the memory in pooling
for c_img_state, c_action, n_img_state, c_r, end_f in zip(current_img_state,current_action,
next_img_state, r_s, end):
if c_r > 1.:
c = 1
else:
c = 0
memory_pool.put(memory=[c_img_state.astype(np.float32), c_action, n_img_state.astype(np.float32),
c_r, end_f])
## check whether end.
for egopilot, lane_invasion, obj_collision in zip(egopilots, lane_invasions, obj_collisions):
# on_collision = obj_collision.get()
on_invasion = lane_invasion.get()
# if on_collision:
# obj_collision.clear()
# single_execuate(target=respawn_actors, args=(world, [egopilot],))
if on_invasion:
# respawn_actors(world, [egopilot])
i = random.uniform(0, 1)
if i >= 0.7:
while True:
index = random.sample(cornet_init_points, 1)
if is_spawn_point_safe(egopilots, spawn_points[index[0]]):
respawn_actor_at(world, egopilot, transform=spawn_points[index[0]])
break
else:
while True:
index = random.randint(0, (len(spawn_points)) - 1)
if is_spawn_point_safe(egopilots, spawn_points[index]):
respawn_actor_at(world, egopilot, transform=spawn_points[index])
break
# respawn_actors(world, [egopilot])
# time.sleep(2.)
if begin and memory_pool.capacity_bigger_than(val=3500):
begin = False
online_begin_signal.set()
# print(memory_pool.get_propotion())
if FLAGS.log_every_n_steps != None:
            ## calculate running average reward ##
step = current_step % FLAGS.log_every_n_steps
avg_r = (avg_r * step + np.mean(np.array(r_s))) / (step + 1.)
if step == FLAGS.log_every_n_steps - 1:
logger.info('Step-%s:Reward:%s' % (str(current_step), str(round(avg_r,3))))
avg_r = 0.
def update_thread(sess, online_begin_signal):
online_begin_signal.wait()
logger.info('Begin online nets...')
avg_loss = 0.
while True:
#### prepare memory data ####
batch_memorys = memory_pool.get(batch_size=FLAGS.batch_size)
## calculate the norm_rewards and replace raw rewards with them.
# raw_rewards = [m[3] for m in batch_memorys]
# r = rl_tools.normalize_rewards(raw_rewards)
# rl_tools.replace(batch_memorys, r)
current_img_states = []
current_actions = []
next_img_states = []
current_rewards = []
end_flags = []
for a_memory in batch_memorys:
current_img_states.append(a_memory[0])
current_actions.append(a_memory[1])
next_img_states.append(a_memory[2])
current_rewards.append(a_memory[3])
end_flags.append(a_memory[4])
current_img_states = np.array(current_img_states)
current_actions = np.array(current_actions)
next_img_states = np.array(next_img_states)
current_rewards = np.array(current_rewards)
end_flags = np.array(end_flags)
q_l, up = sess.run([q_loss, online_update], feed_dict={online_img_state: current_img_states, real_action_index: current_actions,
reward: current_rewards, target_img_state: next_img_states, whether_end: end_flags,
lr: FLAGS.learning_rate})
current_step = sess.run(global_step)
if FLAGS.log_every_n_steps != None:
            ## calculate running average loss ##
step = current_step % FLAGS.log_every_n_steps
avg_loss = (avg_loss * step + q_l) / (step + 1.)
if step == FLAGS.log_every_n_steps - 1:
logger.info('Step-%s:Q_loss:%s' % (str(current_step), str(round(avg_loss, 3))))
avg_loss = 0.
if FLAGS.f_save_step != None:
step = current_step % FLAGS.f_save_step
if step == FLAGS.f_save_step - 1:
## save model ##
logger.info('Saving model...')
model_name = os.path.join(FLAGS.train_dir, 'dqn_keep_lane')
saver.save(sess, model_name, global_step=current_step)
                logger.info('Save model success...')
if current_step % FLAGS.target_update_f == FLAGS.target_update_f - 1:
sess.run(update_target_ops)
def vis_memory_thread():
while True:
if memory_pool.capacity_bigger_than(val=20):
#### prepare memory data ####
batch_memorys = memory_pool.get(batch_size=15)
## calculate the norm_rewards and replace raw rewards with them.
raw_rewards = [m[3] for m in batch_memorys]
r = rl_tools.normalize_rewards(raw_rewards)
rl_tools.replace(batch_memorys, r)
current_img_states = []
current_actions = []
next_img_states = []
current_rewards = []
end_flags = []
for a_memory in batch_memorys:
current_img_states.append(a_memory[0])
current_actions.append(a_memory[1])
next_img_states.append(a_memory[2])
current_rewards.append(a_memory[3])
end_flags.append(a_memory[4])
for current_img_state, current_action, next_img_state, current_reward, end_flag, in zip(current_img_states, current_actions,
next_img_states, current_rewards, end_flags):
current_img_state = np.array(current_img_state)
current_action = np.array(current_action)
next_img_state = np.array(next_img_state)
current_reward = np.array(current_reward)
end_flag = np.array(end_flag)
current_img_state = np.uint8((current_img_state + 1.)*255./2.)
next_img_state = np.uint8((next_img_state + 1.)*255./2.)
real_steer = action_index_2_steer(current_action)
logger.info('end: %s, Current steer is %s, and reward is %s'%(str(end_flag), str(real_steer), str(current_reward)))
logger.info('------------------------------------------------')
cv2.imshow('current state', current_img_state)
cv2.imshow('next state', next_img_state)
cv2.waitKey()
cv2.destroyAllWindows()
if __name__ == '__main__':
online_img_state = tf.placeholder(shape=[None, FLAGS.net_img_height, FLAGS.net_img_width, 3], dtype=tf.float32)
target_img_state = tf.placeholder(shape=[None, FLAGS.net_img_height, FLAGS.net_img_width, 3], dtype=tf.float32)
## other input ##
reward = tf.placeholder(shape=[None], dtype=tf.float32)
whether_end = tf.placeholder(shape=[None], dtype=tf.float32) ##True is end ,False is continue
real_action_index = tf.placeholder(shape=[None], dtype=tf.int64)
lr = tf.placeholder(dtype=tf.float32)
global_step = tf.Variable(0, trainable=False, name='global_step')
act_val_net_online = action_value_net()
act_val_online, vars_online = act_val_net_online.build_graph(img_state=online_img_state, n_action=FLAGS.n_action, is_training=True,
var_scope='online_act_val')
    act_val_net_target = action_value_net()
    act_val_target, vars_target = act_val_net_target.build_graph(img_state=target_img_state, n_action=FLAGS.n_action,
                                                                  is_training=True,
                                                                  var_scope='target_act_val')
#########################################
## the best action ops in current step ##
#########################################
max_action_index_online = tf.argmax(act_val_online, axis=-1)
max_action_index_target = tf.argmax(act_val_target, axis=-1)
###################################
### hard copy ops for first init###
###################################
update_target_ops = rl_tools.copy_a2b(vars_a=vars_online, vars_b=vars_target)
###########
## q loss##
###########
max_q_val_target = tf.reduce_sum(act_val_target * tf.one_hot(max_action_index_target, FLAGS.n_action), axis=-1) ## need img_state_target
q_val_online = tf.reduce_sum(act_val_online * tf.one_hot(real_action_index, FLAGS.n_action), axis=-1) ## need img_state_online, real_action_index
q_loss = tf.reduce_mean(tf.square(reward + (1.-whether_end)*max_q_val_target - q_val_online)) ## need reward, whether_end
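    ## The line above is the one-step DQN target
    ##     y = reward + (1 - whether_end) * max_a Q_target(next_state, a)
    ## (no discount factor is applied, i.e. gamma is effectively 1.0); q_loss is
    ## the mean squared TD error between y and Q_online(state, chosen_action).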
###############
## update #####
############
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
optimizer_for_online = tf.train.RMSPropOptimizer(learning_rate=lr)
q_gradients_vars = optimizer_for_online.compute_gradients(q_loss, var_list=vars_online)
capped_gvs = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in q_gradients_vars] ## clip the gradients
online_update = optimizer_for_online.apply_gradients(capped_gvs, global_step=global_step)
####
## sess.run([q_loss, online_update], feed_dict={img_state_online: current_img, real_action_index: current_action, reward: current_reward,
# img_state_target: next_img, whether_end: end_flags})
##########################
### init, saver, ckpt ####
##########################
init = tf.global_variables_initializer()
saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    logger.info('TensorFlow graph build success...')
logger.info('Total trainable parameters:%s' %
str(np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))
########################### TENSORFLOW GRAPH ######################################
#### carla world init ####
client = carla.Client('127.0.0.1', 2000)
client.set_timeout(10.0) # seconds
logger.info('Carla connect success...')
logger.info('Carla world initing...')
world = client.get_world()
destroy_all_actors(world)
## spawn vehicles in carla world
# spawn_points = list(world.get_map().get_spawn_points())
# spawn_egopilot_at(world, spawn_points[45])
spawn_vehicles(world, n_autopilots=0, n_egopilots=FLAGS.n_egopilots)
    time.sleep(2)  ## sometimes unstable right after spawning
autopilots = get_all_autopilots(world)
egopilots = get_all_egopilots(world)
cameras = []
lane_invasions = []
obj_collisions = []
# obstacle_aheads = []
logger.info('Adding some sensors to egopilots...')
for egopilot in egopilots:
## attach a camera to egopilot ##
# semantic_camera_config['attach_to'] = egopilot
# semantic_sensor = semantic_camera(world, semantic_camera_config)
# cameras.append(semantic_sensor)
bgr_camera_config['attach_to'] = egopilot
bgr_sensor = bgr_camera(world, bgr_camera_config)
cameras.append(bgr_sensor)
## attach collision sensor to egopilot ##
collision_sensor_config['attach_to'] = egopilot
collision_sensor = collision_query(world, collision_sensor_config)
obj_collisions.append(collision_sensor)
## attach line invasion sensor to egopilot ##
invasion_sensor_config['attach_to'] = egopilot
lane_invasion_sensor = lane_invasion_query(world, invasion_sensor_config)
lane_invasions.append(lane_invasion_sensor)
# ## attach obstacle sensor to egopilot
# obstacle_sensor_config['attach_to'] = egopilot
# obstacle_sensor = obstacle_ahead_query(world, obstacle_sensor_config)
# obstacle_aheads.append(obstacle_sensor)
logger.info('Adding some sensors to egopilots success')
memory_pool = rl_tools.memory_pooling(maxlen=4000)
online_begin_signal = threading.Event()
spawn_points = list(world.get_map().get_spawn_points())
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
if ckpt:
logger.info('loading %s...' % str(ckpt.model_checkpoint_path))
saver.restore(sess, ckpt.model_checkpoint_path)
logger.info('Load checkpoint success...')
else:
sess.run(init)
sess.run(update_target_ops)
logger.info('DQN all network variables init success...')
check_t = threading.Thread(target=check_whether_respawn_actors, args=(world, autopilots + egopilots,))
target_t = threading.Thread(target=target_thread, args=(sess, online_begin_signal,))
# # respwan_v_t = threading.Thread(target=respwan_vehicles_in_traffic_light)
update_t = threading.Thread(target=update_thread, args=(sess, online_begin_signal,))
target_t.daemon = True
check_t.daemon = True
update_t.daemon = True
check_t.start()
# # respwan_v_t.start()
target_t.start()
update_t.start()
# vis_memory_thread()
while True:
pass
|
httpstream.py
|
from queue import Queue
from collections import namedtuple
from tornado.platform.asyncio import AnyThreadEventLoopPolicy
import asyncio
import aiohttp
import itertools
import threading
# todo remove
import simplestopwatch as sw
# todo check out aiodns resolver
# https://stackoverflow.com/a/45169094/1102470
Response = namedtuple('Response', ['request', 'status', 'reason', 'text'])
# Used to flush the response queue and stop the iterator.
STOP_SENTINEL = {}
def grouper(n, iterable):
""" Yields successive lists of size n from iterable """
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
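# Illustrative only: grouper(2, range(5)) yields (0, 1), (2, 3) and finally (4,);
# the last chunk may be shorter than n.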
async def send(client, request):
""" Handles a single request """
async with client.get(request) as response:
return Response(
request=request,
status=response.status,
reason=response.reason,
text=await response.text(),
# json=await response.json(),
)
async def send_chunk(client, requests):
""" Handles a chunk of requests asynchronously """
tasks = (asyncio.ensure_future(send(client, r)) for r in requests)
return await asyncio.gather(*tasks)
async def send_stream(requests, sync_queue, concurrency_limit):
""" Handles a stream of requests and pushes responses to a queue """
async with aiohttp.ClientSession() as client:
# Gather responses in chunks of size concurrency_limit
for request_chunk in grouper(concurrency_limit, requests):
for response in await send_chunk(client, request_chunk):
sync_queue.put(response)
sync_queue.put(STOP_SENTINEL)
def response_generator(sync_queue):
""" Wrap a standard queue with a generator """
while True:
response = sync_queue.get()
if response is STOP_SENTINEL:
return
yield response
def worker(loop, pending_tasks):
loop.run_until_complete(asyncio.gather(*pending_tasks))
loop.close()
def streamer(requests, concurrency_limit=1000):
"""
Returns a generator of HTTP responses for the given generator of HTTP requests.
Results are returned in the same order as received.
The response generator will block while waiting for the HTTP requests to
be completed asynchronously. Callers may iterate over the results as
quickly as they arrive using a standard generator. This enables
lazy-evaluated HTTP streams.
Example:
urls = (f"http://my.company/{i}" for i in range(10))
responses = streamer(urls)
data = (my_transform_function(r) for r in responses)
"""
sync_queue = Queue(concurrency_limit)
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
loop = asyncio.get_event_loop()
loop.create_task(send_stream(requests, sync_queue, concurrency_limit))
pending_tasks = asyncio.Task.all_tasks()
threading.Thread(name='worker', target=worker, args=(loop, pending_tasks)).start()
return response_generator(sync_queue)
NUM_URLS = 1000
def urls_gen():
for _ in range(NUM_URLS):
yield 'http://localhost:8080/'
if __name__ == '__main__':
print("Running main")
timer = sw.Timer()
responses = streamer(urls_gen())
for r in responses:
pass
timer.stop()
print()
print("Time elapsed:", timer.elapsed)
print("Human time:", timer.elapsed_human)
print("Rate:", NUM_URLS / timer.elapsed)
print("Ending main")
|
tvhProxy.py
|
#!/usr/bin/env python
from gevent import monkey
monkey.patch_all()
from dotenv import load_dotenv
from ssdp import SSDPServer
from flask import Flask, Response, request, jsonify, abort, render_template
from gevent.pywsgi import WSGIServer
import xml.etree.ElementTree as ElementTree
from datetime import timedelta, datetime, time
import logging
import socket
import threading
import requests
from requests.auth import HTTPDigestAuth
import os
import sched
logging.basicConfig(level=logging.DEBUG)
load_dotenv(verbose=True)
app = Flask(__name__)
scheduler = sched.scheduler()
log_format = "[%(asctime)s: %(levelname)s/%(process)s:%(thread)s] %(name)s:%(funcName)s:%(lineno)d - %(message)s"
logger = logging.getLogger()
logger.propagate = True
logger.setLevel(10)
host_name = socket.gethostname()
host_ip = socket.gethostbyname(host_name)
# URL format: <protocol>://<username>:<password>@<hostname>:<port>, example: https://test:1234@localhost:9981
config = {
'deviceID': os.environ.get('DEVICE_ID') or '12345678',
'bindAddr': os.environ.get('TVH_BINDADDR') or '',
'tvhURL': os.environ.get('TVH_URL') or 'http://localhost:9981',
'tvhUser': os.environ.get('TVH_USER') or 'test',
'tvhPassword': os.environ.get('TVH_PASSWORD') or 'test',
    # only used if set (in case of a forward proxy); otherwise assembled from host + port below
'tvhProxyURL': os.environ.get('TVH_PROXY_URL'),
'tvhProxyHost': os.environ.get('TVH_PROXY_HOST') or host_ip,
    'tvhProxyPort': int(os.environ.get('TVH_PROXY_PORT') or 5004),
# number of tuners in tvh
'tunerCount': os.environ.get('TVH_TUNER_COUNT') or 6,
'tvhWeight': os.environ.get('TVH_WEIGHT') or 300, # subscription priority
# usually you don't need to edit this
'chunkSize': os.environ.get('TVH_CHUNK_SIZE') or 1024*1024,
    # specify a stream profile that you want to use for ad-hoc transcoding in tvh, e.g. mp4
'streamProfile': os.environ.get('TVH_PROFILE') or 'pass'
}
discoverData = {
'FriendlyName': 'tvhProxy',
'Manufacturer': 'Silicondust',
'ModelNumber': 'HDTC-2US',
'FirmwareName': 'hdhomeruntc_atsc',
'TunerCount': int(config['tunerCount']),
'FirmwareVersion': '20150826',
'DeviceID': config['deviceID'],
'DeviceAuth': 'test1234',
'BaseURL': '%s' % (config['tvhProxyURL'] or "http://" + config['tvhProxyHost'] + ":" + str(config['tvhProxyPort'])),
'LineupURL': '%s/lineup.json' % (config['tvhProxyURL'] or "http://" + config['tvhProxyHost'] + ":" + str(config['tvhProxyPort']))
}
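# The payload above mimics the discover.json response of a Silicondust HDHomeRun
# network tuner, which is what lets HDHomeRun-aware clients (e.g. Plex) treat
# tvhProxy as a tuner; DeviceAuth is a static placeholder value here.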
@app.route('/discover.json')
def discover():
return jsonify(discoverData)
@app.route('/lineup_status.json')
def status():
return jsonify({
'ScanInProgress': 0,
'ScanPossible': 0,
'Source': "Cable",
'SourceList': ['Cable']
})
@app.route('/lineup.json')
def lineup():
lineup = []
for c in _get_channels():
if c['enabled']:
url = '%s/stream/channel/%s?profile=%s&weight=%s' % (
config['tvhURL'], c['uuid'], config['streamProfile'], int(config['tvhWeight']))
lineup.append({'GuideNumber': str(c['number']),
'GuideName': c['name'],
'URL': url
})
return jsonify(lineup)
@app.route('/lineup.post', methods=['GET', 'POST'])
def lineup_post():
return ''
@app.route('/')
@app.route('/device.xml')
def device():
return render_template('device.xml', data=discoverData), {'Content-Type': 'application/xml'}
@app.route('/epg.xml')
def epg():
return _get_xmltv(), {'Content-Type': 'application/xml'}
def _get_channels():
url = '%s/api/channel/grid?start=0&limit=999999' % config['tvhURL']
logger.info('downloading channels from %s', url)
try:
r = requests.get(url, auth=HTTPDigestAuth(config['tvhUser'], config['tvhPassword']))
r.raise_for_status()
except requests.exceptions.Timeout:
# Maybe set up for a retry, or continue in a retry loop
logger.info('Timeout received from %s', url)
except requests.exceptions.TooManyRedirects:
# Tell the user their URL was bad and try a different one
logger.info('Too many redirects received from %s', url)
except requests.exceptions.HTTPError as e:
raise SystemExit(e)
except requests.exceptions.RequestException as e:
# catastrophic error. bail.
raise SystemExit(e)
return r.json()['entries']
#except Exception as e:
# logger.error('An error occured: %s' + repr(e))
def _get_xmltv():
url = '%s/xmltv/channels' % config['tvhURL']
logger.info('downloading xmltv from %s', url)
try:
r = requests.get(url, auth=HTTPDigestAuth(config['tvhUser'], config['tvhPassword']))
r.raise_for_status()
tree = ElementTree.ElementTree(
ElementTree.fromstring(requests.get(url, auth=HTTPDigestAuth(config['tvhUser'], config['tvhPassword'])).content))
root = tree.getroot()
channelNumberMapping = {}
channelsInEPG = {}
for child in root:
if child.tag == 'channel':
channelId = child.attrib['id']
channelNo = child[1].text
channelNumberMapping[channelId] = channelNo
if channelNo in channelsInEPG:
logger.error("duplicate channelNo: %s", channelNo)
channelsInEPG[channelNo] = False
child.remove(child[1])
# FIXME: properly rewrite with TVH_URL or even proxy
child[1].attrib['src'] = child[1].attrib['src']+".png"
child.attrib['id'] = channelNo
if child.tag == 'programme':
child.attrib['channel'] = channelNumberMapping[child.attrib['channel']]
channelsInEPG[child.attrib['channel']] = True
for key in sorted(channelsInEPG):
if channelsInEPG[key]:
logger.debug("Programmes found for channel %s", key)
else:
channelName = root.find(
'channel[@id="'+key+'"]/display-name').text
logger.error("No programme for channel %s: %s",
key, channelName)
# create 2h programmes for 72 hours
yesterday_midnight = datetime.combine(
datetime.today(), time.min) - timedelta(days=1)
date_format = '%Y%m%d%H%M%S'
for x in range(0, 36):
dummyProgramme = ElementTree.SubElement(root, 'programme')
dummyProgramme.attrib['channel'] = str(key)
dummyProgramme.attrib['start'] = (
yesterday_midnight + timedelta(hours=x*2)).strftime(date_format)
dummyProgramme.attrib['stop'] = (
yesterday_midnight + timedelta(hours=(x*2)+2)).strftime(date_format)
dummyTitle = ElementTree.SubElement(
dummyProgramme, 'title')
dummyTitle.attrib['lang'] = 'eng'
dummyTitle.text = channelName
dummyDesc = ElementTree.SubElement(dummyProgramme, 'desc')
dummyDesc.attrib['lang'] = 'eng'
dummyDesc.text = "No programming information"
logger.info("returning epg")
return ElementTree.tostring(root)
except requests.exceptions.Timeout:
# Maybe set up for a retry, or continue in a retry loop
logger.info('Timeout received from %s', url)
except requests.exceptions.TooManyRedirects:
# Tell the user their URL was bad and try a different one
logger.info('Too many redirects received from %s', url)
except requests.exceptions.HTTPError as e:
raise SystemExit(e)
except requests.exceptions.RequestException as e:
# catastrophic error. bail.
raise SystemExit(e)
    except Exception as e:
        logger.error('An error occurred: %s', repr(e))
def _start_ssdp():
ssdp = SSDPServer()
thread_ssdp = threading.Thread(target=ssdp.run, args=())
thread_ssdp.daemon = True # Daemonize thread
thread_ssdp.start()
ssdp.register('local',
'uuid:{}::upnp:rootdevice'.format(discoverData['DeviceID']),
'upnp:rootdevice',
'http://{}:{}/device.xml'.format(
config['tvhProxyHost'], config['tvhProxyPort']),
'SSDP Server for tvhProxy')
if __name__ == '__main__':
http = WSGIServer((config['bindAddr'], config['tvhProxyPort']),
app.wsgi_app, log=logger, error_log=logger)
logger.info('Starting server on host %s port %d.',config['bindAddr'], config['tvhProxyPort'])
#_start_ssdp()
http.serve_forever()
|
app.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import sys, glob, os
sys.path.insert(0, glob.glob(os.environ.get('LUCIDAROOT') +
'/../tools/thrift-0.9.3/lib/py/build/lib*')[0])
from controllers import *
from flask import *
from threading import Thread
import logging
# Initialize the Flask app with the template folder address.
app = Flask(__name__, template_folder='templates')
# app.config.from_object('config')
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # 16 MB due to MongoDB
# Register the controllers.
app.register_blueprint(Main.main)
app.register_blueprint(User.user)
app.register_blueprint(Create.create)
app.register_blueprint(Learn.learn)
app.register_blueprint(Infer.infer)
# Session.
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
def flask_listener():
app.run(host='0.0.0.0', port=3000, debug=True, use_reloader=False,
threaded=True)
def web_socket_listener():
    print('Start web socket at 8081')
logging.basicConfig(level=logging.DEBUG,
format="%(levelname)8s %(asctime)s %(message)s ")
logging.debug('Starting up server')
WebSocket.tornado.options.parse_command_line()
WebSocket.Application().listen(8081)
WebSocket.tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
Thread(target = flask_listener).start()
web_socket_listener()
|
main.py
|
# Required libraries
from tkinter import *
from tkinter import ttk
from PIL import ImageTk, Image
import numpy as np
import sys
sys.path.insert(1, 'config/')
import languaje as lang
import setting as stt
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.patches import Ellipse
import matplotlib.animation as animation
from matplotlib.text import OffsetFrom
from matplotlib.gridspec import GridSpec
import csv
import json
## Language helper
def i18n(a, b, *f):
    i18n = lang.i18n(a, b)
    return i18n
## Plot shape variables
Y_ampl = 18
X_time = 10
average = 2000
niveldb = 80
fonticksmini = {'fontsize': 6}
GridTrue = True
LANG=0
continuePlotting = False
def cmd(icon):
pass
def change_state():
global continuePlotting
print(2)
if continuePlotting == True:
continuePlotting = False
else:
continuePlotting = True
def averageTick(average):
ticksAverage = average/13
ticks =[0]
i = 0
while i < 13:
i = i+1
suma = ticksAverage * i
ticks.append(suma)
return ticks
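# Illustrative only: averageTick(2000) returns 14 tick positions from 0 up to the
# average value, i.e. the x-axis of the averaging plot is split into 13 equal segments.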
class CreateToolTip(object):
'''
create a tooltip for a given widget
'''
def __init__(self, widget, text='widget info'):
self.widget = widget
self.text = text
self.widget.bind("<Enter>", self.enter)
self.widget.bind("<Leave>", self.close)
def enter(self, event=None):
x = y = 0
x, y, cx, cy = self.widget.bbox("insert")
        x += self.widget.winfo_rootx() + 25
        y += self.widget.winfo_rooty() + 20
# creates a toplevel window
self.tw = Toplevel(self.widget)
# Leaves only the label and removes the app window
self.tw.wm_overrideredirect(True)
self.tw.wm_geometry("+%d+%d" % (x, y))
label = Label(self.tw, text=self.text, justify='left',
background='white', relief='solid', borderwidth=1,
font=("arial", "10", "normal"))
label.pack(ipadx=1)
def close(self, event=None):
if self.tw:
self.tw.destroy()
class windows():
def __init__(self, root):
        # Initialize the window
        self.root = root
        self.root.config(background='white')
        self.root.geometry(stt.size_window[0])  # Initial window size
        self.root.update_idletasks()
        self.root.minsize(stt.size_window[1], stt.size_window[2])  # Minimum window size
        #self.root.maxsize(stt.size_window[1], stt.size_window[2])  # keeps a fixed window size
        self.root.call('wm', 'iconphoto', self.root._w, ImageTk.PhotoImage(Image.open('resources/icon.ico')))  # Window icon
        self.root.title("simPEATC")  ## Window title
self.app()
def anim(self):
a = []
b = []
with open('curvatest.csv','r') as csvfile:
plots = csv.reader(csvfile, delimiter=',')
for row in plots:
a.append((float(row[0])/10)-4)
b.append(float(row[1])/6)
t = np.linspace(1, 12, 26)
out_a= np.asarray(b)
out_b= np.asarray(a)
x_watts = out_a ** 2
target_noise_db = 30
prom=0
text=r"80 OD "
c_red=[1.0,0.5,0.5]
c_blue=[0.5,0.5,1.0]
color=c_red
el = Ellipse((2, -1), 0.5, 0.5)
plt.ion()
self.ax3.add_patch(el)
self.Btn_iniciar.configure(text="Detener")
for i in range(55):
target_noise_db = target_noise_db - 1
target_noise_watts = 10 ** (target_noise_db / 10)
mean_noise = 0
noise_volts = np.random.normal(mean_noise, np.sqrt(target_noise_watts), len(x_watts))
y_volts = out_a + noise_volts
ytext=y_volts[0]
xtext=out_b[0]
prom=prom+1
self.ax3.plot(out_b, y_volts, color=color)
self.ax3.grid(True)
self.ax3.yaxis.set_major_formatter(plt.NullFormatter())
self.ax3.grid(GridTrue, linestyle='--')
self.ax3.set_xticks(np.arange(X_time+1))
self.ax3.set_yticks(np.arange(Y_ampl+1))
self.ax3.set_xlim(0,X_time)
self.ax3.set_ylim(0,Y_ampl)
self.ann = self.ax3.annotate(text,
xy=(xtext,ytext), xycoords='data',
xytext=(8, 0), textcoords='offset points',
size=30, va="center",
bbox=dict(boxstyle="round", fc=(color), ec="none"),
arrowprops=dict(arrowstyle="wedge,tail_width=1.",
fc=(color), ec="none",
patchA=None,
patchB=el,
relpos=(0.2, 0.5)))
self.graphy.draw()
self.ax3.clear()
plt.pause(0.2)
#plt.cla()
def graxx(self):
data=[]
data=[1,(1,2,3,4,5,6,7,8,9,10),(5,5,5,5,5,5,5,5,5,5)]
x=(1,2,3,4,5,6,7,8,9)
y=(9,7,6,5,6,4,3,2,1)
self.ax3.plot(x,y)
self.graphy.draw()
self.ax3.clear()
def graph(self, *L):
fig = plt.figure(num="TUG", figsize=(3,3))
t = np.arange(0, 3, .01)
gs1 = GridSpec(9, 8)
gs1.update(left=0.05, right=0.95, wspace=0.5, hspace=1, top=0.98, bottom=0.08)
self.ax1 = plt.subplot(gs1[0:1,0:2])
self.ax1.grid(GridTrue)
self.ax1.set_xlim(0,1)
self.ax1.set_ylim(0,3)
self.ax1.set_yticks([1,2])
self.ax1.set_xticks([0,1])
self.ax1.xaxis.set_major_formatter(plt.NullFormatter())
self.ax1.set_yticklabels(['D','I'],fontdict=fonticksmini,horizontalalignment='left')
self.ax2 = plt.subplot(gs1[0:1, 3:8])
self.ax2.grid(GridTrue)
self.ax2.set_xlim(0,average)
self.ax2.set_ylim(0,200)
tiks_X = averageTick(average)
self.ax2.set_yticks([0,40, 150, 200])
self.ax2.set_xticks(tiks_X)
self.ax2.set_yticklabels(['',40,'',200],fontdict=fonticksmini)
self.ax2.set_xticklabels([0,'','','','','','','','','','','','',average],fontdict=fonticksmini)
self.ax3 = plt.subplot(gs1[1:, 0:4])
self.ax3.yaxis.set_major_formatter(plt.NullFormatter())
self.ax3.grid(GridTrue, linestyle='--')
self.ax3.set_xticks(np.arange(X_time+1))
self.ax3.set_yticks(np.arange(Y_ampl+1))
self.ax3.set_xlim(0,X_time)
self.ax3.set_ylim(0,Y_ampl)
self.ax4 = plt.subplot(gs1[1:, 4:8])
self.ax4.yaxis.set_major_formatter(plt.NullFormatter())
self.ax4.grid(GridTrue, linestyle='--')
self.ax4.set_xlim(0,X_time)
self.ax4.set_ylim(0,Y_ampl)
self.ax4.set_yticks(np.arange(Y_ampl+1))
self.ax4.set_xticks(np.arange(X_time+1))
return fig
def app(self):
        # Frame layout; proportions come from the stt.size_frame variable in setting.py
        frame_quick = Frame(bd=1, relief="sunken")  ## creates the top box
        frame_contenido = Frame(bd=1, bg="white", relief="sunken")  ## creates the right box
        frame_info = Frame(bd=1, relief="sunken")  ## creates the bottom box
        frame_command = Frame(bd=1, relief="sunken")  ## creates the left box
frame_quick.place(relx=0, rely=0, relwidth=stt.size_frame['up'][0], relheight=stt.size_frame['up'][1])
frame_contenido.place(relx=stt.size_frame['izq'][0],rely=stt.size_frame['up'][1],
relwidth=stt.size_frame['der'][0], relheight=stt.size_frame['der'][1])
#frame_command.place(relx=0, rely=stt.size_frame['up'][1], relwidth=stt.size_frame['izq'][0],
# relheight=stt.size_frame['izq'][1])
frame_command.place(relx=0,rely=stt.size_frame['up'][1],relheight=stt.size_frame['izq'][1], width=stt.size_frame['izq'][5])
frame_info.place(relx=0, rely=stt.size_frame['down'][3], relwidth=stt.size_frame['down'][0],
relheight=stt.size_frame['down'][1])
        # Place the figure over the right box as a canvas
fig = self.graph()
self.graphy = FigureCanvasTkAgg(fig, master=frame_contenido)
self.graphy.get_tk_widget().pack(side="top",fill='both',expand=True)
        # Menu
menu = Menu(self.root)
self.root.config(menu=menu)
file = Menu(menu, tearoff=0)
file.add_command(label="Abrir usuario")
file.add_command(label="Nuevo usuario")
file.add_command(label="Cerrar usuario")
file.add_separator()
file.add_command(label="Salir")
menu.add_cascade(label="Archivo", menu=file)
edit = Menu(menu, tearoff=0)
edit.add_command(label="Nueva Prueba")
edit.add_command(label="Borrar Prueba")
edit.add_separator()
edit.add_command(label="Abrir prueba suelta")
menu.add_cascade(label="Editar", menu=edit)
help = Menu(menu, tearoff=0)
help.add_command(label="Ayuda")
help.add_separator()
help.add_command(label="Acerca de nosotros",)
menu.add_cascade(label="Ayuda", menu=help)
        # Refresh the window info and read the command frame's width and height
self.root.update()
fr_cmd_with = frame_command.winfo_width()
fr_cmd_height = frame_command.winfo_height()
        # Tabs for the command box on the left side of the main window
note_command = ttk.Notebook(frame_command, width=stt.size_frame['izq'][5], height=fr_cmd_height)
#note_command.grid(row=1, column=0, columnspan=50, rowspan=49, sticky='NESW')
note_command.pack(expand=True, fill=BOTH)
        # Tab 1: recording
tab_registro= Frame(note_command)
note_command.add(tab_registro, text=lang.i18n('record',LANG,0))
tab_mark= Frame(note_command)
note_command.add(tab_mark, text=lang.i18n('edit',LANG,0))
tab_latency= Frame(note_command)
note_command.add(tab_latency, text=lang.i18n('latency',LANG,0))
        # Tab 1, frame 1: stimulus
frame_estimulo =Frame(tab_registro, relief=GROOVE, borderwidth=2)
label_nivel = (lang.i18n('level',LANG,0)+':'+str(niveldb)+' db nHL')
Label(frame_estimulo, text=label_nivel).grid(row=1,sticky=W)
        Label(frame_estimulo, text=lang.i18n('record',LANG,0)+':'+ lang.stim[0]).grid(row=2,sticky=W)  # the stimulus should be configurable
Label(frame_estimulo, text='Mask : Off').grid(row=3,sticky=W)
Label(frame_estimulo, text='Estimulo',font=("Courier",10)).grid(row=0)
frame_estimulo.place(rely=0.03)
        # Tab 1, frame 2: new test
frame_new_test=Frame(tab_registro, relief=GROOVE, borderwidth=2)
check_array = ['0db', '10db', '20db', '30db',
'40db', '50db', '60db', '70db', '80dB',
'90db', '100db']
check_vars = []
for i in range(len(check_array)):
check_vars.append(StringVar())
check_vars[-1].set(0)
c = Checkbutton(frame_new_test, text=check_array[i], variable=check_vars[-1], command=lambda i=i: printSelection(i), onvalue=1, offvalue=0)
if i < 1:
c.grid(row=i+1,sticky=W)
else:
c.grid(row=i,sticky=W)
Lado_estimulo = [
("OD", "1"),
("OI", "2"),
("Bilateral", "3")]
v = StringVar()
v.set("1") # initialize
for text, mode in Lado_estimulo:
b = Radiobutton(frame_new_test, text=text,
variable=v, value=mode)
b.grid(sticky=W)
Label(frame_new_test, text='Prueba',font=("Courier",10)).grid(row=0)
frame_new_test.place(rely=0.03, relx=0.65)
        # Tab 1, frame 3: reproducibility
frame_reproductibilidad=Frame(tab_registro, relief=GROOVE, borderwidth=2)
reproductibilidad = ttk.Progressbar(frame_reproductibilidad,
orient="horizontal",length=200, mode="determinate")
reproductibilidad.grid(row=2, columnspan=3)
Label(frame_reproductibilidad, text='0').grid(row=1, column=0, sticky=W)
Label(frame_reproductibilidad, text='50').grid(row=1, column=1, sticky=W+E)
Label(frame_reproductibilidad, text='100').grid(row=1, column=2, sticky=E)
Label(frame_reproductibilidad, text='% de reproductibilidad ',
font=("Courier",10)).grid(row=0, columnspan=3)
frame_reproductibilidad.place(rely=0.5, relx=0.07)
        # Tab 1, frame 4: averaging
        frame_promediaciones=Frame(tab_registro, relief=GROOVE, borderwidth=2)
Label(frame_promediaciones, text='Promediaciones',
font=("Courier",10)).grid(row=0)
prom_estim=0
rechazos=0
Label(frame_promediaciones,text=('Promediaciones: '+str(prom_estim))).grid(row=1, sticky=W)
Label(frame_promediaciones,text=('Rechazos: '+str(rechazos))).grid(row=2,sticky=W)
frame_promediaciones.place(rely=0.3, relx=0)
        # Tab 1, frame 5: control buttons
        frame_iniciar=Frame(tab_registro, relief=GROOVE, borderwidth=0)
self.Btn_iniciar = Button(frame_iniciar, state=NORMAL,text="Iniciar", height=2, width=22, command=self.anim)
self.Btn_iniciar.grid(row=1)
        self.Btn_pause = Button(frame_iniciar, state=DISABLED, text="Pause", height=1, width=22)
        self.Btn_pause.grid(row=2)
        self.Btn_next = Button(frame_iniciar, state=DISABLED, text="Siguiente estimulo", height=1, width=22)
        self.Btn_next.grid(row=3)
frame_iniciar.place(rely=0.65, relx=0.07)
#Quick Bar
#Button(frame_quick, text='Ayuda').pack(anchor=W)
width = 50
height = 50
icons = ('new', 'save', 'saveas', 'print', 'potential', 'config', 'help')
names = ('Nuevo', 'Guardar', 'Guardar como...','Imprimir', 'Potenciales', 'Configurar', 'Ayuda')
for i, icon in enumerate(icons):
tool_bar_icon = PhotoImage(file='resources/icons/{}.png'.format(icon))
#cmd = eval(icon)
small_logo = tool_bar_icon.subsample(4, 4)
tool_bar = Button(frame_quick, bd=0, image=small_logo, )
tool_bar.image = small_logo
button1_ttp = CreateToolTip(tool_bar, names[i])
tool_bar.pack(side='left')
test = StringVar()
select_test = ttk.Combobox(frame_quick, textvariable=test,state="readonly")
select_test['values']=['PEAT, Tono Click','PEAT, tono Burst', 'PEAT tono Chirp']
select_test.current(0)
select_test.pack(side='left')
def change_name(self):
self.Btn_iniciar.config(text="hhh")
def printSelection(i):
print(check_vars[i].get())
def plotter():
ax3.cla()
ax3.grid()
L1 = ax3.plot([1,2,3,4,5,6,7,8,9,10],[5,5,5,5,5,5,5,5,5,5])
graph.draw()
time.sleep(1)
def gui_handler():
change_state()
threading.Thread(target=OD_plotter).start()
if __name__ == '__main__':
root = Tk()
my_gui = windows(root)
root.mainloop()
|
spi_handler.py
|
#!/usr/bin/env python
import sys
sys.path.append("../../")
import rospy
from std_msgs.msg import String
import numpy as np
from sensor_msgs.msg import PointCloud2, PointField
from sensor_msgs.msg import CompressedImage
from std_msgs.msg import Header
from nav_msgs.msg import Odometry
import cv2
import threading
from global_localization.online.place_recognizer import PlaceRecognizer
from global_localization.online.feature_extractor import FeatureExtractor
from global_localization.online.pose_estimator import PoseEstimator
from global_localization.online.global_localizer import GlobalLocalizer
"""
This node handles SPI (Submap Projection Image) images for global localization.
It is implemented as a ROS node.
Inputs:
database SPI images
global pose of database SPI images
query SPI image
Outputs:
global pose of query SPI image
"""
__author__ = 'Yanhao LI <yanhao.li at outlook.com>'
__version__= '0.1'
__license__ = 'BSD'
database_spi_topic = "database_spi_image"
query_spi_topic = "query_spi_image"
def array2CompressedImage(array):
msg = CompressedImage()
msg.header.stamp = rospy.Time.now()
msg.format = "png"
    msg.data = np.array(cv2.imencode('.png', array)[1]).tobytes()
return msg
def CompressedImage2Array(compressed_image):
    np_arr = np.frombuffer(compressed_image.data, np.uint8)
    image = cv2.imdecode(np_arr, cv2.IMREAD_GRAYSCALE)
return image
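# --- illustrative sketch (not part of the original node) -----------------------------
# A rough round-trip check for the two helpers above. array2CompressedImage() stamps the
# header with rospy.Time.now(), so this can only run after rospy.init_node(); the dummy
# image size is an arbitrary assumption.
def _example_compressed_image_roundtrip():
    gray = np.zeros((600, 600), dtype=np.uint8)   # dummy grayscale SPI-sized image
    msg = array2CompressedImage(gray)             # numpy array -> sensor_msgs CompressedImage
    decoded = CompressedImage2Array(msg)          # CompressedImage -> numpy array
    return decoded.shape == gray.shape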
class SpiHandler(object):
def __init__(self):
super().__init__()
rospy.init_node('spi_handler', anonymous=True)
# self.database_spi_sub_ = rospy.Subscriber("query_spi_image", CompressedImage, self.db_spi_image_callback, queue_size=1)
### For Test Use ###
# self.place_recognizer_ = PlaceRecognizer()
# self.feature_extractor_ = FeatureExtractor()
# self.pose_estimator_ = PoseEstimator()
# self.image_id_ = 0
# self.query_spi_sub_ = rospy.Subscriber("query_spi_image", CompressedImage, self.query_spi_image_callback, queue_size=1)
### For Release Use ###
self.global_localizer_ = GlobalLocalizer()
self.query_spi_sub_ = rospy.Subscriber("/spi_image/compressed", CompressedImage, self.slam_spi_image_callback,
queue_size=1)
self.fake_spi_pub_ = rospy.Publisher('query_spi_image', CompressedImage, queue_size=10)
# sending_thread_ = threading.Thread(target=self.spi_image_player)
# sending_thread_.start()
def db_spi_image_callback(self, msg):
image = CompressedImage2Array(msg)
cv2.imshow("spi_image_callback", image)
cv2.waitKey(delay=1)
pose = np.identity(4)
self.place_recognizer_.save_spi(image)
print(image.shape)
def query_spi_image_callback(self, msg):
image = CompressedImage2Array(msg)
image_spinetvlad = cv2.resize(image, (600, 600), interpolation=cv2.INTER_LINEAR)
# print("decoded image msg", image.shape)
results = self.place_recognizer_.query_spi(image_spinetvlad)
if results is not None:
candidate_image_filenames = [result['image_file'] for result in results]
# cv2.imshow("query_image", image)
result_filename = "/media/li/lavie/dataset/birdview_dataset/00/" + candidate_image_filenames[0]
result_image = cv2.imread(result_filename)
# cv2.imshow("result_image", result_image)
# cv2.waitKey(delay=1)
# print("query result:", candidate_image_filenames)
image_features = cv2.resize(image, (400, 400), interpolation=cv2.INTER_LINEAR)
features = self.feature_extractor_.extract_features(image_features)
pose = np.identity(4)
image_dir = "/media/li/lavie/dataset/birdview_dataset/05/"
image_file = image_dir + "submap_" + str(self.image_id_) + ".png"
self.image_id_ += 1
image_info = {
"image_file": image_file,
"position": pose[:3,3],
"orientation": pose[:3,:3],
"features": features,
}
# print("features:", features)
match_result = self.pose_estimator_.estimate_pose(image_info, image_info)
print("match_result:", match_result is not None)
print("query done")
def slam_spi_image_callback(self, msg):
image = CompressedImage2Array(msg)
fake_pose = np.identity(4)
result = self.global_localizer_.handle_slam_spi(image, fake_pose, msg.header.seq)
# print("result:", result)
pose, score = result
if pose is not None:
position = pose[:3, 3]
print("position: ", position)
else:
print("query failed")
# print("query done")
def spi_image_player(self):
img_id = 0
        rate = rospy.Rate(2.5)  # 2.5 Hz
while not rospy.is_shutdown():
img_filename = "/media/li/lavie/dataset/birdview_dataset/05/submap_" + str(img_id) + ".png"
rospy.loginfo(img_filename)
image = cv2.imread(img_filename, cv2.IMREAD_GRAYSCALE)
msg = array2CompressedImage(image)
msg.header.seq = img_id
self.fake_spi_pub_.publish(msg)
img_id += 1
# cv2.imshow("spi_image", image)
# cv2.waitKey(delay=1)
rate.sleep()
pass
if __name__ == '__main__':
try:
sh = SpiHandler()
rospy.spin()
except rospy.ROSInterruptException:
pass
|
offline_worker.py
|
import os
import time
import json
import copy
import cv2
import threading
import numpy as np
import requests
from shapely.geometry import Point, Polygon
from typing import Dict, List, Tuple
from dataclasses import dataclass
from concurrent.futures.thread import ThreadPoolExecutor
from nxs_libs.db import NxsDbFactory, NxsDbType
from nxs_libs.storage import NxsStorageFactory, NxsStorageType
from apps.vehicle_counting.app_types.app_request import (
InDbTrackingAppRequest,
RequestStatus,
)
from nxs_types.infer import (
NxsInferInput,
NxsInferInputType,
NxsTensorsInferRequest,
)
from nxs_types.infer_result import (
NxsInferDetectorBBoxLocation,
NxsInferDetectorResult,
NxsInferResult,
)
from apps.vehicle_counting.worker.utils import *
from datetime import datetime, timezone
DB_TASKS_COLLECTION_NAME = "tasks"
DB_COUNTS_COLLECTION_NAME = "counts"
DB_LOGS_COLLECTION_NAME = "logs"
STORAGE_LOGS_DIR_PATH = "logs"
class OfflineVehicleTrackingApp:
def __init__(
self,
video_uuid: str,
frame_width: int,
frame_height: int,
frame_rate: int,
nxs_infer_url: str,
nxs_api_key: str,
detector_uuid: str,
tracker_uuid: str,
video_url: str,
rois: List[NxsRoi],
lines: List[NxsLine],
tracking_classes: List[str] = ["car", "motorcycle", "bus", "truck"],
treat_all_classes_as_one: bool = False,
detector_min_score: float = 0.4,
detector_interval_secs: float = 1,
duplicate_iou_thresh: float = 0.65,
merge_iou_thresh: float = 0.5,
object_expiration_secs: float = 2,
tracking_score_thresh: float = 0.975,
skip_frame: int = 2,
collect_logs: bool = False,
blobstore_conn_str: str = "",
blobstore_container_name: str = "",
cosmosdb_conn_str: str = "",
cosmosdb_db_name: str = "",
counting_report_interval_secs: int = 30,
visualize: bool = False,
job_duration: int = 21600,
) -> None:
self.video_uuid = video_uuid
self.nxs_infer_url = nxs_infer_url
self.nxs_api_key = nxs_api_key
self.detector_uuid = detector_uuid
self.tracker_uuid = tracker_uuid
self.name = "VEHICLE_TRACKING_APP"
self.video_url = video_url
self.frame_width = frame_width
self.frame_height = frame_height
self.frame_rate = frame_rate
self.job_duration = job_duration
# self.cap = cv2.VideoCapture(video_url)
# self.frame_width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
# self.frame_height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
# self.frame_rate = int(round(self.cap.get(cv2.CAP_PROP_FPS)))
self.rois = rois
self.lines = lines
self.tracking_classes = tracking_classes
self.treat_all_classes_as_one = treat_all_classes_as_one
self.detector_min_score = detector_min_score
self.detector_interval_secs = detector_interval_secs
self.duplicate_iou_thresh = duplicate_iou_thresh
self.merge_iou_thresh = merge_iou_thresh
self.object_expiration_secs = object_expiration_secs
self.tracking_score_thresh = tracking_score_thresh
self.total_extracted_frames: int = 0
self.total_processed_frames: int = 0
self.skip_frame = skip_frame
self.visualize = visualize
self.collect_logs = collect_logs
self.blobstore_conn_str = blobstore_conn_str
self.blobstore_container_name = blobstore_container_name
self.cosmosdb_conn_str = cosmosdb_conn_str
self.cosmosdb_db_name = cosmosdb_db_name
self.logs = []
self.log_id = 0
self.counting_report_interval_secs = counting_report_interval_secs
self.NUM_FRAMES_PER_SEC = int(self.frame_rate / (1 + self.skip_frame))
self.WINDOW_LENGTH = int(self.frame_rate * self.detector_interval_secs)
self.STOP_FLAG = False
self.track_dict: Dict[int, NxsTrack] = {}
self.track_count = 0
self.class_count_dicts: List[Dict[str, int]] = [{} for _ in lines]
for class_name in self.tracking_classes:
for class_count_dict in self.class_count_dicts:
class_count_dict[class_name] = 0
self.job_completed = False
self.video_ended = False
self.video_frames = []
self.video_frame_timestamps = []
self.starting_utc_time = datetime.now(timezone.utc)
self.starting_utc_ts = datetime.now(timezone.utc).timestamp()
self.downloaded_videos: List[Tuple[int, str]] = []
self.to_exit_threads = False
self.download_thr = threading.Thread(target=self.download_video_thread, args=())
self.download_thr.start()
self.decode_thr = threading.Thread(target=self.decode_video_thread, args=())
self.decode_thr.start()
def run_tracking(self):
def process_frames(obj_id: int, frames: List[np.ndarray]):
obj_track = self.track_dict[obj_id]
for frame_idx in range(1, len(frames)):
frame = frames[frame_idx]
infer_res = run_tracker(
self.nxs_infer_url,
self.tracker_uuid,
obj_track.templates[-1],
frame,
obj_track.track[-1],
self.nxs_api_key,
)
obj_track.track.append(infer_res.detections[0].bbox)
obj_track.track_scores.append(infer_res.detections[0].score)
if infer_res.detections[0].score < self.tracking_score_thresh:
obj_track.is_active = False
last_ts = -1
last_frame_ts = -1
miss_deadline = 0
hit_deadline = 0
avg_lat = 0
count = 0
while not self.STOP_FLAG:
t0 = time.time()
frames, frames_timestamps, is_end_of_video = self.get_batch(
self.WINDOW_LENGTH
)
if last_frame_ts >= self.job_duration * 1000:
self.job_completed = True
self.report_counting(last_frame_ts)
self.to_exit_threads = True
break
if is_end_of_video or self.job_completed:
# we don't really need to track the last-window
if last_frame_ts > 0:
self.report_counting(last_frame_ts)
self.to_exit_threads = True
break
if len(frames_timestamps) == 0:
time.sleep(0.1)
continue
if frames_timestamps[-1] > 0:
last_frame_ts = frames_timestamps[-1]
else:
for idx in range(len(frames_timestamps) - 1, -1, -1):
if frames_timestamps[idx] > 0:
last_frame_ts = frames_timestamps[idx]
self.remove_inactive_tracks()
self.remove_out_of_rois_tracks()
self.remove_expired_objects()
dets = run_detector(
self.nxs_infer_url, self.detector_uuid, frames[0], self.nxs_api_key
).detections
self.process_detections(frames[0], dets)
tracking_args = []
for obj_id in self.track_dict:
tracking_args.append((obj_id, frames))
if len(tracking_args) > 0:
executor = ThreadPoolExecutor(max_workers=min(16, len(tracking_args)))
results = []
for args in tracking_args:
f = executor.submit(process_frames, *args)
results.append(f)
executor.shutdown(wait=True)
for r in results:
_ = r.result()
# count objects
for frame_idx in range(len(frames)):
if self.visualize:
vis_frame = np.array(frames[frame_idx])
if self.collect_logs and frame_idx == len(frames) - 1:
log_frame = np.array(frames[frame_idx])
for obj_id in self.track_dict:
obj_track = self.track_dict[obj_id]
bboxes = obj_track.track[
: len(obj_track.track) - len(frames) + frame_idx + 1
]
scores = obj_track.track_scores[
: len(obj_track.track) - len(frames) + frame_idx + 1
]
if not obj_track.is_counted:
if self.is_passing_line(bboxes, self.lines[obj_track.roi_idx]):
obj_track.is_counted = True
self.class_count_dicts[obj_track.roi_idx][
obj_track.class_name
] += 1
break
if self.visualize:
vis_frame = self.draw_obj(
vis_frame, obj_id, bboxes[-1], scores[-1]
)
if self.collect_logs and frame_idx == len(frames) - 1:
log_frame = self.draw_obj(
log_frame, obj_id, bboxes[-1], scores[-1]
)
if self.visualize:
vis_frame = self.draw_rois(vis_frame)
vis_frame = self.draw_lines(vis_frame)
vis_frame = self.draw_frame_number(
vis_frame, self.total_processed_frames + frame_idx
)
self.visualize_frame(vis_frame)
if self.collect_logs and frame_idx == len(frames) - 1:
log_frame = self.draw_rois(log_frame)
log_frame = self.draw_lines(log_frame)
log_frame = self.draw_frame_number(
log_frame, self.total_processed_frames + frame_idx
)
self.total_processed_frames += len(frames)
if last_ts < 0:
last_ts = frames_timestamps[0]
else:
if (
frames_timestamps[-1] - last_ts
>= self.counting_report_interval_secs * 1000
):
self.report_counting(frames_timestamps[-1])
last_ts = frames_timestamps[-1]
if self.collect_logs:
self.snapshot_stats(log_frame, frames_timestamps[-1])
if len(self.logs) >= 10:
print("uploading logs")
self.upload_logs()
print("finished uploading logs")
lat = time.time() - t0
avg_lat = (avg_lat * count + lat) / (count + 1)
count += 1
if lat > 1:
miss_deadline += 1
else:
hit_deadline += 1
miss_rate = float(miss_deadline) / (miss_deadline + hit_deadline)
# print(self.total_processed_frames, len(self.track_dict), lat)
print(f"Total processed frames: {self.total_processed_frames}")
print(f"Total objects this round: {len(self.track_dict)}")
print(f"Latency this round: {lat} secs")
print(f"Avg latency: {avg_lat} secs")
print(f"Miss rate: {miss_rate}")
print(self.class_count_dicts)
print("")
def report_counting(self, ts):
cosmosdb_client = NxsDbFactory.create_db(
NxsDbType.MONGODB,
uri=self.cosmosdb_conn_str,
db_name=self.cosmosdb_db_name,
)
completed_percent = min(1.0, ts / (self.job_duration * 1000)) * 100
ending_ts = self.starting_utc_ts + ts / 1000
data = {
"zone": "global",
"video_uuid": self.video_uuid,
"timestamp": ts,
"counts": copy.deepcopy(self.class_count_dicts),
"completed_percent": completed_percent,
"starting_utc_time": str(self.starting_utc_time),
"starting_utc_ts": self.starting_utc_ts,
"ending_utc_time": str(datetime.utcfromtimestamp(ending_ts)),
"ending_utc_ts": ending_ts,
}
cosmosdb_client.insert(
DB_COUNTS_COLLECTION_NAME,
data,
)
def snapshot_stats(self, frame, frame_ts):
track_snapshots = []
for obj_id in self.track_dict:
track = self.track_dict[obj_id]
if track.is_active:
bbox = track.track[-1]
cloned_track = {
"id": track.id,
"class_name": track.class_name,
"is_counted": track.is_counted,
"bbox": [bbox.left, bbox.top, bbox.right, bbox.bottom],
"score": track.track_scores[-1],
}
track_snapshots.append(cloned_track)
self.logs.append(
(
frame,
frame_ts,
copy.deepcopy(self.class_count_dicts),
track_snapshots,
)
)
def upload_logs(self):
storage_client = NxsStorageFactory.create_storage(
NxsStorageType.AzureBlobstorage,
connection_str=self.blobstore_conn_str,
container_name=self.blobstore_container_name,
)
cosmosdb_client = NxsDbFactory.create_db(
NxsDbType.MONGODB,
uri=self.cosmosdb_conn_str,
db_name=self.cosmosdb_db_name,
)
cosmosdb_client.insert(
DB_LOGS_COLLECTION_NAME,
{
"zone": "global",
"video_uuid": self.video_uuid,
"log_id": self.log_id,
"start_ts": self.logs[0][1],
"end_ts": self.logs[-1][1],
"num_logs": len(self.logs),
},
)
logs = []
for log_idx, log in enumerate(self.logs):
frame, frame_idx, counts, snapshots = log
frame_file_name = f"{self.video_uuid}_{self.log_id}_{log_idx}.jpg"
cv2.imwrite(frame_file_name, frame)
storage_client.upload(frame_file_name, STORAGE_LOGS_DIR_PATH, True)
os.remove(frame_file_name)
logs.append({"counts": counts, "snapshots": snapshots})
tmp_log_path = f"{self.video_uuid}_{self.log_id}.txt"
json.dump(logs, open(tmp_log_path, "w"))
storage_client.upload(tmp_log_path, STORAGE_LOGS_DIR_PATH, True)
os.remove(tmp_log_path)
self.logs.clear()
self.log_id += 1
def get_free_idx(self) -> int:
self.track_count += 1
return self.track_count
def download_video_thread(self):
print("Download thread is started...")
base_url = self.video_url[: self.video_url.rindex("/")]
last_downloaded = []
starting_ts = time.time()
idx = 0
while (
not self.video_ended and not self.job_completed and not self.to_exit_threads
):
if time.time() - starting_ts > 1.1 * self.job_duration:
self.video_ended = True
break
chunk_names = []
for retry in range(12):
try:
data = requests.get(self.video_url).content.decode("utf-8")
lines = data.split("\n")
for line in lines:
if ".ts" in line:
chunk_names.append(line)
break
except:
time.sleep(5)
if retry == 11:
self.video_ended = True
if self.video_ended:
break
# download chunks into files
# chunk_names.sort()
for chunk_name in chunk_names:
if chunk_name in last_downloaded:
continue
print(f"Downloading chunk {chunk_name}\n")
chunk_idx = int(chunk_name.replace(".ts", "").split("_")[-1])
chunk_url = f"{base_url}/{chunk_name}"
for _ in range(3):
try:
data = requests.get(chunk_url, allow_redirects=True).content
chunk_path = f"chunk_{idx}"
open(chunk_path, "wb").write(data)
self.downloaded_videos.append((chunk_idx, chunk_path))
break
except:
time.sleep(1)
if len(last_downloaded) > 10:
last_downloaded.pop(0)
if chunk_name not in last_downloaded:
last_downloaded.append(chunk_name)
idx += 1
time.sleep(3)
print("Download thread is stopped...")
def decode_video_thread(self):
print("Decode thread is started...")
last_chunk_idx = -1
frame_idx = 0
self.starting_utc_time = datetime.now(timezone.utc)
self.starting_utc_ts = self.starting_utc_time.timestamp()
video_ts = 0
chunk_lens = [] # in secs
while not self.to_exit_threads:
if video_ts >= self.job_duration * 1000:
self.job_completed = True
break
if not self.downloaded_videos and self.video_ended:
# could not get any more frames
break
if not self.downloaded_videos:
time.sleep(1)
continue
if len(self.video_frames) > 5 * self.NUM_FRAMES_PER_SEC:
time.sleep(1)
continue
chunk_idx, chunk_path = self.downloaded_videos.pop(0)
print(f"Decoding chunk {chunk_path}\n")
            if last_chunk_idx > 0 and chunk_idx > last_chunk_idx:
                delta = chunk_idx - last_chunk_idx - 1
                if delta > 0:
                    # some chunks are missing; advance the timeline by their estimated duration
                    video_ts += delta * np.mean(chunk_lens) * 1000
cap = cv2.VideoCapture(chunk_path)
fps = cap.get(cv2.CAP_PROP_FPS)
if fps == 0:
fps = self.frame_rate
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
chunk_len = frame_count / fps
chunk_lens.append(chunk_len)
frame_time = (1.0 / fps) * 1000
while True:
_, img = cap.read() # BGR
if isinstance(img, type(None)):
break
video_ts += frame_time
if frame_idx % (self.skip_frame + 1) == 0:
self.video_frame_timestamps.append(video_ts)
self.video_frames.append(img)
frame_idx += 1
self.total_extracted_frames += 1
cap.release()
try:
os.remove(chunk_path)
except:
pass
last_chunk_idx = chunk_idx
print("Decode thread is stopped...")
"""
def get_batch(self, batch_size):
images = []
timestamps = []
is_end_of_video = False
for _ in range(batch_size):
_, img = self.cap.read() # BGR
if not isinstance(img, type(None)):
if self.total_extracted_frames % (self.skip_frame + 1) == 0:
timestamps.append(self.cap.get(cv2.CAP_PROP_POS_MSEC))
images.append(img)
else:
is_end_of_video = True
break
self.total_extracted_frames += 1
return images, timestamps, is_end_of_video
"""
def get_batch(self, batch_size):
images = []
timestamps = []
is_end_of_video = False
batch_size = int(float(batch_size) / (1 + self.skip_frame))
if len(self.video_frames) >= batch_size:
for _ in range(batch_size):
timestamps.append(self.video_frame_timestamps.pop(0))
images.append(self.video_frames.pop(0))
elif len(self.video_frames) < batch_size and self.video_ended:
for _ in range(len(self.video_frames)):
timestamps.append(self.video_frame_timestamps.pop(0))
images.append(self.video_frames.pop(0))
is_end_of_video = True
else:
is_end_of_video = self.video_ended
return images, timestamps, is_end_of_video
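    # Worked example of the batch sizing above (numbers are illustrative, not from a real
    # deployment): with frame_rate=30, detector_interval_secs=1 and skip_frame=2,
    # WINDOW_LENGTH = int(30 * 1) = 30 raw frames per detector window, and get_batch()
    # shrinks that to int(30 / (1 + 2)) = 10 decoded frames, matching
    # NUM_FRAMES_PER_SEC = int(30 / 3) = 10.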
def is_passing_line(
self, bboxes: List[NxsInferDetectorBBoxLocation], line: NxsLine
):
if len(bboxes) < 6:
return False
vs = []
for bbox in bboxes:
center_x = (bbox.left + bbox.right) // 2
center_y = (bbox.top + bbox.bottom) // 2
v1 = [line.p0.x - center_x, line.p0.y - center_y]
v2 = [line.p1.x - center_x, line.p1.y - center_y]
v = [v1[0] + v2[0], v1[1] + v2[1]]
vs.append(v)
for i in range(3):
v1 = vs[i]
v2 = vs[-i - 1]
if v1[0] * v1[1] * v2[0] * v2[1] > 0:
return False
return True
def remove_inactive_tracks(self):
inactive_track_ids = []
for obj_id in self.track_dict:
track = self.track_dict[obj_id]
if not track.is_active:
inactive_track_ids.append(obj_id)
for obj_id in inactive_track_ids:
self.track_dict.pop(obj_id, None)
def remove_out_of_rois_tracks(self):
out_of_rois_obj_ids = []
for obj_id in self.track_dict:
track = self.track_dict[obj_id]
last_bbox = track.track[-1]
roi = Polygon(self.rois[track.roi_idx].to_ndarray())
if not (
Point(last_bbox.left, last_bbox.top).within(roi)
or Point(last_bbox.right, last_bbox.top).within(roi)
or Point(last_bbox.left, last_bbox.bottom).within(roi)
or Point(last_bbox.right, last_bbox.bottom).within(roi)
):
out_of_rois_obj_ids.append(obj_id)
for obj_id in out_of_rois_obj_ids:
self.track_dict.pop(obj_id, None)
# print("remove_out_of_rois_tracks", f"removed obj {obj_id}")
def remove_expired_objects(self):
expired_ids = []
for obj_id in self.track_dict:
track = self.track_dict[obj_id]
if (
self.total_processed_frames - track.last_frame_idx
> self.NUM_FRAMES_PER_SEC * self.object_expiration_secs
):
expired_ids.append(obj_id)
for obj_id in expired_ids:
self.track_dict.pop(obj_id, None)
def process_detections(self, frame: np.ndarray, dets: List[NxsInferDetectorResult]):
duplicate_obj_ids = []
for det in dets:
if det.class_name not in self.tracking_classes:
continue
if det.score < self.detector_min_score:
continue
within_rois = False
for roi_idx, roi in enumerate(self.rois):
if self.is_in_roi(det.bbox, roi):
within_rois = True
break
if not within_rois:
continue
# match this detection with tracking objects
(
best_obj_id,
best_iou,
matched_obj_ids,
) = self.find_matched_objects(det)
for obj_idx in matched_obj_ids:
if obj_idx != best_obj_id:
duplicate_obj_ids.append(obj_idx)
# update best matched obj
if best_iou > self.merge_iou_thresh:
matched_track = self.track_dict[best_obj_id]
matched_track.dets.append(det)
matched_track.track.append(det.bbox)
matched_track.track_scores.append(det.score)
matched_track.last_frame_idx = self.total_processed_frames
continue
new_obj_id = self.get_free_idx()
template = preprocess_examplar(
frame,
[
det.bbox.left,
det.bbox.top,
det.bbox.right,
det.bbox.bottom,
],
)
self.track_dict[new_obj_id] = NxsTrack(
id=new_obj_id,
class_name=det.class_name,
is_active=True,
is_counted=False,
start_frame_idx=self.total_processed_frames,
last_frame_idx=self.total_processed_frames,
templates=[template],
dets=[det],
track=[det.bbox],
track_scores=[det.score],
roi_idx=roi_idx,
)
# remove duplicate objects
for obj_idx in duplicate_obj_ids:
self.track_dict.pop(obj_idx, None)
def find_matched_objects(
self, det: NxsInferDetectorResult
) -> Tuple[int, float, List[int]]:
best_matched_obj_id = 0
best_iou = 0
matched_obj_ids: List[int] = []
for obj_id in self.track_dict:
track = self.track_dict[obj_id]
if track.start_frame_idx == self.total_processed_frames:
# ignore just-added track
continue
# prev_det = track.dets[-1]
if not self.treat_all_classes_as_one and det.class_name != track.class_name:
continue
iou = compute_iou(det.bbox, track.track[-1])
if iou > best_iou:
best_iou = iou
best_matched_obj_id = obj_id
if iou > self.duplicate_iou_thresh:
matched_obj_ids.append(obj_id)
return best_matched_obj_id, best_iou, matched_obj_ids
def is_in_roi(self, det: NxsInferDetectorBBoxLocation, roi: NxsRoi):
center_x = int((det.left + det.right) / 2)
center_y1 = int(0.75 * det.top + 0.25 * det.bottom)
center_y2 = int(0.25 * det.top + 0.75 * det.bottom)
p1 = Point(center_x, center_y1)
p2 = Point(center_x, center_y2)
roi = Polygon(roi.to_ndarray())
if p1.within(roi) or p2.within(roi):
return True
return False
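    # Worked example (hypothetical numbers): for a detection with left=100, right=200,
    # top=50, bottom=150, the probes above are center_x = 150,
    # center_y1 = int(0.75*50 + 0.25*150) = 75 and center_y2 = int(0.25*50 + 0.75*150) = 125,
    # i.e. two points a quarter and three quarters of the way down the box; the detection
    # counts as inside the ROI if either point lies within the ROI polygon.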
def draw_obj(
self,
frame,
obj_id: int,
bbox: NxsInferDetectorBBoxLocation,
score: float,
color=(0, 0, 255),
thickness=1,
):
frame = cv2.rectangle(
frame,
(bbox.left, bbox.top),
(bbox.right, bbox.bottom),
color,
thickness=thickness,
lineType=cv2.LINE_AA,
)
cv2.putText(
frame,
f"{obj_id}",
(bbox.left, bbox.top - 3),
0,
1,
[0, 255, 0],
thickness=1,
lineType=cv2.LINE_AA,
)
return frame
def draw_frame_number(self, frame, frame_idx: int):
return cv2.putText(
frame,
f"{frame_idx}",
(50, 50),
0,
1,
[0, 0, 255],
thickness=1,
lineType=cv2.LINE_AA,
)
def draw_det(
self,
frame,
det: NxsInferDetectorResult,
color=(0, 0, 255),
thickness=1,
):
return cv2.rectangle(
frame,
(det.bbox.left, det.bbox.top),
(det.bbox.right, det.bbox.bottom),
color,
thickness=thickness,
lineType=cv2.LINE_AA,
)
def draw_rois(self, frame):
for roi in self.rois:
frame = self.draw_roi(frame, roi)
return frame
def draw_roi(self, frame, roi: NxsRoi):
if not roi:
return frame
cv2.polylines(
frame,
np.array([[roi.to_ndarray()]], dtype=np.int32),
True,
(0, 255, 0),
thickness=3,
)
return frame
def draw_lines(self, frame):
for line_idx, line in enumerate(self.lines):
label = ""
for class_name in self.class_count_dicts[line_idx]:
label += str(self.class_count_dicts[line_idx][class_name]) + " "
cv2.putText(
frame,
label,
(line.p0.x, line.p0.y),
0,
1,
[225, 255, 255],
thickness=1,
lineType=cv2.LINE_AA,
)
frame = self.draw_line(frame, line)
return frame
def draw_line(self, frame, line: NxsLine, color=(0, 0, 255), thickness=1):
if not line:
return frame
frame = cv2.line(
frame,
(line.p0.x, line.p0.y),
(line.p1.x, line.p1.y),
color,
thickness,
)
return frame
def visualize_frame(self, frame):
while True:
cv2.imshow(self.name, frame)
key = cv2.waitKey(1)
if key & 0xFF == ord("q"):
self.STOP_FLAG = True
break
if key & 0xFF == ord("c"):
break
# cv2.imshow(self.name, frame)
# key = cv2.waitKey(1)
# if key & 0xFF == ord("q"):
# self.STOP_FLAG = True
|
run.py
|
# Copyright (c) 2020 Institution of Parallel and Distributed System, Shanghai Jiao Tong University
# ServerlessBench is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
import os
import threading
import time
import sys, getopt
def client(i,results,loopTimes):
print("client %d start" %i)
command = "./single-cold_warm.sh -R -t " + str(loopTimes)
r = os.popen(command)
text = r.read()
results[i] = text
print("client %d finished" %i)
def warmup(i,warmupTimes,actionName,params):
for j in range(warmupTimes):
r = os.popen("wsk -i action invoke %s %s --result --blocking" %(actionName,params))
text = r.read()
print("client %d warmup finished" %i)
def main():
argv = getargv()
clientNum = argv[0]
loopTimes = argv[1]
warmupTimes = argv[2]
threads = []
containerName = "hellonodejs"
actionName = "hello-nodejs"
params = ""
r = os.popen("docker stop `docker ps | grep %s | awk {'print $1'}`" %containerName)
r.read()
# First: warm up
for i in range(clientNum):
t = threading.Thread(target=warmup,args=(i,warmupTimes,actionName,params))
threads.append(t)
for i in range(clientNum):
threads[i].start()
for i in range(clientNum):
threads[i].join()
print("Warm up complete")
# Second: invoke the actions
# Initialize the results and the clients
threads = []
results = []
for i in range(clientNum):
results.append('')
# Create the clients
for i in range(clientNum):
t = threading.Thread(target=client,args=(i,results,loopTimes))
threads.append(t)
# start the clients
for i in range(clientNum):
threads[i].start()
for i in range(clientNum):
threads[i].join()
outfile = open("result.csv","w")
outfile.write("invokeTime,startTime,endTime\n")
latencies = []
minInvokeTime = 0x7fffffffffffffff
maxEndTime = 0
for i in range(clientNum):
# get and parse the result of a client
clientResult = parseResult(results[i])
# print the result of every loop of the client
for j in range(len(clientResult)):
outfile.write(clientResult[j][0] + ',' + clientResult[j][1] + \
',' + clientResult[j][2] + '\n')
# Collect the latency
latency = int(clientResult[j][-1]) - int(clientResult[j][0])
latencies.append(latency)
# Find the first invoked action and the last return one.
if int(clientResult[j][0]) < minInvokeTime:
minInvokeTime = int(clientResult[j][0])
if int(clientResult[j][-1]) > maxEndTime:
maxEndTime = int(clientResult[j][-1])
formatResult(latencies,maxEndTime - minInvokeTime, clientNum, loopTimes, warmupTimes)
def parseResult(result):
lines = result.split('\n')
parsedResults = []
for line in lines:
if line.find("invokeTime") == -1:
continue
parsedTimes = ['','','']
        i = 0
        count = 0
        # scan the line for three 13-digit epoch-ms values: invokeTime, startTime, endTime
        while i < len(line) and count < 3:
            if line[i].isdigit():
                parsedTimes[count] = line[i:i+13]
                i += 13
                count += 1
                continue
            i += 1
parsedResults.append(parsedTimes)
return parsedResults
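# Illustrative only: parseResult() expects each matching line to carry three 13-digit
# epoch-millisecond values (invoke, start and end time). The sample line below is a
# hypothetical shape of that output, not captured from a real run of single-cold_warm.sh.
def exampleParseResult():
    sample = "invokeTime: 1600000000000 startTime: 1600000000150 endTime: 1600000000900"
    return parseResult(sample)  # -> [['1600000000000', '1600000000150', '1600000000900']]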
def getargv():
if len(sys.argv) != 3 and len(sys.argv) != 4:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
exit(0)
if not str.isdigit(sys.argv[1]) or not str.isdigit(sys.argv[2]) or int(sys.argv[1]) < 1 or int(sys.argv[2]) < 1:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
print("Client number and loop times must be an positive integer")
exit(0)
if len(sys.argv) == 4:
if not str.isdigit(sys.argv[3]) or int(sys.argv[3]) < 1:
print("Usage: python3 run.py <client number> <loop times> [<warm up times>]")
print("Warm up times must be an positive integer")
exit(0)
else:
return (int(sys.argv[1]),int(sys.argv[2]),1)
return (int(sys.argv[1]),int(sys.argv[2]),int(sys.argv[3]))
def formatResult(latencies,duration,client,loop,warmup):
requestNum = len(latencies)
latencies.sort()
duration = float(duration)
# calculate the average latency
total = 0
for latency in latencies:
total += latency
print("\n")
print("------------------ result ---------------------")
averageLatency = float(total) / requestNum
_50pcLatency = latencies[int(requestNum * 0.5) - 1]
_75pcLatency = latencies[int(requestNum * 0.75) - 1]
_90pcLatency = latencies[int(requestNum * 0.9) - 1]
_95pcLatency = latencies[int(requestNum * 0.95) - 1]
_99pcLatency = latencies[int(requestNum * 0.99) - 1]
print("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%")
print("%.2f\t%d\t%d\t%d\t%d\t%d" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
print("throughput (n/s):\n%.2f" %(requestNum / (duration/1000)))
# output result to file
resultfile = open("eval-result.log","a")
resultfile.write("\n\n------------------ (concurrent)result ---------------------\n")
resultfile.write("client: %d, loop_times: %d, warup_times: %d\n" % (client, loop, warmup))
resultfile.write("%d requests finished in %.2f seconds\n" %(requestNum, (duration/1000)))
resultfile.write("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%\n")
resultfile.write("%.2f\t%d\t%d\t%d\t%d\t%d\n" %(averageLatency,_50pcLatency,_75pcLatency,_90pcLatency,_95pcLatency,_99pcLatency))
resultfile.write("throughput (n/s):\n%.2f\n" %(requestNum / (duration/1000)))
main()
|
ftp.py
|
""" import & globals """
import ftplib
import os
import threading
""" test connection """
def test_ftpconnection(server, port, user, password, path, log):
try:
""" login to ftp server """
ftp = ftplib.FTP()
ftp.connect(server, port)
ftp.login(user=user, passwd=password)
""" check fpt_path exist ? """
ftp.cwd(path)
""" logout ftp session """
ftp.quit()
""" return """
return True, None
except ftplib.all_errors as e:
return False, 'Error in ftp login ({!s}:{!s}) = {!s}'.format(server, port, e)
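""" hedged usage sketch: the host, credentials and path below are placeholders,
    not values from the original code:
        ok, err = test_ftpconnection('ftp.example.com', 21, 'user', 'secret', '/upload', log=None)
        if not ok:
            print(err)
"""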
""" ftp upload to server """
def ftp_upload_file(log, server, port, user, password, path, filename, localfile):
try:
""" login to ftp server """
ftp = ftplib.FTP()
ftp.set_pasv(True)
ftp.connect(server, port, timeout=300)
ftp.login(user=user, passwd=password)
log.debug('login to ftp {!s}:{!s} succeeded'.format(server,port))
""" check fpt_path exist ? """
ftp.cwd(path)
""" upload file """
log.debug('upload file {} with {!s} size'.format(localfile, os.path.getsize(localfile)))
if os.path.getsize(localfile) >= 1024:
            # pass a callable so a NOOP keepalive is sent after each 1024-byte block
            ftp.storbinary('STOR ' + filename, open(localfile, 'rb'), 1024, lambda block: ftp_keepalive(log, ftp))
else:
ftp.storbinary('STOR ' + filename, open(localfile, 'rb'))
log.debug('upload file {} to ftp {!s}:{!s} succeeded'.format(localfile, server,port))
""" logout ftp session """
ftp.quit()
""" return """
return True, None
except ftplib.all_errors as e:
return False, 'Error in ftp upload ({!s}:{!s}) = {!s}'.format(server, port, e)
def ftp_keepalive(log, ftp):
try:
""" send keepalive on command line """
ftp.voidcmd('NOOP')
log.debug('ftp keepalive')
except ftplib.all_errors as e:
log.error('Error send keepalive = {!s}'.format(e))
def ftp_upload_file2(log, server, port, user, password, path, filename, localfile):
try:
""" login to ftp server """
ftp = ftplib.FTP()
ftp.set_pasv(True)
ftp.connect(server, port)
ftp.login(user=user, passwd=password)
log.debug('login to ftp {!s}:{!s} succeeded'.format(server,port))
""" check fpt_path exist ? """
ftp.cwd(path)
#""" define socket for command """
#sock = ftp.transfercmd('STOR ' + filename)
def background():
ftp.storbinary('STOR ' + filename, open(localfile, 'rb'))
log.debug('upload file {} to ftp {!s}:{!s} succeeded'.format(localfile, server,port))
"""
f = open(localfile, 'rb')
while True:
block = sock.sendfile(file=f, count=1024)
if not block:
break
#f.write(block)
sock.close() """
t = threading.Thread(target=background)
t.start()
while t.is_alive():
t.join(30)
ftp.voidcmd('NOOP')
log.debug('NOOP send to ftp {!s}:{!s} succeeded'.format(server, port))
return True, None
except ftplib.all_errors as e:
return False, 'Error in ftp upload ({!s}:{!s}) = {!s}'.format(server, port, e)
|
ws.py
|
import threading
import time
from channels.generic.websocket import JsonWebsocketConsumer
from kubeops_api.models.deploy import DeployExecution
class F2OWebsocket(JsonWebsocketConsumer):
disconnected = False
execution_id = None
def connect(self):
self.execution_id = self.scope['url_route']['kwargs']['execution_id']
if self.execution_id is None:
            raise Exception('execution_id must not be None!')
self.accept()
self.send_deploy_execution()
def send_deploy_execution(self):
def func():
while not self.disconnected:
data = DeployExecution.objects.filter(id=self.execution_id).first().to_json()
self.send_json({'message': data})
time.sleep(1)
thread = threading.Thread(target=func)
thread.start()
def disconnect(self, close_code):
self.disconnected = True
self.close()
|
dvrk_move_wait_test.py
|
#!/usr/bin/env python
# Author: Anton Deguet
# Date: 2021-01-29
# (C) Copyright 2021 Johns Hopkins University (JHU), All Rights Reserved.
# --- begin cisst license - do not edit ---
# This software is provided "as is" under an open source license, with
# no warranty. The complete license can be found in license.txt and
# http://www.cisst.org/cisst/license.txt.
# --- end cisst license ---
# Start a single arm using
# > rosrun dvrk_robot dvrk_console_json -j <console-file>
# To communicate with the arm using ROS topics, see the python based example dvrk_arm_test.py:
# > rosrun dvrk_python dvrk_arm_test.py <arm-name>
import argparse
import sys
import time
import threading
import rclpy
import dvrk
import math
import numpy
import PyKDL
# ros init node so we can use default ros arguments (e.g. __ns:= for namespace)
rclpy.init(args = sys.argv)
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--arm', type=str, required=True,
choices=['ECM', 'MTML', 'MTMR', 'PSM1', 'PSM2', 'PSM3'],
help = 'arm name corresponding to ROS topics without namespace. Use __ns:= to specify the namespace')
parser.add_argument('-i', '--interval', type=float, default=0.01,
help = 'expected interval in seconds between messages sent by the device')
args = parser.parse_args(sys.argv[1:]) # skip argv[0], script name
node = rclpy.create_node('dvrk_move_wait_test', namespace = args.arm)
arm = dvrk.arm(arm_name = node.get_namespace(),
ros_node = node,
expected_interval = args.interval)
executor = rclpy.executors.MultiThreadedExecutor()
executor.add_node(node)
executor_thread = threading.Thread(target = executor.spin, daemon = True)
executor_thread.start()
print('starting move_jp')
# get current position
initial_joint_position = numpy.copy(arm.setpoint_jp())
amplitude = math.radians(10.0)
goal = numpy.copy(initial_joint_position)
print('--> Testing the trajectory with wait()')
start_time = time.time()
# first motion
goal[0] = initial_joint_position[0] + amplitude
arm.move_jp(goal).wait()
# second motion
goal[0] = initial_joint_position[0] - amplitude
arm.move_jp(goal).wait()
# back to initial position
arm.move_jp(initial_joint_position).wait()
print('--> Time for the full trajectory: %f seconds' % (time.time() - start_time))
print('--> Testing the trajectory with busy loop')
start_time = time.time()
# first motion
goal[0] = initial_joint_position[0] + amplitude
counter = 0
handle = arm.move_jp(goal)
while handle.is_busy():
counter = counter + 1
sys.stdout.write('\r---> Loop counter: %d' % (counter))
sys.stdout.flush()
# second motion
goal[0] = initial_joint_position[0] - amplitude
handle = arm.move_jp(goal)
while handle.is_busy():
counter = counter + 1
sys.stdout.write('\r---> Loop counter: %d' % (counter))
sys.stdout.flush()
# back to initial position
handle = arm.move_jp(initial_joint_position)
while handle.is_busy():
counter = counter + 1
sys.stdout.write('\r---> Loop counter: %d' % (counter))
sys.stdout.flush()
print('')
print('--> Time for the full trajectory: %f seconds' % (time.time() - start_time))
print('--> You can change the trajectory velocity in the GUI using "%s", "Direct control" and lower the "100%%" factor. Then re-run this program.' % (args.arm))
print('---> Stopping ROS thread')
rclpy.shutdown()
executor_thread.join()
node.destroy_node()
|
motors.py
|
import os
import logging
import time
from threading import Thread
from navio.pwm import PWM
pi = os.getenv('PI', False)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
T100 = 't100'
SERVO = 'servo'
# map of duty cycle settings in milliseconds which is the
# units expected by the navio PWM module.
# For more info on the T100 controller specs see
# https://www.bluerobotics.com/store/thrusters/besc-30-r1/
T100_PWM_MAP = {
    'max_forward': int((1900 - 1525) * 0.9) + 1525,  # limit max power to avoid burning out the motor
'min_forward': 1525,
'stopped': 1500,
'min_reverse': 1475,
    'max_reverse': 1475 - int((1475 - 1100) * 0.9),  # limit max power to avoid burning out the motor
}
SERVO_PWM_MAP = T100_PWM_MAP
PWM_FREQUENCY = 50 # Hz
def _calculate_value_in_range(min_val, max_val, percentage):
"""Get the value within a range based on percentage.
Example:
A percentage 0.0 maps to min_val
A percentage of 1.0 maps to max_val
"""
value_range = max_val - min_val
return min_val + int(percentage * value_range)
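# Worked example (illustration only; nothing in the driver calls this): with the T100 map
# above, max_forward is int((1900 - 1525) * 0.9) + 1525 = 1862 us, so a 50% forward command
# maps to 1525 + int(0.5 * (1862 - 1525)) = 1693 us before Motor.speed divides by 1000
# to get the millisecond duty cycle the navio PWM module expects.
def _example_duty_cycle_for_half_forward():
    return _calculate_value_in_range(
        min_val=T100_PWM_MAP['min_forward'],
        max_val=T100_PWM_MAP['max_forward'],
        percentage=0.5,
    )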
class Motor:
"""An interface class to allow simple acces to motor functions"""
def __init__(self, name, rc_channel, motor_type=T100):
self.name = name
self.rc_channel = rc_channel
if motor_type == T100:
self.pwm_map = T100_PWM_MAP
elif motor_type == SERVO:
self.pwm_map = SERVO_PWM_MAP
else:
raise ValueError('Unknown motor_type')
self._speed = 0
self.duty_cycle_ms = self.pwm_map['stopped'] / 1000
self.pwm = PWM(self.rc_channel - 1)
self.initialized = False
# start the update loop in a thread
Thread(target=self._update).start()
def initialize(self):
"""Must call to initialize the motor
We run the initialization sequence in a thread to avoid blocking
the caller since this can take a few seconds.
"""
Thread(target=self._initialize).start()
def _initialize(self):
if pi:
self.pwm.initialize()
# setting the period is required before enabling the pwm channel
self.pwm.set_period(PWM_FREQUENCY)
self.pwm.enable()
# To arm the ESC a "stop signal" is sent and held for 1s (up to 2s works too)
            # if you wait too long after arming to send the first command to the ESC,
# it will shut off and you will have to re-initialize
self.stop()
self.pwm.set_duty_cycle(self.duty_cycle_ms)
time.sleep(1)
logger.debug('{} Motor: initialized'.format(self.name.title()))
self.initialized = True
def _update(self):
"""Set the duty cycle on the motor controllers
The ESC's need to get a signal sent consistently as a heartbeat
and thus have the duty cycle set every loop.
"""
while True:
if pi and self.initialized:
self.pwm.set_duty_cycle(self.duty_cycle_ms)
# sleep a bit to avoid jitter in the speed
time.sleep(0.1)
@property
def speed(self):
return self._speed
@speed.setter
def speed(self, value):
"""Must be value betweeon -100 and 100
Negative values indicate the motor is running in reverse.
"""
# clamp the speed between -100 and 100
value = max(-100, value)
value = min(100, value)
duty_cycle = self.pwm_map['stopped']
if value > 0:
duty_cycle = _calculate_value_in_range(
min_val=self.pwm_map['min_forward'],
max_val=self.pwm_map['max_forward'],
percentage=value / 100,
)
elif value < 0:
duty_cycle = _calculate_value_in_range(
min_val=self.pwm_map['min_reverse'],
max_val=self.pwm_map['max_reverse'],
percentage=abs(value) / 100,
)
self._speed = value
self.duty_cycle_ms = duty_cycle / 1000 # convert to milliseconds
        logger.debug('{} Motor: speed updated to ({} %, {} ms)'.format(self.name.title(), value, self.duty_cycle_ms))
def forward(self, speed):
self.speed = abs(speed)
def reverse(self, speed):
self.speed = -abs(speed)
def stop(self):
self.speed = 0
def __repr__(self):
return 'Motor(name={}, rc_channel={})'.format(self.name, self.rc_channel)
__str__ = __repr__
|
index.py
|
from utils import thread_delay
import threading
import _thread
import time
def start_and_wait_thread_finish(*args):
for thr in args:
thr.start()
for thr in args:
# wait thread finish to execute the next lines
thr.join()
def volume_cube(a):
print('Volume of cube: ', a**3)
def volume_square(a):
print('Volume of Square: ', a*a)
if __name__ == '__main__':
_thread.start_new_thread(thread_delay, ('t1', 1))
_thread.start_new_thread(thread_delay, ('t2', 5))
t3 = threading.Thread(target=thread_delay, args=('t3', 2))
t4 = threading.Thread(target=thread_delay, args=('t4', 3))
start_and_wait_thread_finish(t3, t4)
print('\n\nThread execution is complete!\n')
th_volume_1 = threading.Thread(target=volume_cube, args=(2,))
th_volume_2 = threading.Thread(target=volume_square, args=(3,))
start_and_wait_thread_finish(th_volume_1, th_volume_2)
print('\n\nVolumes threading are complete!\n')
time.sleep(12000)
|
galaxy_loggers-2.0.0.py
|
#!/usr/bin/python
#-*- coding: UTF-8 -*-
#
# galaxy_loggers-2.0.0.py
#
# Generates test data files
#
# see: $APPHELP
#
# ZhangLiang, 350137278@qq.com
#
# LOGS:
# -- 2017-12-08: first created
# -- 2018-03-02: last updated and released
########################################################################
import os, sys, signal, shutil, inspect, commands
import importlib
import time, datetime
import optparse, ConfigParser
import multiprocessing, threading
from multiprocessing import Process, Queue
from Queue import Empty, Full
########################################################################
# application specific
APPFILE = os.path.realpath(sys.argv[0])
APPHOME = os.path.dirname(APPFILE)
APPNAME,_ = os.path.splitext(os.path.basename(APPFILE))
APPVER = "2.0.0"
APPHELP = "Control Script for producing a mass of test csv files."
########################################################################
# import your local modules
import utils.utility as util
import utils.evntlog as elog
########################################################################
# The number of logger worker processes can be set per system and workload: LOGGER_WORKERS_MAX
# maximum number of supported loggers; can be changed
#
LOGGER_WORKERS_MAX = 200
########################################################################
exit_event, exit_queue = (threading.Event(), Queue(LOGGER_WORKERS_MAX))
# When a child process exits, the parent receives this signal (SIGCHLD).
# If the parent neither handles the signal nor wait()s for the child, the child
# terminates but still occupies an entry in the kernel process table; such a
# child is called a zombie process. We should avoid this situation.
#
# The parent can ignore SIGCHLD, catch it, or wait() for the children it spawned;
# or, if the parent terminates first, init automatically takes over reaping them.
#
def onSigChld(signo, frame):
global exit_queue, exit_event
pid, status = os.waitpid(-1, os.WNOHANG)
if pid:
elog.error("child#%d on signal: SIGCHLD.", pid)
exit_queue.put(('EXIT', "child#%d on signal: SIGCHLD." % pid))
exit_event.set()
pass
def onSigInt(signo, frame):
global exit_queue, exit_event
exit_queue.put(('EXIT', "process#%d on signal: SIGINT." % os.getpid()))
exit_event.set()
pass
def onSigTerm(signo, frame):
global exit_queue, exit_event
exit_queue.put(('EXIT', "process#%d on signal: SIGTERM." % os.getpid()))
exit_event.set()
pass
########################################################################
def load_logger_workers(loggers_dir, workers, loggerConfig):
loggers = {}
worker_modules = ["%s.%s" % (loggers_dir, workers[i]) for i in range(0, len(workers))]
try:
for worker in worker_modules:
elog.debug("import %s", worker)
module = importlib.import_module(worker)
loggers[worker] = (module, loggerConfig)
pass
return loggers
except ImportError as ie:
elog.error("%r", ie)
sys.exit(-1)
pass
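# For example (illustrative call, not in the original script):
#   load_logger_workers('loggers', ['weblogger', 'weblogger2'], cfg)
# imports the modules loggers.weblogger and loggers.weblogger2 and returns a dict that
# maps each dotted module path to the tuple (module, cfg).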
########################################################################
def logger_worker(loggerSet, config, exit_queue, timeout_ms):
(loggerModule, loggerConfig) = loggerSet
loggerClass = loggerModule.create_logger_instance(loggerConfig)
elog.force("worker process(%d) for %s start ...", os.getpid(), loggerClass.logger_name)
is_exit, exit_arg = (False, None)
while not is_exit:
is_exit, exit_arg = util.is_exit_process(exit_queue, timeout_ms)
if is_exit:
exit_queue.put(('EXIT', exit_arg))
break
loggerModule.log_messages(loggerClass)
pass
else:
elog.fatal("worker process exit: %r", exit_arg)
pass
########################################################################
def run_forever(processes, exit_event):
for name, proc in processes.items():
elog.force("start worker process: %s", name)
proc.start()
idle_queue = Queue(1)
while not exit_event.isSet():
try:
func, arg = idle_queue.get(block=True, timeout=3)
except Empty:
pass
else:
for name, proc in processes.items():
exit_queue.put(('EXIT', 'main process exit.'))
for name, proc in processes.items():
proc.join()
elog.force("main process exit.")
pass
########################################################################
# start worker loggers
#
def startup(loggers, config):
processes = {}
    # exit signals for the main process
signal.signal(signal.SIGINT, onSigInt)
signal.signal(signal.SIGTERM, onSigTerm)
for loggerClassName in loggers:
elog.force("create process for logger: %s", loggerClassName)
p = Process(target = logger_worker, args = (loggers[loggerClassName], config, exit_queue, 0.01))
p.daemon = True
processes[loggerClassName] = p
pass
run_forever(processes, exit_event)
pass
########################################################################
def list_logger_workers(logConfigDict, workersDir):
found_workers = []
for loggerName in logConfigDict['loggers']:
worker_py = "%s.py" % loggerName
if worker_py in os.listdir(workersDir):
found_workers.append(loggerName)
pass
found_workers.sort()
return found_workers
########################################################################
def reset_logger_position(loggers, workersDir, start_time, start_rowid):
for loggerName in loggers:
_, worker = os.path.splitext(loggerName)
position_file = os.path.join(workersDir, "%s.position" % worker)
st_dt = time.strptime(start_time, '%Y-%m-%d %H:%M:%S')
start_tstamp = int(time.mktime(st_dt))
line = "%d,%d\n" % (start_tstamp, start_rowid)
util.write_first_line_nothrow(position_file, line)
elog.force("%d ('%s'), %d => position file: %s", start_tstamp, start_time, start_rowid, position_file)
pass
########################################################################
def add_logger(config, found_workers):
import yaml
from copy import deepcopy
NN = len(found_workers)
if NN > LOGGER_WORKERS_MAX:
elog.warn("too many loggers(>%d) to add", LOGGER_WORKERS_MAX)
return
loggerPrefix = found_workers[0].split('-')[0]
newLoggerName = "%s-%d" % (loggerPrefix, NN)
while newLoggerName in found_workers:
NN = NN + 1
newLoggerName = "%s-%d" % (loggerPrefix, NN)
# add loggerNN:
logger0 = os.path.join(config['loggers_abspath'], "%s.py" % loggerPrefix)
loggerNN = os.path.join(config['loggers_abspath'], "%s.py" % newLoggerName)
elog.info("%s: %s", newLoggerName, loggerNN)
(fr, fd) = (None, None)
try:
loggingConfigYaml = config['logger']['logging_config']
loggingConfigYamlDefault = "%s.0" % loggingConfigYaml
shutil.copy(loggingConfigYaml, loggingConfigYamlDefault)
fr = open(loggingConfigYaml)
cfg = yaml.load(fr)
fr.close()
fr = None
fd = util.open_file(loggingConfigYaml)
cfg['loggers'][newLoggerName] = deepcopy(cfg['loggers'][loggerPrefix])
yaml.dump(cfg, fd, default_flow_style=False)
fd.close()
fd = None
shutil.copy(logger0, loggerNN)
shutil.copy(loggingConfigYaml, loggingConfigYamlDefault)
elog.info("success: %s", newLoggerName)
except:
shutil.copy(loggingConfigYamlDefault, loggingConfigYaml)
elog.error("failed: %s", newLoggerName)
pass
finally:
if fr:
fr.close()
if fd:
fd.close()
pass
########################################################################
def remove_logger(config, found_workers):
import yaml
NN = len(found_workers) - 1
if NN == 0:
elog.warn("no logger can be removed")
return
loggerPrefix = found_workers[0].split('-')[0]
delLoggerName = "%s-%d" % (loggerPrefix, NN)
while delLoggerName not in found_workers and NN < LOGGER_WORKERS_MAX:
NN = NN + 1
delLoggerName = "%s-%d" % (loggerPrefix, NN)
if delLoggerName not in found_workers:
elog.warn("no logger can be removed")
return
# remove file loggerNN:
loggerNN = os.path.join(config['loggers_abspath'], "%s.py" % delLoggerName)
loggerNNPosition = os.path.join(config['loggers_abspath'], ".%s.position" % delLoggerName)
elog.info("%s: %s", delLoggerName, loggerNN)
(fr, fd) = (None, None)
try:
loggingConfigYaml = config['logger']['logging_config']
loggingConfigYamlDefault = "%s.0" % loggingConfigYaml
shutil.copy(loggingConfigYaml, loggingConfigYamlDefault)
fr = open(loggingConfigYaml)
cfg = yaml.load(fr)
fr.close()
fr = None
del cfg['loggers'][delLoggerName]
fd = util.open_file(loggingConfigYaml)
yaml.dump(cfg, fd, default_flow_style=False)
fd.close()
fd = None
os.remove(loggerNN)
os.remove(loggerNNPosition)
shutil.copy(loggingConfigYaml, loggingConfigYamlDefault)
elog.info("success: %s", delLoggerName)
except:
shutil.copy(loggingConfigYamlDefault, loggingConfigYaml)
elog.error("failed: %s", delLoggerName)
pass
finally:
if fr:
fr.close()
if fd:
fd.close()
pass
########################################################################
# main function
#
def main(config, parser):
import utils.logger as logger
(options, args) = parser.parse_args(args=None, values=None)
logConfigDict = logger.set_logger(config['logger'], options.log_path, options.log_level)
loggers = {}
if config['loggers'] and len(config['loggers']):
loggers = load_logger_workers('loggers', config['loggers'], {
'logger_config' : logConfigDict,
'logger_stash' : options.logger_stash,
'batch_rows' : options.batch_rows,
'end_time' : options.end_time,
'end_rowid' : options.end_rowid
})
if len(loggers) > LOGGER_WORKERS_MAX:
elog.error("too many logger workers. please increase LOGGER_WORKERS_MAX and try again!")
exit(-1)
found_workers = list_logger_workers(logConfigDict, config['loggers_abspath'])
if options.list_logger_workers:
for logger_worker in found_workers:
elog.info("found worker: %s (%s/%s.py)", logger_worker, config['loggers_abspath'], logger_worker)
elog.force("total %d workers: %r", len(found_workers), found_workers)
return
if options.add_logger:
add_logger(config, found_workers)
return
if options.remove_logger:
remove_logger(config, found_workers)
return
if len(loggers) == 0 and options.force:
loggers = load_logger_workers('loggers', found_workers, {
'logger_config' : logConfigDict,
'logger_stash' : options.logger_stash,
'batch_rows' : options.batch_rows,
'end_time' : options.end_time,
'end_rowid' : options.end_rowid
})
if options.reset_logger_position:
if len(loggers):
reset_logger_position(loggers, config['loggers_abspath'], options.start_time, options.start_rowid)
else:
elog.error("--reset-position ignored: logger worker not found. use --force for all.")
pass
if options.startup:
if len(loggers):
startup(loggers, config)
else:
elog.error("--startup ignored: logger worker not found. use --force for all.")
pass
pass
########################################################################
# Usage:
# 1) start weblogger
# $ sudo galaxy_loggers.py weblogger --startup
#
# 2) start weblogger and weblogger2
# $ galaxy_loggers.py weblogger,weblogger2 --startup
# $ galaxy_loggers.py "weblogger, weblogger2" --startup
#
# 3) start all logger workers
# $ galaxy_loggers.py --startup --force
#
# 4) list all logger workers
# $ galaxy_loggers.py --list
#
# 5) reset the positions of weblogger and weblogger2
# $ galaxy_loggers.py weblogger,weblogger2 --reset-position
#
# 6) reset the position of weblogger to a specific point
# $ galaxy_loggers.py weblogger --reset-position --start-time="2000-01-01 00:00:00" --start-rowid=1000000000000
#
# 7) reset the positions of all workers to the default
# $ galaxy_loggers.py --reset-position --force
#
# 8) add a new loggerNN (NN is computed automatically)
# $ galaxy_loggers.py --add-logger
#
# 9) remove the most recently added loggerNN
# $ galaxy_loggers.py --remove-logger
#
# 10) show help
# $ galaxy_loggers.py --help
#
# Note:
# If the program is not started as the root user, the application's own log (applog) will not be created.
#
########################################################################
if __name__ == "__main__":
parser, group, optparse = util.use_parser_group(APPNAME, APPVER, APPHELP,
'%prog WORKERs [Options] ...\n WORKERs names for logger workers. (for instance: "weblogger,weblogger2")')
group.add_option("--log-path",
action="store", dest="log_path", type="string", default=os.path.join(APPHOME, "applog"),
help="specify path to store application log (NOT logger data files)",
metavar="LOGPATH")
group.add_option("--log-level",
action="store", dest="log_level", type="string", default="DEBUG",
help="specify log level for logger: DEBUG, WARN, INFO, ERROR. default: DEBUG",
metavar="LOGLEVEL")
# you may change below for override default setting:
stash_dir = os.path.join(APPHOME, "tmp/stash")
group.add_option("--stash",
action="store", dest="logger_stash", type="string", default=stash_dir,
help="specify stash dir for storing logger data files. '" + stash_dir + "' (default)",
metavar="STASH")
group.add_option("--list",
action="store_true", dest="list_logger_workers", default=False,
help="list all logger workers")
group.add_option("--add-logger",
action="store_true", dest="add_logger", default=False,
help="add a new logger")
group.add_option("--remove-logger",
action="store_true", dest="remove_logger", default=False,
help="remove the last added logger")
group.add_option("--reset-position",
action="store_true", dest="reset_logger_position", default=False,
help="reset given worker's position")
group.add_option("--start-time",
action="store", dest="start_time", type="string", default="2000-01-01 00:00:00",
help="reset given worker's start time. '2000-01-01 00:00:00' default",
metavar="DATETIME")
group.add_option("--start-rowid",
action="store", dest="start_rowid", type=int, default=1,
help="reset given worker's start rowid. 1 default",
metavar="ROWID")
group.add_option("--end-time",
action="store", dest="end_time", type="string", default=None,
help="specify the end time to stop workers. None (default)",
metavar="DATETIME")
group.add_option("--end-rowid",
action="store", dest="end_rowid", type=int, default=None,
help="specify the end rowid to stop workers. None (default)",
metavar="ROWID")
group.add_option("--startup",
action="store_true", dest="startup", default=False,
help="startup given worker logger")
group.add_option("--batch-rows",
action="store", dest="batch_rows", type=int, default=5000,
help="specify batch rows for logger. 5000 default",
metavar="ROWS")
group.add_option("--force",
action="store_true", dest="force", default=False,
help="force apply on all workers")
if len(sys.argv) == 1:
elog.warn("WORKERs not specified")
print "--------------------------------"
parser.print_help()
print "--------------------------------"
exit(1)
workers = None
firstarg = sys.argv[1]
if not firstarg.startswith('-'):
workers = []
names = firstarg.split(',')
for name in names:
workers.append(name.strip(' '))
pass
config = {
'loggers' : workers,
'logger' : {
'logging_config': os.path.join(APPHOME, 'conf/logger.config'),
'file': APPNAME + '.log',
'name': 'main'
},
'loggers_abspath' : os.path.join(APPHOME, "loggers")
}
main(config, parser)
sys.exit(0)
|
role_handler.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import queue
import threading
import time
from auth.bkiam import SUPPORT_IAM
from auth.bkiam.sync import RoleSync
from auth.config import AUTO_CONFIGURED_ROLES, CAN_HANDOVER_MANAGER_ROLES
from auth.exceptions import ActionCheckErr, BKIAMPolicesCountLimitErr
from auth.handlers.event import AddManagersEvent, DeleteManagersEvent, EventController
from auth.models.auth_models import AUTH_STATUS, UserRole
from auth.models.base_models import RoleConfig
from common.log import logger
from django.utils.translation import ugettext_lazy as _
class RoleHandler:
SUPPORT_IAM = SUPPORT_IAM
BKIAM_TOP_LIMIT = 5000
GRANT = "grant"
REVOKE = "revoke"
def __init__(self):
pass
@classmethod
def update_role_batch(cls, username, permissions):
"""
Batch update role permissions (grant and revoke)
@params {String} username  the current operator
@params {Dict[]} permissions  list of permission changes
@paramExample
[
{'user_id': 'user1', 'role_id': 'project.manager', 'scope_id': '1', 'operate': 'grant'},
{'user_id': 'user2', 'role_id': 'project.manager', 'scope_id': '1', 'operate': 'revoke'}
]
@returnExample
[
({'user_id': 'user1', 'role_id': 'project.manager', 'scope_id': '1', 'operate': 'grant'}, True),
({'user_id': 'user2', 'role_id': 'project.manager', 'scope_id': '1', 'operate': 'revoke'}, True)
]
"""
threads = []
def wrap_execute_result(func, result_queue, perm):
"""
Wrapper that records the result of each permission update into the result queue
"""
def _deco(results, *args, **kwargs):
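# note: the leading 'cls' passed through the thread's args tuple is absorbed by the
# 'results' parameter and ignored; func is already a bound classmethod, so the
# remaining positional args match its signature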
try:
func(*args, **kwargs)
result_queue.put((perm, True, None))
except Exception as err:
logger.exception(f"[Update RoleAuth] Failed to update perm({perm}), {err}")
result_queue.put((perm, False, str(err)))
return _deco
result_queue = queue.Queue()
for perm in permissions:
if perm["operate"] == cls.GRANT:
t = threading.Thread(
target=wrap_execute_result(cls.add_role, result_queue, perm),
args=(cls, username, perm["role_id"], perm["user_id"], perm["scope_id"]),
)
elif perm["operate"] == cls.REVOKE:
t = threading.Thread(
target=wrap_execute_result(cls.delete_role, result_queue, perm),
args=(cls, perm["role_id"], perm["user_id"], perm["scope_id"]),
)
else:
t = None
if t is not None:
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
results = []
while not result_queue.empty():
results.append(result_queue.get())
return results
@classmethod
def add_role(cls, username, role_id, user_id, scope_id):
"""
Add a role to a user
@param username:
@param role_id:
@param user_id:
@param scope_id:
@return:
"""
# skip if the user already has this role
if UserRole.objects.filter(scope_id=scope_id, role_id=role_id, user_id=user_id).exists():
return
try:
role = RoleConfig.objects.get(pk=role_id)
except RoleConfig.DoesNotExist:
raise ActionCheckErr()
UserRole.objects.create(scope_id=scope_id, role_id=role.role_id, user_id=user_id, created_by=username)
# IAM sync is enabled, so the change must also be propagated to IAM
if cls.SUPPORT_IAM:
# IAM currently limits a single user to 10000 policies for any one action
# to stay within that limit, the auth module caps the number of user-role relations at < 5000
if UserRole.objects.filter(role_id=role.role_id, user_id=user_id).count() > cls.BKIAM_TOP_LIMIT:
raise BKIAMPolicesCountLimitErr(
_("暂不支持单个用户加入超过 {} 个同一资源角色," "请前往蓝鲸权限中心,使用用户组+属性授权的方式进行授权").format(cls.BKIAM_TOP_LIMIT)
)
RoleSync().grant(user_id, role_id, scope_id)
if role_id == "raw_data.manager":
cls._push_raw_data_event(
AddManagersEvent(
data_set_type="raw_data", data_set_id=scope_id, managers=[user_id], change_time=time.time()
)
)
@classmethod
def _push_raw_data_event(cls, event):
"""
Push a data change event
"""
try:
EventController().push_event(event)
logger.info(f"[PUSH EVENT] Succeeded in pushing {event}")
except Exception as err:
logger.exception(f"[PUSH EVENT] Failed to push {event}, {err}")
@classmethod
def delete_role(cls, role_id, user_id, scope_id):
"""
Delete a role from a user
@param role_id:
@param user_id:
@param scope_id:
@return:
"""
count, _ = UserRole.objects.filter(scope_id=scope_id, role_id=role_id, user_id=user_id).delete()
if count > 0 and cls.SUPPORT_IAM:
RoleSync().revoke(user_id, role_id, scope_id)
if count > 0 and role_id == "raw_data.manager":
cls._push_raw_data_event(
DeleteManagersEvent(
data_set_type="raw_data", data_set_id=scope_id, managers=[user_id], change_time=time.time()
)
)
@classmethod
def clear_roles(cls, user_id):
"""
Clear all of a user's roles by setting the invalid status flag instead of actually deleting the rows, so the records are preserved, mainly for auditing
@param user_id:
@return:
"""
relations = UserRole.objects.filter(user_id=user_id).exclude(role_id__in=AUTO_CONFIGURED_ROLES)
instances = list(relations)
num = relations.update(auth_status=AUTH_STATUS.INVALID)
return num, instances
@classmethod
def handover_roles(cls, user_id, receiver):
"""
Hand over all manager roles to the receiver
"""
if user_id == receiver:
return 0, []
relations = UserRole.objects.filter(user_id=user_id).filter(role_id__in=CAN_HANDOVER_MANAGER_ROLES)
instances = list(relations)
num = relations.update(user_id=receiver)
# push change events for the raw data whose managers changed
raw_data_ids = [instance.scope_id for instance in instances if instance.role_id == "raw_data.manager"]
for raw_data_id in raw_data_ids:
cls._push_raw_data_event(
DeleteManagersEvent(
data_set_type="raw_data", data_set_id=raw_data_id, managers=[user_id], change_time=time.time()
)
)
cls._push_raw_data_event(
AddManagersEvent(
data_set_type="raw_data", data_set_id=raw_data_id, managers=[receiver], change_time=time.time()
)
)
return num, instances
@classmethod
def cmp_users(cls, user_ids, role_id, scope_id):
"""
Compare the given user list with the current members and report which users to add and which to delete
@param user_ids:
@param role_id:
@param scope_id:
@return:
"""
olds = cls.list_users(role_id, scope_id=scope_id)
result = {
"add": [user_id for user_id in user_ids if user_id not in olds],
"delete": [user_id for user_id in olds if user_id not in user_ids],
}
return result
@classmethod
def list_users(cls, role_id, scope_id=None):
"""
List the users under a given role
"""
rela_set = UserRole.objects.filter(role_id=role_id, scope_id=scope_id)
return list(set(rela_set.values_list("user_id", flat=True)))
|
freetests.py
|
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2013 Abram Hindle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# run python freetests.py
import unittest
import httpclient
import http.server
import threading
import socketserver
import random
import time
import urllib.parse
import json
BASEHOST = '127.0.0.1'
BASEPORT = 27600 + random.randint(1,100)
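# port is randomized, likely to avoid collisions with sockets left over from earlier runs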
httpclass = httpclient
#import mysolution
#httpclass = mysolution
# Sorry but in Python this comes out of the box!
class MyHTTPHandler(http.server.BaseHTTPRequestHandler):
post = None
get = None
def do_POST(self):
try:
if (self.post == None):
return None
else:
return self.post()
except Exception as e:
print("Exception %s\n" % e)
raise e
def do_GET(self):
try:
print("GET %s\n" % self.path)
if (self.get == None):
return None
else:
return self.get()
except Exception as e:
print("Exception %s\n" % e)
raise e
def make_http_server(host = BASEHOST, port = BASEPORT):
return http.server.HTTPServer( (host, port) , MyHTTPHandler)
# always returns 404
def nothing_available(self):
self.send_error(404, "File not found")
self.end_headers()
self.wfile.write(bytes("","utf-8"))
# repeats your path back
def echo_path_get(self):
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(bytes("%s\n" % self.path,"utf-8"))
# repeats your post back as json
def echo_post(self):
length = int(self.headers['Content-Length'])
post_data = urllib.parse.parse_qs(self.rfile.read(length).decode('utf-8'))
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(post_data),"utf-8"))
def header_check(self):
response = 200
errors = []
if 'Host' not in self.headers:
response = 400
errors.append("No Host header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
def die_on_method(self):
response = 405
errors = []
errors.append("Method Not Allowed")
if 'Host' not in self.headers:
errors.append("No Host header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
def post_header_check(self):
response = 200
errors = []
if 'Host' not in self.headers:
response = 400
errors.append("No Host header found")
if 'Content-length' not in self.headers:
response = 400
errors.append("No Content-Length header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
class TestHTTPClient(unittest.TestCase):
httpd = None
running = False
@classmethod
def setUpClass(self):
'''Cache the httpd server and run it as a thread'''
if (TestHTTPClient.httpd == None):
try:
self.thread = threading.Thread(target=self.run_server)
self.thread.start()
time.sleep(1)
except Exception as e:
print(e)
print("setUP: Thread died")
raise(e)
@classmethod
def run_server(self):
'''run the httpd server in a thread'''
try:
socketserver.TCPServer.allow_reuse_address = True
http.server.HTTPServer.allow_reuse_address = True
TestHTTPClient.httpd = make_http_server()
print("HTTP UP!\n")
TestHTTPClient.httpd.serve_forever()
print("HTTP has been shutdown!\n")
except Exception as e:
print(e)
print("run_server: Thread died")
def test404GET(self):
'''Test against 404 errors'''
MyHTTPHandler.get = nothing_available
http = httpclass.HTTPClient()
req = http.GET("http://%s:%d/49872398432" % (BASEHOST,BASEPORT) )
# print("get404")
# print(req.code)
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 404)
def test404POST(self):
'''Test against 404 errors'''
MyHTTPHandler.post = nothing_available
http = httpclass.HTTPClient()
req = http.POST("http://%s:%d/49872398432" % (BASEHOST,BASEPORT) )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 404)
def testGET(self):
'''Test HTTP GET'''
MyHTTPHandler.get = echo_path_get
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.GET( url )
# print("get")
# print(req)
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
self.assertTrue(req.body.find(path)>=0, "Data: [%s] " % req.body)
def testGETHeaders(self):
'''Test HTTP GET Headers'''
MyHTTPHandler.get = header_check
MyHTTPHandler.post = die_on_method
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.GET( url )
# print("getheader:")
# print(req)
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
def testPOSTHeaders(self):
'''Test HTTP POST Headers'''
MyHTTPHandler.post = post_header_check
MyHTTPHandler.get = die_on_method
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.POST( url )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200,"Code is %s but I wanted a 200 OK" % req.code)
# consider disabling this test until everything else works
def testInternetGets(self):
'''Test HTTP Get in the wild, these webservers are far less
forgiving'''
MyHTTPHandler.get = echo_path_get
http = httpclass.HTTPClient()
urls = [
"http://www.cs.ualberta.ca/",
"http://softwareprocess.es/static/SoftwareProcess.es.html",
"http://c2.com/cgi/wiki?CommonLispHyperSpec",
"http://slashdot.org"
]
for url in urls:
try:
req = http.GET( url )
except Exception as e:
print("An Exception was thrown for %s" % url)
self.assertTrue( False, "An Exception was thrown for %s %s" % (url,e))
self.assertTrue(req != None, "None Returned! %s" % url)
self.assertTrue(req.code == 200 or
req.code == 301 or
req.code == 302,
"Code: %s for %s" % (req.code, url))
if (req.code == 200):
self.assertTrue(req.body.find("DOCTYPE")>=0 or
req.body.find("<body")>=0 ,
"%s Data: [%s] " % (url,req.body))
def testPOST(self):
'''Test HTTP POST with an echo server'''
MyHTTPHandler.post = echo_post
http = httpclass.HTTPClient()
path = "post_echoer"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
args = {'a':'aaaaaaaaaaaaa',
'b':'bbbbbbbbbbbbbbbbbbbbbb',
'c':'c',
'd':'012345\r67890\n2321321\n\r'}
print("Sending POST!")
req = http.POST( url, args=args )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
print("Test Post Body: [%s]" % req.body)
outargs = json.loads(req.body)
print(outargs.__class__)
for key in args:
self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key)
for key in outargs:
self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key)
@classmethod
def tearDownClass(self):
if (TestHTTPClient.httpd!=None):
print("HTTP Shutdown in tearDown\n")
TestHTTPClient.httpd.shutdown()
TestHTTPClient.httpd.server_close()
time.sleep(1)
def test_test_webserver():
print("http://%s:%d/dsadsadsadsa\n" % (BASEHOST,BASEPORT) )
MyHTTPHandler.get = echo_path_get
MyHTTPHandler.post = echo_post
httpd = make_http_server()
try:
httpd.serve_forever()
finally:
httpd.shutdown()
if __name__ == '__main__':
unittest.main()
|
test_api.py
|
import mock
import errno
import re
import socket
import threading
import time
import warnings
from unittest import TestCase
import pytest
from ddtrace.api import API, Response
from ddtrace.compat import iteritems, httplib, PY3
from ddtrace.internal.runtime.container import CGroupInfo
from ddtrace.vendor.six.moves import BaseHTTPServer, socketserver
class _BaseHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
error_message_format = '%(message)s\n'
error_content_type = 'text/plain'
@staticmethod
def log_message(format, *args): # noqa: A002
pass
class _APIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
def do_PUT(self):
self.send_error(200, 'OK')
class _TimeoutAPIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
def do_PUT(self):
# This server sleeps longer than our timeout
time.sleep(5)
class _ResetAPIEndpointRequestHandlerTest(_BaseHTTPRequestHandler):
def do_PUT(self):
return
_HOST = '0.0.0.0'
_TIMEOUT_PORT = 8743
_RESET_PORT = _TIMEOUT_PORT + 1
class UDSHTTPServer(socketserver.UnixStreamServer, BaseHTTPServer.HTTPServer):
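# an HTTP server bound to a Unix domain socket rather than a TCP port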
def server_bind(self):
BaseHTTPServer.HTTPServer.server_bind(self)
def _make_uds_server(path, request_handler):
server = UDSHTTPServer(path, request_handler)
t = threading.Thread(target=server.serve_forever)
# Set daemon just in case something fails
t.daemon = True
t.start()
return server, t
@pytest.fixture
def endpoint_uds_server(tmp_path):
server, thread = _make_uds_server(str(tmp_path / 'uds_server_socket'), _APIEndpointRequestHandlerTest)
try:
yield server
finally:
server.shutdown()
thread.join()
def _make_server(port, request_handler):
server = BaseHTTPServer.HTTPServer((_HOST, port), request_handler)
t = threading.Thread(target=server.serve_forever)
# Set daemon just in case something fails
t.daemon = True
t.start()
return server, t
@pytest.fixture(scope='module')
def endpoint_test_timeout_server():
server, thread = _make_server(_TIMEOUT_PORT, _TimeoutAPIEndpointRequestHandlerTest)
try:
yield thread
finally:
server.shutdown()
thread.join()
@pytest.fixture(scope='module')
def endpoint_test_reset_server():
server, thread = _make_server(_RESET_PORT, _ResetAPIEndpointRequestHandlerTest)
try:
yield thread
finally:
server.shutdown()
thread.join()
class ResponseMock:
def __init__(self, content, status=200):
self.status = status
self.content = content
def read(self):
return self.content
def test_api_str():
api = API('localhost', 8126, https=True)
assert str(api) == 'https://localhost:8126'
api = API('localhost', 8126, '/path/to/uds')
assert str(api) == 'unix:///path/to/uds'
class APITests(TestCase):
def setUp(self):
# DEV: Mock here instead of in tests, before we have patched `httplib.HTTPConnection`
self.conn = mock.MagicMock(spec=httplib.HTTPConnection)
self.api = API('localhost', 8126)
def tearDown(self):
del self.api
del self.conn
def test_typecast_port(self):
api = API('localhost', u'8126')
self.assertEqual(api.port, 8126)
@mock.patch('logging.Logger.debug')
def test_parse_response_json(self, log):
test_cases = {
'OK': dict(
js=None,
log='Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date',
),
'OK\n': dict(
js=None,
log='Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date',
),
'error:unsupported-endpoint': dict(
js=None,
log='Unable to parse Datadog Agent JSON response: \'error:unsupported-endpoint\'',
),
42: dict( # int as key to trigger TypeError
js=None,
log='Unable to parse Datadog Agent JSON response: 42',
),
'{}': dict(js={}),
'[]': dict(js=[]),
# Priority sampling "rate_by_service" response
('{"rate_by_service": '
'{"service:,env:":0.5, "service:mcnulty,env:test":0.9, "service:postgres,env:test":0.6}}'): dict(
js=dict(
rate_by_service={
'service:,env:': 0.5,
'service:mcnulty,env:test': 0.9,
'service:postgres,env:test': 0.6,
},
),
),
' [4,2,1] ': dict(js=[4, 2, 1]),
}
for k, v in iteritems(test_cases):
log.reset_mock()
r = Response.from_http_response(ResponseMock(k))
js = r.get_json()
assert v['js'] == js
if 'log' in v:
log.assert_called_once()
msg = log.call_args[0][0] % log.call_args[0][1:]
assert re.match(v['log'], msg), msg
@mock.patch('ddtrace.compat.httplib.HTTPConnection')
def test_put_connection_close(self, HTTPConnection):
"""
When calling API._put
we close the HTTPConnection we create
"""
HTTPConnection.return_value = self.conn
with warnings.catch_warnings(record=True) as w:
self.api._put('/test', '<test data>', 1)
self.assertEqual(len(w), 0, 'Test raised unexpected warnings: {0!r}'.format(w))
self.conn.request.assert_called_once()
self.conn.close.assert_called_once()
@mock.patch('ddtrace.compat.httplib.HTTPConnection')
def test_put_connection_close_exception(self, HTTPConnection):
"""
When calling API._put raises an exception
we close the HTTPConnection we create
"""
HTTPConnection.return_value = self.conn
# Ensure calling `request` raises an exception
self.conn.request.side_effect = Exception
with warnings.catch_warnings(record=True) as w:
with self.assertRaises(Exception):
self.api._put('/test', '<test data>', 1)
self.assertEqual(len(w), 0, 'Test raised unexpected warnings: {0!r}'.format(w))
self.conn.request.assert_called_once()
self.conn.close.assert_called_once()
def test_https():
conn = mock.MagicMock(spec=httplib.HTTPSConnection)
api = API('localhost', 8126, https=True)
with mock.patch('ddtrace.compat.httplib.HTTPSConnection') as HTTPSConnection:
HTTPSConnection.return_value = conn
api._put('/test', '<test data>', 1)
conn.request.assert_called_once()
conn.close.assert_called_once()
def test_flush_connection_timeout_connect():
payload = mock.Mock()
payload.get_payload.return_value = 'foobar'
payload.length = 12
api = API(_HOST, 2019)
response = api._flush(payload)
if PY3:
assert isinstance(response, (OSError, ConnectionRefusedError)) # noqa: F821
else:
assert isinstance(response, socket.error)
assert response.errno in (errno.EADDRNOTAVAIL, errno.ECONNREFUSED)
def test_flush_connection_timeout(endpoint_test_timeout_server):
payload = mock.Mock()
payload.get_payload.return_value = 'foobar'
payload.length = 12
api = API(_HOST, _TIMEOUT_PORT)
response = api._flush(payload)
assert isinstance(response, socket.timeout)
def test_flush_connection_reset(endpoint_test_reset_server):
payload = mock.Mock()
payload.get_payload.return_value = 'foobar'
payload.length = 12
api = API(_HOST, _RESET_PORT)
response = api._flush(payload)
if PY3:
assert isinstance(response, (httplib.BadStatusLine, ConnectionResetError)) # noqa: F821
else:
assert isinstance(response, httplib.BadStatusLine)
def test_flush_connection_uds(endpoint_uds_server):
payload = mock.Mock()
payload.get_payload.return_value = 'foobar'
payload.length = 12
api = API(_HOST, 2019, uds_path=endpoint_uds_server.server_address)
response = api._flush(payload)
assert response.status == 200
@mock.patch('ddtrace.internal.runtime.container.get_container_info')
def test_api_container_info(get_container_info):
# When we have container information
# DEV: `get_container_info` will return a `CGroupInfo` with a `container_id` or `None`
info = CGroupInfo(container_id='test-container-id')
get_container_info.return_value = info
api = API(_HOST, 8126)
assert api._container_info is info
assert api._headers['Datadog-Container-Id'] == 'test-container-id'
# When we do not have container information
get_container_info.return_value = None
api = API(_HOST, 8126)
assert api._container_info is None
assert 'Datadog-Container-Id' not in api._headers
|
server.py
|
import asyncio
try:
import ujson as json
except ImportError:
import json
import os
import threading
import traceback
import rethinkdb as r
from flask import Flask, render_template, request, g, jsonify, make_response
from dashboard import dash
from utils.db import get_db, get_redis
from utils.ratelimits import ratelimit, endpoint_ratelimit
from utils.exceptions import BadRequest
from sentry_sdk import capture_exception
# Endpoint implementations are imported later, inside init_app(), once the app is running.
config = json.load(open('config.json'))
endpoints = None
app = Flask(__name__, template_folder='views', static_folder='views/assets')
app.register_blueprint(dash)
app.config['SECRET_KEY'] = config['client_secret']
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = 'true'
if 'sentry_dsn' in config:
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(config['sentry_dsn'],
integrations=[FlaskIntegration()])
@app.before_first_request
def init_app():
def run_gc_forever(loop):
asyncio.set_event_loop(loop)
try:
loop.run_forever()
except (SystemExit, KeyboardInterrupt):
loop.close()
gc_loop = asyncio.new_event_loop()
gc_thread = threading.Thread(target=run_gc_forever, args=(gc_loop,))
gc_thread.start()
g.gc_loop = gc_loop
from utils.endpoint import endpoints as endpnts
global endpoints
endpoints = endpnts
import endpoints as _ # noqa: F401
def require_authorization(func):
def wrapper(*args, **kwargs):
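# the Authorization header value is looked up as a primary key in the 'keys' table;
# a missing or unknown key coerces to False and the request is rejected with 401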
if r.table('keys').get(request.headers.get('authorization', '')).coerce_to('bool').default(False).run(get_db()):
return func(*args, **kwargs)
return jsonify({'status': 401, 'error': 'You are not authorized to access this endpoint'}), 401
return wrapper
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'rdb'):
g.rdb.close()
@app.route('/', methods=['GET'])
def index():
data = {}
for endpoint in endpoints:
data[endpoint] = {'hits': get_redis().get(endpoint + ':hits') or 0,
'avg_gen_time': endpoints[endpoint].get_avg_gen_time()}
return render_template('index.html', data=data)
@app.route('/endpoints.json', methods=['GET'])
def endpoints():
return jsonify({"endpoints": [{'name': x, 'parameters': y.params, 'ratelimit': f'{y.rate}/{y.per}s'} for x, y in endpoints.items()]})
@app.route('/documentation')
def docs():
return render_template('docs.html', url=request.host_url, data=sorted(endpoints.items()))
@app.route('/api/<endpoint>', methods=['GET', 'POST'])
@require_authorization
@ratelimit
def api(endpoint):
if endpoint not in endpoints:
return jsonify({'status': 404, 'error': 'Endpoint {} not found!'.format(endpoint)}), 404
if request.method == 'GET':
text = request.args.get('text', '')
avatars = [x for x in [request.args.get('avatar1', request.args.get('image', None)),
request.args.get('avatar2', None)] if x]
usernames = [x for x in [request.args.get('username1', None), request.args.get('username2', None)] if x]
kwargs = {}
for arg in request.args:
if arg not in ['text', 'username1', 'username2', 'avatar1', 'avatar2']:
kwargs[arg] = request.args.get(arg)
else:
if not request.is_json:
return jsonify({'status': 400, 'message': 'when submitting a POST request you must provide data in the '
'JSON format'}), 400
request_data = request.json
text = request_data.get('text', '')
avatars = list(request_data.get('avatars', list(request_data.get('images', []))))
usernames = list(request_data.get('usernames', []))
kwargs = {}
for arg in request_data:
if arg not in ['text', 'avatars', 'usernames']:
kwargs[arg] = request_data.get(arg)
cache = endpoints[endpoint].bucket
max_usage = endpoints[endpoint].rate
e_r = endpoint_ratelimit(auth=request.headers.get('Authorization', None), cache=cache, max_usage=max_usage)
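# a remaining count of -1 from endpoint_ratelimit means this key has exhausted the endpoint's bucket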
if e_r['X-RateLimit-Remaining'] == -1:
x = make_response((jsonify({'status': 429, 'error': 'You are being ratelimited'}), 429,
{'X-RateLimit-Limit': e_r['X-RateLimit-Limit'],
'X-RateLimit-Remaining': 0,
'X-RateLimit-Reset': e_r['X-RateLimit-Reset'],
'Retry-After': e_r['Retry-After']}))
return x
if endpoint == 'profile':
if request.headers.get('Authorization', None) != config.get('memer_token', None):
return jsonify({"error": 'This endpoint is limited to Dank Memer', 'status': 403}), 403
try:
result = endpoints[endpoint].run(key=request.headers.get('authorization'),
text=text,
avatars=avatars,
usernames=usernames,
kwargs=kwargs)
except BadRequest as br:
traceback.print_exc()
if 'sentry_dsn' in config:
capture_exception(br)
return jsonify({'status': 400, 'error': str(br)}), 400
except IndexError as e:
traceback.print_exc()
if 'sentry_dsn' in config:
capture_exception(e)
return jsonify({'status': 400, 'error': str(e) + '. Are you missing a parameter?'}), 400
except Exception as e:
traceback.print_exc()
if 'sentry_dsn' in config:
capture_exception(e)
return jsonify({'status': 500, 'error': str(e)}), 500
result.headers.add('X-RateLimit-Limit', max_usage)
result.headers.add('X-RateLimit-Remaining', e_r['X-RateLimit-Remaining'])
result.headers.add('X-RateLimit-Reset', e_r['X-RateLimit-Reset'])
return result, 200
if __name__ == '__main__':
app.run(debug=False, use_reloader=False)
|
run_robot_library.py
|
import os
from tempfile import mkdtemp, mkstemp
from robot import run
from multiprocessing import Process
from allure_commons_test.report import AllureReport
def run_robot_with_allure(*args, **kwargs):
root = os.path.abspath(os.path.join(__file__, "..", ".."))
targets = map(lambda target: os.path.join(root, target), args)
tmp_path = mkdtemp(dir=os.environ.get('TEST_TMP', '/tmp'))
if "testplan" in kwargs:
# kwargs.pop("testplan")
kwargs["prerunmodifier"] = "allure_robotframework.testplan"
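# write the supplied testplan JSON to a temp file and point allure_robotframework at it via ALLURE_TESTPLAN_PATH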
file, filename = mkstemp(suffix=".json", dir=tmp_path)
os.environ["ALLURE_TESTPLAN_PATH"] = filename
with os.fdopen(file, 'w') as tmp:
tmp.write(kwargs["testplan"])
def run_robot(path, **kw):
# ToDo: fix it (_core does not work correctly with multiprocessing)
# import six
# import allure_commons
# if six.PY2:
# reload(allure_commons._core)
# else:
# import importlib
# importlib.reload(allure_commons._core)
#
#
from allure_robotframework import allure_robotframework
listener = allure_robotframework(logger_path=tmp_path)
stdout_file = os.path.abspath(os.path.join(tmp_path, "..", "stdout.txt"))
output_path = os.path.abspath(os.path.join(tmp_path, ".."))
with open(stdout_file, 'w+') as stdout:
options = {"listener": listener, "outputdir": output_path, "stdout": stdout, "extension": "rst"}
options.update(kw)
run(path, **options)
robot_process = Process(target=run_robot, args=targets, kwargs=kwargs)
robot_process.start()
robot_process.join()
os.environ.pop("ALLURE_TESTPLAN_PATH", None)
return AllureReport(tmp_path)
|
fifo_queue_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.FIFOQueue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
@test_util.run_v1_only("FIFOQueue removed from v2")
class FIFOQueueTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list { type: DT_FLOAT } } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 10 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testMultiQueueConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shared_name="foo",
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
def testConstructorWithShapes(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
tensor_shape.TensorShape([5, 8])),
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
def testEnqueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueHalf(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float16)
enqueue_op = q.enqueue((10.0,))
enqueue_op.run()
def testEnqueueWithShape(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=(3, 2))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
self.assertEqual(1, q.size().eval())
def testEnqueueManyWithShape(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(
10, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertEqual(4, q.size().eval())
@test_util.run_in_graph_and_eager_modes
def testMultipleDequeues(self):
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue_many([[1, 2, 3]]))
a, b, c = self.evaluate([q.dequeue(), q.dequeue(), q.dequeue()])
self.assertAllEqual(set([1, 2, 3]), set([a, b, c]))
@test_util.run_in_graph_and_eager_modes
def testQueuesDontShare(self):
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q.enqueue(1))
q2 = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
self.evaluate(q2.enqueue(2))
self.assertAllEqual(self.evaluate(q2.dequeue()), 2)
self.assertAllEqual(self.evaluate(q.dequeue()), 1)
def testEnqueueDictWithoutNames(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
with self.assertRaisesRegexp(ValueError, "must have names"):
q.enqueue({"a": 12.0})
with self.assertRaisesRegexp(ValueError, "must have names"):
q.enqueue_many({"a": [12.0, 13.0]})
def testParallelEnqueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testDequeueHalf(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float16)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(3, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
self.evaluate(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(self.evaluate(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
for elem, result in zip(elems, results):
self.assertEqual([elem], result)
def testMultiEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
for i in xrange(len(elems)):
x_val, y_val = self.evaluate(dequeued_t)
x, y = elems[i]
self.assertEqual([x], x_val)
self.assertEqual([y], y_val)
def testQueueSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual([0], q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, self.evaluate(size))
dequeued_t.op.run()
self.assertEqual(0, self.evaluate(size))
def testEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], self.evaluate(size_t))
enqueue_op.run()
self.assertEqual([0], self.evaluate(size_t))
def testEmptyDequeueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueUpTo(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError("specified shapes"):
q.dequeue_many(0).eval()
def testMultiEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
for i in range(8):
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_elems[i % 4], float_val)
self.assertAllEqual(int_elems[i % 4], int_val)
def testDequeueMany(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testDequeueUpToNoBlocking(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testMultiDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertAllEqual(float_elems[8], float_val)
self.assertAllEqual(int_elems[8], int_val)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
def testMultiDequeueUpToNoBlocking(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_up_to(4)
enqueue_op.run()
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[0:4], float_val)
self.assertAllEqual(int_elems[0:4], int_val)
self.assertEqual([None], dequeued_t[0].get_shape().as_list())
self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
float_val, int_val = self.evaluate(dequeued_t)
self.assertAllEqual(float_elems[4:8], float_val)
self.assertAllEqual(int_elems[4:8], int_val)
def testHighDimension(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, (4, 4, 4, 4))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
def testEnqueueWrongShape(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), ((),
(2)))
with self.assertRaises(ValueError):
q.enqueue(([1, 2], [2, 2]))
with self.assertRaises(ValueError):
q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]]))
def testBatchSizeMismatch(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32,
dtypes_lib.int32), ((), (), ()))
with self.assertRaises(ValueError):
q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3]))
with self.assertRaises(ValueError):
q.enqueue_many(
([1, 2, 3], [1, 2], array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many(
(array_ops.placeholder(dtypes_lib.int32), [1, 2], [1, 2, 3]))
def testEnqueueManyEmptyTypeConversion(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
enq = q.enqueue_many(([], []))
self.assertEqual(dtypes_lib.int32, enq.inputs[1].dtype)
self.assertEqual(dtypes_lib.float32, enq.inputs[2].dtype)
def testEnqueueWrongType(self):
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.float32), (
(), ()))
with self.assertRaises(ValueError):
q.enqueue((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
with self.assertRaises(ValueError):
q.enqueue_many((array_ops.placeholder(dtypes_lib.int32),
array_ops.placeholder(dtypes_lib.int32)))
def testEnqueueWrongShapeAtRuntime(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (3, 3)))
elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue((elems_ok, elems_bad))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Expected \[3,3\], got \[3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))})
def testEnqueueDequeueManyWrongShape(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32), (
(2, 2), (3, 3)))
elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32)
elems_bad = array_ops.placeholder(dtypes_lib.int32)
enqueue_op = q.enqueue_many((elems_ok, elems_bad))
dequeued_t = q.dequeue_many(2)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Shape mismatch in tuple component 1. "
r"Expected \[2,3,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
self.evaluate(dequeued_t)
def testParallelEnqueueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
self.evaluate(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(1000, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(101)
enqueue_op.run()
close_op.run()
# Dequeue up to 101 items in parallel on 10 threads, from closed queue.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelEnqueueAndDequeue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(50, dtypes_lib.float32, shapes=())
initial_elements = [10.0] * 49
q.enqueue_many((initial_elements,)).run()
enqueue_op = q.enqueue((20.0,))
dequeued_t = q.dequeue()
def enqueue():
for _ in xrange(100):
self.evaluate(enqueue_op)
def dequeue():
for _ in xrange(100):
self.assertTrue(self.evaluate(dequeued_t) in (10.0, 20.0))
enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)]
dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for enqueue_thread in enqueue_threads:
enqueue_thread.start()
for dequeue_thread in dequeue_threads:
dequeue_thread.start()
for enqueue_thread in enqueue_threads:
enqueue_thread.join()
for dequeue_thread in dequeue_threads:
dequeue_thread.join()
# Dequeue the initial count of elements to clean up.
cleanup_elems = q.dequeue_many(49).eval()
for elem in cleanup_elems:
self.assertTrue(elem in (10.0, 20.0))
def testMixtureOfEnqueueAndEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shapes=())
enqueue_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
enqueue_op = q.enqueue((enqueue_placeholder,))
enqueuemany_placeholder = array_ops.placeholder(
dtypes_lib.int32, shape=(None,))
enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,))
dequeued_t = q.dequeue()
close_op = q.close()
def dequeue():
for i in xrange(250):
self.assertEqual(i, self.evaluate(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
elements_enqueued = 0
while elements_enqueued < 250:
# With equal probability, run Enqueue or enqueue_many.
if random.random() > 0.5:
enqueue_op.run({enqueue_placeholder: elements_enqueued})
elements_enqueued += 1
else:
count = random.randint(0, min(20, 250 - elements_enqueued))
range_to_enqueue = np.arange(
elements_enqueued, elements_enqueued + count, dtype=np.int32)
enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue})
elements_enqueued += count
close_op.run()
dequeue_thread.join()
self.assertEqual(0, q.size().eval())
def testMixtureOfDequeueAndDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shapes=())
enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),))
dequeued_t = q.dequeue()
count_placeholder = array_ops.placeholder(dtypes_lib.int32, shape=())
dequeuemany_t = q.dequeue_many(count_placeholder)
def enqueue():
self.evaluate(enqueue_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
elements_dequeued = 0
while elements_dequeued < 250:
# With equal probability, run Dequeue or dequeue_many.
if random.random() > 0.5:
self.assertEqual(elements_dequeued, self.evaluate(dequeued_t))
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
expected_range = np.arange(
elements_dequeued, elements_dequeued + count, dtype=np.int32)
self.assertAllEqual(expected_range,
dequeuemany_t.eval({
count_placeholder: count
}))
elements_dequeued += count
q.close().run()
enqueue_thread.join()
self.assertEqual(0, q.size().eval())
def testBlockingDequeueMany(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertAllEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.FIFOQueue(100, dtypes_lib.int32, ())
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.FIFOQueue(total_count, dtypes_lib.int32, ())
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
def testBlockingDequeueFromClosedQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def dequeue():
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueFromClosedEmptyQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
close_op = q.close()
dequeued_t = q.dequeue()
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyFromClosedQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems, self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueManyButNotAllFromClosedQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
def dequeue():
self.assertAllEqual(elems[:3], self.evaluate(dequeued_t))
self.assertAllEqual(elems[3:], self.evaluate(dequeued_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self):
with ops.Graph().as_default(), self.session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32, ())
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue()
def enqueue():
sess.run(enqueue_op)
def dequeue():
self.assertAllEqual(elems[0:3], sess.run(dequeued_t))
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(dequeued_t)
self.assertEqual(elems[3], sess.run(cleanup_dequeue_t))
def close():
sess.run(close_op)
enqueue_thread = self.checkedThread(target=enqueue)
enqueue_thread.start()
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_thread = self.checkedThread(target=close)
close_thread.start()
enqueue_thread.join()
dequeue_thread.join()
close_thread.join()
def testClosedBlockingDequeueManyRestoresPartialBatch(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, (dtypes_lib.float32, dtypes_lib.float32), (
(), ()))
elems_a = [1.0, 2.0, 3.0]
elems_b = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems_a, elems_b))
dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def dequeue():
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate([dequeued_a_t, dequeued_b_t])
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
# Test that the elements in the partially-dequeued batch are
# restored in the correct order.
for elem_a, elem_b in zip(elems_a, elems_b):
val_a, val_b = self.evaluate([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
self.assertEqual(elem_a, val_a)
self.assertEqual(elem_b, val_b)
self.assertEqual(0, q.size().eval())
def testBlockingDequeueManyFromClosedEmptyQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, ())
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
self.assertEqual([50.0], self.evaluate(dequeued_t))
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], self.evaluate(dequeued_t))
time.sleep(0.01)
self.assertEqual([50.0], self.evaluate(dequeued_t))
self.assertEqual([60.0], self.evaluate(dequeued_t))
# Make sure the thread finishes before exiting.
thread.join()
def testBlockingEnqueueBeforeClose(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed once the dequeue op runs.
self.evaluate(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.2)
def close():
self.evaluate(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 40.0, 50.0]:
self.assertEqual(elem, self.evaluate(dequeued_t))
self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.session() as sess:
q = data_flow_ops.FIFOQueue(4, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
sess.run(blocking_enqueue_op)
enqueue_thread = self.checkedThread(target=blocking_enqueue)
enqueue_thread.start()
# The close_op should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def close():
sess.run(close_op)
close_thread = self.checkedThread(target=close)
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 50.0, 60.0]:
self.assertEqual(elem, self.evaluate(dequeued_t))
def testDoesNotLoseValue(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(1, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
size_t = q.size()
enqueue_op.run()
for _ in range(500):
self.assertEqual(size_t.eval(), [1])
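# Two FIFOQueues constructed with the same shared_name refer to one underlying
# queue resource, so enqueues made through either Python handle are visible
# through both.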
def testSharedQueueSameSession(self):
with self.cached_session():
q1 = data_flow_ops.FIFOQueue(
1, dtypes_lib.float32, shared_name="shared_queue")
q1.enqueue((10.0,)).run()
q2 = data_flow_ops.FIFOQueue(
1, dtypes_lib.float32, shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q2.dequeue().eval(), [10.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), [1])
self.assertEqual(q2_size_t.eval(), [1])
self.assertEqual(q1.dequeue().eval(), [20.0])
self.assertEqual(q1_size_t.eval(), [0])
self.assertEqual(q2_size_t.eval(), [0])
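# Sharing a queue by name requires matching capacity, component types and
# shapes; any mismatch is reported when the second queue's resource op runs.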
def testIncompatibleSharedQueueErrors(self):
with self.cached_session():
q_a_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_a")
q_a_2 = data_flow_ops.FIFOQueue(15, dtypes_lib.float32, shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_b")
q_b_2 = data_flow_ops.FIFOQueue(10, dtypes_lib.int32, shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_c")
q_c_2 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_2 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32, shared_name="q_f")
q_f_2 = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_f_2.queue_ref.op.run()
def testSelectQueue(self):
with self.cached_session():
num_queues = 10
qlist = []
for _ in xrange(num_queues):
qlist.append(data_flow_ops.FIFOQueue(10, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.FIFOQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.cached_session():
q1 = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
q2 = data_flow_ops.FIFOQueue(15, dtypes_lib.float32)
enq_q = data_flow_ops.FIFOQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
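# The helpers below each run a blocking queue op and expect it to be cancelled;
# testResetOfBlockingOperation uses them to verify that closing the session
# cancels (and thereby unblocks) pending enqueue/dequeue operations.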
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(dequeue_many_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
sess.run(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.session() as sess:
q_empty = data_flow_ops.FIFOQueue(5, dtypes_lib.float32, ())
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
q_full = data_flow_ops.FIFOQueue(5, dtypes_lib.float32)
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
# Create a new session that hasn't been closed, so cached_session
# isn't messed up.
with self.session() as sess:
pass
def testBigEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(5, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
self.evaluate(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertAllEqual(elem, results)
def testBigDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(2, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(self.evaluate(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
self.evaluate(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertAllEqual(elem, results)
def testDtypes(self):
with self.cached_session() as sess:
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.int64,
dtypes_lib.uint16, dtypes_lib.bool, dtypes_lib.complex64,
dtypes_lib.complex128
]
shape = (32, 4, 128)
q = data_flow_ops.FIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))
input_tuple = []
for dtype in dtypes:
np_dtype = dtype.as_numpy_dtype
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes_lib.bool:
np_array = np_array > 0
elif dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
input_tuple.append(np_array)
q.enqueue_many(input_tuple).run()
output_tuple_t = q.dequeue_many(32)
output_tuple = self.evaluate(output_tuple_t)
for (input_elem, output_elem) in zip(input_tuple, output_tuple):
self.assertAllEqual(input_elem, output_elem)
def testDequeueEnqueueFail(self):
with self.cached_session() as session:
q = data_flow_ops.FIFOQueue(10, [dtypes_lib.int32], shapes=[()])
a = q.dequeue()
b = control_flow_ops.Assert(False, ["Before enqueue"])
with ops.control_dependencies([b]):
c = q.enqueue(33)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Before enqueue" in str(e)):
session.run([a, c])
@test_util.run_v1_only("FIFOQueue removed from v2")
class FIFOQueueDictTest(test.TestCase):
def testConstructor(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
names=("i", "j"),
shared_name="foo",
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: 'foo' } }
""", q.queue_ref.op.node_def)
self.assertEqual(["i", "j"], q.names)
def testConstructorWithShapes(self):
with ops.Graph().as_default():
q = data_flow_ops.FIFOQueue(
5, (dtypes_lib.int32, dtypes_lib.float32),
names=("i", "f"),
shapes=(tensor_shape.TensorShape([1, 1, 2, 3]),
tensor_shape.TensorShape([5, 8])),
name="Q")
self.assertTrue(isinstance(q.queue_ref, ops.Tensor))
self.assertProtoEquals("""
name:'Q' op:'FIFOQueueV2'
attr { key: 'component_types' value { list {
type: DT_INT32 type : DT_FLOAT
} } }
attr { key: 'shapes' value { list {
shape { dim { size: 1 }
dim { size: 1 }
dim { size: 2 }
dim { size: 3 } }
shape { dim { size: 5 }
dim { size: 8 } }
} } }
attr { key: 'capacity' value { i: 5 } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.queue_ref.op.node_def)
self.assertEqual(["i", "f"], q.names)
def testEnqueueDequeueOneComponent(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, dtypes_lib.float32, shapes=((),), names="f")
# Verify that enqueue() checks that when using names we must enqueue a
# dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue(10.0)
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue((10.0,))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"x": 12})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"f": 10.0, "s": "aa"})
enqueue_op = q.enqueue({"f": 10.0})
enqueue_op2 = q.enqueue({"f": 20.0})
enqueue_op3 = q.enqueue({"f": 30.0})
# Verify that enqueue_many() checks that when using names we must enqueue
# a dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op4 = q.enqueue_many([40.0, 50.0])
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"x": 12})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "s": ["aa", "bb"]})
enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0]})
dequeue = q.dequeue()
dequeue_2 = q.dequeue_many(2)
self.evaluate(enqueue_op)
self.evaluate(enqueue_op2)
self.evaluate(enqueue_op3)
self.evaluate(enqueue_op4)
f = sess.run(dequeue["f"])
self.assertEqual(10.0, f)
f = sess.run(dequeue_2["f"])
self.assertEqual([20.0, 30.0], list(f))
f = sess.run(dequeue_2["f"])
self.assertEqual([40.0, 50.0], list(f))
def testEnqueueDequeueMultipleComponent(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(
10, (dtypes_lib.float32, dtypes_lib.int32, dtypes_lib.string),
shapes=((), (), ()),
names=("f", "i", "s"))
# Verify that enqueue() checks that when using names we must enqueue a
# dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op = q.enqueue((10.0, 123, "aa"))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"x": 10.0})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"i": 12, "s": "aa"})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0, "x": 10.0})
enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0})
enqueue_op2 = q.enqueue({"i": 124, "s": "bb", "f": 20.0})
enqueue_op3 = q.enqueue({"i": 125, "s": "cc", "f": 30.0})
# Verify that enqueue_many() checks that when using names we must enqueue
# a dictionary.
with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
enqueue_op4 = q.enqueue_many(([40.0, 50.0], [126, 127], ["dd", "ee"]))
# The dictionary keys must match the queue component names.
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"x": [10.0, 20.0]})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({"i": [12, 12], "s": ["aa", "bb"]})
with self.assertRaisesRegexp(ValueError, "match names of Queue"):
enqueue_op4 = q.enqueue_many({
"f": [40.0, 50.0],
"i": [126, 127],
"s": ["dd", "ee"],
"x": [1, 2]
})
enqueue_op4 = q.enqueue_many({
"f": [40.0, 50.0],
"i": [126, 127],
"s": ["dd", "ee"]
})
dequeue = q.dequeue()
dequeue_2 = q.dequeue_many(2)
self.evaluate(enqueue_op)
self.evaluate(enqueue_op2)
self.evaluate(enqueue_op3)
self.evaluate(enqueue_op4)
i, f, s = sess.run([dequeue["i"], dequeue["f"], dequeue["s"]])
self.assertEqual(123, i)
self.assertEqual(10.0, f)
self.assertEqual(compat.as_bytes("aa"), s)
i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
self.assertEqual([124, 125], list(i))
self.assertEqual([20.0, 30.0], list(f))
self.assertEqual([compat.as_bytes("bb"), compat.as_bytes("cc")], list(s))
i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
self.assertEqual([126, 127], list(i))
self.assertEqual([40.0, 50.0], list(f))
self.assertEqual([compat.as_bytes("dd"), compat.as_bytes("ee")], list(s))
@test_util.run_v1_only("FIFOQueue removed from v2")
class FIFOQueueWithTimeoutTest(test.TestCase):
def testDequeueWithTimeout(self):
with self.session(
config=config_pb2.ConfigProto(operation_timeout_in_ms=20)) as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual(
compat.as_bytes(""), q.queue_ref.op.get_attr("container"))
dequeued_t = q.dequeue()
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Timed out waiting for notification"):
self.evaluate(dequeued_t)
def testReusableAfterTimeout(self):
with self.cached_session() as sess:
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
dequeued_t = q.dequeue()
enqueue_op = q.enqueue(37)
with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Timed out waiting for notification"):
sess.run(dequeued_t, options=config_pb2.RunOptions(timeout_in_ms=10))
with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Timed out waiting for notification"):
sess.run(dequeued_t, options=config_pb2.RunOptions(timeout_in_ms=10))
self.evaluate(enqueue_op)
self.assertEqual(37, self.evaluate(dequeued_t))
@test_util.run_v1_only("FIFOQueue removed from v2")
class QueueContainerTest(test.TestCase):
def testContainer(self):
with ops.Graph().as_default():
with ops.container("test"):
q = data_flow_ops.FIFOQueue(10, dtypes_lib.float32)
self.assertEqual(
compat.as_bytes("test"), q.queue_ref.op.get_attr("container"))
@test_util.run_v1_only("FIFOQueue removed from v2")
class FIFOQueueBenchmark(test.Benchmark):
"""Benchmark FIFOQueue operations."""
def _build_graph(self):
"""Builds a graph that enqueues and dequeues a single float.
Returns:
A tuple with the graph init tensor and graph output tensor.
"""
q = data_flow_ops.FIFOQueue(1, "float")
init = q.enqueue(1.0)
x = q.dequeue()
q_inc = q.enqueue(x + 1)
return init, q_inc
# TODO(suharshs): Add benchmarks for:
# - different capacities of the queue
# - various sizes of tensors
# - enqueue_many, dequeue_many
def _run(self, num_iters):
"""Benchmarks enqueueing and dequeueing from a FIFOQueue.
Args:
num_iters: The number of iterations to run.
Returns:
The duration of the run in seconds.
"""
graph = ops.Graph()
with graph.as_default():
init, output = self._build_graph()
with session_lib.Session(graph=graph) as session:
init.run()
_ = session.run(output) # warm up.
start_time = time.time()
for _ in range(num_iters):
_ = session.run(output)
duration = time.time() - start_time
print("%f secs per enqueue-dequeue" % (duration / num_iters))
self.report_benchmark(
name="fifo_queue", iters=num_iters, wall_time=duration / num_iters)
return duration
if __name__ == "__main__":
test.main()
|
main_window.py
|
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import csv
from decimal import Decimal as PyDecimal # Qt 5.12 also exports Decimal
import base64
from functools import partial
from collections import OrderedDict
from typing import List
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from electroncash import keystore, get_config
from electroncash.address import Address, ScriptOutput
from electroncash.bitcoin import COIN, TYPE_ADDRESS, TYPE_SCRIPT
from electroncash import networks
from electroncash.plugins import run_hook
from electroncash.i18n import _, ngettext, pgettext
from electroncash.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds,
ExcessiveFee, UserCancelled, InvalidPassword,
bh2u, bfh, format_fee_satoshis, Weak,
print_error)
import electroncash.web as web
from electroncash import Transaction
from electroncash import util, bitcoin, commands, cashacct
from electroncash import paymentrequest
from electroncash.wallet import Multisig_Wallet, sweep_preparations
from electroncash.contacts import Contact
try:
from electroncash.plot import plot_history
except:
plot_history = None
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, BTCkBEdit, BTCSatsByteEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .popup_widget import ShowPopupLabel, KillPopupLabel, PopupWidget
from . import cashacctqt
from .util import *
try:
# pre-load QtMultimedia at app start, if possible
# this is because lazy-loading it from within Python
# callbacks led to crashes on Linux, likely due to
# bugs in PyQt5 (crashes wouldn't happen when testing
# with PySide2!).
from PyQt5.QtMultimedia import QCameraInfo
del QCameraInfo # defensive programming: not always available so don't keep name around
except ImportError as e:
pass # we tried to pre-load it, failure is ok; camera just won't be available
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(Qt.PointingHandCursor)
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electroncash.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
# Note: self.clean_up_connections automatically detects signals named XXX_signal and disconnects them on window close.
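# (clean_up_connections itself is defined elsewhere in this class; conceptually
# it scans the instance for attributes whose names end in '_signal' and
# disconnects them, roughly like the illustrative -- not actual -- sketch:
#     for name in dir(self):
#         if name.endswith('_signal'):
#             try: getattr(self, name).disconnect()
#             except TypeError: pass  # nothing was connected
# )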
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
history_updated_signal = pyqtSignal()
labels_updated_signal = pyqtSignal() # emitted when an explicit update_labels() call happens. Interested GUIs should also listen for history_updated_signal, which likewise indicates that labels may have changed.
on_timer_signal = pyqtSignal() # functions wanting to be executed from timer_actions should connect to this signal, preferably via Qt.DirectConnection
ca_address_default_changed_signal = pyqtSignal(object) # passes cashacct.Info object to slot, which is the new default. Mainly emitted by address_list and address_dialog
status_icon_dict = dict() # app-global cache of "status_*" -> QIcon instances (for update_status() speedup)
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.wallet = wallet
self.config = config = gui_object.config
assert self.wallet and self.config and self.gui_object
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.op_return_toolong = False
self.internalpluginsdialog = None
self.externalpluginsdialog = None
self.hardwarewalletdialog = None
self.require_fee_update = False
self.cashaddr_toggled_signal = self.gui_object.cashaddr_toggled_signal # alias for backwards compatibility for plugins -- this signal used to live in each window and has since been refactored to gui-object where it belongs (since it's really an app-global setting)
self.force_use_single_change_addr = None # this is set by the CashShuffle plugin to a single string that will go into the tool-tip explaining why this preference option is disabled (see self.settings_dialog)
self.tl_windows = []
self.tx_external_keypairs = {}
self._tx_dialogs = Weak.Set()
self.tx_update_mgr = TxUpdateMgr(self) # manages network callbacks for 'new_transaction' and 'verified2', and collates GUI updates from said callbacks as a performance optimization
self.is_schnorr_enabled = self.wallet.is_schnorr_enabled # This is a function -- Support for plugins that may be using the 4.0.3 & 4.0.4 API -- this function used to live in this class, before being moved to Abstract_Wallet.
self.send_tab_opreturn_widgets, self.receive_tab_opreturn_widgets = [], [] # defaults to empty list
self._shortcuts = Weak.Set() # keep track of shortcuts and disable them on close
self.create_status_bar()
self.need_update = threading.Event()
self.labels_need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.fee_unit = config.get('fee_unit', 0)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.converter_tab = self.create_converter_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
# clears/inits the opreturn widgets
self.on_toggled_opreturn(bool(self.config.get('enable_opreturn')))
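# Optional tabs are only shown if the corresponding 'show_<name>_tab' config
# key (or its default) is set; the tab_* attributes recorded below let
# toggle_tab() re-insert a hidden tab at its original position later.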
def add_optional_tab(tabs, tab, icon, description, name, default=False):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), default):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.converter_tab, QIcon(":icons/tab_converter.svg"), _("Address Converter"), "converter", True)
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.init_menubar()
wrtabs = Weak.ref(tabs) # We use a weak reference here to help along python gc of QShortcut children: prevent the lambdas below from holding a strong ref to self.
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+W"), self, self.close) )
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+Q"), self, self.close) )
# Below is now added to the menu as Ctrl+R but we'll also support F5 like browsers do
self._shortcuts.add( QShortcut(QKeySequence("F5"), self, self.update_wallet) )
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs() and wrtabs().setCurrentIndex((wrtabs().currentIndex() - 1)%wrtabs().count())) )
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs() and wrtabs().setCurrentIndex((wrtabs().currentIndex() + 1)%wrtabs().count())) )
for i in range(tabs.count()):
self._shortcuts.add( QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs() and wrtabs().setCurrentIndex(i)) )
self.gui_object.cashaddr_toggled_signal.connect(self.update_cashaddr_icon)
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.gui_object.update_available_signal.connect(self.on_update_available) # shows/hides the update_available_button, emitted by update check mechanism when a new version is available
self.history_list.setFocus(True)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet()
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['blockchain_updated', 'wallet_updated',
'new_transaction', 'status', 'banner', 'verified2',
'fee', 'ca_verified_tx', 'ca_verification_failed']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
_first_shown = True
def showEvent(self, event):
super().showEvent(event)
if event.isAccepted() and self._first_shown:
self._first_shown = False
weakSelf = Weak.ref(self)
#
#try:
# # Amaury's recommendation -- only remind a subset of users to enable it.
# self.remind_cashshuffle_enabled = bool(int.from_bytes(bytes.fromhex(self.wallet.get_public_key(self.wallet.get_addresses()[0])), byteorder='big') & 0x3)
#except (AttributeError, ValueError, TypeError):
# # wallet lacks the get_public_key method
# self.remind_cashshuffle_enabled = False
self.remind_cashshuffle_enabled = False # For now globally disabled
#QTimer.singleShot(300, lambda: weakSelf() and weakSelf().do_cash_shuffle_reminder())
#
# do this immediately after this event handler finishes -- noop on everything but linux
QTimer.singleShot(0, lambda: weakSelf() and weakSelf().gui_object.lin_win_maybe_show_highdpi_caveat_msg(weakSelf()))
def on_history(self, event, *args):
# NB: event should always be 'on_history'
if not args or args[0] is self.wallet:
self.new_fx_history_signal.emit()
@rate_limited(3.0) # Rate limit to no more than once every 3 seconds
def on_fx_history(self):
if self.cleaned_up: return
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
@rate_limited(3.0) # Rate limit to no more than once every 3 seconds
def on_fx_quotes(self):
if self.cleaned_up: return
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def toggle_tab(self, tab):
show = self.tabs.indexOf(tab) == -1
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_format = _("Hide {tab_description}") if show else _("Show {tab_description}")
item_text = item_format.format(tab_description=tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self), self.wallet.basename())
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
try:
traceback.print_exception(*exc_info)
except OSError:
# Issue #662, user got IO error.
# We want them to still get the error displayed to them.
pass
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
#self.print_error("on_network:", event, *args)
if event == 'wallet_updated':
if args[0] is self.wallet:
self.need_update.set()
elif event == 'blockchain_updated':
self.need_update.set()
elif event == 'new_transaction':
self.tx_update_mgr.notif_add(args) # added only if this wallet's tx
if args[1] is self.wallet:
self.network_signal.emit(event, args)
elif event == 'verified2':
self.tx_update_mgr.verif_add(args) # added only if this wallet's tx
if args[0] is self.wallet:
self.network_signal.emit(event, args)
elif event in ['status', 'banner', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
elif event in ('ca_verified_tx', 'ca_verification_failed'):
if args[0] is self.wallet.cashacct:
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
if self.cleaned_up: return
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'fee':
pass
elif event == 'new_transaction':
self.check_and_reset_receive_address_if_needed()
elif event in ('ca_verified_tx', 'ca_verification_failed'):
pass
elif event == 'verified2':
pass
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def _close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
def load_wallet(self):
self.wallet.thread = TaskThread(self, self.on_error, name = self.wallet.diagnostic_name() + '/Wallet')
self.update_recently_visited(self.wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.tray.isVisible():
self.hide()
else:
self.show()
if self._is_invalid_testnet_wallet():
self.gui_object.daemon.stop_wallet(self.wallet.storage.path)
self._rebuild_history_action.setEnabled(False)
self._warn_if_invalid_testnet_wallet()
self.watching_only_changed()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
run_hook('load_wallet', self.wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
title = '%s %s - %s' % (networks.net.TITLE,
self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoin Cash with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoin Cash to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def _is_invalid_testnet_wallet(self):
if not networks.net.TESTNET:
return False
is_old_bad = False
xkey = ((hasattr(self.wallet, 'get_master_public_key') and self.wallet.get_master_public_key())
or None)
if xkey:
from electroncash.bitcoin import deserialize_xpub, InvalidXKeyFormat
try:
xp = deserialize_xpub(xkey)
except InvalidXKeyFormat:
is_old_bad = True
return is_old_bad
def _warn_if_invalid_testnet_wallet(self):
''' This was added after the upgrade from the bad xpub testnet wallets
to the good tpub testnet wallet format in version 3.3.6. See #1164.
We warn users if they are using the bad wallet format and instruct
them on how to upgrade their wallets.'''
is_old_bad = self._is_invalid_testnet_wallet()
if is_old_bad:
msg = ' '.join([
_("This testnet wallet has an invalid master key format."),
_("(Old versions of Electron Cash before 3.3.6 produced invalid testnet wallets)."),
'<br><br>',
_("In order to use this wallet without errors with this version of EC, please <b>re-generate this wallet from seed</b>."),
"<br><br><em><i>~SPV stopped~</i></em>"
])
self.show_critical(msg, title=_('Invalid Master Key'), rich_text=True)
return is_old_bad
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
if not os.path.exists(wallet_folder):
wallet_folder = None
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
if filename.lower().endswith('.txn'):
# they did File -> Open on a .txn, just do that.
self.do_process_from_file(fileName=filename)
return
self.gui_object.new_window(filename)
def backup_wallet(self):
self.wallet.storage.write() # make sure file is committed to disk
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
# Copy file contents
shutil.copyfile(path, new_path)
# Copy file attributes if possible
# (not supported on targets like Flatpak documents)
try:
shutil.copystat(path, new_path)
except (IOError, os.error):
pass
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
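# sorted() here acts only as a sanity check that the stored value is a
# sortable list; if it isn't, the recently-open list is reset below.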
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent2 = []
for k in recent:
if os.path.exists(k):
recent2.append(k)
recent = recent2[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
gui_object = self.gui_object
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return self.gui_object.get_wallet_folder()
def new_wallet(self):
try:
full_path = self.gui_object.get_new_wallet_path()
except FileNotFoundError as e:
self.show_error(str(e))
return
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = self.menuBar()
menubar.setObjectName(self.diagnostic_name() + ".QMenuBar")
destroyed_print_error(menubar)
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open") + "...", self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore") + "...", self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy") + "...", self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete") + "...", self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information") + "...", self.show_master_public_keys, QKeySequence("Ctrl+I"))
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password") + "...", self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed") + "...", self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep") + "...", self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import") + "...", self.do_import_privkey)
self.export_menu = self.private_keys_menu.addMenu(_("&Export"))
self.export_menu.addAction(_("&WIF Plaintext") + "...", self.export_privkeys_dialog)
self.export_menu.addAction(_("&BIP38 Encrypted") + "...", self.export_bip38_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses") + "...", self.import_addresses)
wallet_menu.addSeparator()
self._rebuild_history_action = wallet_menu.addAction(_("&Rebuild history"), self.rebuild_history)
self._scan_beyond_gap_action = wallet_menu.addAction(_("&Scan beyond gap..."), self.scan_beyond_gap)
self._scan_beyond_gap_action.setEnabled(bool(self.wallet.is_deterministic() and self.network))
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import") + "...", self.do_import_labels)
labels_menu.addAction(_("&Export") + "...", self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New") + "...", self.new_contact_dialog)
contacts_menu.addAction(_("Import") + "...", lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export") + "...", lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import") + "...", lambda: self.invoice_list.import_invoices())
hist_menu = wallet_menu.addMenu(_("&History"))
#hist_menu.addAction(_("Plot"), self.plot_history_dialog).setEnabled(plot_history is not None)
hist_menu.addAction(_("Export") + "...", self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search, QKeySequence("Ctrl+F"))
wallet_menu.addAction(_("&Refresh GUI"), self.update_wallet, QKeySequence("Ctrl+R"))
def add_toggle_action(view_menu, tab):
is_shown = self.tabs.indexOf(tab) > -1
item_format = _("Hide {tab_description}") if is_shown else _("Show {tab_description}")
item_name = item_format.format(tab_description=tab.tab_description)
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.converter_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
prefs_tit = _("Preferences") + "..."
a = tools_menu.addAction(prefs_tit, self.settings_dialog, QKeySequence("Ctrl+,") ) # Note: on macOS this hotkey sequence won't be shown in the menu (since it's reserved by the system), but will still work. :/
if sys.platform == 'darwin':
# This turns off the heuristic matching based on name and keeps the
# "Preferences" action out of the application menu and into the
# actual menu we specified on macOS.
a.setMenuRole(QAction.NoRole)
gui_object = self.gui_object
weakSelf = Weak.ref(self)
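        # Only a weak reference is captured in the lambda below, so the menu action
        # does not keep this window alive after it has been closed.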
tools_menu.addAction(_("&Network") + "...", lambda: gui_object.show_network_dialog(weakSelf()), QKeySequence("Ctrl+K"))
tools_menu.addAction(_("Optional &Features") + "...", self.internal_plugins_dialog, QKeySequence("Shift+Ctrl+P"))
tools_menu.addAction(_("Installed &Plugins") + "...", self.external_plugins_dialog, QKeySequence("Ctrl+P"))
if sys.platform.startswith('linux'):
tools_menu.addSeparator()
tools_menu.addAction(_("&Hardware wallet support..."), self.hardware_wallet_support)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message") + "...", self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message") + "...", self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany, QKeySequence("Ctrl+M"))
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("From &file") + "...", self.do_process_from_file)
raw_transaction_menu.addAction(_("From &text") + "...", self.do_process_from_text, QKeySequence("Ctrl+T"))
raw_transaction_menu.addAction(_("From the &blockchain") + "...", self.do_process_from_txid, QKeySequence("Ctrl+B"))
raw_transaction_menu.addAction(_("From &QR code") + "...", self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
tools_menu.addSeparator()
if ColorScheme.dark_scheme and sys.platform != 'darwin': # use dark icon in menu except for on macOS where we can't be sure it will look right due to the way menus work on macOS
icon = QIcon(":icons/cashacct-button-darkmode.png")
else:
icon = QIcon(":icons/cashacct-logo.png")
tools_menu.addAction(icon, _("Lookup &Cash Account..."), self.lookup_cash_account_dialog, QKeySequence("Ctrl+L"))
tools_menu.addAction(icon, _("&Register Cash Account..."), lambda: self.register_new_cash_account(addr='pick'), QKeySequence("Ctrl+G"))
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("About Qt"), self.app.aboutQt)
help_menu.addAction(_("&Check for updates..."), lambda: self.gui_object.show_update_checker(self))
help_menu.addAction(_("&Official website"), lambda: webopen("https://electroncash.org"))
help_menu.addSeparator()
help_menu.addAction(_("Documentation"), lambda: webopen("http://electroncash.readthedocs.io/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
# The message is intentionally untranslated, leave it like that
self.pay_to_URI('{}:{}?message=donation for {}'
.format(networks.net.CASHADDR_PREFIX, d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electron Cash",
"<p><font size=+3><b>Electron Cash</b></font></p><p>" + _("Version") + f" {self.wallet.electrum_version}" + "</p>" +
'<p><span style="font-size:11pt; font-weight:500;">' + "Copyright © 2017-2019<br>Electron Cash LLC & The Electron Cash Developers" + "</span></p>" +
'<p><span style="font-weight:200;">' +
_("Electron Cash's focus is speed, with low resource usage and simplifying Bitcoin Cash. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin Cash system.") +
"</span></p>"
)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/Electron-Cash/Electron-Cash/issues\">https://github.com/Electron-Cash/Electron-Cash/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electron Cash (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electron Cash - " + _("Reporting Bugs"), rich_text = True)
def notify(self, message):
self.gui_object.notify(message)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
return __class__.static_getOpenFileName(title=title, filter=filter, config=self.config, parent=self)
def getSaveFileName(self, title, filename, filter = ""):
return __class__.static_getSaveFileName(title=title, filename=filename, filter=filter, config=self.config, parent=self)
@staticmethod
def static_getOpenFileName(*, title, parent=None, config=None, filter=""):
if not config:
config = get_config()
userdir = os.path.expanduser('~')
directory = config.get('io_dir', userdir) if config else userdir
fileName, __ = QFileDialog.getOpenFileName(parent, title, directory, filter)
if fileName and directory != os.path.dirname(fileName) and config:
config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
@staticmethod
def static_getSaveFileName(*, title, filename, parent=None, config=None, filter=""):
if not config:
config = get_config()
userdir = os.path.expanduser('~')
directory = config.get('io_dir', userdir) if config else userdir
        path = os.path.join(directory, filename)
fileName, __ = QFileDialog.getSaveFileName(parent, title, path, filter)
if fileName and directory != os.path.dirname(fileName) and config:
config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self._update_wallet() # will clear flag when it runs. (also clears labels_need_update as well)
if self.labels_need_update.is_set():
self._update_labels() # will clear flag when it runs.
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
# hook for other classes to be called here. For example the tx_update_mgr is called here (see TxUpdateMgr.do_check).
self.on_timer_signal.emit()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount, is_diff=False):
text = self.format_amount(amount, is_diff=is_diff) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount, is_diff=is_diff)
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
sats_per_byte = format_fee_satoshis(fee_rate/1000, max(self.num_zeros, 1))
return _('{sats_per_byte} sat/byte').format(sats_per_byte=sats_per_byte)
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
if self.decimal_point in util.inv_base_units:
return util.inv_base_units[self.decimal_point]
raise Exception('Unknown base unit')
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / PyDecimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * PyDecimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
_network_status_tip_dict = dict()
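    # Class-level cache of the status-bar tooltip strings; populated on the first
    # update_status() call and shared by all wallet windows.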
def update_status(self):
if not self.wallet:
return
icon_dict = ElectrumWindow.status_icon_dict
if not icon_dict:
# cache the icons to save on CPU overhead per update_status call
icon_dict.update({
"status_disconnected" : QIcon(":icons/status_disconnected.svg"),
"status_waiting" : QIcon(":icons/status_waiting.svg"),
"status_lagging" : QIcon(":icons/status_lagging.svg"),
"status_lagging_fork" : QIcon(":icons/status_lagging_fork.svg"),
"status_connected" : QIcon(":icons/status_connected.svg"),
"status_connected_fork" : QIcon(":icons/status_connected_fork.svg"),
"status_connected_proxy" : QIcon(":icons/status_connected_proxy.svg"),
"status_connected_proxy_fork" : QIcon(":icons/status_connected_proxy_fork.svg"),
})
status_tip_dict = ElectrumWindow._network_status_tip_dict
if not status_tip_dict:
# Since we're caching stuff, might as well cache this too
status_tip_dict.update({
"status_disconnected" : _('Network Status') + " - " + _("Offline"),
"status_waiting" : _('Network Status') + " - " + _("Updating..."),
"status_lagging" : _('Network Status') + " - " + '',
"status_lagging_fork" : _('Network Status') + " - " + _("Chain fork(s) detected"),
"status_connected" : _('Network Status') + " - " + _("Connected"),
"status_connected_fork" : _('Network Status') + " - " + _("Chain fork(s) detected"),
"status_connected_proxy" : _('Network Status') + " - " + _("Connected via proxy"),
"status_connected_proxy_fork" : _('Network Status') + " - " + _("Connected via proxy") + "; " + _("Chain fork(s) detected"),
})
status_tip = ''
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = icon_dict["status_disconnected"]
status_tip = status_tip_dict['status_disconnected']
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
num_chains = len(self.network.get_blockchains())
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = icon_dict["status_waiting"]
status_tip = status_tip_dict["status_waiting"]
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
if num_chains <= 1:
icon = icon_dict["status_lagging"]
status_tip = status_tip_dict["status_lagging"] + text
else:
icon = icon_dict["status_lagging_fork"]
status_tip = status_tip_dict["status_lagging_fork"] + "; " + text
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
extra = run_hook("balance_label_extra", self)
if isinstance(extra, str) and extra:
text += " [{}]".format(extra)
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
n_unverif = self.wallet.get_unverified_tx_pending_count()
if n_unverif >= 10:
# if there are lots left to verify, display this informative text
text += " " + ( _("[%d unverified TXs]") % n_unverif )
if not self.network.proxy:
icon = icon_dict["status_connected"] if num_chains <= 1 else icon_dict["status_connected_fork"]
status_tip = status_tip_dict["status_connected"] if num_chains <= 1 else status_tip_dict["status_connected_fork"]
else:
icon = icon_dict["status_connected_proxy"] if num_chains <= 1 else icon_dict["status_connected_proxy_fork"]
status_tip = status_tip_dict["status_connected_proxy"] if num_chains <= 1 else status_tip_dict["status_connected_proxy_fork"]
else:
text = _("Not connected")
icon = icon_dict["status_disconnected"]
status_tip = status_tip_dict["status_disconnected"]
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
self.status_button.setStatusTip( status_tip )
self.update_cashshuffle_icon()
def update_wallet(self):
self.need_update.set() # will enqueue an _update_wallet() call in at most 0.5 seconds from now.
def _update_wallet(self):
''' Called by self.timer_actions every 0.5 secs if need_update flag is set.
Note that the flag is actually cleared by update_tabs.'''
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
@rate_limited(1.0, classlevel=True, ts_after=True) # Limit tab updates to no more than 1 per second, app-wide. Multiple calls across instances will be collated into 1 deferred series of calls (1 call per extant instance)
def update_tabs(self):
if self.cleaned_up: return
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history, also clears self.tx_update_mgr.verif_q
self.need_update.clear() # clear flag
if self.labels_need_update.is_set():
# if flag was set, might as well declare the labels updated since they necessarily were due to a full update.
self.labels_updated_signal.emit() # just in case client code was waiting for this signal to proceed.
self.labels_need_update.clear() # clear flag
def update_labels(self):
self.labels_need_update.set() # will enqueue an _update_labels() call in at most 0.5 seconds from now
@rate_limited(1.0)
def _update_labels(self):
''' Called by self.timer_actions every 0.5 secs if labels_need_update flag is set. '''
if self.cleaned_up: return
self.history_list.update_labels()
self.address_list.update_labels()
self.utxo_list.update_labels()
self.update_completions()
self.labels_updated_signal.emit()
self.labels_need_update.clear() # clear flag
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def show_address(self, addr, *, parent=None):
parent = parent or self.top_level_window()
from . import address_dialog
d = address_dialog.AddressDialog(self, addr, windowParent=parent)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
d = show_transaction(tx, self, tx_desc)
self._tx_dialogs.add(d)
def on_toggled_opreturn(self, b):
''' toggles opreturn-related widgets for both the receive and send
tabs'''
b = bool(b)
self.config.set_key('enable_opreturn', b)
# send tab
if not b:
self.message_opreturn_e.setText("")
self.op_return_toolong = False
for x in self.send_tab_opreturn_widgets:
x.setVisible(b)
# receive tab
for x in self.receive_tab_opreturn_widgets:
x.setVisible(b)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address = None
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton()
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin Cash address where the payment should be received. Note that each payment request uses a different Bitcoin Cash address.')
label = HelpLabel(_('&Receiving address'), msg)
label.setBuddy(self.receive_address_e)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.gui_object.cashaddr_toggled_signal.connect(self.update_receive_address_widget)
grid.addWidget(label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
# Cash Account for this address (if any)
msg = _("The Cash Account (if any) associated with this address. It doesn't get saved with the request, but it is shown here for your convenience.\n\nYou may use the Cash Accounts button to register a new Cash Account for this address.")
label = HelpLabel(_('Cash Accoun&t'), msg)
class CashAcctE(ButtonsLineEdit):
my_network_signal = pyqtSignal(str, object)
            ''' Inner class encapsulating the Cash Account Edit.
            Note:
            - `slf` in this class refers to the CashAcctE instance.
            - `self` refers to the wrapping ElectrumWindow instance. '''
def __init__(slf, *args):
super().__init__(*args)
slf.font_default_size = slf.font().pointSize()
icon = ":icons/cashacct-button-darkmode.png" if ColorScheme.dark_scheme else ":icons/cashacct-logo.png"
slf.ca_but = slf.addButton(icon, self.register_new_cash_account, _("Register a new Cash Account for this address"))
slf.ca_copy_b = slf.addCopyButton()
slf.setReadOnly(True)
slf.info = None
slf.cleaned_up = False
self.network_signal.connect(slf.on_network_qt)
slf.my_network_signal.connect(slf.on_network_qt)
if self.wallet.network:
self.wallet.network.register_callback(slf.on_network, ['ca_updated_minimal_chash'])
def clean_up(slf):
slf.cleaned_up = True
try: self.network_signal.disconnect(slf.on_network_qt) # need to disconnect parent signals due to PyQt bugs, see #1531
except TypeError: pass
if self.wallet.network:
self.wallet.network.unregister_callback(slf.on_network)
def set_cash_acct(slf, info: cashacct.Info = None, minimal_chash = None):
if not info and self.receive_address:
minimal_chash = None
ca_list = self.wallet.cashacct.get_cashaccounts(domain=[self.receive_address])
ca_list.sort(key=lambda x: ((x.number or 0), str(x.collision_hash)))
info = self.wallet.cashacct.get_address_default(ca_list)
if info:
slf.ca_copy_b.setDisabled(False)
f = slf.font(); f.setItalic(False); f.setPointSize(slf.font_default_size); slf.setFont(f)
slf.setText(info.emoji + " " + self.wallet.cashacct.fmt_info(info, minimal_chash=minimal_chash))
else:
slf.setText(pgettext("Referencing CashAccount", "None"))
f = slf.font(); f.setItalic(True); f.setPointSize(slf.font_default_size-1); slf.setFont(f)
slf.ca_copy_b.setDisabled(True)
slf.info = info
def on_copy(slf):
''' overrides super class '''
QApplication.instance().clipboard().setText(slf.text()[3:] + ' ' + slf.text()[:1]) # cut off the leading emoji, and add it to the end
QToolTip.showText(QCursor.pos(), _("Cash Account copied to clipboard"), slf)
def on_network_qt(slf, event, args=None):
''' pick up cash account changes and update receive tab. Called
from GUI thread. '''
if not args or self.cleaned_up or slf.cleaned_up or args[0] != self.wallet.cashacct:
return
if event == 'ca_verified_tx' and self.receive_address and self.receive_address == args[1].address:
slf.set_cash_acct()
elif event == 'ca_updated_minimal_chash' and slf.info and slf.info.address == args[1].address:
slf.set_cash_acct()
def on_network(slf, event, *args):
if event == 'ca_updated_minimal_chash' and args[0] == self.wallet.cashacct:
slf.my_network_signal.emit(event, args)
def showEvent(slf, e):
super().showEvent(e)
if e.isAccepted():
slf.set_cash_acct()
self.cash_account_e = CashAcctE()
label.setBuddy(self.cash_account_e)
grid.addWidget(label, 1, 0)
grid.addWidget(self.cash_account_e, 1, 1, 1, -1)
self.receive_message_e = QLineEdit()
label = QLabel(_('&Description'))
label.setBuddy(self.receive_message_e)
grid.addWidget(label, 2, 0)
grid.addWidget(self.receive_message_e, 2, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
# OP_RETURN requests
self.receive_opreturn_e = QLineEdit()
msg = _("You may optionally append an OP_RETURN message to the payment URI and/or QR you generate.\n\nNote: Not all wallets yet support OP_RETURN parameters, so make sure the other party's wallet supports OP_RETURN URIs.")
self.receive_opreturn_label = label = HelpLabel(_('&OP_RETURN'), msg)
label.setBuddy(self.receive_opreturn_e)
self.receive_opreturn_rawhex_cb = QCheckBox(_('Raw &hex script'))
self.receive_opreturn_rawhex_cb.setToolTip(_('If unchecked, the textbox contents are UTF8-encoded into a single-push script: <tt>OP_RETURN PUSH <text></tt>. If checked, the text contents will be interpreted as a raw hexadecimal script to be appended after the OP_RETURN opcode: <tt>OP_RETURN <script></tt>.'))
grid.addWidget(label, 3, 0)
grid.addWidget(self.receive_opreturn_e, 3, 1, 1, 3)
grid.addWidget(self.receive_opreturn_rawhex_cb, 3, 4, Qt.AlignLeft)
self.receive_opreturn_e.textChanged.connect(self.update_receive_qr)
self.receive_opreturn_rawhex_cb.clicked.connect(self.update_receive_qr)
self.receive_tab_opreturn_widgets = [
self.receive_opreturn_e,
self.receive_opreturn_rawhex_cb,
self.receive_opreturn_label,
]
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
label = QLabel(_('Requested &amount'))
label.setBuddy(self.receive_amount_e)
grid.addWidget(label, 4, 0)
grid.addWidget(self.receive_amount_e, 4, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 4, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([_(i[0]) for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin Cash addresses.'),
_('The Bitcoin Cash address never expires and will always be part of this Electron Cash wallet.'),
])
label = HelpLabel(_('Request &expires'), msg)
label.setBuddy(self.expires_combo)
grid.addWidget(label, 5, 0)
grid.addWidget(self.expires_combo, 5, 1)
self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(True)
self.expires_label.hide()
grid.addWidget(self.expires_label, 5, 1)
self.save_request_button = QPushButton(_('&Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('&Clear'))
self.new_request_button.clicked.connect(self.new_payment_request)
weakSelf = Weak.ref(self)
class MyQRCodeWidget(QRCodeWidget):
def mouseReleaseEvent(slf, e):
''' to make the QRWidget clickable '''
weakSelf() and weakSelf().show_qr_window()
self.receive_qr = MyQRCodeWidget(fixedSize=200)
self.receive_qr.setCursor(QCursor(Qt.PointingHandCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
buttons.addStretch(1)
grid.addLayout(buttons, 6, 2, 1, -1)
self.receive_requests_label = QLabel(_('Re&quests'))
from .request_list import RequestList
self.request_list = RequestList(self)
self.request_list.chkVisible()
self.receive_requests_label.setBuddy(self.request_list)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
vbox2 = QVBoxLayout()
vbox2.setContentsMargins(0,0,0,0)
vbox2.setSpacing(4)
vbox2.addWidget(self.receive_qr, Qt.AlignHCenter|Qt.AlignTop)
self.receive_qr.setToolTip(_('Receive request QR code (click for details)'))
but = uribut = QPushButton(_('Copy &URI'))
def on_copy_uri():
if self.receive_qr.data:
uri = str(self.receive_qr.data)
self.copy_to_clipboard(uri, _('Receive request URI copied to clipboard'), uribut)
but.clicked.connect(on_copy_uri)
but.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
but.setToolTip(_('Click to copy the receive request URI to the clipboard'))
vbox2.addWidget(but)
vbox2.setAlignment(but, Qt.AlignHCenter|Qt.AlignVCenter)
hbox.addLayout(vbox2)
class ReceiveTab(QWidget):
def showEvent(slf, e):
super().showEvent(e)
if e.isAccepted():
wslf = weakSelf()
if wslf:
wslf.check_and_reset_receive_address_if_needed()
w = ReceiveTab()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.address_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr.to_storage_string(), '')
amount = req['amount']
op_return = req.get('op_return')
op_return_raw = req.get('op_return_raw') if not op_return else None
URI = web.create_URI(addr, amount, message, op_return=op_return, op_return_raw=op_return_raw)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
        if not self.receive_address:
            self.show_error(_('No receiving address'))
            return
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
kwargs = {}
opr = self.receive_opreturn_e.text().strip()
if opr:
# save op_return, if any
arg = 'op_return'
if self.receive_opreturn_rawhex_cb.isChecked():
arg = 'op_return_raw'
kwargs[arg] = opr
req = self.wallet.make_payment_request(self.receive_address, amount,
message, expiration, **kwargs)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(self.receive_address)
self.request_list.update()
self.request_list.select_item_by_address(req.get('address')) # when adding items to the view the current selection may not reflect what's in the UI. Make sure it's selected.
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self.top_level_window(), title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests[addr]
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address(frozen_ok=False)
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
# New! Since the button is called 'Clear' now, we let them proceed with a re-used address
addr = self.wallet.get_receiving_address()
else:
# Warn if past gap limit.
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.setCurrentItem(None) # We want the current item to always reflect what's in the UI. So if new, clear selection.
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address = addr
self.receive_message_e.setText('')
self.receive_opreturn_rawhex_cb.setChecked(False)
self.receive_opreturn_e.setText('')
self.receive_amount_e.setAmount(None)
self.update_receive_address_widget()
def update_receive_address_widget(self):
text = ''
if self.receive_address:
text = self.receive_address.to_full_ui_string()
self.receive_address_e.setText(text)
self.cash_account_e.set_cash_acct()
@rate_limited(0.250, ts_after=True) # this function potentially re-computes the QR widget, so it's rate limited to once every 250ms
def check_and_reset_receive_address_if_needed(self):
''' Check to make sure the receive tab is kosher and doesn't contain
an already-used address. This should be called from the showEvent
for the tab. '''
if not self.wallet.use_change or self.cleaned_up:
# if they don't care about change addresses, they are ok
# with re-using addresses, so skip this check.
return
# ok, they care about anonymity, so make sure the receive address
# is always an unused address.
if (not self.receive_address # this should always be defined but check anyway
or self.receive_address in self.wallet.frozen_addresses # make sure it's not frozen
or (self.wallet.get_address_history(self.receive_address) # make a new address if it has a history
and not self.wallet.get_payment_request(self.receive_address, self.config))): # and if they aren't actively editing one in the request_list widget
addr = self.wallet.get_unused_address(frozen_ok=False) # try unused, not frozen
if addr is None:
if self.wallet.is_deterministic():
                    # create a new one if deterministic
addr = self.wallet.create_new_address(False)
else:
# otherwise give up and just re-use one.
addr = self.wallet.get_receiving_address()
self.receive_address = addr
self.update_receive_address_widget()
def clear_receive_tab(self):
self.expires_label.hide()
self.expires_combo.show()
self.request_list.setCurrentItem(None)
self.set_receive_address(self.wallet.get_receiving_address(frozen_ok=False))
def show_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window()
self.qr_window.setAttribute(Qt.WA_DeleteOnClose, True)
weakSelf = Weak.ref(self)
def destroyed_clean(x):
if weakSelf():
weakSelf().qr_window = None
weakSelf().print_error("QR Window destroyed.")
self.qr_window.destroyed.connect(destroyed_clean)
self.update_receive_qr()
if self.qr_window.isMinimized():
self.qr_window.showNormal()
else:
self.qr_window.show()
self.qr_window.raise_()
self.qr_window.activateWindow()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
self.receive_address = addr
self.show_receive_tab()
self.update_receive_address_widget()
def update_receive_qr(self):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
kwargs = {}
if self.receive_opreturn_e.isVisible():
# set op_return if enabled
arg = 'op_return'
if self.receive_opreturn_rawhex_cb.isChecked():
arg = 'op_return_raw'
opret = self.receive_opreturn_e.text()
if opret:
kwargs[arg] = opret
# Special case hack -- see #1473. Omit bitcoincash: prefix from
# legacy address if no other params present in receive request.
if Address.FMT_UI == Address.FMT_LEGACY and not kwargs and not amount and not message:
uri = self.receive_address.to_ui_string()
else:
# Otherwise proceed as normal, prepending bitcoincash: to URI
uri = web.create_URI(self.receive_address, amount, message, **kwargs)
self.receive_qr.setData(uri)
if self.qr_window:
self.qr_window.set_content(self, self.receive_address_e.text(), amount,
message, uri, **kwargs)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
# NB: the translators hopefully will not have too tough a time with this
# *fingers crossed* :)
msg = "<span style=\"font-weight:400;\">" + _('Recipient of the funds.') + " " + \
_("You may enter:"
"<ul>"
"<li> Bitcoin Cash <b>Address</b> <b>★</b>"
"<li> Bitcoin Legacy <b>Address</b> <b>★</b>"
"<li> <b>Cash Account</b> <b>★</b> e.g. <i>satoshi#123</i>"
"<li> <b>Contact name</b> <b>★</b> from the Contacts tab"
"<li> <b>CoinText</b> e.g. <i>cointext:+1234567</i>"
"<li> <b>OpenAlias</b> e.g. <i>satoshi@domain.com</i>"
"</ul><br>"
" <b>★</b> = Supports <b>pay-to-many</b>, where"
" you may optionally enter multiple lines of the form:"
"</span><br><pre>"
" recipient1, amount1 \n"
" recipient2, amount2 \n"
" etc..."
"</pre>")
self.payto_label = payto_label = HelpLabel(_('Pay &to'), msg)
payto_label.setBuddy(self.payto_e)
qmark = ":icons/question-mark-dark.svg" if ColorScheme.dark_scheme else ":icons/question-mark-light.svg"
qmark_help_but = HelpButton(msg, button_text='', fixed_size=False, icon=QIcon(qmark), custom_parent=self)
self.payto_e.addWidget(qmark_help_but, index=0)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter(self.payto_e)
completer.setCaseSensitivity(False)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('&Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
description_label.setBuddy(self.message_e)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg_opreturn = ( _('OP_RETURN data (optional).') + '\n\n'
+ _('Posts a PERMANENT note to the BCH blockchain as part of this transaction.')
+ '\n\n' + _('If you specify OP_RETURN text, you may leave the \'Pay to\' field blank.') )
self.opreturn_label = HelpLabel(_('&OP_RETURN'), msg_opreturn)
grid.addWidget(self.opreturn_label, 3, 0)
self.message_opreturn_e = MyLineEdit()
self.opreturn_label.setBuddy(self.message_opreturn_e)
hbox = QHBoxLayout()
hbox.addWidget(self.message_opreturn_e)
self.opreturn_rawhex_cb = QCheckBox(_('&Raw hex script'))
self.opreturn_rawhex_cb.setToolTip(_('If unchecked, the textbox contents are UTF8-encoded into a single-push script: <tt>OP_RETURN PUSH <text></tt>. If checked, the text contents will be interpreted as a raw hexadecimal script to be appended after the OP_RETURN opcode: <tt>OP_RETURN <script></tt>.'))
hbox.addWidget(self.opreturn_rawhex_cb)
        grid.addLayout(hbox, 3, 1, 1, -1)
self.send_tab_opreturn_widgets = [
self.message_opreturn_e,
self.opreturn_rawhex_cb,
self.opreturn_label,
]
self.from_label = QLabel(_('&From'))
grid.addWidget(self.from_label, 4, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_label.setBuddy(self.from_list)
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 4, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('&Amount'), msg)
amount_label.setBuddy(self.amount_e)
grid.addWidget(amount_label, 5, 0)
grid.addWidget(self.amount_e, 5, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 5, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("&Max"), self.spend_max)
self.max_button.setFixedWidth(140)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 5, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 5, 4)
msg = _('Bitcoin Cash transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('F&ee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
self.spend_max() if self.max_button.isChecked() else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_e_label.setBuddy(self.fee_slider)
self.fee_slider.setFixedWidth(140)
self.fee_custom_lbl = HelpLabel(self.get_custom_fee_text(),
_('This is the fee rate that will be used for this transaction.')
+ "\n\n" + _('It is calculated from the Custom Fee Rate in preferences, but can be overridden from the manual fee edit on this form (if enabled).')
+ "\n\n" + _('Generally, a fee of 1.0 sats/B is a good minimal rate to ensure your transaction will make it into the next block.'))
self.fee_custom_lbl.setFixedWidth(140)
self.fee_slider_mogrifier()
self.fee_e = BTCAmountEdit(self.get_decimal_point)
if not self.config.get('show_fee', False):
self.fee_e.setVisible(False)
self.fee_e.textEdited.connect(self.update_fee)
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
self.fee_e.editingFinished.connect(self.update_fee)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
grid.addWidget(self.fee_e_label, 6, 0)
grid.addWidget(self.fee_slider, 6, 1)
grid.addWidget(self.fee_custom_lbl, 6, 1)
grid.addWidget(self.fee_e, 6, 2)
self.preview_button = EnterButton(_("&Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transactions before signing it.'))
self.send_button = EnterButton(_("&Send"), self.do_send)
self.cointext_button = EnterButton(_("Coin&Text"), self.do_cointext)
self.cointext_button.setToolTip(_('Process CoinText, transforming it into a BIP70 payment request.'))
self.clear_button = EnterButton(_("&Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
buttons.addWidget(self.cointext_button)
grid.addLayout(buttons, 7, 1, 1, 3)
self.payto_e.textChanged.connect(self.update_buttons_on_seed) # hide/unhide cointext button, etc
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
self.message_opreturn_e.textEdited.connect(self.update_fee)
self.message_opreturn_e.textChanged.connect(self.update_fee)
self.message_opreturn_e.editingFinished.connect(self.update_fee)
self.opreturn_rawhex_cb.stateChanged.connect(self.update_fee)
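        # Manually editing an amount un-sticks the Max button; it is re-enabled only
        # while the amount field is empty and editable.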
def reset_max(text):
self.max_button.setChecked(False)
enabled = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enabled)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
extra = run_hook("not_enough_funds_extra", self)
if isinstance(extra, str) and extra:
text += " ({})".format(extra)
elif self.fee_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.DEFAULT
elif self.amount_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.BLUE
else:
amt_color, fee_color = ColorScheme.BLUE, ColorScheme.BLUE
opret_color = ColorScheme.DEFAULT
if self.op_return_toolong:
opret_color = ColorScheme.RED
text = _("OP_RETURN message too large, needs to be no longer than 220 bytes") + (", " if text else "") + text
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.message_opreturn_e.setStyleSheet(opret_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.message_opreturn_e.textChanged.connect(entry_changed)
self.message_opreturn_e.textEdited.connect(entry_changed)
self.message_opreturn_e.editingFinished.connect(entry_changed)
self.opreturn_rawhex_cb.stateChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
self.invoice_list.chkVisible()
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
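        # No recipient entered yet: use one of our own addresses as a stand-in so
        # fee estimation (do_update_fee) can still construct a transaction.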
return (TYPE_ADDRESS, self.wallet.dummy_address())
def get_custom_fee_text(self, fee_rate = None):
if not self.config.has_custom_fee_rate():
return ""
else:
if fee_rate is None: fee_rate = self.config.custom_fee_rate() / 1000.0
return str(round(fee_rate*100)/100) + " sats/B"
@staticmethod
def output_for_opreturn_stringdata(op_return):
if not isinstance(op_return, str):
raise OPReturnError('OP_RETURN parameter needs to be of type str!')
op_return_code = "OP_RETURN "
op_return_encoded = op_return.encode('utf-8')
if len(op_return_encoded) > 220:
raise OPReturnTooLarge(_("OP_RETURN message too large, needs to be no longer than 220 bytes"))
op_return_payload = op_return_encoded.hex()
script = op_return_code + op_return_payload
amount = 0
return (TYPE_SCRIPT, ScriptOutput.from_string(script), amount)
@staticmethod
def output_for_opreturn_rawhex(op_return):
if not isinstance(op_return, str):
raise OPReturnError('OP_RETURN parameter needs to be of type str!')
if op_return == 'empty':
op_return = ''
try:
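            # 0x6a is the OP_RETURN opcode; the user-supplied hex is appended as the raw script that follows it.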
op_return_script = b'\x6a' + bytes.fromhex(op_return.strip())
except ValueError:
raise OPReturnError(_('OP_RETURN script expected to be hexadecimal bytes'))
if len(op_return_script) > 223:
raise OPReturnTooLarge(_("OP_RETURN script too large, needs to be no longer than 223 bytes"))
amount = 0
return (TYPE_SCRIPT, ScriptOutput.protocol_factory(op_return_script), amount)
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = (self.fee_e.isModified()
and (self.fee_e.text() or self.fee_e.hasFocus()))
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
fee_rate = None
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee = self.fee_e.get_amount() if freeze_fee else None
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
try:
opreturn_message = self.message_opreturn_e.text() if self.config.get('enable_opreturn') else None
if opreturn_message:
if self.opreturn_rawhex_cb.isChecked():
outputs.append(self.output_for_opreturn_rawhex(opreturn_message))
else:
outputs.append(self.output_for_opreturn_stringdata(opreturn_message))
tx = self.wallet.make_unsigned_transaction(self.get_coins(), outputs, self.config, fee)
self.not_enough_funds = False
self.op_return_toolong = False
except NotEnoughFunds:
self.not_enough_funds = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except OPReturnTooLarge:
self.op_return_toolong = True
return
except OPReturnError as e:
self.statusBar().showMessage(str(e))
return
except BaseException:
return
if not freeze_fee:
fee = None if self.not_enough_funds else tx.get_fee()
self.fee_e.setAmount(fee)
if self.max_button.isChecked():
amount = tx.output_value()
self.amount_e.setAmount(amount)
if fee is not None:
fee_rate = fee / tx.estimated_size()
self.fee_slider_mogrifier(self.get_custom_fee_text(fee_rate))
def fee_slider_mogrifier(self, text = None):
fee_slider_hidden = self.config.has_custom_fee_rate()
self.fee_slider.setHidden(fee_slider_hidden)
self.fee_custom_lbl.setHidden(not fee_slider_hidden)
if text is not None: self.fee_custom_lbl.setText(text)
def from_list_delete(self, name):
item = self.from_list.currentItem()
if (item and item.data(0, Qt.UserRole) == name
and not item.data(0, Qt.UserRole+1) ):
i = self.from_list.indexOfTopLevelItem(item)
try:
self.pay_from.pop(i)
except IndexError:
                # The list may contain items that are not in self.pay_from if they
                # were added by a plugin using the spendable_coin_filter hook
pass
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
if not item:
return
menu = QMenu()
name = item.data(0, Qt.UserRole)
action = menu.addAction(_("Remove"), lambda: self.from_list_delete(name))
if item.data(0, Qt.UserRole+1):
action.setText(_("Not Removable"))
action.setDisabled(True)
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self, *, spendable=None):
''' Optional kwarg spendable indicates *which* of the UTXOs in the
        self.pay_from list are actually spendable. If this arg is specified,
coins in the self.pay_from list that aren't also in the 'spendable' list
will be grayed out in the UI, to indicate that they will not be used.
Otherwise all coins will be non-gray (default).
(Added for CashShuffle 02/23/2019) '''
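        # Remember which coin (by name) is currently selected so the selection can be
        # restored after the list is rebuilt below.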
sel = self.from_list.currentItem() and self.from_list.currentItem().data(0, Qt.UserRole)
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def name(x):
return "{}:{}".format(x['prevout_hash'], x['prevout_n'])
def format(x):
h = x['prevout_hash']
return '{}...{}:{:d}\t{}'.format(h[0:10], h[-10:],
x['prevout_n'], x['address'])
def grayify(twi):
b = twi.foreground(0)
b.setColor(Qt.gray)
for i in range(twi.columnCount()):
twi.setForeground(i, b)
def new(item, is_unremovable=False):
ret = QTreeWidgetItem( [format(item), self.format_amount(item['value']) ])
ret.setData(0, Qt.UserRole, name(item))
ret.setData(0, Qt.UserRole+1, is_unremovable)
return ret
for item in self.pay_from:
twi = new(item)
if spendable is not None and item not in spendable:
grayify(twi)
self.from_list.addTopLevelItem(twi)
if name(item) == sel:
self.from_list.setCurrentItem(twi)
if spendable is not None: # spendable may be None if no plugin filtered coins.
for item in spendable:
# append items added by the plugin to the spendable list
# at the bottom. These coins are marked as "not removable"
# in the UI (the plugin basically insisted these coins must
# be spent with the other coins in the list for privacy).
if item not in self.pay_from:
twi = new(item, True)
self.from_list.addTopLevelItem(twi)
if name(item) == sel:
self.from_list.setCurrentItem(twi)
def get_contact_payto(self, contact : Contact) -> str:
assert isinstance(contact, Contact)
_type, label = contact.type, contact.name
emoji_str = ''
mod_type = _type
mine_str = ''
if _type.startswith('cashacct'): # picks up cashacct and the cashacct_W pseudo-contacts
if _type == 'cashacct_T':
# temporary "pending verification" registration pseudo-contact. Never offer it as a completion!
return None
mod_type = 'cashacct'
info = self.wallet.cashacct.get_verified(label)
if info:
emoji_str = f' {info.emoji}'
if _type == 'cashacct_W':
mine_str = ' [' + _('Mine') + '] '
else:
self.print_error(label, "not found")
# could not get verified contact, don't offer it as a completion
return None
elif _type == 'openalias':
return contact.address
        return (label + emoji_str + ' ' + mine_str + '<' + contact.address + '>') if mod_type in ('address', 'cashacct') else None
def update_completions(self):
l = []
for contact in self.contact_list.get_full_contacts(include_pseudo=True):
s = self.get_contact_payto(contact)
if s is not None: l.append(s)
l.sort(key=lambda x: x.lower()) # case-insensitive sort
self.completions.setStringList(l)
def protected(func):
        '''Password request wrapper. The password is passed to the function
        as the 'password' named argument. "None" indicates an unencrypted
        wallet. If the user cancels the password request, the wrapped function
        is not called at all (the optional on_pw_cancel callback, if supplied,
        is invoked instead). An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
on_pw_cancel = kwargs.pop('on_pw_cancel', None)
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
if callable(on_pw_cancel):
on_pw_cancel()
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def read_send_tab(self):
        isInvoice = False
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
            isInvoice = True
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
if self.payto_e.is_alias and not self.payto_e.validated:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
try:
# handle op_return if specified and enabled
opreturn_message = self.message_opreturn_e.text()
if opreturn_message:
if self.opreturn_rawhex_cb.isChecked():
outputs.append(self.output_for_opreturn_rawhex(opreturn_message))
else:
outputs.append(self.output_for_opreturn_stringdata(opreturn_message))
except OPReturnTooLarge as e:
self.show_error(str(e))
return
except OPReturnError as e:
self.show_error(str(e))
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if amount is None:
self.show_error(_('Invalid Amount'))
return
freeze_fee = self.fee_e.isVisible() and self.fee_e.isModified() and (self.fee_e.text() or self.fee_e.hasFocus())
fee = self.fee_e.get_amount() if freeze_fee else None
coins = self.get_coins(isInvoice)
return outputs, fee, label, coins
_cointext_popup_kill_tab_changed_connection = None
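    # Holds the signal connection (made at most once per window) that dismisses the
    # CoinText popup when the user switches tabs.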
def do_cointext(self):
''' This is called by the cointext button 'clicked' signal and it
initiates the processing of the cointext URL. This should only be
called if self.payto_e.cointext is not None, otherwise it will do
nothing. '''
if self.payto_e.cointext and not self.payment_request:
if self.gui_object.warn_if_no_network(self):
return
phone = self.payto_e.cointext
sats = self.amount_e.get_amount()
if sats:
url = "https://pay.cointext.io/p/{}/{}".format(phone, sats)
def get_cointext_pr():
# Runs in thread
self.print_error("CoinText URL", url)
pr = paymentrequest.get_payment_request(url) # raises on error
return pr
def on_success(pr):
# Runs in main thread
if pr:
if pr.error:
self.print_error("CoinText ERROR", pr.error)
self.show_error(_("There was an error processing the CoinText. Please check the phone number and try again."))
return
self.print_error("CoinText RESULT", repr(pr))
self.prepare_for_payment_request()
def show_popup():
if not self.send_button.isVisible():
# likely a watching-only wallet, in which case
# showing the popup label for the send button
# leads to unspecified position for the button
return
show_it = partial(
ShowPopupLabel,
text=_("Please review payment before sending CoinText"),
target=self.send_button, timeout=15000.0,
name="CoinTextPopup",
pointer_position=PopupWidget.LeftSide,
activation_hides=True, track_target=True,
dark_mode = ColorScheme.dark_scheme
)
if not self._cointext_popup_kill_tab_changed_connection:
# this ensures that if user changes tabs, the popup dies
# ... it is only connected once per instance lifetime
self._cointext_popup_kill_tab_changed_connection = self.tabs.currentChanged.connect(lambda: KillPopupLabel("CoinTextPopup"))
QTimer.singleShot(0, show_it)
pr.request_ok_callback = show_popup
self.on_pr(pr)
def on_error(exc):
self.print_error("CoinText EXCEPTION", repr(exc))
self.on_error(exc)
WaitingDialog(self.top_level_window(),
_("Retrieving CoinText info, please wait ..."),
get_cointext_pr, on_success, on_error)
else:
self.show_error(_('CoinText: Please specify an amount'))
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee, tx_desc, coins = r
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except ExcessiveFee:
self.show_message(_("Your fee is too high. Max is 50 sat/byte."))
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
#if fee < self.wallet.relayfee() * tx.estimated_size() / 1000 and tx.requires_fee(self.wallet):
#self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
#return
if preview:
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = 2 * self.config.max_fee_rate()
# IN THE FUTURE IF WE WANT TO APPEND SOMETHING IN THE MSG ABOUT THE FEE, CODE IS COMMENTED OUT:
#if fee > confirm_rate * tx.estimated_size() / 1000:
# msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if (fee < (tx.estimated_size())):
msg.append(_('Warning') + ': ' + _("You're using a fee of less than 1.0 sats/B. It may take a very long time to confirm."))
tx.ephemeral['warned_low_fee_already'] = True
if self.config.get('enable_opreturn') and self.message_opreturn_e.text():
msg.append(_("You are using an OP_RETURN message. This gets permanently written to the blockchain."))
if self.wallet.has_password():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx, tx_desc)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc, *, callback=None):
def broadcast_thread():
# non-GUI thread
status = False
msg = "Failed"
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
if pr:
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_payment(str(tx), refund_address)
if not ack_status:
if ack_msg == "no url":
# "no url" hard-coded in send_payment method
# it means merchant doesn't need the tx sent to him
# since he didn't specify a POST url.
# so we just broadcast and rely on that result status.
ack_msg = None
else:
return False, ack_msg
# at this point either ack_status is True or there is "no url"
# and we proceed anyway with the broadcast
status, msg = self.network.broadcast_transaction(tx)
# figure out what to return...
msg = ack_msg or msg # prefer the merchant's ack_msg over the broadcast msg, but fallback to broadcast msg if no ack_msg.
status = bool(ack_status or status) # if both broadcast and merchant ACK failed -- it's a failure. if either succeeded -- it's a success
if status:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
else:
# Not a PR, just broadcast.
status, msg = self.network.broadcast_transaction(tx)
return status, msg
# Check fee and warn if it's below 1.0 sats/B (and not warned already)
fee = None
try: fee = tx.get_fee()
except: pass # no fee info available for tx
# Check fee >= size otherwise warn. FIXME: If someday network relay
# rules change to be other than 1.0 sats/B minimum, this code needs
# to be changed.
if (isinstance(fee, int) and tx.is_complete() and fee < len(str(tx))//2
and not tx.ephemeral.get('warned_low_fee_already')):
msg = _('Warning') + ': ' + _("You're using a fee of less than 1.0 sats/B. It may take a very long time to confirm.") + "\n\n" + _("Proceed?")
if not self.question(msg, title = _("Low Fee")):
return
# /end fee check
# Capture current TL window; override might be removed on return
parent = self.top_level_window()
if self.gui_object.warn_if_no_network(self):
# Don't allow a useless broadcast when in offline mode. Previous to this we were getting an exception on broadcast.
return
elif not self.network.is_connected():
# Don't allow a potentially very slow broadcast when obviously not connected.
parent.show_error(_("Not connected"))
return
def broadcast_done(result):
# GUI thread
cb_result = False
if result:
status, msg = result
if status:
cb_result = True
buttons, copy_index, copy_link = [ _('Ok') ], None, ''
try: txid = tx.txid() # returns None if not is_complete, but may raise potentially as well
except: txid = None
if txid is not None:
if tx_desc is not None:
self.wallet.set_label(txid, tx_desc)
copy_link = web.BE_URL(self.config, 'tx', txid)
if copy_link:
# tx is complete and there is a copy_link
buttons.insert(0, _("Copy link"))
copy_index = 0
if parent.show_message(_('Payment sent.') + '\n' + msg,
buttons = buttons,
defaultButton = buttons[-1],
escapeButton = buttons[-1]) == copy_index:
# There WAS a 'Copy link' and they clicked it
self.copy_to_clipboard(copy_link, _("Block explorer link copied to clipboard"), self.top_level_window())
self.invoice_list.update()
self.do_clear()
else:
if msg.startswith("error: "):
msg = msg.split(" ", 1)[-1] # take the last part, sans the "error: " prefix
parent.show_error(msg)
if callback:
callback(cb_result)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
result = dialog.exec_()
dialog.setParent(None)
if not result:
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.cointext = None
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.max_button.setDisabled(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
# New! Payment requests have an optional (may not be there!) attribute
# 'request_ok_callback' which takes 0 args and is called on request ok
# This facility was needed to do the CoinTextPopup label properly.
cb = getattr(self.payment_request, 'request_ok_callback', None)
if callable(cb):
cb()
def payment_request_error(self):
request_error = (self.payment_request and self.payment_request.error) or ''
self.payment_request = None
self.print_error("PaymentRequest error:", request_error)
self.show_error(_("There was an error processing the payment request"), rich_text=False, detail_text=request_error)
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = web.parse_URI(URI, self.on_pr, strict=True)
except web.ExtraParametersInURIWarning as e:
out = e.args[0] # out dict is in e.args[0]
extra_params = e.args[1:]
self.show_warning(ngettext('Extra parameter in URI was ignored:\n\n{extra_params}',
'Extra parameters in URI were ignored:\n\n{extra_params}',
len(extra_params)
).format(extra_params=', '.join(extra_params)))
# fall through ...
except web.BadURIParameter as e:
extra_info = (len(e.args) > 1 and str(e.args[1])) or ''
self.print_error('Bad URI Parameter:', *[repr(i) for i in e.args])
if extra_info:
extra_info = '\n\n' + extra_info # prepend newlines
self.show_error(_('Bad parameter: {bad_param_name}{extra_info}').format(bad_param_name=e.args[0], extra_info=extra_info))
return
except web.DuplicateKeyInURIError as e:
# this exception always has a translated message as args[0]
# plus a list of keys as args[1:], see web.parse_URI
self.show_error(e.args[0] + ":\n\n" + ', '.join(e.args[1:]))
return
except Exception as e:
self.show_error(_('Invalid bitcoincash URI:') + '\n\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
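# An 'r' (payment request URL) or a signed request (name + sig) means this URI
# carries a payment request; freeze the form via prepare_for_payment_request()
# and let the on_pr callback (passed to parse_URI above) populate it.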
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
op_return = out.get('op_return')
op_return_raw = out.get('op_return_raw')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address or URI.strip().lower().split(':', 1)[0] in web.parseable_schemes():
# if address, set the payto field to the address.
# if *not* address, then we set the payto field to the empty string
# only IFF it was bitcoincash: and/or cashacct:, see issue #1131.
self.payto_e.setText(address or '')
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
if op_return:
self.message_opreturn_e.setText(op_return)
self.message_opreturn_e.setHidden(False)
self.opreturn_rawhex_cb.setHidden(False)
self.opreturn_rawhex_cb.setChecked(False)
self.opreturn_label.setHidden(False)
elif op_return_raw is not None:
# 'is not None' allows blank value.
# op_return_raw is secondary precedence to op_return
if not op_return_raw:
op_return_raw='empty'
self.message_opreturn_e.setText(op_return_raw)
self.message_opreturn_e.setHidden(False)
self.opreturn_rawhex_cb.setHidden(False)
self.opreturn_rawhex_cb.setChecked(True)
self.opreturn_label.setHidden(False)
elif not self.config.get('enable_opreturn'):
self.message_opreturn_e.setText('')
self.message_opreturn_e.setHidden(True)
self.opreturn_rawhex_cb.setHidden(True)
self.opreturn_label.setHidden(True)
if address and URI.lower().startswith(cashacct.URI_SCHEME + ':'):
# this is important so that cashacct: URIs get insta-resolved
# (they only get resolved when payto_e loses focus)
self.message_e.setFocus()
def do_clear(self):
''' Clears the send tab, resetting its UI state to its initial state.'''
KillPopupLabel("CoinTextPopup") # just in case it was alive
self.max_button.setChecked(False)
self.not_enough_funds = False
self.op_return_toolong = False
self.payment_request = None
self.payto_e.cointext = None
self.payto_e.is_pr = False
self.payto_e.is_alias, self.payto_e.validated = False, False # clear flags to avoid bad things
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e, self.message_opreturn_e]:
e.setText('')
e.setFrozen(False)
self.payto_e.setHidden(False)
self.payto_label.setHidden(False)
self.max_button.setDisabled(False)
self.opreturn_rawhex_cb.setChecked(False)
self.opreturn_rawhex_cb.setDisabled(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.message_opreturn_e.setVisible(self.config.get('enable_opreturn', False))
self.opreturn_rawhex_cb.setVisible(self.config.get('enable_opreturn', False))
self.opreturn_label.setVisible(self.config.get('enable_opreturn', False))
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_coin_state(self, utxos, freeze):
self.wallet.set_frozen_coin_state(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_converter_tab(self):
source_address = QLineEdit()
cash_address = ButtonsLineEdit()
cash_address.addCopyButton()
cash_address.setReadOnly(True)
legacy_address = ButtonsLineEdit()
legacy_address.addCopyButton()
legacy_address.setReadOnly(True)
widgets = [
(cash_address, Address.FMT_CASHADDR),
(legacy_address, Address.FMT_LEGACY),
]
def convert_address():
try:
addr = Address.from_string(source_address.text().strip())
except:
addr = None
for widget, fmt in widgets:
if addr:
widget.setText(addr.to_full_string(fmt))
else:
widget.setText('')
source_address.textChanged.connect(convert_address)
w = QWidget()
grid = QGridLayout()
grid.setSpacing(15)
grid.setColumnStretch(1, 2)
grid.setColumnStretch(2, 1)
label = QLabel(_('&Address to convert'))
label.setBuddy(source_address)
grid.addWidget(label, 0, 0)
grid.addWidget(source_address, 0, 1)
label = QLabel(_('&Cash address'))
label.setBuddy(cash_address)
grid.addWidget(label, 1, 0)
grid.addWidget(cash_address, 1, 1)
label = QLabel(_('&Legacy address'))
label.setBuddy(legacy_address)
grid.addWidget(label, 2, 0)
grid.addWidget(legacy_address, 2, 1)
w.setLayout(grid)
label = WWLabel(_(
"This tool helps convert between address formats for Bitcoin "
"Cash addresses.\nYou are encouraged to use the 'Cash address' "
"format."
))
vbox = QVBoxLayout()
vbox.addWidget(label)
vbox.addWidget(w)
vbox.addStretch(1)
w = QWidget()
w.setLayout(vbox)
return w
def create_list_tab(self, l, list_header=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if list_header:
hbox = QHBoxLayout()
for b in list_header:
hbox.addWidget(b)
hbox.addStretch()
vbox.addLayout(hbox)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
return self.create_list_tab(l)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?"
.format(addr.to_ui_string()))):
self.wallet.delete_address(addr)
self.update_tabs()
self.update_status()
self.clear_receive_tab()
def get_coins(self, isInvoice = False):
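# Prefer coins the user explicitly selected in the 'Pay From' list; otherwise
# fall back to all spendable coins in the wallet.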
coins = []
if self.pay_from:
coins = self.pay_from.copy()
else:
coins = self.wallet.get_spendable_coins(None, self.config, isInvoice)
run_hook("spendable_coin_filter", self, coins) # may modify coins -- used by CashShuffle if in shuffle = ENABLED mode.
if self.pay_from:
# coins may have been filtered, so indicate this in the UI
self.redraw_from_list(spendable=coins)
return coins
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
run_hook('on_spend_coins', self, coins) # CashShuffle: will set the mode of send tab to coins[0]'s shuffled/unshuffled state
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.do_clear()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, contacts : List[Contact]):
paytos = []
for contact in contacts:
s = self.get_contact_payto(contact)
if s is not None: paytos.append(s)
self.payto_payees(paytos)
def payto_payees(self, payees : List[str]):
''' Like payto_contacts except it accepts a list of free-form strings
rather than requiring a list of Contact objects '''
self.show_send_tab()
if len(payees) == 1:
self.payto_e.setText(payees[0])
self.amount_e.setFocus()
else:
text = "\n".join([payee + ", 0" for payee in payees])
self.payto_e.setText(text)
self.payto_e.setFocus()
def resolve_cashacct(self, name):
''' Throws up a WaitingDialog while it resolves a Cash Account.
Goes out to network, verifies all tx's.
Returns: a tuple of: (Info, Minimally_Encoded_Formatted_AccountName)
Argument `name` should be a Cash Account name string of the form:
name#number.123
name#number
name#number.; etc
If the result would be ambiguous, that is considered an error, so enough
of the account name#number.collision_hash needs to be specified to
unambiguously resolve the Cash Account.
On failure throws up an error window and returns None.'''
return cashacctqt.resolve_cashacct(self, name)
def set_contact(self, label, address, typ='address', replace=None) -> Contact:
''' Returns a reference to the newly inserted Contact object.
replace is optional and, if specified, replaces an existing contact;
otherwise a new one is added.
Note that duplicate contacts will not be added multiple times, but in
that case the returned value would still be a valid Contact.
Returns None on failure.'''
assert typ in ('address', 'cashacct')
contact = None
if typ == 'cashacct':
tup = self.resolve_cashacct(label) # this displays an error message for us
if not tup:
self.contact_list.update() # Displays original
return
info, label = tup
address = info.address.to_ui_string()
contact = Contact(name=label, address=address, type=typ)
elif not Address.is_valid(address):
# Bad 'address' code path
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return
else:
# Good 'address' code path...
contact = Contact(name=label, address=address, type=typ)
assert contact
if replace != contact:
if self.contacts.has(contact):
self.show_error(_(f"A contact named {contact.name} with the same address and type already exists."))
self.contact_list.update()
return replace or contact
self.contacts.add(contact, replace_old=replace, unique=True)
self.contact_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.update_completions()
# The contact has changed, update any addresses that are displayed with the old information.
run_hook('update_contact2', contact, replace)
return contact
def delete_contacts(self, contacts):
n = len(contacts)
qtext = ''
if n <= 3:
def fmt(contact):
if len(contact.address) > 20:
addy = contact.address[:10] + '…' + contact.address[-10:]
else:
addy = contact.address
return f"{contact.name} <{addy}>"
names = [fmt(contact) for contact in contacts]
contact_str = ", ".join(names)
qtext = _("Remove {list_of_contacts} from your contact list?").format(list_of_contacts = contact_str)
else:
# Note: we didn't use ngettext here for plural check because n > 1 in this branch
qtext = _("Remove {number_of_contacts} contacts from your contact list?").format(number_of_contacts=n)
if not self.question(qtext):
return
removed_entries = []
for contact in contacts:
if self.contacts.remove(contact):
removed_entries.append(contact)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.contact_list.update()
self.update_completions()
run_hook('delete_contacts2', removed_entries)
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self.top_level_window(), _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1].to_ui_string(), pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
weakD = Weak.ref(d)
def do_export():
ext = pr.export_file_ext()
fn = self.getSaveFileName(_("Save invoice to file"), "*." + ext)
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.export_file_data())
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.invoice_list.update()
d = weakD()
if d: d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
d.setParent(None) # So Python can GC
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console(wallet=self.wallet)
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
set_json = Weak(self.console.set_json)
c = commands.Commands(self.config, self.wallet, self.network, lambda: set_json(True))
methods = {}
password_getter = Weak(self.password_dialog)
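# Factory function so each generated lambda binds its own 'method' (avoiding
# the late-binding closure pitfall) and injects the GUI password getter.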
def mkfunc(f, method):
return lambda *args, **kwargs: f(method, *args, password_getter=password_getter,
**kwargs)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
sb.addWidget(self.balance_label)
self._search_box_spacer = QWidget()
self._search_box_spacer.setFixedWidth(6) # 6 px spacer
self.search_box = QLineEdit()
self.search_box.setPlaceholderText(_("Search wallet, {key}F to hide").format(key='Ctrl+' if sys.platform != 'darwin' else '⌘'))
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box, 1)
self.update_available_button = StatusBarButton(QIcon(":icons/electron-cash-update.svg"), _("Update available, click for details"), lambda: self.gui_object.show_update_checker(self, skip_check=True))
self.update_available_button.setStatusTip(_("An Electron Cash update is available"))
sb.addPermanentWidget(self.update_available_button)
self.update_available_button.setVisible(bool(self.gui_object.new_version_available)) # if hidden now gets unhidden by on_update_available when a new version comes in
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
self.cashshuffle_status_button = StatusBarButton(
self.cashshuffle_icon(),
'', # ToolTip will be set in update_cashshuffle code
self.cashshuffle_icon_leftclick
)
self.cashshuffle_toggle_action = QAction("", self.cashshuffle_status_button) # action text will get set in update_cashshuffle_icon()
self.cashshuffle_toggle_action.triggered.connect(self.toggle_cashshuffle)
self.cashshuffle_settings_action = QAction("", self.cashshuffle_status_button)
self.cashshuffle_settings_action.triggered.connect(self.show_cashshuffle_settings)
self.cashshuffle_viewpools_action = QAction(_("View pools..."), self.cashshuffle_status_button)
self.cashshuffle_viewpools_action.triggered.connect(self.show_cashshuffle_pools)
self.cashshuffle_status_button.addAction(self.cashshuffle_viewpools_action)
self.cashshuffle_status_button.addAction(self.cashshuffle_settings_action)
self.cashshuffle_separator_action = sep = QAction(self.cashshuffle_status_button); sep.setSeparator(True)
self.cashshuffle_status_button.addAction(sep)
self.cashshuffle_status_button.addAction(self.cashshuffle_toggle_action)
self.cashshuffle_status_button.setContextMenuPolicy(Qt.ActionsContextMenu)
sb.addPermanentWidget(self.cashshuffle_status_button)
self.addr_converter_button = StatusBarButton(
self.cashaddr_icon(),
_("Toggle CashAddr Display"),
self.toggle_cashaddr_status_bar
)
self.update_cashaddr_icon()
sb.addPermanentWidget(self.addr_converter_button)
self.addr_converter_button.setHidden(self.gui_object.is_cashaddr_status_button_hidden())
self.gui_object.cashaddr_status_button_hidden_signal.connect(self.addr_converter_button.setHidden)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.svg"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
weakSelf = Weak.ref(self)
gui_object = self.gui_object
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.svg"), _("Network"), lambda: gui_object.show_network_dialog(weakSelf()))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def on_update_available(self, b):
self.update_available_button.setVisible(bool(b))
# The popup label won't really be shown unless this window is
# on top.. but regardless we give each label a unique internal name
# so they don't interfere with each other.
lblName = "UpdateAvailable_" + self.diagnostic_name()
if b:
ShowPopupLabel(name = lblName,
text="<center><b>{}</b><br><small>{}</small></center>".format(_("Update Available"),_("Click for details")),
target=self.update_available_button,
timeout=20000, onClick=self.update_available_button.click,
onRightClick=self.update_available_button.click,
dark_mode = ColorScheme.dark_scheme)
else:
# Immediately kills any extant labels
KillPopupLabel(lblName)
def update_lock_icon(self):
icon = QIcon(":icons/lock.svg") if self.wallet.has_password() else QIcon(":icons/unlock.svg")
tip = _('Wallet Password') + ' - '
tip += _('Enabled') if self.wallet.has_password() else _('Disabled')
self.password_button.setIcon(icon)
self.password_button.setStatusTip(tip)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.send_button.setVisible(not self.wallet.is_watching_only() and not self.payto_e.cointext)
self.preview_button.setVisible(not self.payto_e.cointext)
self.cointext_button.setVisible(bool(self.payto_e.cointext))
def change_password_dialog(self):
from .password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self.top_level_window(), self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
run_hook("on_new_password", self, password, new_password)
except BaseException as e:
self.show_error(str(e))
return
except:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def get_passphrase_dialog(self, msg : str, title : str = None, *, permit_empty = False) -> str:
from .password_dialog import PassphraseDialog
d = PassphraseDialog(self.wallet, self.top_level_window(), msg, title, permit_empty = permit_empty)
return d.run()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.balance_label.setHidden(True)
self.statusBar().insertWidget(0, self._search_box_spacer)
self._search_box_spacer.show()
self.search_box.setFocus(1)
if self.search_box.text():
self.do_search(self.search_box.text())
else:
self._search_box_spacer.hide()
self.statusBar().removeWidget(self._search_box_spacer)
self.balance_label.setHidden(False)
self.do_search('')
def do_search(self, t):
'''Apply search text to all tabs. FIXME: if a plugin is loaded later,
it will not receive the search filter -- but most plugins I know about
do not support searchable_list anyway, so hopefully it's a non-issue.'''
for i in range(self.tabs.count()):
tab = self.tabs.widget(i)
try:
tab.searchable_list.filter(t)
except (AttributeError, TypeError):
pass
def new_contact_dialog(self):
d = WindowModalDialog(self.top_level_window(), _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(350)
line2 = QLineEdit()
line2.setFixedWidth(350)
grid.addWidget(QLabel(_("Name")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Address")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
name = line1.text().strip()
address = line2.text().strip()
prefix = networks.net.CASHADDR_PREFIX.lower() + ':'
if address.lower().startswith(prefix):
address = address[len(prefix):]
self.set_contact(name, address)
def lookup_cash_account_dialog(self):
blurb = "<br><br>" + _('Enter a string of the form <b>name#<i>number</i></b>')
cashacctqt.lookup_cash_account_dialog(self, self.wallet, blurb=blurb,
add_to_contacts_button = True, pay_to_button = True)
def show_master_public_keys(self):
dialog = WindowModalDialog(self.top_level_window(), _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton()
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path) # implicitly also calls stop_wallet
self.update_recently_visited(wallet_path) # this ensures it's deleted from the menu
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
self.close()
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self.top_level_window(), seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
d.setParent(None) # Help Python GC this sooner rather than later
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self.top_level_window(), _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel('{}: {}'.format(_("Address"), address)))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
pk_lbl = QLabel(_("Private key") + ':')
vbox.addWidget(pk_lbl)
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton()
# BIP38 Encrypt Button
def setup_encrypt_button():
encrypt_but = QPushButton(_("Encrypt BIP38") + "...")
f = encrypt_but.font(); f.setPointSize(f.pointSize()-1); encrypt_but.setFont(f) # make font -= 1
encrypt_but.setEnabled(bool(bitcoin.Bip38Key.canEncrypt()))
encrypt_but.setToolTip(_("Encrypt this private key using BIP38 encryption")
if encrypt_but.isEnabled() else
_("BIP38 encryption unavailable: install pycryptodomex to enable"))
border_color = ColorScheme.DEFAULT.as_color(False)
border_color.setAlphaF(0.65)
encrypt_but_ss_en = (
keys_e.styleSheet() + (("QPushButton { border: 1px solid %s; border-radius: 6px; padding: 2px; margin: 2px; } "
"QPushButton:hover { border: 1px solid #3daee9; } "
"QPushButton:disabled { border: 1px solid transparent; ") % (border_color.name(QColor.HexArgb)))
)
encrypt_but_ss_dis = ( keys_e.styleSheet() )
encrypt_but.setStyleSheet(encrypt_but_ss_en if encrypt_but.isEnabled() else encrypt_but_ss_dis)
def on_encrypt():
passphrase = self.get_passphrase_dialog(
msg = (
_("Specify a passphrase to use for BIP38 encryption.") + "\n" +
_("Save this passphrase if you save the generated key so you may decrypt it later.")
)
)
if not passphrase:
return
try:
bip38 = str(bitcoin.Bip38Key.encrypt(pk, passphrase))
keys_e.setText(bip38)
encrypt_but.setEnabled(False)
encrypt_but.setStyleSheet(encrypt_but_ss_dis)
pk_lbl.setText( _("BIP38 Key") + ":" )
self.show_message(_("WIF key has been encrypted using BIP38.\n\n"
"You may save this encrypted key to a file or print out its QR code and/or text.\n\n"
"It is strongly encrypted with the passphrase you specified and safe to store electronically. "
"However, the passphrase should be stored securely and not shared with anyone."))
except Exception as e:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
encrypt_but.clicked.connect(on_encrypt)
keys_e.addWidget(encrypt_but, 0)
setup_encrypt_button()
# /BIP38 Encrypt Button
vbox.addWidget(keys_e)
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=address.to_script().hex())
rds_e.addCopyButton()
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
try:
addr = Address.from_string(address)
except:
self.show_message(_('Invalid Bitcoin Cash address.'))
return
if addr.kind != addr.ADDR_P2PKH:
msg_sign = ( _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' +
_('The operation is undefined. Not just in Electron Cash, but in general.') )
self.show_message(_('Cannot sign messages with this type of address.') + '\n\n' + msg_sign)
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(addr):
self.show_message(_('Address not in wallet.'))
return
task = partial(self.wallet.sign_message, addr, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
try:
address = Address.from_string(address.text().strip())
except:
self.show_message(_('Invalid Bitcoin Cash address.'))
return
message = message.toPlainText().strip().encode('utf-8')
try:
# This can throw on invalid base64
sig = base64.b64decode(signature.toPlainText())
verified = bitcoin.verify_message(address, sig, message)
except:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=None):
d = WindowModalDialog(self.top_level_window(), _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address.to_ui_string() if address else '')
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf-8')))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_warning(str(e))
def encrypt_message(self, address=None):
d = WindowModalDialog(self.top_level_window(), _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
if not isinstance(pubkey, str):
pubkey = pubkey.to_ui_string()
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
return PasswordDialog(parent, msg).run()
def tx_from_text(self, txt):
from electroncash.transaction import tx_from_str
try:
txt_tx = tx_from_str(txt)
tx = Transaction(txt_tx, sign_schnorr=self.wallet.is_schnorr_enabled())
tx.deserialize()
if self.wallet:
my_coins = self.wallet.get_spendable_coins(None, self.config)
my_outpoints = [vin['prevout_hash'] + ':' + str(vin['prevout_n']) for vin in my_coins]
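# Annotate any inputs we own with their known value so the fee can be
# computed and displayed for this (possibly unsigned) transaction.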
for i, txin in enumerate(tx.inputs()):
outpoint = txin['prevout_hash'] + ':' + str(txin['prevout_n'])
if outpoint in my_outpoints:
my_index = my_outpoints.index(outpoint)
tx._inputs[i]['value'] = my_coins[my_index]['value']
return tx
except:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_critical(_("Electron Cash was unable to parse your transaction"))
return
# Due to the asynchronous nature of the qr reader we need to keep the
# dialog instance as member variable to prevent reentrancy/multiple ones
# from being presented at once.
_qr_dialog = None
def read_tx_from_qrcode(self):
if self._qr_dialog:
# Re-entrancy prevention -- there is some lag between when the user
# taps the QR button and the modal dialog appears. We want to
# prevent multiple instances of the dialog from appearing, so we
# must do this.
self.print_error("Warning: QR dialog is already presented, ignoring.")
return
if self.gui_object.warn_if_cant_import_qrreader(self):
return
from electroncash import get_config
from .qrreader import QrReaderCameraDialog
data = ''
self._qr_dialog = None
try:
self._qr_dialog = QrReaderCameraDialog(parent=self.top_level_window())
def _on_qr_reader_finished(success: bool, error: str, result):
if self._qr_dialog:
self._qr_dialog.deleteLater(); self._qr_dialog = None
if not success:
if error:
self.show_error(error)
return
if not result:
return
# if the user scanned a bitcoincash URI
if result.lower().startswith(networks.net.CASHADDR_PREFIX + ':'):
self.pay_to_URI(result)
return
# else if the user scanned an offline signed tx
try:
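# QR payloads for transactions are base43-encoded (Electrum-family
# convention); decode back to hex before parsing.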
result = bh2u(bitcoin.base_decode(result, length=None, base=43))
tx = self.tx_from_text(result) # will show an error dialog on error
if not tx:
return
except BaseException as e:
self.show_error(str(e))
return
self.show_transaction(tx)
self._qr_dialog.qr_finished.connect(_on_qr_reader_finished)
self._qr_dialog.start_scan(get_config().get_video_device())
except BaseException as e:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self._qr_dialog = None
self.show_error(str(e))
def read_tx_from_file(self, *, fileName = None):
fileName = fileName or self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r", encoding='utf-8') as f:
file_content = f.read()
file_content = file_content.strip()
tx_file_dict = json.loads(str(file_content))
except (ValueError, IOError, OSError, json.decoder.JSONDecodeError) as reason:
self.show_critical(_("Electron Cash was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
tx = self.tx_from_text(file_content)
return tx
def do_process_from_text(self):
from electroncash.transaction import SerializationError
text = text_dialog(self.top_level_window(), _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
try:
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electron Cash was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_file(self, *, fileName = None):
from electroncash.transaction import SerializationError
try:
tx = self.read_tx_from_file(fileName=fileName)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electron Cash was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_txid(self, *, txid=None, parent=None, tx_desc=None):
parent = parent or self
if self.gui_object.warn_if_no_network(parent):
return
from electroncash import transaction
ok = txid is not None
if not ok:
txid, ok = QInputDialog.getText(parent, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
ok, r = self.network.get_raw_tx_for_txid(txid, timeout=10.0)
if not ok:
parent.show_message(_("Error retrieving transaction") + ":\n" + r)
return
tx = transaction.Transaction(r, sign_schnorr=self.wallet.is_schnorr_enabled()) # note that presumably the tx is already signed if it comes from blockchain so this sign_schnorr parameter is superfluous, but here to satisfy my OCD -Calin
self.show_transaction(tx, tx_desc=tx_desc)
def export_bip38_dialog(self):
''' Convenience method. Simply calls self.export_privkeys_dialog(bip38=True) '''
self.export_privkeys_dialog(bip38 = True)
@protected
def export_privkeys_dialog(self, password, *, bip38=False):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
if bip38:
self.show_error(_('WARNING: This is a multi-signature wallet.') + '\n' +
_("It cannot be used with BIP38 encrypted keys."))
return
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It can not be "backed up" by simply exporting these private keys.'))
if bip38:
if not bitcoin.Bip38Key.canEncrypt() or not bitcoin.Bip38Key.isFast():
self.show_error(_("BIP38 Encryption is not available. Please install 'pycryptodomex' and restart Electron Cash to enable BIP38."))
return
passphrase = self.get_passphrase_dialog(
msg = (
_("You are exporting your wallet's private keys as BIP38 encrypted keys.") + "\n\n" +
_("You must specify a passphrase to use for encryption.") + "\n" +
_("Save this passphrase so you may decrypt your BIP38 keys later.")
)
)
if not passphrase:
# user cancel
return
bip38 = passphrase # overwrite arg with passphrase.. for use down below ;)
class MyWindowModalDialog(WindowModalDialog):
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
d = MyWindowModalDialog(self.top_level_window(), _('Private keys'))
weak_d = Weak.ref(d)
d.setObjectName('WindowModalDialog - Private Key Export')
destroyed_print_error(d) # track object lifecycle
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
lines = [ _("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties.") ]
if bip38:
del lines[0] # No need to scream-WARN them since BIP38 *are* encrypted
msg = '\n'.join(lines)
vbox.addWidget(QLabel(msg))
if bip38:
wwlbl = WWLabel()
def set_ww_txt(pf_shown=False):
if pf_shown:
pf_text = ("<font face='{monoface}' size=+1><b>"
+ bip38
+ '</b></font> <a href="hide">{link}</a>').format(link=_("Hide"), monoface=MONOSPACE_FONT)
else:
pf_text = '<a href="show">{link}</a>'.format(link=_("Click to show"))
wwlbl.setText(
_("The below keys are BIP38 <i>encrypted</i> using the passphrase: {passphrase}<br>"
"Please <i>write this passphrase down</i> and store it in a secret place, separate from these encrypted keys."
).format(passphrase=pf_text)
)
def toggle_ww_txt(link):
set_ww_txt(link=="show")
set_ww_txt()
wwlbl.linkActivated.connect(toggle_ww_txt)
vbox.addWidget(wwlbl)
e = QTextEdit()
e.setFont(QFont(MONOSPACE_FONT))
e.setWordWrapMode(QTextOption.NoWrap)
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electron-cash-private-keys.csv' if not bip38 else 'electron-cash-bip38-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
stop = False
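# 'stop' is flipped from the GUI side (dialog closed / keys shown) so the
# worker thread below can bail out early.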
def privkeys_thread():
for addr in addresses:
if not bip38:
# This artificial sleep is likely a security / paranoia measure
# to allow user to cancel or to make the process "feel expensive".
# In the bip38 case it's already slow enough so this delay
# is not needed.
time.sleep(0.100)
if stop:
return
try:
privkey = self.wallet.export_private_key(addr, password)
if bip38 and privkey:
privkey = str(bitcoin.Bip38Key.encrypt(privkey, bip38)) # __str__() -> base58 encoded bip38 key
except InvalidPassword:
# See #921 -- possibly a corrupted wallet or other strangeness
privkey = 'INVALID_PASSWORD'
private_keys[addr.to_ui_string()] = privkey
strong_d = weak_d()
try:
if strong_d and not stop:
strong_d.computing_privkeys_signal.emit()
else:
return
finally:
del strong_d
if stop:
return
strong_d = weak_d()
if strong_d:
strong_d.show_privkeys_signal.emit()
def show_privkeys():
nonlocal stop
if stop:
return
s = "\n".join('{:45} {}'.format(addr, privkey)
for addr, privkey in private_keys.items())
e.setText(s)
b.setEnabled(True)
stop = True
thr = None
def on_dialog_closed(*args):
nonlocal stop
stop = True
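# .disconnect() with no receivers raises TypeError in PyQt; ignore that here.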
try: d.computing_privkeys_signal.disconnect()
except TypeError: pass
try: d.show_privkeys_signal.disconnect()
except TypeError: pass
try: d.finished.disconnect()
except TypeError: pass
if thr and thr.is_alive():
thr.join(timeout=1.0) # wait for thread to end for maximal GC mojo
def computing_privkeys_slot():
if stop:
return
e.setText(_("Please wait... {num}/{total}").format(num=len(private_keys),total=len(addresses)))
d.computing_privkeys_signal.connect(computing_privkeys_slot)
d.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
thr = threading.Thread(target=privkeys_thread, daemon=True)
thr.start()
res = d.exec_()
if not res:
stop = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electron Cash was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+", encoding='utf-8') as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
with open(labelsFile, 'r', encoding='utf-8') as f: # always ensure UTF-8. See issue #1453.
data = f.read()
data = json.loads(data)
if type(data) is not dict or not len(data) or not all(type(v) is str and type(k) is str for k,v in data.items()):
self.show_critical(_("The file you selected does not appear to contain labels."))
return
for key, value in data.items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, OSError, json.decoder.JSONDecodeError) as reason:
self.show_critical(_("Electron Cash was unable to import your labels.") + "\n" + str(reason))
self.address_list.update()
self.history_list.update()
self.utxo_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electron-cash_labels.json', "*.json")
if fileName:
with open(fileName, 'w+', encoding='utf-8') as f: # always ensure UTF-8. See issue #1453.
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self.top_level_window(), _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electron-cash-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
res = d.exec_()
d.setParent(None) # for python GC
if not res:
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("Electron Cash was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.export_history(fx=self.fx)
ccy = (self.fx and self.fx.get_currency()) or ''
has_fiat_columns = history and self.fx and self.fx.show_history() and 'fiat_value' in history[0] and 'fiat_balance' in history[0]
lines = []
for item in history:
if is_csv:
cols = [item['txid'], item.get('label', ''), item['confirmations'], item['value'], item['date']]
if has_fiat_columns:
cols += [item['fiat_value'], item['fiat_balance']]
lines.append(cols)
else:
if has_fiat_columns and ccy:
item['fiat_currency'] = ccy # add the currency to each entry in the json. this wastes space but json is bloated anyway so this won't hurt too much, we hope
elif not has_fiat_columns:
# No need to include these fields as they will always be 'No Data'
item.pop('fiat_value', None)
item.pop('fiat_balance', None)
lines.append(item)
with open(fileName, "w+", encoding="utf-8") as f: # ensure encoding to utf-8. Avoid Windows cp1252. See #1453.
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
cols = ["transaction_hash","label", "confirmations", "value", "timestamp"]
if has_fiat_columns:
cols += [f"fiat_value_{ccy}", f"fiat_balance_{ccy}"] # in CSV mode, we use column names eg fiat_value_USD, etc
transaction.writerow(cols)
for line in lines:
transaction.writerow(line)
else:
f.write(json.dumps(lines, indent=4))
def sweep_key_dialog(self):
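# Pick a destination for the swept funds: prefer an unused receiving address,
# falling back to any address the wallet has.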
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
if not addresses:
self.show_warning(_('Wallet has no address to sweep to'))
return
d = WindowModalDialog(self.top_level_window(), title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
bip38_warn_label = QLabel(_("<b>BIP38 support is disabled because a requisite library is not installed.</b> Please install 'pycryptodomex' or omit BIP38 private keys (private keys starting in 6P...). Decrypt keys to WIF format (starting with 5, K, or L) in order to sweep."))
bip38_warn_label.setWordWrap(True)
bip38_warn_label.setHidden(True)
vbox.addWidget(bip38_warn_label)
extra = ""
if bitcoin.is_bip38_available():
extra += " " + _('or BIP38 keys')
vbox.addWidget(QLabel(_("Enter private keys") + extra + " :"))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
h, addr_combo = address_combo(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
sweep_button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), sweep_button))
def get_address_text():
return addr_combo.currentText()
def get_priv_keys():
return keystore.get_private_keys(keys_e.toPlainText(), allow_bip38=True)
def has_bip38_keys_but_no_bip38():
if bitcoin.is_bip38_available():
return False
keys = [k for k in keys_e.toPlainText().split() if k]
return any(bitcoin.is_bip38_key(k) for k in keys)
def enable_sweep():
bad_bip38 = has_bip38_keys_but_no_bip38()
sweepok = bool(get_address_text() and not bad_bip38 and get_priv_keys())
sweep_button.setEnabled(sweepok)
bip38_warn_label.setHidden(not bad_bip38)
keys_e.textChanged.connect(enable_sweep)
enable_sweep()
res = d.exec_()
d.setParent(None)
if not res:
return
try:
self.do_clear()
keys = get_priv_keys()
bip38s = {}
for i, k in enumerate(keys):
if bitcoin.is_bip38_key(k):
bip38s[k] = i
if bip38s:
# For all the BIP38s detected, prompt for password
from .bip38_importer import Bip38Importer
d2 = Bip38Importer(bip38s.keys(), parent=self.top_level_window())
d2.exec_()
d2.setParent(None)
if d2.decoded_keys:
for k,tup in d2.decoded_keys.items():
wif, adr = tup
# rewrite the keys they specified with the decrypted WIF in the keys list for sweep_preparations to work below...
i = bip38s[k]
keys[i] = wif
else:
self.show_message(_("User cancelled"))
return
coins, keypairs = sweep_preparations(keys, self.network)
self.tx_external_keypairs = keypairs
self.payto_e.setText(get_address_text())
self.spend_coins(coins)
self.spend_max()
except BaseException as e:
self.show_message(str(e))
return
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
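# Overall sweep flow: collect WIF/BIP38 keys from the text box, decrypt any
# BIP38 keys via Bip38Importer, let sweep_preparations() turn the keys into
# spendable coins plus an external keypair map, then pre-fill the Send tab with
# the chosen destination and 'max' amount, freezing the payto/amount fields.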
def _do_import(self, title, msg, func):
text = text_dialog(self.top_level_window(), title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad, bad_info = [], []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
bad_info.append("{}: {}".format(key, str(e)))
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_warning(_("The following could not be imported") + ':\n' + '\n'.join(bad), detail_text='\n\n'.join(bad_info))
self.address_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
def import_addr(addr):
if self.wallet.import_address(Address.from_string(addr)):
return addr
return ''
self._do_import(title, msg, import_addr)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
if bitcoin.is_bip38_available():
msg += " " + _('or BIP38 keys')
def func(key):
if bitcoin.is_bip38_available() and bitcoin.is_bip38_key(key):
from .bip38_importer import Bip38Importer
d = Bip38Importer([key], parent=self.top_level_window(),
message = _('A BIP38 key was specified, please enter a password to decrypt it'),
show_count = False)
d.exec_()
d.setParent(None) # python GC quicker if this happens
if d.decoded_keys:
wif, adr = d.decoded_keys[key]
return self.wallet.import_private_key(wif, password)
else:
raise util.UserCancelled()
else:
return self.wallet.import_private_key(key, password)
self._do_import(title, msg, func)
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def cashaddr_icon(self):
if self.gui_object.is_cashaddr():
return QIcon(":icons/tab_converter.svg")
else:
return QIcon(":icons/tab_converter_bw.svg")
def cashaddr_status_tip(self):
if self.gui_object.is_cashaddr():
return _('Address Format') + ' - ' + _('CashAddr')
else:
return _('Address Format') + ' - ' + _('Legacy')
def update_cashaddr_icon(self):
self.addr_converter_button.setIcon(self.cashaddr_icon())
self.addr_converter_button.setStatusTip(self.cashaddr_status_tip())
def toggle_cashaddr_status_bar(self):
self.gui_object.toggle_cashaddr()
self.statusBar().showMessage(self.cashaddr_status_tip(), 2000)
def toggle_cashaddr_settings(self, state):
self.gui_object.toggle_cashaddr(state == Qt.Checked)
def toggle_cashaddr(self, on):
self.print_error('*** WARNING ElectrumWindow.toggle_cashaddr: This function is deprecated. Please do not call it!')
self.gui_object.toggle_cashaddr(on)
def cashshuffle_plugin_if_loaded(self):
return self.gui_object.plugins.get_internal_plugin("shuffle", force_load = False)
def is_cashshuffle_enabled(self):
plugin = self.cashshuffle_plugin_if_loaded()
return bool(plugin and plugin.is_enabled() and plugin.window_has_cashshuffle(self))
def cashshuffle_icon(self):
if self.is_cashshuffle_enabled():
if self._cash_shuffle_flag == 1:
return QIcon(":icons/cashshuffle_on_error.svg")
else:
return QIcon(":icons/cashshuffle_on.svg")
else:
self._cash_shuffle_flag = 0
return QIcon(":icons/cashshuffle_off.svg")
def update_cashshuffle_icon(self):
self.cashshuffle_status_button.setIcon(self.cashshuffle_icon())
loaded = bool(self.cashshuffle_plugin_if_loaded())
en = self.is_cashshuffle_enabled()
if self._cash_shuffle_flag == 0:
self.cashshuffle_status_button.setStatusTip(_("CashShuffle") + " - " + _("ENABLED") if en else _("CashShuffle") + " - " + _("Disabled"))
rcfcm = _("Right-click for context menu")
self.cashshuffle_status_button.setToolTip(
(_("Toggle CashShuffle") + "\n" + rcfcm)
#(_("Left-click to view pools") + "\n" + rcfcm) if en
#else (_("Toggle CashShuffle") + "\n" + rcfcm)
)
self.cashshuffle_toggle_action.setText(_("Enable CashShuffle") if not en else _("Disable CashShuffle"))
self.cashshuffle_settings_action.setText(_("CashShuffle Settings..."))
self.cashshuffle_viewpools_action.setEnabled(True)
elif self._cash_shuffle_flag == 1: # Network server error
self.cashshuffle_status_button.setStatusTip(_('CashShuffle Error: Could not connect to server'))
self.cashshuffle_status_button.setToolTip(_('Right-click to select a different CashShuffle server'))
self.cashshuffle_settings_action.setText(_("Resolve Server Problem..."))
self.cashshuffle_viewpools_action.setEnabled(False)
self.cashshuffle_settings_action.setVisible(en or loaded)
self.cashshuffle_viewpools_action.setVisible(en)
if en:
# ensure 'Disable CashShuffle' appears at the end of the context menu
self.cashshuffle_status_button.removeAction(self.cashshuffle_separator_action)
self.cashshuffle_status_button.removeAction(self.cashshuffle_toggle_action)
self.cashshuffle_status_button.addAction(self.cashshuffle_separator_action)
self.cashshuffle_status_button.addAction(self.cashshuffle_toggle_action)
else:
# ensure 'Enable CashShuffle' appears at the beginning of the context menu
self.cashshuffle_status_button.removeAction(self.cashshuffle_separator_action)
self.cashshuffle_status_button.removeAction(self.cashshuffle_toggle_action)
actions = self.cashshuffle_status_button.actions()
self.cashshuffle_status_button.insertAction(actions[0] if actions else None, self.cashshuffle_separator_action)
self.cashshuffle_status_button.insertAction(self.cashshuffle_separator_action, self.cashshuffle_toggle_action)
def show_cashshuffle_settings(self):
p = self.cashshuffle_plugin_if_loaded()
if p:
msg = None
if self._cash_shuffle_flag == 1:
# had error
msg = _("There was a problem connecting to this server.\nPlease choose a different CashShuffle server.")
p.settings_dialog(self, msg)
#else: # commented-out. Enable this if you want to use the non-modal network settings as the destination for this action
# # no error -- use the free-floating non-modal network dialog
# if not p.show_cashshuffle_tab_in_network_dialog(self):
# # Huh. Network dialog creation/show failed. Fall back to modal window
# p.settings_dialog(self, msg)
def show_cashshuffle_pools(self):
p = self.cashshuffle_plugin_if_loaded()
if p:
p.view_pools(self)
def cashshuffle_icon_leftclick(self):
self.toggle_cashshuffle()
return
# delete the above 2 lines if we want the left-click to revert to
# Josh's suggestion (leaving the code in here for now)
if self.is_cashshuffle_enabled():
if self._cash_shuffle_flag != 0:
# Jump to settings.
self.cashshuffle_settings_action.trigger()
return
if self.cashshuffle_viewpools_action.isVisible():
# New! We just let this icon be the "View pools..." action when
# the plugin is already loaded and enabled. This hopefully will
# discourage disabling. Also it's been found that "View pools..."
# is the most popular action anyway -- might as well make it
# convenient to access with 1-click. (@zquestz suggested this)
self.cashshuffle_viewpools_action.trigger()
return
#else... in all other cases just toggle cashshuffle
self.toggle_cashshuffle()
def toggle_cashshuffle(self):
if not self.is_wallet_cashshuffle_compatible():
self.show_warning(_("This wallet type cannot be used with CashShuffle."), parent=self)
return
plugins = self.gui_object.plugins
p0 = self.cashshuffle_plugin_if_loaded()
p = p0 or plugins.enable_internal_plugin("shuffle")
if not p:
raise RuntimeError("Could not find CashShuffle plugin")
was_enabled = p.window_has_cashshuffle(self)
if was_enabled and not p.warn_if_shuffle_disable_not_ok(self):
# user at nag screen said "no", so abort
self.update_cashshuffle_icon()
return
enable_flag = not was_enabled
self._cash_shuffle_flag = 0
KillPopupLabel("CashShuffleError")
if not p0:
# plugin was not loaded -- so flag window as wanting cashshuffle and do init
p.window_set_wants_cashshuffle(self, enable_flag)
p.init_qt(self.gui_object)
else:
# plugin was already started -- just add the window to the plugin
p.window_set_cashshuffle(self, enable_flag)
self.update_cashshuffle_icon()
self.statusBar().showMessage(self.cashshuffle_status_button.statusTip(), 3000)
if enable_flag and self.config.get("show_utxo_tab") is None:
self.toggle_tab(self.utxo_tab) # toggle utxo tab to 'on' if user never specified it should be off.
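# Toggling CashShuffle either loads the internal "shuffle" plugin for the first
# time (window_set_wants_cashshuffle + init_qt) or, if the plugin is already
# running, simply adds/removes this window from it (window_set_cashshuffle);
# the status-bar icon and the UTXO tab visibility are then updated to match.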
def settings_dialog(self):
class SettingsModalDialog(WindowModalDialog):
shown_signal = pyqtSignal()
def showEvent(self, e):
super().showEvent(e)
self.shown_signal.emit()
self.need_restart = False
dialog_finished = False
d = SettingsModalDialog(self.top_level_window(), _('Preferences'))
d.setObjectName('WindowModalDialog - Preferences')
destroyed_print_error(d)
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
global_tx_widgets, per_wallet_tx_widgets = [], []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electroncash.i18n import languages, get_system_language_match, match_language
language_names = []
language_keys = []
for (lang_code, lang_def) in languages.items():
language_keys.append(lang_code)
lang_name = []
lang_name.append(lang_def.name)
if lang_code == '':
# System entry in languages list (==''), gets system setting
sys_lang = get_system_language_match()
if sys_lang:
lang_name.append(f' [{languages[sys_lang].name}]')
language_names.append(''.join(lang_name))
lang_combo.addItems(language_names)
conf_lang = self.config.get("language", '')
if conf_lang:
# The below code allows us to rename languages in saved config and
# have them still line up with languages in our languages dict.
# For example we used to save English as en_UK but now it's en_US
# and it will still match
conf_lang = match_language(conf_lang)
try: index = language_keys.index(conf_lang)
except ValueError: index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]:
w.setEnabled(False)
def on_lang(x):
lang_request = language_keys[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.update_tabs()
self.update_status()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
def on_customfee(x):
amt = customfee_e.get_amount()
m = int(amt * 1000.0) if amt is not None else None
self.config.set_key('customfee', m)
self.fee_slider.update()
self.fee_slider_mogrifier()
customfee_e = BTCSatsByteEdit()
customfee_e.setAmount(self.config.custom_fee_rate() / 1000.0 if self.config.has_custom_fee_rate() else None)
customfee_e.textChanged.connect(on_customfee)
customfee_label = HelpLabel(_('Custom Fee Rate'), _('Custom Fee Rate in Satoshis per byte'))
fee_widgets.append((customfee_label, customfee_e))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_e.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link/']) + '\n\n'\
+ _('For more information, see http://openalias.org')
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
# this ensures that even if an exception occurs or we exit the function early,
# the signal is disconnected
disconnect_alias_received_signal = Weak.finalize(d, self.alias_received_signal.disconnect, set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = util.base_unit_labels # ( 'BCH', 'mBCH', 'bits' )
msg = _('Base unit of your wallet.')\
+ '\n1 BCH = 1,000 mBCH = 1,000,000 bits.\n' \
+ _('This setting affects the fields in the Send tab.')
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
dp = util.base_units.get(unit_result)
if dp is not None:
self.decimal_point = dp
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_tabs()
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
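# Changing the base unit updates self.decimal_point (the precision comes from
# util.base_units), re-renders the amount/fee edits with their previous values
# at the new precision, and caps the "zeros after decimal point" spinbox accordingly.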
block_explorers = web.BE_sorted_list()
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(web.BE_from_config(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
qr_combo = QComboBox()
qr_label = HelpLabel(_('Video Device'), '')
qr_did_scan = False
def set_no_camera(e=''):
# Older Qt or missing libs -- disable GUI control and inform user why
qr_combo.setEnabled(False)
qr_combo.clear()
qr_combo.addItem(_("Default"), "default")
qr_combo.setToolTip(_("Unable to probe for cameras on this system. QtMultimedia is likely missing."))
qr_label.setText(_('Video Device') + ' ' + _('(disabled)') + ':')
qr_label.help_text = qr_combo.toolTip() + "\n\n" + str(e)
qr_label.setToolTip(qr_combo.toolTip())
def scan_cameras():
nonlocal qr_did_scan
if qr_did_scan or dialog_finished: # dialog_finished guard needed because QueuedConnection
# already scanned or dialog finished quickly
return
qr_did_scan = True
system_cameras = []
try:
from PyQt5.QtMultimedia import QCameraInfo
except ImportError as e:
set_no_camera(e)
return
system_cameras = QCameraInfo.availableCameras()
qr_combo.clear()
qr_combo.addItem(_("Default"), "default")
qr_label.setText(_('Video Device') + ':')
qr_label.help_text = _("For scanning Qr codes.")
qr_combo.setToolTip(qr_label.help_text)
qr_label.setToolTip(qr_label.help_text)
for cam in system_cameras:
qr_combo.addItem(cam.description(), cam.deviceName())
video_device = self.config.get("video_device")
video_device_index = 0
if video_device:
video_device_index = max(0, qr_combo.findData(video_device)) # if not found, default to 0 (the default item)
qr_combo.setCurrentIndex(video_device_index)
qr_combo.setEnabled(True)
def on_video_device(x):
if qr_combo.isEnabled():
self.config.set_key("video_device", qr_combo.itemData(x), True)
set_no_camera() # pre-populate combo box with default so it has a sizeHint
d.shown_signal.connect(scan_cameras, Qt.QueuedConnection) # do the camera scan once dialog is shown, using QueuedConnection so it's called from top level event loop and not from the showEvent handler
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
theme_name = self.config.get('qt_gui_color_theme', 'default')
dark_theme_available = self.gui_object.is_dark_theme_available()
if theme_name == 'dark' and not dark_theme_available:
theme_name = 'default'
index = colortheme_combo.findData(theme_name)
if index < 0: index = 0
colortheme_combo.setCurrentIndex(index)
msg = ( _("Dark theme support requires the package 'QDarkStyle' (typically installed via the 'pip3' command on Unix & macOS).")
if not dark_theme_available
else '' )
lbltxt = _('Color theme') + ':'
colortheme_label = HelpLabel(lbltxt, msg) if msg else QLabel(lbltxt)
def on_colortheme(x):
item_data = colortheme_combo.itemData(x)
if not dark_theme_available and item_data == 'dark':
self.show_error(_("Dark theme is not available. Please install QDarkStyle to access this feature."))
colortheme_combo.setCurrentIndex(0)
return
self.config.set_key('qt_gui_color_theme', item_data, True)
if theme_name != item_data:
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
if sys.platform not in ('darwin',):
# Enable/Disable HighDPI -- this option makes no sense for macOS
# and thus does not appear on that platform
hidpi_chk = QCheckBox(_('Automatic high DPI scaling'))
if sys.platform in ('linux',):
hidpi_chk.setToolTip(_("Enable/disable this option if you experience graphical glitches (such as overly large status bar icons)"))
else: # windows
hidpi_chk.setToolTip(_("Enable/disable this option if you experience graphical glitches (such as dialog box text being cut off"))
hidpi_chk.setChecked(bool(self.config.get('qt_enable_highdpi', True)))
if self.config.get('qt_disable_highdpi'):
hidpi_chk.setToolTip(_('Automatic high DPI scaling was disabled from the command-line'))
hidpi_chk.setChecked(False)
hidpi_chk.setDisabled(True)
def on_hi_dpi_toggle():
self.config.set_key('qt_enable_highdpi', hidpi_chk.isChecked())
self.need_restart = True
hidpi_chk.stateChanged.connect(on_hi_dpi_toggle)
gui_widgets.append((hidpi_chk, None))
if sys.platform in ('win32', 'cygwin'):
# Enable/Disable the use of the FreeType library on Qt
# (Windows only)
freetype_chk = QCheckBox(_('Use FreeType for font rendering'))
freetype_chk.setChecked(self.gui_object.windows_qt_use_freetype)
freetype_chk.setEnabled(self.config.is_modifiable('windows_qt_use_freetype'))
freetype_chk.setToolTip(_("Enable/disable this option if you experience font rendering glitches (such as blurred text or monochrome emoji characters)"))
def on_freetype_chk():
self.gui_object.windows_qt_use_freetype = freetype_chk.isChecked() # property has a method backing it
self.need_restart = True
freetype_chk.stateChanged.connect(on_freetype_chk)
gui_widgets.append((freetype_chk, None))
elif sys.platform in ('linux',):
# Enable/Disable the use of the fonts.xml FontConfig override
# (Linux only)
fontconfig_chk = QCheckBox(_('Use custom fontconfig for emojis'))
fontconfig_chk.setChecked(self.gui_object.linux_qt_use_custom_fontconfig)
fontconfig_chk.setEnabled(self.config.is_modifiable('linux_qt_use_custom_fontconfig'))
fontconfig_chk.setToolTip(_("Enable/disable this option if you experience font rendering glitches (such as blurred text or monochrome emoji characters)"))
def on_fontconfig_chk():
self.gui_object.linux_qt_use_custom_fontconfig = fontconfig_chk.isChecked() # property has a method backing it
self.need_restart = True
fontconfig_chk.stateChanged.connect(on_fontconfig_chk)
gui_widgets.append((fontconfig_chk, None))
# CashAddr control
gui_widgets.append((None, None)) # spacer
address_w = QGroupBox(_('Address Format'))
address_w.setToolTip(_('Select between Cash Address and Legacy formats for addresses'))
hbox = QHBoxLayout(address_w)
cashaddr_cbox = QComboBox()
cashaddr_cbox.addItem(QIcon(':icons/tab_converter.svg'), _("CashAddr"), Address.FMT_CASHADDR)
cashaddr_cbox.addItem(QIcon(':icons/tab_converter_bw.svg'), _("Legacy"), Address.FMT_LEGACY)
cashaddr_cbox.setCurrentIndex(0 if self.gui_object.is_cashaddr() else 1)
def cashaddr_cbox_handler(ignored_param):
fmt = int(cashaddr_cbox.currentData())
self.gui_object.toggle_cashaddr(fmt == Address.FMT_CASHADDR)
cashaddr_cbox.currentIndexChanged.connect(cashaddr_cbox_handler)
hbox.addWidget(cashaddr_cbox)
toggle_cashaddr_control = QCheckBox(_('Hide status button'))
toggle_cashaddr_control.setToolTip(_('If checked, the status bar button for toggling address formats will be hidden'))
toggle_cashaddr_control.setChecked(self.gui_object.is_cashaddr_status_button_hidden())
toggle_cashaddr_control.toggled.connect(self.gui_object.set_cashaddr_status_button_hidden)
hbox.addWidget(toggle_cashaddr_control)
gui_widgets.append((address_w, None))
gui_widgets.append((None, None)) # spacer
updatecheck_cb = QCheckBox(_("Automatically check for updates"))
updatecheck_cb.setChecked(self.gui_object.has_auto_update_check())
updatecheck_cb.setToolTip(_("Enable this option if you wish to be notified as soon as a new version of Electron Cash becomes available"))
def on_set_updatecheck(v):
self.gui_object.set_auto_update_check(v == Qt.Checked)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
notify_tx_cb = QCheckBox(_('Notify when receiving funds'))
notify_tx_cb.setToolTip(_('If enabled, a system notification will be presented when you receive funds to this wallet.'))
notify_tx_cb.setChecked(bool(self.wallet.storage.get('gui_notify_tx', True)))
def on_notify_tx(b):
self.wallet.storage.put('gui_notify_tx', bool(b))
notify_tx_cb.stateChanged.connect(on_notify_tx)
per_wallet_tx_widgets.append((notify_tx_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
if self.force_use_single_change_addr:
usechange_cb.setChecked(True)
usechange_cb.setEnabled(False)
if isinstance(self.force_use_single_change_addr, str):
usechange_cb.setToolTip(self.force_use_single_change_addr)
else:
usechange_cb.setChecked(self.wallet.use_change)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
per_wallet_tx_widgets.append((usechange_cb, None))
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
if self.force_use_single_change_addr:
multiple_cb.setEnabled(False)
multiple_cb.setChecked(False)
if isinstance(self.force_use_single_change_addr, str):
multiple_cb.setToolTip(self.force_use_single_change_addr)
else:
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_cb.stateChanged.connect(on_multiple)
per_wallet_tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
global_tx_widgets.append((unconf_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
enable_opreturn = bool(self.config.get('enable_opreturn'))
opret_cb = QCheckBox(_('Enable OP_RETURN output'))
opret_cb.setToolTip(_('Enable posting messages with OP_RETURN.'))
opret_cb.setChecked(enable_opreturn)
opret_cb.stateChanged.connect(self.on_toggled_opreturn)
global_tx_widgets.append((opret_cb,None))
# Schnorr
use_schnorr_cb = QCheckBox(_("Enable Schnorr signatures"))
use_schnorr_cb.setChecked(self.wallet.is_schnorr_enabled())
use_schnorr_cb.stateChanged.connect(self.wallet.set_schnorr_enabled)
no_schnorr_reason = []
if self.wallet.is_schnorr_possible(no_schnorr_reason):
use_schnorr_cb.setEnabled(True)
use_schnorr_cb.setToolTip(_("Sign all transactions using Schnorr signatures."))
else:
# not possible (wallet type not supported); show reason in tooltip
use_schnorr_cb.setEnabled(False)
use_schnorr_cb.setToolTip(no_schnorr_reason[0])
per_wallet_tx_widgets.append((use_schnorr_cb, None))
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([pgettext('Referencing Fiat currency', 'None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
c = self.fx.get_currency()
h = self.fx.get_history_config()
else:
c, h = self.fx.default_currency, False
exchanges = self.fx.get_exchanges_by_ccy(c, h)
conf_exchange = self.fx.config_exchange()
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
idx = ex_combo.findText(conf_exchange) # try and restore previous exchange if in new list
if idx < 0:
# previous exchange isn't available under the new history setting -- try the default exchange
idx = ex_combo.findText(self.fx.default_exchange)
idx = 0 if idx < 0 else idx # if still no success (idx < 0) -> default to the first exchange in combo
if exchanges: # don't set index if no exchanges, as any index is illegal. this shouldn't happen.
ex_combo.setCurrentIndex(idx) # note this will emit a currentIndexChanged signal if it's changed
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
changed = bool(self.fx.get_history_config()) != bool(checked)
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
if changed:
self.history_list.update() # this won't happen too often as it's rate-limited
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(gui_widgets, _('General')),
(fee_widgets, _('Fees')),
(OrderedDict([
( _("App-Global Options") , global_tx_widgets ),
( _("Per-Wallet Options") , per_wallet_tx_widgets),
]), _('Transactions')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
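# Each tabs_info entry is either a plain list of (label, widget) pairs, rendered
# as a single grid, or an OrderedDict mapping a group-box title to such a list,
# in which case the tab is laid out as stacked group boxes (the 'Transactions'
# tab above uses the latter form).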
def add_tabs_info_to_tabs(tabs, tabs_info):
def add_widget_pair(a,b,grid):
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
if a:
grid.addWidget(a, i, 0, 1, 2)
else:
grid.addItem(QSpacerItem(15, 15), i, 0, 1, 2)
for thing, name in tabs_info:
tab = QWidget()
if isinstance(thing, dict):
# This Prefs tab is laid out as groupboxes one atop another...
d = thing
vbox = QVBoxLayout(tab)
for groupName, widgets in d.items():
gbox = QGroupBox(groupName)
grid = QGridLayout(gbox)
grid.setColumnStretch(0,1)
for a,b in widgets:
add_widget_pair(a,b,grid)
vbox.addWidget(gbox, len(widgets))
else:
# Standard layout.. 1 tab has just a grid of widgets
widgets = thing
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
add_widget_pair(a,b,grid)
tabs.addTab(tab, name)
# / add_tabs_info_to_tabs
add_tabs_info_to_tabs(tabs, tabs_info)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
try:
# run the dialog
d.exec_()
finally:
dialog_finished = True # paranoia for scan_cameras
d.setParent(None) # for Python GC
if self.fx:
self.fx.timeout = 0
disconnect_alias_received_signal() # aka self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_message(_('Please restart Electron Cash to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice.
# clean_up() guards against that situation.
self.clean_up()
super().closeEvent(event)
event.accept() # paranoia. be sure it's always accepted.
def is_alive(self): return bool(not self.cleaned_up)
def clean_up_connections(self):
def disconnect_signals():
del self.cashaddr_toggled_signal # delete alias so it doesn't interfere with the loop below
for attr_name in dir(self):
if attr_name.endswith("_signal"):
sig = getattr(self, attr_name)
if isinstance(sig, pyqtBoundSignal):
try: sig.disconnect()
except TypeError: pass # no connections
elif attr_name.endswith("__RateLimiter"): # <--- NB: this needs to match the attribute name in util.py rate_limited decorator
rl_obj = getattr(self, attr_name)
if isinstance(rl_obj, RateLimiter):
rl_obj.kill_timer()
# The below shouldn't even be needed, since Qt should take care of this,
# but Axel Gembe got a crash related to this on Python 3.7.3, PyQt 5.12.3
# so here we are. See #1531
try: self.gui_object.cashaddr_toggled_signal.disconnect(self.update_cashaddr_icon)
except TypeError: pass
try: self.gui_object.cashaddr_toggled_signal.disconnect(self.update_receive_address_widget)
except TypeError: pass
try: self.gui_object.cashaddr_status_button_hidden_signal.disconnect(self.addr_converter_button.setHidden)
except TypeError: pass
try: self.gui_object.update_available_signal.disconnect(self.on_update_available)
except TypeError: pass
try: self.disconnect()
except TypeError: pass
def disconnect_network_callbacks():
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
# /
disconnect_network_callbacks()
disconnect_signals()
def clean_up_children(self):
# Status bar holds references to self, so clear it to help GC this window
self.setStatusBar(None)
# Note that due to quirks on macOS and the shared menu bar, we do *NOT*
# clear the menuBar. Instead, doing this causes the object to get
# deleted and/or its actions (and more importantly menu action hotkeys)
# to go away immediately.
self.setMenuBar(None)
# Disable shortcuts immediately to prevent them from accidentally firing
# on us after we are closed. They will get deleted when this QObject
# is finally deleted by Qt.
for shortcut in self._shortcuts:
shortcut.setEnabled(False)
del shortcut
self._shortcuts.clear()
# Reparent children to 'None' so python GC can clean them up sooner rather than later.
# This also hopefully helps accelerate this window's GC.
children = [c for c in self.children()
if (isinstance(c, (QWidget, QAction, TaskThread))
and not isinstance(c, (QStatusBar, QMenuBar, QFocusFrame, QShortcut)))]
for c in children:
try: c.disconnect()
except TypeError: pass
c.setParent(None)
def clean_up(self):
if self.cleaned_up:
return
self.cleaned_up = True
if self.wallet.thread: # guard against window close before load_wallet was called (#1554)
self.wallet.thread.stop()
self.wallet.thread.wait() # Join the thread to make sure it's really dead.
for w in [self.address_list, self.history_list, self.utxo_list, self.cash_account_e, self.contact_list,
self.tx_update_mgr]:
if w: w.clean_up() # tell relevant object to clean itself up, unregister callbacks, disconnect signals, etc
# We catch these errors with the understanding that there is no recovery at
# this point, given user has likely performed an action we cannot recover
# cleanly from. So we attempt to exit as cleanly as possible.
try:
self.config.set_key("is_maximized", self.isMaximized())
self.config.set_key("console-history", self.console.history[-50:], True)
except (OSError, PermissionError) as e:
self.print_error("unable to write to config (directory removed?)", e)
if not self.isMaximized():
try:
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),g.width(),g.height()])
except (OSError, PermissionError) as e:
self.print_error("unable to write to wallet storage (directory removed?)", e)
# Should be no side-effects in this function relating to file access past this point.
if self.qr_window:
self.qr_window.close()
self.qr_window = None # force GC sooner rather than later.
for d in list(self._tx_dialogs):
# clean up all extant tx dialogs we opened as they hold references
# to us that will be invalidated
d.prompt_if_unsaved = False # make sure to unconditionally close
d.close()
self._close_wallet()
try: self.gui_object.timer.timeout.disconnect(self.timer_actions)
except TypeError: pass # defensive programming: this can happen if we got an exception before the timer action was connected
self.gui_object.close_window(self) # implicitly runs the hook: on_close_window
# Now, actually STOP the wallet's synchronizer and verifiers and remove
# it from the daemon. Note that its addresses will still stay
# 'subscribed' to the ElectrumX server until we connect to a new server,
# (due to ElectrumX protocol limitations).. but this is harmless.
self.gui_object.daemon.stop_wallet(self.wallet.storage.path)
# At this point all plugins should have removed any references to this window.
# Now, just to be paranoid, do some active destruction of signal/slot connections as well as
# Removing child widgets forcefully to speed up Python's own GC of this window.
self.clean_up_connections()
self.clean_up_children()
# And finally, print when we are destroyed by C++ for debug purposes
# We must call this here as above calls disconnected all signals
# involving this widget.
destroyed_print_error(self)
def internal_plugins_dialog(self):
if self.internalpluginsdialog:
# NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
self.internalpluginsdialog.raise_()
return
d = WindowModalDialog(parent=self.top_level_window(), title=_('Optional Features'))
weakD = Weak.ref(d)
gui_object = self.gui_object
plugins = gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.get_internal_plugin_count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
weakGrid = Weak.ref(grid)
w.setLayout(grid)
settings_widgets = Weak.ValueDictionary()
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
grid = weakGrid()
d = weakD()
if d and grid and not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
if not p:
# Need to delete settings widget because keeping it around causes bugs as it points to a now-dead plugin instance
settings_widgets.pop(name)
widget.hide(); widget.setParent(None); widget.deleteLater(); widget = None
def do_toggle(weakCb, name, i):
cb = weakCb()
if cb:
p = plugins.toggle_internal_plugin(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# All plugins get this whenever one is toggled.
run_hook('init_qt', gui_object)
for i, descr in enumerate(plugins.internal_plugin_metadata.values()):
name = descr['__name__']
p = plugins.get_internal_plugin(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
weakCb = Weak.ref(cb)
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_internal_plugin_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, weakCb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stderr)
grid.setRowStretch(len(plugins.internal_plugin_metadata.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
self.internalpluginsdialog = d
d.exec_()
self.internalpluginsdialog = None # Python GC please!
def external_plugins_dialog(self):
if self.externalpluginsdialog:
# NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
self.externalpluginsdialog.raise_()
return
from . import external_plugins_window
d = external_plugins_window.ExternalPluginsDialog(self, _('Plugin Manager'))
self.externalpluginsdialog = d
d.exec_()
self.externalpluginsdialog = None # allow python to GC
def hardware_wallet_support(self):
if not sys.platform.startswith('linux'):
self.print_error("FIXME! hardware_wallet_support is Linux only!")
return
if self.hardwarewalletdialog:
# NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
self.hardwarewalletdialog.raise_()
return
from .udev_installer import InstallHardwareWalletSupportDialog
d = InstallHardwareWalletSupportDialog(self.top_level_window(), self.gui_object.plugins)
self.hardwarewalletdialog = d
d.exec_()
self.hardwarewalletdialog = None # allow python to GC
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self.top_level_window(), _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel(_('{total_size} bytes').format(total_size=total_size)), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
result = d.exec_()
d.setParent(None) # So Python can GC
if not result:
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
if new_tx is None:
self.show_error(_('CPFP no longer valid'))
return
self.show_transaction(new_tx)
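# Worked example (illustrative figures only): with fee_per_kb() == 1000 sats/kB
# and a combined parent+child size of 500 bytes, the suggested fee above is
# 1000 * 500 / 1000 = 500 sats; on_rate() clamps any slider-chosen fee to
# max_fee (the child transaction's output value), and a final check rejects a
# manually entered fee above that limit.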
def is_wallet_cashshuffle_compatible(self):
from electroncash.wallet import ImportedWalletBase, Multisig_Wallet
if (self.wallet.is_watching_only()
or self.wallet.is_hardware()
or isinstance(self.wallet, (Multisig_Wallet, ImportedWalletBase))):
# wallet is watching-only, multisig, or hardware so.. not compatible
return False
return True
_cs_reminder_pixmap = None
def do_cash_shuffle_reminder(self):
if not self.remind_cashshuffle_enabled:
# NB: This is now disabled. We return early from this function.
# Amaury recommended we do this prompting/reminder in a future
# release after the initial public release, or we roll it out
# for a subset of users (hence this flag).
return
if self.cleaned_up or not self.wallet or not self.is_wallet_cashshuffle_compatible():
return
from electroncash_plugins.shuffle.conf_keys import ConfKeys
p = self.cashshuffle_plugin_if_loaded()
storage = self.wallet.storage
cashshuffle_flag = storage.get(ConfKeys.PerWallet.ENABLED, False)
enabled = cashshuffle_flag and p and p.is_enabled()
nagger_answer = storage.get(ConfKeys.PerWallet.MAIN_WINDOW_NAGGER_ANSWER, None)
if not enabled:
if nagger_answer is None: # nagger_answer is None if they've never said "Never ask"
if __class__._cs_reminder_pixmap is None:
# lazy init. Cache it to class level.
size = QSize(150, int(150/1.4419)) # Important to preserve aspect ratio in .svg file here
# NB: doing it this way, with a QIcon, takes devicePixelRatio into account and may end up producing a very high-quality image from the SVG, larger than size
__class__._cs_reminder_pixmap = QIcon(":icons/CashShuffleLogos/logo-vertical.svg").pixmap(size)
icon = __class__._cs_reminder_pixmap
message = '''
<big><b>{}</b></big>
<p>{}</p>
'''.format(_("CashShuffle is disabled for this wallet.") if not cashshuffle_flag else _("CashShuffle is disabled."),
_("Would you like to enable CashShuffle for this wallet?"))
info = ' '.join([_("If you enable it, Electron Cash will shuffle your coins for greater <b>privacy</b>. However, you will pay fractions of a penny per shuffle in transaction fees."),
_("(You can always toggle it later using the CashShuffle button.)")])
res, chkd = self.msg_box(icon=icon,
parent=self.top_level_window(),
title=_('Would you like to turn on CashShuffle?'),
text=message, rich_text=True, informative_text=info,
checkbox_text=_("Never ask for this wallet"),
buttons=(_('Enable CashShuffle'), _("Not now")),
defaultButton=_('Enable CashShuffle'), escapeButton=_("Not now"))
if chkd:
# they don't want to be asked again, so just remember what they answered and apply this answer each time.
storage.put(ConfKeys.PerWallet.MAIN_WINDOW_NAGGER_ANSWER, bool(res==0))
else:
# They've specified "Never ask", so apply whatever button they pushed when they said that as the auto-setting.
res = 0 if nagger_answer else 1 # if nagger_answer was True, no prompt, just auto-enable; otherwise leave it disabled.
if res == 0:
self.toggle_cashshuffle()
def restart_cashshuffle(self, msg = None, parent = None):
if (parent or self).question("{}{}".format(msg + "\n\n" if msg else "", _("Restart the CashShuffle plugin now?")),
app_modal=True):
p = self.cashshuffle_plugin_if_loaded()
if p:
p.restart_all()
self.notify(_("CashShuffle restarted"))
else:
self.notify(_("CashShuffle could not be restarted"))
_cash_shuffle_flag = 0
def cashshuffle_set_flag(self, flag):
flag = int(flag)
changed = flag != self._cash_shuffle_flag
if not changed:
return
if flag:
def onClick():
KillPopupLabel("CashShuffleError")
self.show_cashshuffle_settings()
ShowPopupLabel(name = "CashShuffleError",
text="<center><b>{}</b><br><small>{}</small></center>".format(_("Server Error"),_("Right-click to resolve")),
target=self.cashshuffle_status_button,
timeout=20000, onClick=onClick, onRightClick=onClick,
dark_mode = ColorScheme.dark_scheme)
else:
KillPopupLabel("CashShuffleError")
self.print_error("Cash Shuffle flag is now {}".format(flag))
oldTip = self.cashshuffle_status_button.statusTip()
self._cash_shuffle_flag = flag
self.update_status()
newTip = self.cashshuffle_status_button.statusTip()
if newTip != oldTip:
self.statusBar().showMessage(newTip, 7500)
def cashshuffle_get_flag(self):
return self._cash_shuffle_flag
def rebuild_history(self):
if self.gui_object.warn_if_no_network(self):
# Don't allow if offline mode.
return
msg = ' '.join([
_('This feature is intended to allow you to rebuild a wallet if it has become corrupted.'),
"\n\n"+_('Your entire transaction history will be downloaded again from the server and verified from the blockchain.'),
_('Just to be safe, back up your wallet file first!'),
"\n\n"+_("Rebuild this wallet's history now?")
])
if self.question(msg, title=_("Rebuild Wallet History")):
try:
self.wallet.rebuild_history()
except RuntimeError as e:
self.show_error(str(e))
def scan_beyond_gap(self):
if self.gui_object.warn_if_no_network(self):
return
from .scan_beyond_gap import ScanBeyondGap
d = ScanBeyondGap(self)
d.exec_()
d.setParent(None) # help along Python by dropping refct to 0
def copy_to_clipboard(self, text, tooltip=None, widget=None):
tooltip = tooltip or _("Text copied to clipboard")
widget = widget or self
qApp.clipboard().setText(text)
QToolTip.showText(QCursor.pos(), tooltip, widget)
def _pick_address(self, *, title=None, icon=None) -> Address:
''' Returns None on user cancel, or a valid is_mine Address object
from the Address list. '''
from .address_list import AddressList
# Show user address picker
d = WindowModalDialog(self.top_level_window(), title or _('Choose an address'))
d.setObjectName("Window Modal Dialog - " + d.windowTitle())
destroyed_print_error(d) # track object lifecycle
d.setMinimumWidth(self.width()-150)
vbox = QVBoxLayout(d)
if icon:
hbox = QHBoxLayout()
hbox.setContentsMargins(0,0,0,0)
ic_lbl = QLabel()
ic_lbl.setPixmap(icon.pixmap(50))
hbox.addWidget(ic_lbl)
hbox.addItem(QSpacerItem(10, 1))
t_lbl = QLabel("<font size=+1><b>" + (title or '') + "</b></font>")
hbox.addWidget(t_lbl, 0, Qt.AlignLeft)
hbox.addStretch(1)
vbox.addLayout(hbox)
vbox.addWidget(QLabel(_('Choose an address') + ':'))
l = AddressList(self, picker=True)
try:
l.setObjectName("AddressList - " + d.windowTitle())
destroyed_print_error(l) # track object lifecycle
l.update()
vbox.addWidget(l)
ok = OkButton(d)
ok.setDisabled(True)
addr = None
def on_item_changed(current, previous):
nonlocal addr
addr = current and current.data(0, l.DataRoles.address)
ok.setEnabled(addr is not None)
def on_selection_changed():
items = l.selectedItems()
if items: on_item_changed(items[0], None)
else: on_item_changed(None, None)
l.currentItemChanged.connect(on_item_changed)
cancel = CancelButton(d)
vbox.addLayout(Buttons(cancel, ok))
res = d.exec_()
if res == QDialog.Accepted:
return addr
return None
finally:
l.clean_up() # required to unregister network callback
def register_new_cash_account(self, addr = None):
''' Initiates the "Register a new cash account" dialog.
If addr is none, will use self.receive_address.
Alternatively, you may pass the string 'pick' in lieu of an address
if you want this function to present the user with a UI for choosing
an address to register.'''
if addr == 'pick':
addr = self._pick_address(title=_("Register A New Cash Account"), icon=QIcon(":icons/cashacct-logo.png"))
if addr is None:
return # user cancel
addr = addr or self.receive_address or self.wallet.get_receiving_address()
if not addr:
self.print_error("register_new_cash_account: no receive address specified")
return
def on_link(link):
if link == 'ca':
webopen('https://www.cashaccount.info/')
elif link == 'addr':
if self.wallet.is_mine(addr):
self.show_address(addr)
else:
url = web.BE_URL(self.config, 'addr', addr)
if url: webopen(url)
name, placeholder = '', 'Satoshi_Nakamoto'
while True:
lh = self.wallet.get_local_height()
le = ButtonsLineEdit()
help_msg = '<span style="font-weight:400;">' + \
_('<p>How it works: <b>Cash Accounts</b> registrations work by issuing an <b>OP_RETURN</b> transaction to yourself, costing fractions of a penny.</p>'
'<p>The registrations are permanently written to the blockchain and associate a human-friendly name with your address.</p>'
'<p>After the registration transaction receives <i>1 confirmation</i>, you can use your new <b>Cash Account name</b> as if it were an address and give it out to other people (Electron Cash or another Cash Account enabled wallet is required).</p>'
'<p><span style="font-weight:100;">You will be offered the opportunity to review the generated transaction before broadcasting it to the blockchain.</span></p>') + \
'</span>'
qmark = ":icons/question-mark-dark.svg" if ColorScheme.dark_scheme else ":icons/question-mark-light.svg"
help_but = HelpButton(help_msg, button_text='', fixed_size=False, icon=QIcon(qmark), custom_parent=self)
le.addWidget(help_but)
name = line_dialog(self.top_level_window(),
_("Register A New Cash Account"),
(_("You are registering a new <a href='ca'>Cash Account</a> for your address <a href='addr'><b><pre>{address}</pre></b></a>").format(address=addr.to_ui_string())
+ _("The current block height is <b><i>{block_height}</i></b>, so the new cash account will likely look like: <b><u><i>AccountName<i>#{number}</u></b>.")
.format(block_height=lh or '???', number=max(cashacct.bh2num(lh or 0)+1, 0) or '???')
+ "<br><br><br>" + _("Specify the <b>account name</b> below (limited to 99 characters):") ),
_("Proceed to Send Tab"), default=name, linkActivated=on_link,
placeholder=placeholder, disallow_empty=True,
line_edit_widget = le,
icon=QIcon(":icons/cashacct-logo.png"))
if name is None:
# user cancel
return
name = name.strip()
if not cashacct.name_accept_re.match(name):
self.show_error(_("The specified name cannot be used for a Cash Accounts registration. You must specify 1-99 alphanumeric (ASCII) characters, without spaces (underscores are permitted as well)."))
continue
self._reg_new_cash_account(name, addr)
return
def _reg_new_cash_account(self, name, addr):
self.show_send_tab()
self.do_clear()
# Enabled OP_RETURN stuff even if disabled in prefs. Next do_clear call will reset to prefs presets.
self.message_opreturn_e.setVisible(True)
self.opreturn_rawhex_cb.setVisible(True)
self.opreturn_label.setVisible(True)
# Prevent user from modifying required fields, and hide what we
# can as well.
self.message_opreturn_e.setText(cashacct.ScriptOutput.create_registration(name, addr).script[1:].hex())
self.message_opreturn_e.setFrozen(True)
self.opreturn_rawhex_cb.setChecked(True)
self.opreturn_rawhex_cb.setDisabled(True)
self.amount_e.setAmount(0)
self.amount_e.setFrozen(True)
self.max_button.setDisabled(True)
self.payto_e.setHidden(True)
self.payto_label.setHidden(True)
# Set a default description -- this we allow them to edit
self.message_e.setText(
_("Cash Accounts Registration: '{name}' -> {address}").format(
name=name, address=addr.to_ui_string()
)
)
# set up "Helpful Window" informing user registration will
# not be accepted until at least 1 confirmation.
cashaccounts_never_show_send_tab_hint = self.config.get('cashaccounts_never_show_send_tab_hint', False)
if not cashaccounts_never_show_send_tab_hint:
msg1 = (
_("The Send Tab has been filled-in with your <b>Cash Accounts</b> registration data.")
+ "<br><br>" + _("Please review the transaction, save it, and/or broadcast it at your leisure.")
)
msg2 = ( _("After at least <i>1 confirmation</i>, you will be able to use your new <b>Cash Account</b>, and it will be visible in Electron Cash in the <b>Addresses</b> tab.")
)
msg3 = _("If you wish to control which specific coins are used to "
"fund this registration transaction, feel free to use the "
"Coins and/or Addresses tabs' Spend-from facility.\n\n"
"('Spend from' is a right-click menu option in either tab.)")
res = self.msg_box(
# TODO: get SVG icon..
parent = self, icon=QIcon(":icons/cashacct-logo.png").pixmap(75, 75),
title=_('Register A New Cash Account'), rich_text=True,
text = msg1, informative_text = msg2, detail_text = msg3,
checkbox_text=_("Never show this again"), checkbox_ischecked=False
)
if res[1]:
# never ask checked
self.config.set_key('cashaccounts_never_show_send_tab_hint', True)
class TxUpdateMgr(QObject, PrintError):
''' Manages new transaction notifications and transaction verified
notifications from the network thread. It collates them and sends them to
the appropriate GUI controls in the main_window in an efficient manner. '''
def __init__(self, main_window_parent):
assert isinstance(main_window_parent, ElectrumWindow), "TxUpdateMgr must be constructed with an ElectrumWindow as its parent"
super().__init__(main_window_parent)
self.cleaned_up = False
self.lock = threading.Lock() # used to lock thread-shared attrs below
# begin thread-shared attributes
self.notif_q = []
self.verif_q = []
self.need_process_v, self.need_process_n = False, False
# /end thread-shared attributes
self.weakParent = Weak.ref(main_window_parent)
main_window_parent.history_updated_signal.connect(self.verifs_get_and_clear, Qt.DirectConnection) # immediately clear verif_q on history update because it would be redundant to keep the verify queue around after a history list update
main_window_parent.on_timer_signal.connect(self.do_check, Qt.DirectConnection) # hook into main_window's timer_actions function
def diagnostic_name(self):
return ((self.weakParent() and self.weakParent().diagnostic_name()) or "???") + "." + __class__.__name__
def clean_up(self):
self.cleaned_up = True
main_window_parent = self.weakParent() # weak -> strong ref
if main_window_parent:
try: main_window_parent.history_updated_signal.disconnect(self.verifs_get_and_clear)
except TypeError: pass
try: main_window_parent.on_timer_signal.disconnect(self.do_check)
except TypeError: pass
def do_check(self):
''' Called from timer_actions in main_window to check if notifs or
verifs need to update the GUI.
- Checks the need_process_[v|n] flags
- If either flag is set, call the @rate_limited process_verifs
and/or process_notifs functions which update GUI parent in a
rate-limited (collated) fashion (for decent GUI responsiveness). '''
with self.lock:
bV, bN = self.need_process_v, self.need_process_n
self.need_process_v, self.need_process_n = False, False
if bV: self.process_verifs() # rate_limited call (1 per second)
        if bN: self.process_notifs() # rate_limited call (1 per 5 seconds)
def verifs_get_and_clear(self):
''' Clears the verif_q. This is called from the network
thread for the 'verified2' event as well as from the below
update_verifs (GUI thread), hence the lock. '''
with self.lock:
ret = self.verif_q
self.verif_q = []
self.need_process_v = False
return ret
def notifs_get_and_clear(self):
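        ''' Atomically drains and returns the pending new-transaction queue
        (notif_q), clearing the need_process_n flag; mirrors
        verifs_get_and_clear above. '''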
with self.lock:
ret = self.notif_q
self.notif_q = []
self.need_process_n = False
return ret
def verif_add(self, args):
# args: [wallet, tx_hash, height, conf, timestamp]
# filter out tx's not for this wallet
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
if args[0] is parent.wallet:
with self.lock:
self.verif_q.append(args[1:])
self.need_process_v = True
def notif_add(self, args):
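        # args: (tx, wallet) -- expected to arrive from the network thread's
        # new-transaction notification (see class docstring); counterpart to
        # verif_add above.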
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
tx, wallet = args
# filter out tx's not for this wallet
if wallet is parent.wallet:
with self.lock:
self.notif_q.append(tx)
self.need_process_n = True
@rate_limited(1.0, ts_after=True)
def process_verifs(self):
''' Update history list with tx's from verifs_q, but limit the
GUI update rate to once per second. '''
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
items = self.verifs_get_and_clear()
if items:
t0 = time.time()
parent.history_list.setUpdatesEnabled(False)
had_sorting = parent.history_list.isSortingEnabled()
if had_sorting:
parent.history_list.setSortingEnabled(False)
n_updates = 0
for item in items:
did_update = parent.history_list.update_item(*item)
n_updates += 1 if did_update else 0
self.print_error("Updated {}/{} verified txs in GUI in {:0.2f} ms"
.format(n_updates, len(items), (time.time()-t0)*1e3))
if had_sorting:
parent.history_list.setSortingEnabled(True)
parent.history_list.setUpdatesEnabled(True)
parent.update_status()
@rate_limited(5.0, classlevel=True)
def process_notifs(self):
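        ''' Collates queued incoming transactions into a single GUI
        notification (total received amount plus any Cash Accounts
        registrations detected for this wallet). Rate-limited to one run
        per 5 seconds, shared class-wide via classlevel=True. '''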
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
if parent.network:
txns = self.notifs_get_and_clear()
if txns:
# Combine the transactions
n_ok, n_cashacct, total_amount = 0, 0, 0
last_seen_ca_name = ''
ca_txs = dict() # 'txid' -> ('name', address) -- will be given to contacts_list for "unconfirmed registrations" display
for tx in txns:
if tx:
is_relevant, is_mine, v, fee = parent.wallet.get_wallet_delta(tx)
for _typ, addr, val in tx.outputs():
# Find Cash Account registrations that are for addresses *in* this wallet
if isinstance(addr, cashacct.ScriptOutput) and parent.wallet.is_mine(addr.address):
n_cashacct += 1
last_seen_ca_name = addr.name
txid = tx.txid_fast()
if txid: ca_txs[txid] = (addr.name, addr.address)
if not is_relevant:
continue
total_amount += v
n_ok += 1
if n_cashacct:
# Unhide the Addresses tab if cash account reg tx seen
# and user never explicitly hid it.
if parent.config.get("show_addresses_tab") is None:
# We unhide it because presumably they want to SEE
# their cash accounts now that they have them --
# and part of the UI is *IN* the Addresses tab.
parent.toggle_tab(parent.addresses_tab)
                    # Do the same for the Contacts tab
if parent.config.get("show_contacts_tab") is None:
# We unhide it because presumably they want to SEE
# their cash accounts now that they have them --
                        # and part of the UI is *IN* the Contacts tab.
parent.toggle_tab(parent.contacts_tab)
if ca_txs:
# Notify contact_list of potentially unconfirmed txs
parent.contact_list.ca_update_potentially_unconfirmed_registrations(ca_txs)
if parent.wallet.storage.get('gui_notify_tx', True):
ca_text = ''
if n_cashacct > 1:
# plural
ca_text = " + " + _("{number_of_cashaccounts} Cash Accounts registrations").format(number_of_cashaccounts = n_cashacct)
elif n_cashacct == 1:
# singular
ca_text = " + " + _("1 Cash Accounts registration ({cash_accounts_name})").format(cash_accounts_name = last_seen_ca_name)
if total_amount > 0:
self.print_error("Notifying GUI %d tx"%(max(n_ok, n_cashacct)))
if max(n_ok, n_cashacct) > 1:
parent.notify(_("{} new transactions: {}")
.format(n_ok, parent.format_amount_and_units(total_amount, is_diff=True)) + ca_text)
else:
parent.notify(_("New transaction: {}").format(parent.format_amount_and_units(total_amount, is_diff=True)) + ca_text)
elif n_cashacct:
# No total amount (was just a cashacct reg tx)
ca_text = ca_text[3:] # pop off the " + "
if n_cashacct > 1:
parent.notify(_("{} new transactions: {}")
.format(n_cashacct, ca_text))
else:
parent.notify(_("New transaction: {}").format(ca_text))